path: root/private/ntos/mm
author    Adam <you@example.com>  2020-05-17 05:51:50 +0200
committer Adam <you@example.com>  2020-05-17 05:51:50 +0200
commit    e611b132f9b8abe35b362e5870b74bce94a1e58e (patch)
tree      a5781d2ec0e085eeca33cf350cf878f2efea6fe5 /private/ntos/mm
Diffstat (limited to 'private/ntos/mm')
-rw-r--r--  private/ntos/mm/acceschk.c  392
-rw-r--r--  private/ntos/mm/addrsup.c  1432
-rw-r--r--  private/ntos/mm/allocpag.c  1613
-rw-r--r--  private/ntos/mm/allocvm.c  1615
-rw-r--r--  private/ntos/mm/alpha/datalpha.c  116
-rw-r--r--  private/ntos/mm/alpha/debugsup.c  193
-rw-r--r--  private/ntos/mm/alpha/hypermap.c  382
-rw-r--r--  private/ntos/mm/alpha/inialpha.c  1183
-rw-r--r--  private/ntos/mm/alpha/mialpha.h  2048
-rw-r--r--  private/ntos/mm/alpha/physsect.c  562
-rw-r--r--  private/ntos/mm/alpha/setdirty.c  126
-rw-r--r--  private/ntos/mm/alpha/sources  6
-rw-r--r--  private/ntos/mm/checkpfn.c  538
-rw-r--r--  private/ntos/mm/checkpte.c  235
-rw-r--r--  private/ntos/mm/creasect.c  3638
-rw-r--r--  private/ntos/mm/deleteva.c  985
-rw-r--r--  private/ntos/mm/dirs  24
-rw-r--r--  private/ntos/mm/dmpaddr.c  879
-rw-r--r--  private/ntos/mm/extsect.c  667
-rw-r--r--  private/ntos/mm/flushbuf.c  288
-rw-r--r--  private/ntos/mm/flushsec.c  1883
-rw-r--r--  private/ntos/mm/forksup.c  1853
-rw-r--r--  private/ntos/mm/freevm.c  1363
-rw-r--r--  private/ntos/mm/i386/data386.c  147
-rw-r--r--  private/ntos/mm/i386/debugsup.c  163
-rw-r--r--  private/ntos/mm/i386/hypermap.c  370
-rw-r--r--  private/ntos/mm/i386/init386.c  1326
-rw-r--r--  private/ntos/mm/i386/mi386.h  2105
-rw-r--r--  private/ntos/mm/i386/probewrt.c  138
-rw-r--r--  private/ntos/mm/i386/setmodfy.c  242
-rw-r--r--  private/ntos/mm/i386/sources  5
-rw-r--r--  private/ntos/mm/iosup.c  4027
-rw-r--r--  private/ntos/mm/lockvm.c  810
-rw-r--r--  private/ntos/mm/mapcache.c  1677
-rw-r--r--  private/ntos/mm/mapview.c  3388
-rw-r--r--  private/ntos/mm/mi.h  3301
-rw-r--r--  private/ntos/mm/miglobal.c  795
-rw-r--r--  private/ntos/mm/mips/datamips.c  191
-rw-r--r--  private/ntos/mm/mips/debugsup.c  207
-rw-r--r--  private/ntos/mm/mips/hypermap.c  325
-rw-r--r--  private/ntos/mm/mips/initmips.c  1047
-rw-r--r--  private/ntos/mm/mips/mir3000.h  1021
-rw-r--r--  private/ntos/mm/mips/mir4000.h  2075
-rw-r--r--  private/ntos/mm/mips/setdirty.c  125
-rw-r--r--  private/ntos/mm/mips/sources  5
-rw-r--r--  private/ntos/mm/mmfault.c  938
-rw-r--r--  private/ntos/mm/mminit.c  1980
-rw-r--r--  private/ntos/mm/mmquota.c  1050
-rw-r--r--  private/ntos/mm/mmsup.c  1160
-rw-r--r--  private/ntos/mm/modwrite.c  4025
-rw-r--r--  private/ntos/mm/mp/makefile  6
-rw-r--r--  private/ntos/mm/mp/sources  29
-rw-r--r--  private/ntos/mm/pagfault.c  3406
-rw-r--r--  private/ntos/mm/pfndec.c  613
-rw-r--r--  private/ntos/mm/pfnlist.c  1707
-rw-r--r--  private/ntos/mm/ppc/datappc.c  136
-rw-r--r--  private/ntos/mm/ppc/debugsup.c  199
-rw-r--r--  private/ntos/mm/ppc/hypermap.c  344
-rw-r--r--  private/ntos/mm/ppc/initppc.c  874
-rw-r--r--  private/ntos/mm/ppc/mippc.h  2034
-rw-r--r--  private/ntos/mm/ppc/setdirty.c  124
-rw-r--r--  private/ntos/mm/ppc/sources  5
-rw-r--r--  private/ntos/mm/procsup.c  3327
-rw-r--r--  private/ntos/mm/protect.c  1861
-rw-r--r--  private/ntos/mm/querysec.c  242
-rw-r--r--  private/ntos/mm/queryvm.c  920
-rw-r--r--  private/ntos/mm/readwrt.c  971
-rw-r--r--  private/ntos/mm/sectsup.c  2796
-rw-r--r--  private/ntos/mm/shutdown.c  366
-rw-r--r--  private/ntos/mm/sources.inc  87
-rw-r--r--  private/ntos/mm/super.c  651
-rw-r--r--  private/ntos/mm/sysload.c  2533
-rw-r--r--  private/ntos/mm/sysptes.c  1376
-rw-r--r--  private/ntos/mm/umapview.c  660
-rw-r--r--  private/ntos/mm/up/makefile  6
-rw-r--r--  private/ntos/mm/up/sources  27
-rw-r--r--  private/ntos/mm/vadtree.c  466
-rw-r--r--  private/ntos/mm/wrtfault.c  400
-rw-r--r--  private/ntos/mm/wslist.c  2973
-rw-r--r--  private/ntos/mm/wsmanage.c  1190
-rw-r--r--  private/ntos/mm/wstree.c  1418
-rw-r--r--  private/ntos/mm/zeropage.c  158
82 files changed, 86569 insertions, 0 deletions
diff --git a/private/ntos/mm/acceschk.c b/private/ntos/mm/acceschk.c
new file mode 100644
index 000000000..c93c86979
--- /dev/null
+++ b/private/ntos/mm/acceschk.c
@@ -0,0 +1,392 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ acceschk.c
+
+Abstract:
+
+ This module contains the access check routines for memory management.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+//
+// MmReadWrite yields 1 if no-access, 10 if read-only, 11 if read-write.
+// It is indexed by a page protection. The WriteOperation value (1 for a
+// write, 0 for a read) is subtracted from the value in this array; if
+// the result is less than 10, an access violation is issued:
+// (read_only - write_operation) = 9 faults, while
+// (read_only - read_operation) = 10 is allowed, etc.
+//
+
+CCHAR MmReadWrite[32] = {1, 10, 10, 10, 11, 11, 11, 11,
+ 1, 10, 10, 10, 11, 11, 11, 11,
+ 1, 10, 10, 10, 11, 11, 11, 11,
+ 1, 10, 10, 10, 11, 11, 11, 11 };
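+
+//
+// A minimal sketch of how this table is consumed (this mirrors the test
+// in MiAccessCheck below; the protection value used here is illustrative):
+//
+//     if ((MmReadWrite[Protection] - (CCHAR)WriteOperation) < 10) {
+//         //
+//         // e.g. read-only (10) minus a write operation (1) is 9.
+//         //
+//         return STATUS_ACCESS_VIOLATION;
+//     }
+//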
+
+//
+// this is the csrss process !
+//
+
+extern PEPROCESS ExpDefaultErrorPortProcess;
+
+extern ULONG MmExtendedCommit;
+
+
+
+NTSTATUS
+MiAccessCheck (
+ IN PMMPTE PointerPte,
+ IN BOOLEAN WriteOperation,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN ULONG Protection
+
+ )
+
+/*++
+
+Routine Description:
+
+    This routine checks whether the page referenced by the specified
+    PTE may be accessed in the given mode for the specified operation,
+    returning a guard page violation (and clearing the guard bit) if
+    the page is a guard page.
+
+Arguments:
+
+ PointerPte - Supplies the pointer to the PTE which caused the
+ page fault.
+
+ WriteOperation - Supplies 1 if the operation is a write, 0 if
+ the operation is a read.
+
+ PreviousMode - Supplies the previous mode, one of UserMode or KernelMode.
+
+ Protection - Supplies the protection mask to check.
+
+Return Value:
+
+    Returns STATUS_SUCCESS if access to the page is allowed,
+    STATUS_ACCESS_VIOLATION if it is not, or STATUS_GUARD_PAGE_VIOLATION
+    if the page was a guard page (whose guard bit is cleared here).
+
+Environment:
+
+ Kernel mode, APC's disabled.
+
+--*/
+
+{
+ MMPTE PteContents;
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+
+ //
+ // Check to see if the owner bit allows access to the previous mode.
+ // Access is not allowed if the owner is kernel and the previous
+ // mode is user. Access is also disallowed if the write operation
+ // is true and the write field in the PTE is false.
+ //
+
+ //
+ // If both an access violation and a guard page violation could
+ // occur for the page, the access violation must be returned.
+ //
+
+ if (PreviousMode == UserMode) {
+ if (PointerPte > MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+ }
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // Valid pages cannot be guard page violations.
+ //
+
+ if (WriteOperation) {
+ if ((PteContents.u.Hard.Write == 1) ||
+ (PteContents.u.Hard.CopyOnWrite == 1)) {
+ return STATUS_SUCCESS;
+ } else {
+ return STATUS_ACCESS_VIOLATION;
+ }
+ } else {
+ return STATUS_SUCCESS;
+ }
+
+ } else {
+
+ if ((MmReadWrite[Protection] - (CCHAR)WriteOperation) < 10) {
+ return STATUS_ACCESS_VIOLATION;
+ } else {
+
+ //
+ // Check for a guard page fault.
+ //
+
+ if (Protection & MM_GUARD_PAGE) {
+
+ //
+ // If this thread is attached to a different process,
+ // return an access violation rather than a guard
+                // page exception. This prevents problems with unwanted
+                // stack expansion and unexpected guard page behavior
+ // from debuggers.
+
+ if (KeIsAttachedProcess()) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ //
+                // Check to see if this is a transition PTE; if so,
+                // the original contents field in the PFN database
+                // needs to be updated.
+ //
+
+ if ((PteContents.u.Soft.Transition == 1) &&
+ (PteContents.u.Soft.Prototype == 0)) {
+
+ //
+ // Acquire the PFN mutex and check to see if the
+ // PTE is still in the transition state, and, if so
+ // update the original PTE in the pfn database.
+ //
+
+ LOCK_PFN (OldIrql);
+ PteContents = *(volatile MMPTE *)PointerPte;
+ if ((PteContents.u.Soft.Transition == 1) &&
+ (PteContents.u.Soft.Prototype == 0)) {
+
+ //
+ // Still in transition, update the PFN database.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (
+ PteContents.u.Trans.PageFrameNumber);
+
+ ASSERT (Pfn1->u3.e1.PrototypePte == 0);
+ Pfn1->OriginalPte.u.Soft.Protection =
+ Protection & ~MM_GUARD_PAGE;
+ }
+ UNLOCK_PFN (OldIrql);
+ }
+
+ PointerPte->u.Soft.Protection = Protection & ~MM_GUARD_PAGE;
+
+ return STATUS_GUARD_PAGE_VIOLATION;
+ }
+ return STATUS_SUCCESS;
+ }
+ }
+}
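+
+//
+// A minimal sketch of a caller, assuming a fault-dispatch context that
+// already has the PTE and protection in hand (the function and variable
+// names here are illustrative, not the actual MmAccessFault code):
+//
+//     status = MiAccessCheck (PointerPte,
+//                             StoreInstruction,
+//                             PreviousMode,
+//                             (ULONG)PointerPte->u.Soft.Protection);
+//
+//     if (status == STATUS_GUARD_PAGE_VIOLATION) {
+//         return MiCheckForUserStackOverflow (FaultingAddress);
+//     }
+//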
+
+NTSTATUS
+FASTCALL
+MiCheckForUserStackOverflow (
+ IN PVOID FaultingAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to see if the faulting address is within
+ the stack limits and if so tries to create another guard
+    page on the stack. A stack overflow is returned if the
+ creation of a new guard page fails or if the stack is in
+ the following form:
+
+
+ stack +----------------+
+ growth | | StackBase
+ | +----------------+
+ v | |
+ | allocated |
+ | |
+ | ... |
+ | |
+ +----------------+
+ | old guard page | <- faulting address is in this page.
+ +----------------+
+ | |
+ +----------------+
+ | | last page of stack (always no access)
+ +----------------+
+
+ In this case, the page before the last page is committed, but
+ not as a guard page and a STACK_OVERFLOW condition is returned.
+
+Arguments:
+
+ FaultingAddress - Supplies the virtual address of the page which
+ was a guard page.
+
+Return Value:
+
+    Returns STATUS_PAGE_FAULT_GUARD_PAGE if a new guard page was
+    committed, STATUS_STACK_OVERFLOW if the stack cannot grow further,
+    or STATUS_GUARD_PAGE_VIOLATION if the faulting address is not a
+    stack address (or the TEB could not be referenced).
+
+Environment:
+
+ Kernel mode. No mutexes held.
+
+--*/
+
+{
+ PTEB Teb;
+ ULONG NextPage;
+ ULONG RegionSize;
+ NTSTATUS status;
+ KIRQL OldIrql;
+
+ PVOID DeallocationStack;
+ PVOID *StackLimit;
+
+#if defined(WX86)
+ PWX86TIB Wx86Tib;
+#endif
+
+
+
+
+ //
+ // Create an exception handler as the Teb is within the user's
+ // address space.
+ //
+
+ try {
+
+#if defined(i386) || defined(ALPHA)
+ Teb = NtCurrentTeb();
+#else
+ Teb = PCR->Teb;
+#endif
+
+ DeallocationStack = Teb->DeallocationStack;
+ StackLimit = &Teb->NtTib.StackLimit;
+
+ //
+        // Check that the faulting address lies within the current stack.
+ //
+
+ if ((Teb->NtTib.StackBase < FaultingAddress) ||
+ (DeallocationStack > FaultingAddress)) {
+
+#if defined(WX86)
+ //
+ // Also check the Wx86 i386 stack on risc
+ //
+ if (!(Wx86Tib = Teb->Vdm) ||
+ Wx86Tib->Size != sizeof(WX86TIB) ||
+ Wx86Tib->StackBase < FaultingAddress ||
+ Wx86Tib->DeallocationStack > FaultingAddress)
+
+#endif
+ {
+ //
+ // Not within the stack.
+ //
+
+ return STATUS_GUARD_PAGE_VIOLATION;
+ }
+
+#if defined(WX86)
+ DeallocationStack = Wx86Tib->DeallocationStack;
+ StackLimit = &Wx86Tib->StackLimit;
+#endif
+
+ }
+
+
+ //
+ // This address is within the current stack, check to see
+ // if there is ample room for another guard page and
+ // if so attempt to commit a new guard page.
+ //
+
+ NextPage = ((ULONG)PAGE_ALIGN(FaultingAddress) - PAGE_SIZE);
+
+ RegionSize = PAGE_SIZE;
+
+ if ((NextPage - PAGE_SIZE) <= (ULONG)PAGE_ALIGN(DeallocationStack)) {
+
+ //
+ // There is no more room for expansion, attempt to
+ // commit the page before the last page of the
+ // stack.
+ //
+
+ NextPage = (ULONG)PAGE_ALIGN(DeallocationStack) + PAGE_SIZE;
+
+ status = ZwAllocateVirtualMemory (NtCurrentProcess(),
+ (PVOID *)&NextPage,
+ 0,
+ &RegionSize,
+ MEM_COMMIT,
+ PAGE_READWRITE);
+ if ( NT_SUCCESS(status) ) {
+
+ *StackLimit = (PVOID)( (PUCHAR)NextPage);
+
+ }
+
+ return STATUS_STACK_OVERFLOW;
+ }
+ *StackLimit = (PVOID)((PUCHAR)(NextPage + PAGE_SIZE));
+retry:
+ status = ZwAllocateVirtualMemory (NtCurrentProcess(),
+ (PVOID *)&NextPage,
+ 0,
+ &RegionSize,
+ MEM_COMMIT,
+ PAGE_READWRITE | PAGE_GUARD);
+
+
+ if (NT_SUCCESS(status) || (status == STATUS_ALREADY_COMMITTED)) {
+
+ //
+ // The guard page is now committed or stack space is
+ // already present, return success.
+ //
+
+ return STATUS_PAGE_FAULT_GUARD_PAGE;
+ }
+
+ if (PsGetCurrentProcess() == ExpDefaultErrorPortProcess) {
+
+ //
+ // Don't let CSRSS process get any stack overflows due to
+ // commitment. Increase the commitment by a page and
+ // try again.
+ //
+
+ ASSERT (status == STATUS_COMMITMENT_LIMIT);
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+ MmTotalCommitLimit += 1;
+ MmExtendedCommit += 1;
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+ goto retry;
+ }
+
+ return STATUS_STACK_OVERFLOW;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // An exception has occurred during the referencing of the
+ // TEB or TIB, just return a guard page violation and
+ // don't deal with the stack overflow.
+ //
+
+ return STATUS_GUARD_PAGE_VIOLATION;
+ }
+}
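+
+//
+// A minimal user-mode sketch (assuming <windows.h>) of the behavior the
+// routine above implements: each time the guard page is touched, a new
+// one is committed below it and NtTib.StackLimit moves lower. Purely
+// illustrative; not part of this module.
+//
+//     #include <windows.h>
+//     #include <stdio.h>
+//
+//     void GrowStack (int Depth)
+//     {
+//         volatile char Probe[8192];        // touch fresh stack pages
+//         Probe[0] = 0;
+//         printf ("StackLimit is now %p\n",
+//                 ((PNT_TIB)NtCurrentTeb())->StackLimit);
+//         if (Depth > 0) {
+//             GrowStack (Depth - 1);
+//         }
+//     }
+//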
diff --git a/private/ntos/mm/addrsup.c b/private/ntos/mm/addrsup.c
new file mode 100644
index 000000000..8d3b34772
--- /dev/null
+++ b/private/ntos/mm/addrsup.c
@@ -0,0 +1,1432 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ addrsup.c
+
+Abstract:
+
+ This module contains the routine to manipulate the virtual address
+ descriptor tree.
+
+Author:
+
+ Lou Perazzoli (loup) 19-May-1989
+
+ Ripped off and modified from timersup.c
+ The support for siblings was removed and a routine to locate
+ the corresponding virtual address descriptor for a given address
+ was added.
+
+Environment:
+
+ Kernel mode only, working set mutex held, APC's disabled.
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if (_MSC_VER >= 800)
+#pragma warning(disable:4010) /* Allow pretty pictures without the noise */
+#endif
+
+VOID
+MiReorderTree (
+ IN PMMADDRESS_NODE Node,
+ IN OUT PMMADDRESS_NODE *Root
+ );
+
+
+VOID
+MiReorderTree (
+ IN PMMADDRESS_NODE Node,
+ IN OUT PMMADDRESS_NODE *Root
+ )
+
+/*++
+
+Routine Description:
+
+ This function reorders the Node tree by applying various splay functions
+ to the tree. This is a local function that is called by the insert Node
+ routine.
+
+Arguments:
+
+    Node - Supplies a pointer to the virtual address descriptor to
+           splay toward the root.
+
+    Root - Supplies a pointer to the root pointer of the tree.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMADDRESS_NODE GrandParent;
+ PMMADDRESS_NODE Parent;
+ PMMADDRESS_NODE SplayNode;
+
+ //
+ // Reorder Node tree to make it as balanced as possible with as little
+ // work as possible.
+ //
+
+ SplayNode = Node;
+
+ while (SplayNode != *Root) {
+
+ Parent = SplayNode->Parent;
+ if (Parent == *Root) {
+
+ //
+ // Splay node's parent is the root of the tree. Rotate the tree
+ // left or right depending on whether the splay node is the left
+            // or right child of its parent.
+ //
+ // Pictorially:
+ //
+ // Right Left
+ //
+ // P X P X
+ // / \ / \ / \ / \
+ // X C -> A P C X -> P A
+ // / \ / \ / \ / \
+ // A B B C B A C B
+ //
+
+ *Root = SplayNode;
+ SplayNode->Parent = (PMMADDRESS_NODE)NULL;
+ Parent->Parent = SplayNode;
+ if (SplayNode == Parent->LeftChild) {
+
+ //
+ // Splay node is the left child of its parent. Rotate tree
+ // right.
+ //
+
+ Parent->LeftChild = SplayNode->RightChild;
+ if (SplayNode->RightChild) {
+ SplayNode->RightChild->Parent = Parent;
+ }
+ SplayNode->RightChild = Parent;
+ } else {
+
+ //
+ // Splay node is the right child of its parent. Rotate tree
+ // left.
+ //
+
+ Parent->RightChild = SplayNode->LeftChild;
+ if (SplayNode->LeftChild) {
+ SplayNode->LeftChild->Parent = Parent;
+ }
+ SplayNode->LeftChild = Parent;
+ }
+ break;
+ } else {
+ GrandParent = Parent->Parent;
+ if ((SplayNode == Parent->LeftChild) &&
+ (Parent == GrandParent->LeftChild)) {
+
+ //
+ // Both the splay node and the parent node are left children
+ // of their parents. Rotate tree right and make the parent
+ // the root of the new subtree.
+ //
+ // Pictorially:
+ //
+ // G P
+ // / \ / \
+ // P D X G
+ // / \ -> / \ / \
+ // X C A B C D
+ // / \
+ // A B
+ //
+
+ if (GrandParent == *Root) {
+ *Root = Parent;
+ Parent->Parent = (PMMADDRESS_NODE)NULL;
+ } else {
+ Parent->Parent = GrandParent->Parent;
+ if (GrandParent == GrandParent->Parent->LeftChild) {
+ GrandParent->Parent->LeftChild = Parent;
+ } else {
+ GrandParent->Parent->RightChild = Parent;
+ }
+ }
+ GrandParent->LeftChild = Parent->RightChild;
+ if (Parent->RightChild) {
+ Parent->RightChild->Parent = GrandParent;
+ }
+ GrandParent->Parent = Parent;
+ Parent->RightChild = GrandParent;
+ SplayNode = Parent;
+ } else if ((SplayNode == Parent->RightChild) &&
+ (Parent == GrandParent->RightChild)) {
+
+ //
+ // Both the splay node and the parent node are right children
+ // of their parents. Rotate tree left and make the parent
+ // the root of the new subtree.
+ //
+ // Pictorially:
+ //
+ // G P
+ // / \ / \
+ // D P G X
+ // / \ -> / \ / \
+ // C X D C B A
+ // / \
+ // A B
+ //
+
+ if (GrandParent == *Root) {
+ *Root = Parent;
+ Parent->Parent = (PMMADDRESS_NODE)NULL;
+ } else {
+ Parent->Parent = GrandParent->Parent;
+ if (GrandParent == GrandParent->Parent->LeftChild) {
+ GrandParent->Parent->LeftChild = Parent;
+ } else {
+ GrandParent->Parent->RightChild = Parent;
+ }
+ }
+ GrandParent->RightChild = Parent->LeftChild;
+ if (Parent->LeftChild) {
+ Parent->LeftChild->Parent = GrandParent;
+ }
+ GrandParent->Parent = Parent;
+ Parent->LeftChild = GrandParent;
+ SplayNode = Parent;
+ } else if ((SplayNode == Parent->LeftChild) &&
+ (Parent == GrandParent->RightChild)) {
+
+ //
+ // Splay node is the left child of its parent and parent is
+ // the right child of its parent. Rotate tree left and make
+ // splay node the root of the new subtree.
+ //
+ // Pictorially:
+ //
+ // G X
+ // / \ / \
+ // A P G P
+ // / \ -> / \ / \
+ // X D A B C D
+ // / \
+ // B C
+ //
+
+ if (GrandParent == *Root) {
+ *Root = SplayNode;
+ SplayNode->Parent = (PMMADDRESS_NODE)NULL;
+ } else {
+ SplayNode->Parent = GrandParent->Parent;
+ if (GrandParent == GrandParent->Parent->LeftChild) {
+ GrandParent->Parent->LeftChild = SplayNode;
+ } else {
+ GrandParent->Parent->RightChild = SplayNode;
+ }
+ }
+ Parent->LeftChild = SplayNode->RightChild;
+ if (SplayNode->RightChild) {
+ SplayNode->RightChild->Parent = Parent;
+ }
+ GrandParent->RightChild = SplayNode->LeftChild;
+ if (SplayNode->LeftChild) {
+ SplayNode->LeftChild->Parent = GrandParent;
+ }
+ Parent->Parent = SplayNode;
+ GrandParent->Parent = SplayNode;
+ SplayNode->LeftChild = GrandParent;
+ SplayNode->RightChild = Parent;
+ } else {
+
+ //
+ // Splay node is the right child of its parent and parent is
+ // the left child of its parent. Rotate tree right and make
+ // splay node the root of the new subtree.
+ //
+ // Pictorially:
+ //
+ // G X
+ // / \ / \
+ // P A P G
+ // / \ -> / \ / \
+ // D X D C B A
+ // / \
+ // C B
+ //
+
+ if (GrandParent == *Root) {
+ *Root = SplayNode;
+ SplayNode->Parent = (PMMADDRESS_NODE)NULL;
+ } else {
+ SplayNode->Parent = GrandParent->Parent;
+ if (GrandParent == GrandParent->Parent->LeftChild) {
+ GrandParent->Parent->LeftChild = SplayNode;
+ } else {
+ GrandParent->Parent->RightChild = SplayNode;
+ }
+ }
+ Parent->RightChild = SplayNode->LeftChild;
+ if (SplayNode->LeftChild) {
+ SplayNode->LeftChild->Parent = Parent;
+ }
+ GrandParent->LeftChild = SplayNode->RightChild;
+ if (SplayNode->RightChild) {
+ SplayNode->RightChild->Parent = GrandParent;
+ }
+ Parent->Parent = SplayNode;
+ GrandParent->Parent = SplayNode;
+ SplayNode->LeftChild = Parent;
+ SplayNode->RightChild = GrandParent;
+ }
+ }
+ }
+ return;
+}
+
+PMMADDRESS_NODE
+FASTCALL
+MiGetNextNode (
+ IN PMMADDRESS_NODE Node
+ )
+
+/*++
+
+Routine Description:
+
+ This function locates the virtual address descriptor which contains
+ the address range which logically follows the specified address range.
+
+Arguments:
+
+ Node - Supplies a pointer to a virtual address descriptor.
+
+Return Value:
+
+ Returns a pointer to the virtual address descriptor containing the
+ next address range, NULL if none.
+
+--*/
+
+{
+ PMMADDRESS_NODE Next;
+ PMMADDRESS_NODE Parent;
+ PMMADDRESS_NODE Left;
+
+ Next = Node;
+
+ if (Next->RightChild == (PMMADDRESS_NODE)NULL) {
+
+ while ((Parent = Next->Parent) != (PMMADDRESS_NODE)NULL) {
+
+ //
+            // Locate the first ancestor of which this node is the
+            // left child, and return that node as the next element.
+ //
+
+ if (Parent->LeftChild == Next) {
+ return Parent;
+ }
+
+ Next = Parent;
+
+ }
+
+ return (PMMADDRESS_NODE)NULL;
+ }
+
+ //
+ // A right child exists, locate the left most child of that right child.
+ //
+
+ Next = Next->RightChild;
+
+ while ((Left = Next->LeftChild) != (PMMADDRESS_NODE)NULL) {
+ Next = Left;
+ }
+ return Next;
+
+}
+
+PMMADDRESS_NODE
+FASTCALL
+MiGetPreviousNode (
+ IN PMMADDRESS_NODE Node
+ )
+
+/*++
+
+Routine Description:
+
+ This function locates the virtual address descriptor which contains
+ the address range which logically precedes the specified virtual
+ address descriptor.
+
+Arguments:
+
+ Node - Supplies a pointer to a virtual address descriptor.
+
+Return Value:
+
+ Returns a pointer to the virtual address descriptor containing the
+    previous address range, NULL if none.
+
+--*/
+
+{
+ PMMADDRESS_NODE Previous;
+
+ Previous = Node;
+
+ if (Previous->LeftChild == (PMMADDRESS_NODE)NULL) {
+
+
+ while (Previous->Parent != (PMMADDRESS_NODE)NULL) {
+
+ //
+            // Locate the first ancestor of which this node is the
+            // right child, and return that node as the previous element.
+ //
+
+ if (Previous->Parent->RightChild == Previous) {
+ return Previous->Parent;
+ }
+
+ Previous = Previous->Parent;
+
+ }
+ return (PMMADDRESS_NODE)NULL;
+ }
+
+ //
+ // A left child exists, locate the right most child of that left child.
+ //
+
+ Previous = Previous->LeftChild;
+ while (Previous->RightChild != (PMMADDRESS_NODE)NULL) {
+ Previous = Previous->RightChild;
+ }
+ return Previous;
+}
+
+PMMADDRESS_NODE
+FASTCALL
+MiGetFirstNode (
+ IN PMMADDRESS_NODE Root
+ )
+
+/*++
+
+Routine Description:
+
+ This function locates the virtual address descriptor which contains
+ the address range which logically is first within the address space.
+
+Arguments:
+
+    Root - Supplies a pointer to the root of the tree to search.
+
+Return Value:
+
+ Returns a pointer to the virtual address descriptor containing the
+ first address range, NULL if none.
+
+--*/
+
+{
+ PMMADDRESS_NODE First;
+
+ First = Root;
+
+ if (First == (PMMADDRESS_NODE)NULL) {
+ return (PMMADDRESS_NODE)NULL;
+ }
+
+ while (First->LeftChild != (PMMADDRESS_NODE)NULL) {
+ First = First->LeftChild;
+ }
+
+ return First;
+}
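+
+//
+// A minimal sketch of walking every descriptor in address order with the
+// routines above; this is what the debug-only NodeTreeWalk at the end of
+// this module does recursively:
+//
+//     for (Node = MiGetFirstNode (Root);
+//          Node != (PMMADDRESS_NODE)NULL;
+//          Node = MiGetNextNode (Node)) {
+//         DbgPrint ("node %lx start %lx end %lx\n",
+//                   (ULONG)Node,
+//                   (ULONG)Node->StartingVa,
+//                   (ULONG)Node->EndingVa);
+//     }
+//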
+
+VOID
+FASTCALL
+MiInsertNode (
+ IN PMMADDRESS_NODE Node,
+ IN OUT PMMADDRESS_NODE *Root
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a virtual address descriptor into the tree and
+ reorders the splay tree as appropriate.
+
+Arguments:
+
+    Node - Supplies a pointer to the virtual address descriptor to insert.
+
+    Root - Supplies a pointer to the root pointer of the tree.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ ULONG Level = 0;
+ PMMADDRESS_NODE Parent;
+
+
+ //
+ // Initialize virtual address descriptor child links.
+ //
+
+ Node->LeftChild = (PMMADDRESS_NODE)NULL;
+ Node->RightChild = (PMMADDRESS_NODE)NULL;
+
+ //
+ // If the tree is empty, then establish this virtual address descriptor
+ // as the root of the tree.
+ // Otherwise descend the tree to find the correct place to
+ // insert the descriptor.
+ //
+
+ Parent = *Root;
+ if (!Parent) {
+ *Root = Node;
+ Node->Parent = (PMMADDRESS_NODE)NULL;
+ } else {
+ for (;;) {
+
+ Level += 1;
+ if (Level == 15) {
+ MiReorderTree(Parent, Root);
+ }
+
+ //
+ // If the starting address for this virtual address descriptor
+ // is less than the parent starting address, then
+ // follow the left child link. Else follow the right child link.
+ //
+
+ if (Node->StartingVa < Parent->StartingVa) {
+
+ //
+ // Starting address of the virtual address descriptor is less
+ // than the parent starting virtual address.
+ // Follow left child link if not null. Otherwise
+ // insert the descriptor as the left child of the parent and
+ // reorder the tree.
+ //
+
+ if (Parent->LeftChild) {
+ Parent = Parent->LeftChild;
+ } else {
+ Parent->LeftChild = Node;
+ Node->Parent = Parent;
+ // MiReorderTree(Node, Root);
+ break;
+ }
+ } else {
+
+ //
+ // Starting address of the virtual address descriptor is greater
+ // than the parent starting virtual address.
+ // Follow right child link if not null. Otherwise
+ // insert the descriptor as the right child of the parent and
+ // reorder the tree.
+ //
+
+ if (Parent->RightChild) {
+ Parent = Parent->RightChild;
+ } else {
+ Parent->RightChild = Node;
+ Node->Parent = Parent;
+ // MiReorderTree(Node, Root);
+ break;
+ }
+ }
+ }
+ }
+ return;
+}
+
+VOID
+FASTCALL
+MiRemoveNode (
+ IN PMMADDRESS_NODE Node,
+ IN OUT PMMADDRESS_NODE *Root
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes a virtual address descriptor from the tree and
+ reorders the splay tree as appropriate.
+
+Arguments:
+
+    Node - Supplies a pointer to a virtual address descriptor.
+
+    Root - Supplies a pointer to the root pointer of the tree.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PMMADDRESS_NODE LeftChild;
+ PMMADDRESS_NODE RightChild;
+ PMMADDRESS_NODE SplayNode;
+
+
+ LeftChild = Node->LeftChild;
+ RightChild = Node->RightChild;
+
+ //
+ // If the Node is the root of the tree, then establish new root. Else
+ // isolate splay case and perform splay tree transformation.
+ //
+
+ if (Node == *Root) {
+
+ //
+ // This Node is the root of the tree. There are four cases to
+ // handle:
+ //
+ // 1. the descriptor has no children
+ // 2. the descriptor has a left child but no right child
+ // 3. the descriptor has a right child but no left child
+ // 4. the descriptor has both a right child and a left child
+ //
+
+ if (LeftChild) {
+ if (RightChild) {
+
+ //
+ // The descriptor has both a left child and a right child.
+ //
+
+ if (LeftChild->RightChild) {
+
+ //
+ // The left child has a right child. Make the right most
+ // descendent of the right child of the left child the
+ // new root of the tree.
+ //
+ // Pictorially:
+ //
+ // R R
+ // | |
+ // X Z
+ // / \ / \
+ // A B -> A B
+ // \ \
+ // . .
+ // \
+ // Z
+ //
+
+ SplayNode = LeftChild->RightChild;
+ while (SplayNode->RightChild) {
+ SplayNode = SplayNode->RightChild;
+ }
+ *Root = SplayNode;
+ SplayNode->Parent->RightChild = SplayNode->LeftChild;
+ if (SplayNode->LeftChild) {
+ SplayNode->LeftChild->Parent = SplayNode->Parent;
+ }
+ SplayNode->Parent = (PMMADDRESS_NODE)NULL;
+ LeftChild->Parent = SplayNode;
+ RightChild->Parent = SplayNode;
+ SplayNode->LeftChild = LeftChild;
+ SplayNode->RightChild = RightChild;
+ } else if (RightChild->LeftChild) {
+
+ //
+ // The right child has a left child. Make the left most
+ // descendent of the left child of the right child the
+ // new root of the tree.
+ //
+ // Pictorially:
+ //
+ // R R
+ // | |
+ // X Z
+ // / \ / \
+ // A B -> A B
+ // / /
+ // . .
+ // /
+ // Z
+ //
+
+ SplayNode = RightChild->LeftChild;
+ while (SplayNode->LeftChild) {
+ SplayNode = SplayNode->LeftChild;
+ }
+ *Root = SplayNode;
+ SplayNode->Parent->LeftChild = SplayNode->RightChild;
+ if (SplayNode->RightChild) {
+ SplayNode->RightChild->Parent = SplayNode->Parent;
+ }
+ SplayNode->Parent = (PMMADDRESS_NODE)NULL;
+ LeftChild->Parent = SplayNode;
+ RightChild->Parent = SplayNode;
+ SplayNode->LeftChild = LeftChild;
+ SplayNode->RightChild = RightChild;
+ } else {
+
+ //
+ // The left child of the descriptor does not have a right child,
+ // and the right child of the descriptor does not have a left
+ // child. Make the left child of the descriptor the new root of
+ // the tree.
+ //
+ // Pictorially:
+ //
+ // R R
+ // | |
+ // X A
+ // / \ / \
+ // A B -> . B
+ // / /
+ // .
+ //
+
+ *Root = LeftChild;
+ LeftChild->Parent = (PMMADDRESS_NODE)NULL;
+ LeftChild->RightChild = RightChild;
+ LeftChild->RightChild->Parent = LeftChild;
+ }
+ } else {
+
+ //
+ // The descriptor has a left child, but does not have a right child.
+ // Make the left child the new root of the tree.
+ //
+ // Pictorially:
+ //
+ // R R
+ // | |
+ // X -> A
+ // /
+ // A
+ //
+
+ *Root = LeftChild;
+ LeftChild->Parent = (PMMADDRESS_NODE)NULL;
+ }
+ } else if (RightChild) {
+
+ //
+ // The descriptor has a right child, but does not have a left child.
+ // Make the right child the new root of the tree.
+ //
+ // Pictorially:
+ //
+ // R R
+ // | |
+ // X -> A
+ // \
+ // A
+ //
+
+ *Root = RightChild;
+ RightChild->Parent = (PMMADDRESS_NODE)NULL;
+ while (RightChild->LeftChild) {
+ RightChild = RightChild->LeftChild;
+ }
+ } else {
+
+ //
+ // The descriptor has neither a left child nor a right child. The
+ // tree will be empty after removing the descriptor.
+ //
+ // Pictorially:
+ //
+ // R R
+ // | ->
+ // X
+ //
+
+ *Root = NULL;
+ }
+ } else if (LeftChild) {
+ if (RightChild) {
+
+ //
+ // The descriptor has both a left child and a right child.
+ //
+
+ if (LeftChild->RightChild) {
+
+ //
+ // The left child has a right child. Make the right most
+ // descendent of the right child of the left child the new
+ // root of the subtree.
+ //
+ // Pictorially:
+ //
+ // P P
+ // / \
+ // X X
+ // / \ / \
+ // A B or A B
+ // \ \
+ // . .
+ // \ \
+ // Z Z
+ //
+ // |
+ // v
+ //
+ // P P
+ // / \
+ // Z Z
+ // / \ / \
+ // A B or A B
+ // \ \
+ // . .
+ //
+
+ SplayNode = LeftChild->RightChild;
+ while (SplayNode->RightChild) {
+ SplayNode = SplayNode->RightChild;
+ }
+ SplayNode->Parent->RightChild = SplayNode->LeftChild;
+ if (SplayNode->LeftChild) {
+ SplayNode->LeftChild->Parent = SplayNode->Parent;
+ }
+ SplayNode->Parent = Node->Parent;
+ if (Node == Node->Parent->LeftChild) {
+ Node->Parent->LeftChild = SplayNode;
+ } else {
+ Node->Parent->RightChild = SplayNode;
+ }
+ LeftChild->Parent = SplayNode;
+ RightChild->Parent = SplayNode;
+ SplayNode->LeftChild = LeftChild;
+ SplayNode->RightChild = RightChild;
+ } else if (RightChild->LeftChild) {
+
+ //
+ // The right child has a left child. Make the left most
+ // descendent of the left child of the right child the
+ // new root of the subtree.
+ //
+ // Pictorially:
+ //
+ // P P
+ // / \
+ // X X
+ // / \ / \
+ // A B or A B
+ // / /
+ // . .
+ // / /
+ // Z Z
+ //
+ // |
+ // v
+ //
+ // P P
+ // / \
+ // Z Z
+ // / \ / \
+ // A B or A B
+ // / /
+ // . .
+ //
+
+ SplayNode = RightChild->LeftChild;
+ while (SplayNode->LeftChild) {
+ SplayNode = SplayNode->LeftChild;
+ }
+ SplayNode->Parent->LeftChild = SplayNode->RightChild;
+ if (SplayNode->RightChild) {
+ SplayNode->RightChild->Parent = SplayNode->Parent;
+ }
+ SplayNode->Parent = Node->Parent;
+ if (Node == Node->Parent->LeftChild) {
+ Node->Parent->LeftChild = SplayNode;
+ } else {
+ Node->Parent->RightChild = SplayNode;
+ }
+ LeftChild->Parent = SplayNode;
+ RightChild->Parent = SplayNode;
+ SplayNode->LeftChild = LeftChild;
+ SplayNode->RightChild = RightChild;
+ } else {
+
+ //
+ // The left child of the descriptor does not have a right child,
+                // and the right child of the descriptor does not have a left
+ // child. Make the left child of the descriptor the new root of
+ // the subtree.
+ //
+ // Pictorially:
+ //
+ // P P
+ // / \
+ // X X
+ // / \ / \
+ // A B or A B
+ // / /
+ // . .
+ //
+ // |
+ // v
+ //
+ // P P
+ // / \
+ // A A
+ // / \ / \
+ // . B or . B
+ // / /
+ //
+
+ SplayNode = LeftChild;
+ SplayNode->Parent = Node->Parent;
+ if (Node == Node->Parent->LeftChild) {
+ Node->Parent->LeftChild = SplayNode;
+ } else {
+ Node->Parent->RightChild = SplayNode;
+ }
+ SplayNode->RightChild = RightChild;
+ RightChild->Parent = SplayNode;
+ }
+ } else {
+
+ //
+ // The descriptor has a left child, but does not have a right child.
+ // Make the left child the new root of the subtree.
+ //
+ // Pictorially:
+ //
+ // P P
+ // / \
+ // X or X
+ // / /
+ // A A
+ //
+ // |
+ // v
+ //
+ // P P
+ // / \
+ // A A
+ //
+
+ LeftChild->Parent = Node->Parent;
+ if (Node == Node->Parent->LeftChild) {
+ Node->Parent->LeftChild = LeftChild;
+ } else {
+ Node->Parent->RightChild = LeftChild;
+ }
+ }
+ } else if (RightChild) {
+
+ //
+ // descriptor has a right child, but does not have a left child. Make
+ // the right child the new root of the subtree.
+ //
+ // Pictorially:
+ //
+ // P P
+ // / \
+ // X or X
+ // \ \
+ // A A
+ //
+ // |
+ // v
+ //
+ // P P
+ // / \
+ // A A
+ //
+
+ RightChild->Parent = Node->Parent;
+ if (Node == Node->Parent->LeftChild) {
+ Node->Parent->LeftChild = RightChild;
+ } else {
+ Node->Parent->RightChild = RightChild;
+ }
+ } else {
+
+ //
+ // The descriptor has neither a left child nor a right child. Delete
+ // the descriptor from the tree and adjust its parent right or left
+ // link.
+ //
+ // Pictorially:
+ //
+ // P P
+ // / \
+ // X or X
+ //
+ // |
+ // v
+ //
+ // P P
+ //
+
+ if (Node == Node->Parent->LeftChild) {
+ Node->Parent->LeftChild = (PMMADDRESS_NODE)NULL;
+ } else {
+ Node->Parent->RightChild = (PMMADDRESS_NODE)NULL;
+ }
+ }
+ return;
+}
+
+PMMADDRESS_NODE
+FASTCALL
+MiLocateAddressInTree (
+ IN PVOID VirtualAddress,
+ IN PMMADDRESS_NODE *Root
+ )
+
+/*++
+
+Routine Description:
+
+ The function locates the virtual address descriptor which describes
+ a given address.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to locate a descriptor
+ for.
+
+    Root - Supplies a pointer to the root of the tree to search.
+
+Return Value:
+
+ Returns a pointer to the virtual address descriptor which contains
+ the supplied virtual address or NULL if none was located.
+
+--*/
+
+{
+
+ PMMADDRESS_NODE Parent;
+ ULONG Level = 0;
+
+ Parent = *Root;
+
+ for (;;) {
+
+ if (Parent == (PMMADDRESS_NODE)NULL) {
+ return (PMMADDRESS_NODE)NULL;
+ }
+
+ if (Level == 20) {
+
+ //
+ // There are 20 nodes above this point, reorder the
+ // tree with this node as the root.
+ //
+
+ MiReorderTree(Parent, Root);
+ }
+
+ if (VirtualAddress < Parent->StartingVa) {
+ Parent = Parent->LeftChild;
+ Level += 1;
+
+ } else if (VirtualAddress > Parent->EndingVa) {
+ Parent = Parent->RightChild;
+ Level += 1;
+
+ } else {
+
+ //
+ // The virtual address is within the start and end range.
+ //
+
+ return Parent;
+ }
+ }
+}
+
+PMMADDRESS_NODE
+MiCheckForConflictingNode (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMADDRESS_NODE Root
+ )
+
+/*++
+
+Routine Description:
+
+ The function determines if any addresses between a given starting and
+ ending address is contained within a virtual address descriptor.
+
+Arguments:
+
+    StartingAddress - Supplies the first virtual address of the range
+                      to check.
+
+    EndingAddress - Supplies the last virtual address of the range
+                    to check.
+
+    Root - Supplies the root of the tree to search through.
+
+Return Value:
+
+ Returns a pointer to the first conflicting virtual address descriptor
+    if one is found, otherwise a NULL value is returned.
+
+--*/
+
+{
+
+ PMMADDRESS_NODE Node;
+
+ Node = Root;
+
+ for (;;) {
+
+ if (Node == (PMMADDRESS_NODE)NULL) {
+ return (PMMADDRESS_NODE)NULL;
+ }
+
+ if (StartingAddress > Node->EndingVa) {
+ Node = Node->RightChild;
+
+ } else if (EndingAddress < Node->StartingVa) {
+ Node = Node->LeftChild;
+
+ } else {
+
+ //
+ // The starting address is less than or equal to the end VA
+ // and the ending address is greater than or equal to the
+ // start va. Return this node.
+ //
+
+ return Node;
+ }
+ }
+}
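+
+//
+// The test above is the standard interval-overlap check: the ranges
+// [StartingAddress, EndingAddress] and [StartingVa, EndingVa] intersect
+// exactly when StartingAddress <= EndingVa and EndingAddress >= StartingVa.
+// For example (addresses illustrative), a request for 0x10000..0x1FFFF
+// conflicts with a node covering 0x18000..0x28FFF but not with one
+// covering 0x20000..0x28FFF.
+//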
+
+PVOID
+MiFindEmptyAddressRangeInTree (
+ IN ULONG SizeOfRange,
+ IN ULONG Alignment,
+ IN PMMADDRESS_NODE Root,
+ OUT PMMADDRESS_NODE *PreviousVad
+ )
+
+/*++
+
+Routine Description:
+
+ The function examines the virtual address descriptors to locate
+ an unused range of the specified size and returns the starting
+ address of the range.
+
+Arguments:
+
+ SizeOfRange - Supplies the size in bytes of the range to locate.
+
+ Alignment - Supplies the alignment for the address. Must be
+                a power of 2 and greater than PAGE_SIZE.
+
+ Root - Supplies the root of the tree to search through.
+
+    PreviousVad - Receives the VAD which immediately precedes the
+                  returned address range.
+
+Return Value:
+
+    Returns the starting address of a suitable range; raises
+    STATUS_NO_MEMORY if no suitable range exists.
+
+--*/
+
+{
+
+ PMMADDRESS_NODE Node;
+ PMMADDRESS_NODE NextNode;
+
+ //
+ // Locate the Node with the lowest starting address.
+ //
+
+ Node = Root;
+
+ if (Node == (PMMADDRESS_NODE)NULL) {
+ return MM_LOWEST_USER_ADDRESS;
+ }
+ while (Node->LeftChild != (PMMADDRESS_NODE)NULL) {
+ Node = Node->LeftChild;
+ }
+
+ //
+ // Check to see if a range exists between the lowest address VAD
+ // and lowest user address.
+ //
+
+ if (Node->StartingVa > MM_LOWEST_USER_ADDRESS) {
+ if ( SizeOfRange <
+ ((ULONG)Node->StartingVa - (ULONG)MM_LOWEST_USER_ADDRESS )) {
+
+ *PreviousVad = NULL;
+ return MM_LOWEST_USER_ADDRESS;
+ }
+ }
+
+ for (;;) {
+
+ NextNode = MiGetNextNode (Node);
+
+ if (NextNode != (PMMADDRESS_NODE)NULL) {
+ if (SizeOfRange <=
+ ((ULONG)NextNode->StartingVa -
+ (ULONG)MI_ROUND_TO_SIZE(Node->EndingVa, Alignment))) {
+
+ //
+ // Check to ensure that the ending address aligned upwards
+ // is not greater than the starting address.
+ //
+
+ if ((ULONG)NextNode->StartingVa >
+ (ULONG)MI_ROUND_TO_SIZE(Node->EndingVa,Alignment)) {
+
+ *PreviousVad = Node;
+ return (PMMADDRESS_NODE)MI_ROUND_TO_SIZE(Node->EndingVa,
+ Alignment);
+ }
+ }
+
+ } else {
+
+ //
+ // No more descriptors, check to see if this fits into the remainder
+ // of the address space.
+ //
+
+ if ((((ULONG)Node->EndingVa + X64K) <
+ (ULONG)MM_HIGHEST_VAD_ADDRESS)
+ &&
+ (SizeOfRange <=
+ ((ULONG)MM_HIGHEST_VAD_ADDRESS -
+ (ULONG)MI_ROUND_TO_SIZE(Node->EndingVa, Alignment)))) {
+
+ *PreviousVad = Node;
+ return (PMMADDRESS_NODE)MI_ROUND_TO_SIZE(Node->EndingVa,
+ Alignment);
+ } else {
+ ExRaiseStatus (STATUS_NO_MEMORY);
+ }
+ }
+ Node = NextNode;
+ }
+}
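+
+//
+// MI_ROUND_TO_SIZE above is the usual power-of-two round-up. A minimal
+// sketch of the arithmetic, assuming Alignment is a power of two:
+//
+//     #define SKETCH_ROUND_TO_SIZE(Va, Align) \
+//         (((ULONG)(Va) + (Align) - 1) & ~((Align) - 1))
+//
+// With a 64K alignment, an ending VA of 0x31234 rounds up to 0x40000, so
+// a 0x10000-byte request fits only if the next descriptor starts at or
+// above 0x50000 (the example numbers are illustrative).
+//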
+
+PVOID
+MiFindEmptyAddressRangeDownTree (
+ IN ULONG SizeOfRange,
+ IN PVOID HighestAddressToEndAt,
+ IN ULONG Alignment,
+ IN PMMADDRESS_NODE Root
+ )
+
+/*++
+
+Routine Description:
+
+ The function examines the virtual address descriptors to locate
+ an unused range of the specified size and returns the starting
+ address of the range. The function examines from the high
+ addresses down and ensures that starting address is less than
+ the specified address.
+
+Arguments:
+
+ SizeOfRange - Supplies the size in bytes of the range to locate.
+
+ HighestAddressToEndAt - Supplies the virtual address that limits
+ the value of the ending address. The ending
+ address of the located range must be less
+ than this address.
+
+ Alignment - Supplies the alignment for the address. Must be
+                a power of 2 and greater than PAGE_SIZE.
+
+ Root - Supplies the root of the tree to search through.
+
+Return Value:
+
+    Returns the starting address of a suitable range; raises
+    STATUS_NO_MEMORY if no suitable range exists.
+
+--*/
+
+{
+ PMMADDRESS_NODE Node;
+ PMMADDRESS_NODE PreviousNode;
+ ULONG AlignedEndingVa;
+ PVOID OptimalStart;
+
+ ASSERT (HighestAddressToEndAt != NULL);
+ ASSERT (HighestAddressToEndAt <= (PVOID)((ULONG)MM_HIGHEST_VAD_ADDRESS + 1));
+
+ //
+ // Locate the Node with the highest starting address.
+ //
+
+ OptimalStart = (PVOID)(MI_ALIGN_TO_SIZE(
+ (((ULONG)HighestAddressToEndAt + 1) - SizeOfRange),
+ Alignment));
+ Node = Root;
+
+
+ if (Node == (PMMADDRESS_NODE)NULL) {
+
+ //
+ // The tree is empty, any range is okay.
+ //
+
+ return (PMMADDRESS_NODE)(OptimalStart);
+ }
+
+ //
+ // See if an empty slot exists to hold this range, locate the largest
+ // element in the tree.
+ //
+
+ while (Node->RightChild != (PMMADDRESS_NODE)NULL) {
+ Node = Node->RightChild;
+ }
+
+ //
+ // Check to see if a range exists between the highest address VAD
+ // and the highest address to end at.
+ //
+
+ AlignedEndingVa = (ULONG)MI_ROUND_TO_SIZE(Node->EndingVa, Alignment);
+
+ if (AlignedEndingVa < (ULONG)HighestAddressToEndAt) {
+
+ if ( SizeOfRange < ((ULONG)HighestAddressToEndAt - AlignedEndingVa)) {
+
+ return (PMMADDRESS_NODE)(MI_ALIGN_TO_SIZE(
+ ((ULONG)HighestAddressToEndAt - SizeOfRange),
+ Alignment));
+ }
+ }
+
+ //
+ // Walk the tree backwards looking for a fit.
+ //
+
+ for (;;) {
+
+ PreviousNode = MiGetPreviousNode (Node);
+
+ if (PreviousNode != (PMMADDRESS_NODE)NULL) {
+
+ //
+            // Is the previous node's ending VA below the optimal start.
+ //
+
+ if (PreviousNode->EndingVa < OptimalStart) {
+ if (SizeOfRange <=
+ ((ULONG)Node->StartingVa -
+ (ULONG)MI_ROUND_TO_SIZE(PreviousNode->EndingVa,
+ Alignment))) {
+
+ //
+ // See if the optimal start will fit between these
+ // two VADs.
+ //
+
+ if ((OptimalStart > PreviousNode->EndingVa) &&
+ (HighestAddressToEndAt < Node->StartingVa)) {
+ return (PMMADDRESS_NODE)(OptimalStart);
+ }
+
+ //
+ // Check to ensure that the ending address aligned upwards
+ // is not greater than the starting address.
+ //
+
+ if ((ULONG)Node->StartingVa >
+ (ULONG)MI_ROUND_TO_SIZE(PreviousNode->EndingVa,
+ Alignment)) {
+
+ return (PMMADDRESS_NODE)MI_ALIGN_TO_SIZE(
+ (ULONG)Node->StartingVa - SizeOfRange,
+ Alignment);
+ }
+ }
+ }
+ } else {
+
+ //
+ // No more descriptors, check to see if this fits into the remainder
+ // of the address space.
+ //
+
+ if (Node->StartingVa > MM_LOWEST_USER_ADDRESS) {
+ if (SizeOfRange <=
+ ((ULONG)Node->StartingVa - (ULONG)MM_LOWEST_USER_ADDRESS)) {
+
+ //
+ // See if the optimal start will fit between these
+ // two VADs.
+ //
+
+ if (HighestAddressToEndAt < Node->StartingVa) {
+ return (PMMADDRESS_NODE)(OptimalStart);
+ }
+
+ return (PMMADDRESS_NODE)MI_ALIGN_TO_SIZE(
+ (ULONG)Node->StartingVa - SizeOfRange,
+ Alignment);
+ }
+ } else {
+ ExRaiseStatus (STATUS_NO_MEMORY);
+ }
+ }
+ Node = PreviousNode;
+ }
+}
+
+
+#if DBG
+
+VOID
+NodeTreeWalk (
+ PMMADDRESS_NODE Start
+ )
+
+{
+ if (Start == (PMMADDRESS_NODE)NULL) {
+ return;
+ }
+
+ NodeTreeWalk(Start->LeftChild);
+
+ DbgPrint("Node at 0x%lx start 0x%lx end 0x%lx \n",
+ (ULONG)Start, (ULONG)Start->StartingVa,
+ (ULONG)Start->EndingVa);
+
+
+ NodeTreeWalk(Start->RightChild);
+ return;
+}
+#endif //DBG
diff --git a/private/ntos/mm/allocpag.c b/private/ntos/mm/allocpag.c
new file mode 100644
index 000000000..2baf5dc4e
--- /dev/null
+++ b/private/ntos/mm/allocpag.c
@@ -0,0 +1,1613 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ allocpag.c
+
+Abstract:
+
+ This module contains the routines which allocate and deallocate
+ one or more pages from paged or nonpaged pool.
+
+Author:
+
+ Lou Perazzoli (loup) 6-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MiInitializeNonPagedPool)
+#if DBG || (i386 && !FPO)
+#pragma alloc_text(PAGELK, MmSnapShotPool)
+#endif // DBG || (i386 && !FPO)
+#endif
+
+ULONG MmPagedPoolHint;
+
+ULONG MmPagedPoolCommit;
+
+ULONG MmAllocatedPagedPool;
+
+ULONG MmAllocatedNonPagedPool;
+
+PVOID MmNonPagedPoolExpansionStart;
+
+LIST_ENTRY MmNonPagedPoolFreeListHead;
+
+extern ULONG MmSystemPageDirectory;
+
+extern POOL_DESCRIPTOR NonPagedPoolDescriptor;
+
+#define MM_SMALL_ALLOCATIONS 4
+
+
+POOL_TYPE
+MmDeterminePoolType (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function determines which pool a virtual address resides within.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to determine which pool
+ it resides within.
+
+Return Value:
+
+    Returns the POOL_TYPE (PagedPool or NonPagedPool); it never returns
+ any information about MustSucceed pool types.
+
+Environment:
+
+ Kernel Mode Only.
+
+--*/
+
+{
+ if ((VirtualAddress >= MmPagedPoolStart) &&
+ (VirtualAddress <= MmPagedPoolEnd)) {
+ return PagedPool;
+ }
+ return NonPagedPool;
+}
+
+
+PVOID
+MiAllocatePoolPages (
+ IN POOL_TYPE PoolType,
+ IN ULONG SizeInBytes
+ )
+
+/*++
+
+Routine Description:
+
+ This function allocates a set of pages from the specified pool
+ and returns the starting virtual address to the caller.
+
+ For the NonPagedPoolMustSucceed case, the caller must first
+ attempt to get NonPagedPool and if and ONLY IF that fails, then
+ MiAllocatePoolPages should be called again with the PoolType of
+ NonPagedPoolMustSucceed.
+
+Arguments:
+
+ PoolType - Supplies the type of pool from which to obtain pages.
+
+ SizeInBytes - Supplies the size of the request in bytes. The actual
+ size returned is rounded up to a page boundary.
+
+Return Value:
+
+ Returns a pointer to the allocated pool, or NULL if no more pool is
+ available.
+
+Environment:
+
+ These functions are used by the general pool allocation routines
+ and should not be called directly.
+
+ Mutexes guarding the pool databases must be held when calling
+ these functions.
+
+    Kernel mode, IRQL at DISPATCH_LEVEL.
+
+--*/
+
+{
+ ULONG SizeInPages;
+ ULONG StartPosition;
+ ULONG EndPosition;
+ PMMPTE StartingPte;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PVOID BaseVa;
+ KIRQL OldIrql;
+ ULONG i;
+ PLIST_ENTRY Entry;
+ PMMFREE_POOL_ENTRY FreePageInfo;
+
+ SizeInPages = BYTES_TO_PAGES (SizeInBytes);
+
+ ASSERT (SizeInPages < 10000);
+
+ if (PoolType == NonPagedPoolMustSucceed) {
+
+ //
+ // Pool expansion failed, see if any Must Succeed
+ // pool is still left.
+ //
+
+ if (MmNonPagedMustSucceed == NULL) {
+
+ //
+ // No more pool exists. Bug Check.
+ //
+
+ KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
+ SizeInBytes,
+ NonPagedPoolDescriptor.TotalPages,
+ NonPagedPoolDescriptor.TotalBigPages,
+ MmAvailablePages);
+ }
+
+ //
+ // Remove a page from the must succeed pool.
+ //
+
+ ASSERT (SizeInBytes <= PAGE_SIZE);
+
+ BaseVa = MmNonPagedMustSucceed;
+
+ MmNonPagedMustSucceed = (PVOID)(*(PULONG)BaseVa);
+ return BaseVa;
+ }
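+
+    //
+    // (The must succeed pool is kept as a simple intrusive free list:
+    // each free page stores the address of the next free page in its
+    // first ULONG. The pop above has a matching push at the top of
+    // MiFreePoolPages below.)
+    //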
+
+ if (PoolType == NonPagedPool) {
+
+ //
+ // NonPaged pool is linked together through the pages themselves.
+ //
+
+ Entry = MmNonPagedPoolFreeListHead.Flink;
+
+ while (Entry != &MmNonPagedPoolFreeListHead) {
+
+ //
+ // The list is not empty, see if this one has enough
+ // space.
+ //
+
+ FreePageInfo = CONTAINING_RECORD(Entry,
+ MMFREE_POOL_ENTRY,
+ List);
+
+ ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);
+ if (FreePageInfo->Size >= SizeInPages) {
+
+ //
+ // This entry has sufficient space, remove
+ // the pages from the end of the allocation.
+ //
+
+ FreePageInfo->Size -= SizeInPages;
+
+ if (FreePageInfo->Size == 0) {
+ RemoveEntryList (&FreePageInfo->List);
+ }
+
+ //
+ // Adjust the number of free pages remaining in the pool.
+ //
+
+ MmNumberOfFreeNonPagedPool -= SizeInPages;
+ ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);
+
+ BaseVa = (PVOID)((PCHAR)FreePageInfo +
+ (FreePageInfo->Size << PAGE_SHIFT));
+
+ //
+ // Mark start and end of allocation in the PFN database.
+ //
+
+ if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) {
+
+ //
+                    // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BaseVa);
+ } else {
+ PointerPte = MiGetPteAddress(BaseVa);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ }
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+
+ //
+ // Calculate the ending PTE's address.
+ //
+
+ if (SizeInPages != 1) {
+
+ if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) {
+ Pfn1 += SizeInPages - 1;
+ } else {
+ PointerPte += SizeInPages - 1;
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+
+ } else {
+
+#if defined(_ALPHA_)
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if ((BaseVa > (PVOID)KSEG2_BASE) &&
+ (PageFrameIndex < MmSubsectionTopPage)) {
+ BaseVa = (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+#endif //ALPHA
+
+#if defined(_MIPS_)
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if ((BaseVa > (PVOID)KSEG1_BASE) &&
+ (MI_GET_PAGE_COLOR_FROM_VA (BaseVa) ==
+ (MM_COLOR_MASK & PageFrameIndex)) &&
+ (PageFrameIndex < MmSubsectionTopPage)) {
+ BaseVa = (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+#endif //MIPS
+
+#if defined(_X86_)
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if ((BaseVa > (PVOID)MM_KSEG2_BASE) &&
+ (PageFrameIndex < MmSubsectionTopPage)) {
+ BaseVa = (PVOID)(MM_KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+#endif //X86
+
+ NOTHING;
+
+ }
+ ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ MmAllocatedNonPagedPool += SizeInPages;
+ return BaseVa;
+ }
+ Entry = FreePageInfo->List.Flink;
+ }
+
+ //
+ // No more entries on the list, expand nonpaged pool if
+ // possible to satisfy this request.
+ //
+
+ //
+        // Check to see if there are too many unused segments lying
+ // around, and if so, set an event so they get deleted.
+ //
+
+ if (MmUnusedSegmentCount > MmUnusedSegmentCountMaximum) {
+ KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
+ }
+
+ LOCK_PFN2 (OldIrql);
+
+ //
+ // Make sure we have 1 more than the number of pages
+ // requested available.
+ //
+
+ if (MmAvailablePages <= SizeInPages) {
+
+ UNLOCK_PFN2 (OldIrql);
+
+ //
+            // There are not enough free physical pages to expand
+            // nonpaged pool.
+ //
+
+ return NULL;
+ }
+
+ //
+ // Try to find system ptes to expand the pool into.
+ //
+
+ StartingPte = MiReserveSystemPtes (SizeInPages,
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ FALSE);
+
+ if (StartingPte == NULL) {
+
+ UNLOCK_PFN2 (OldIrql);
+
+ //
+            // There are no free system PTEs to expand
+ // nonpaged pool.
+ //
+
+ return NULL;
+ }
+
+ //
+ // Update the count of available resident pages.
+ //
+
+ MmResidentAvailablePages -= SizeInPages;
+
+ //
+ // Charge commitment as non paged pool uses physical memory.
+ //
+
+ MiChargeCommitmentCantExpand (SizeInPages, TRUE);
+
+ //
+ // Expand the pool.
+ //
+
+ PointerPte = StartingPte;
+ TempPte = ValidKernelPte;
+ MmAllocatedNonPagedPool += SizeInPages;
+ i= SizeInPages;
+
+ do {
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u2.ShareCount = 1;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+ Pfn1->PteFrame = MiGetPteAddress(PointerPte)->u.Hard.PageFrameNumber;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+ PointerPte += 1;
+ SizeInPages -= 1;
+ } while (SizeInPages > 0);
+
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ Pfn1 = MI_PFN_ELEMENT (StartingPte->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+
+ UNLOCK_PFN2 (OldIrql);
+
+ BaseVa = MiGetVirtualAddressMappedByPte (StartingPte);
+
+#if defined(_ALPHA_)
+ if (i == 1) {
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if (PageFrameIndex < MmSubsectionTopPage) {
+ BaseVa = (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+ }
+#endif //ALPHA
+
+#if defined(_MIPS_)
+ if (i == 1) {
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if ((MI_GET_PAGE_COLOR_FROM_VA (BaseVa) ==
+ (MM_COLOR_MASK & PageFrameIndex)) &&
+ (PageFrameIndex < MmSubsectionTopPage)) {
+ BaseVa = (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+ }
+#endif //MIPS
+
+#if defined(_X86_)
+ if (i == 1) {
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if (PageFrameIndex < MmSubsectionTopPage) {
+ BaseVa = (PVOID)(MM_KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+ }
+#endif //X86
+
+ return BaseVa;
+ }
+
+ //
+ // Paged Pool.
+ //
+
+ StartPosition = RtlFindClearBitsAndSet (
+ MmPagedPoolAllocationMap,
+ SizeInPages,
+ MmPagedPoolHint
+ );
+
+ if ((StartPosition == 0xFFFFFFFF) &&
+ (MmPagedPoolHint != 0)) {
+
+ if (MmUnusedSegmentCount > MmUnusedSegmentCountMaximum) {
+ KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
+ }
+
+ //
+ // No free bits were found, check from the start of
+ // the bit map.
+
+ StartPosition = RtlFindClearBitsAndSet (
+ MmPagedPoolAllocationMap,
+ SizeInPages,
+ 0
+ );
+ }
+
+ //
+    // If start position = -1, there is no room in the pool. Attempt to
+    // expand PagedPool.
+ //
+
+ if (StartPosition == 0xFFFFFFFF) {
+
+
+ //
+ // Attempt to expand the paged pool.
+ //
+
+ StartPosition = ((SizeInPages - 1) / PTE_PER_PAGE) + 1;
+
+ //
+        // Make sure there is enough space to create the prototype PTEs.
+ //
+
+ if (((StartPosition - 1) + MmNextPteForPagedPoolExpansion) >
+ MiGetPteAddress (MmLastPteForPagedPool)) {
+
+ //
+ // Can't expand pool any more.
+ //
+
+ return NULL;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Make sure we have 1 more than the number of pages
+ // requested available.
+ //
+
+ if (MmAvailablePages <= StartPosition) {
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+            // There are not enough free physical pages to expand
+            // paged pool.
+ //
+
+ return NULL;
+ }
+
+ //
+ // Update the count of available resident pages.
+ //
+
+ MmResidentAvailablePages -= StartPosition;
+
+ //
+ // Expand the pool.
+ //
+
+ EndPosition = (MmNextPteForPagedPoolExpansion -
+ MiGetPteAddress(MmFirstPteForPagedPool)) *
+ PTE_PER_PAGE;
+
+ RtlClearBits (MmPagedPoolAllocationMap,
+ EndPosition,
+ StartPosition * PTE_PER_PAGE);
+
+ PointerPte = MmNextPteForPagedPoolExpansion;
+ StartingPte =
+ (PMMPTE)MiGetVirtualAddressMappedByPte(PointerPte);
+ MmNextPteForPagedPoolExpansion += StartPosition;
+
+ TempPte = ValidKernelPde;
+
+ do {
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ MiChargeCommitmentCantExpand (1, TRUE);
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+
+ //
+ // Map valid PDE into system address space as well.
+ //
+
+ MmSystemPagePtes [((ULONG)PointerPte &
+ ((sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] =
+ TempPte;
+
+ MiInitializePfnForOtherProcess (PageFrameIndex,
+ PointerPte,
+ MmSystemPageDirectory);
+
+ RtlFillMemoryUlong (StartingPte,
+ PAGE_SIZE,
+ MM_KERNEL_DEMAND_ZERO_PTE);
+
+ PointerPte += 1;
+ StartingPte += PAGE_SIZE / sizeof(MMPTE);
+ StartPosition -= 1;
+ } while (StartPosition > 0);
+
+ UNLOCK_PFN (OldIrql);
+
+ StartPosition = RtlFindClearBitsAndSet (
+ MmPagedPoolAllocationMap,
+ SizeInPages,
+ EndPosition
+ );
+ ASSERT (StartPosition != 0xffffffff);
+ }
+ MmPagedPoolHint = StartPosition + SizeInPages - 1;
+
+ BaseVa = (PVOID)((PUCHAR)MmPageAlignedPoolBase[PoolType] +
+ (StartPosition * PAGE_SIZE));
+
+ //
+ // This is paged pool, the start and end can't be saved
+ // in the PFN database as the page isn't always resident
+ // in memory. The ideal place to save the start and end
+ // would be in the prototype PTE, but there are no free
+ // bits. To solve this problem, a bitmap which parallels
+ // the allocation bitmap exists which contains set bits
+ // in the positions where an allocation ends. This
+ // allows pages to be deallocated with only their starting
+ // address.
+ //
+ // For sanity's sake, the starting address can be verified
+ // from the 2 bitmaps as well. If the page before the starting
+ // address is not allocated (bit is zero in allocation bitmap)
+ // then this page is obviously a start of an allocation block.
+ // If the page before is allocated and the other bit map does
+ // not indicate the previous page is the end of an allocation,
+ // then the starting address is wrong and a bug check should
+ // be issued.
+ //
+
+ try {
+
+ MiChargeCommitmentCantExpand (SizeInPages, FALSE);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ RtlClearBits (MmPagedPoolAllocationMap,
+ StartPosition,
+ SizeInPages);
+
+ //
+ // Could not commit the page, return NULL indicating
+ // no pool was allocated.
+ //
+
+ return(NULL);
+ }
+
+ MmPagedPoolCommit += SizeInPages;
+ EndPosition = StartPosition + SizeInPages - 1;
+ RtlSetBits (MmEndOfPagedPoolBitmap, EndPosition, 1L);
+
+ MmAllocatedPagedPool += SizeInPages;
+ return BaseVa;
+}
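+
+//
+// A minimal sketch of the paired-bitmap bookkeeping described above,
+// using the Rtl bitmap routines (sizes and the hint are illustrative):
+//
+//     Start = RtlFindClearBitsAndSet (MmPagedPoolAllocationMap, 4, Hint);
+//     RtlSetBits (MmEndOfPagedPoolBitmap, Start + 4 - 1, 1L);
+//
+//     //
+//     // Free later given only the starting page: scan forward until the
+//     // set bit marking the last page, then clear both ranges.
+//     //
+//
+//     Last = Start;
+//     while (!RtlCheckBit (MmEndOfPagedPoolBitmap, Last)) {
+//         Last += 1;
+//     }
+//     RtlClearBits (MmPagedPoolAllocationMap, Start, Last - Start + 1);
+//     RtlClearBits (MmEndOfPagedPoolBitmap, Last, 1L);
+//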
+
+ULONG
+MiFreePoolPages (
+ IN PVOID StartingAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns a set of pages back to the pool from
+ which they were obtained. Once the pages have been deallocated
+ the region provided by the allocation becomes available for
+ allocation to other callers, i.e. any data in the region is now
+ trashed and cannot be referenced.
+
+Arguments:
+
+ StartingAddress - Supplies the starting address which was returned
+                      in a previous call to MiAllocatePoolPages.
+
+Return Value:
+
+ Returns the number of pages deallocated.
+
+Environment:
+
+ These functions are used by the general pool allocation routines
+ and should not be called directly.
+
+ Mutexes guarding the pool databases must be held when calling
+ these functions.
+
+--*/
+
+{
+ ULONG StartPosition;
+ ULONG i;
+ ULONG NumberOfPages = 1;
+ POOL_TYPE PoolType;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+ PMMFREE_POOL_ENTRY Entry;
+ PMMFREE_POOL_ENTRY NextEntry;
+
+ //
+    // Determine the pool type based on the virtual address of the block
+    // to deallocate.
+    //
+    // This assumes NonPagedPool starts at a higher virtual address
+    // than PagedPool.
+ //
+
+ if ((StartingAddress >= MmPagedPoolStart) &&
+ (StartingAddress <= MmPagedPoolEnd)) {
+ PoolType = PagedPool;
+ } else {
+ PoolType = NonPagedPool;
+ }
+
+ StartPosition = ((ULONG)StartingAddress -
+ (ULONG)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT;
+
+ //
+    // Check to ensure this page is really the start of an allocation.
+ //
+
+ if (PoolType == NonPagedPool) {
+
+ if (StartPosition < MmMustSucceedPoolBitPosition) {
+
+ PULONG NextList;
+
+ //
+ // This is must succeed pool, don't free it, just
+ // add it to the front of the list.
+ //
+ // Note - only a single page can be released at a time.
+ //
+
+ NextList = (PULONG)StartingAddress;
+ *NextList = (ULONG)MmNonPagedMustSucceed;
+ MmNonPagedMustSucceed = StartingAddress;
+ return NumberOfPages;
+ }
+
+ if (MI_IS_PHYSICAL_ADDRESS (StartingAddress)) {
+
+ //
+            // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress));
+ if (StartPosition >= MmExpandedPoolBitPosition) {
+ PointerPte = Pfn1->PteAddress;
+ StartingAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+ }
+ } else {
+ PointerPte = MiGetPteAddress (StartingAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+
+ ASSERT (Pfn1->u3.e1.StartOfAllocation != 0);
+ Pfn1->u3.e1.StartOfAllocation = 0;
+
+#if DBG
+ if ((Pfn1->u3.e2.ReferenceCount > 1) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ DbgPrint ("MM:ALLOCPAGE - deleting pool locked for I/O %lx\n",
+                  Pfn1 - MmPfnDatabase);
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
+ }
+#endif //DBG
+
+ //
+ // Find end of allocation and release the pages.
+ //
+
+ while (Pfn1->u3.e1.EndOfAllocation == 0) {
+ if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) {
+ Pfn1 += 1;
+ } else {
+ PointerPte++;
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+ NumberOfPages++;
+#if DBG
+ if ((Pfn1->u3.e2.ReferenceCount > 1) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ DbgPrint ("MM:ALLOCPAGE - deleting pool locked for I/O %lx\n",
+                      Pfn1 - MmPfnDatabase);
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
+ }
+#endif //DBG
+ }
+
+ MmAllocatedNonPagedPool -= NumberOfPages;
+
+ Pfn1->u3.e1.EndOfAllocation = 0;
+#if DBG
+ RtlFillMemoryUlong (StartingAddress,
+ PAGE_SIZE * NumberOfPages,
+ 0x23456789);
+#endif //DBG
+
+ if (StartingAddress > MmNonPagedPoolExpansionStart) {
+
+ //
+ // This page was from the expanded pool, should
+ // it be freed?
+ //
+ // NOTE: all pages in the expanded pool area have PTEs
+ // so no physical address checks need to be performed.
+ //
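+        // Heuristic: blocks of more than three pages, or frees made
+        // while more than five nonpaged pool pages are already free,
+        // are returned to the free page list rather than kept on the
+        // nonpaged pool free list.
+        //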
+
+ if ((NumberOfPages > 3) || (MmNumberOfFreeNonPagedPool > 5)) {
+
+ //
+ // Free these pages back to the free page list.
+ //
+
+ MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);
+
+ PointerPte = MiGetPteAddress (StartingAddress);
+
+ //
+ // Return commitment.
+ //
+
+ MiReturnCommitment (NumberOfPages);
+
+ LOCK_PFN2 (OldIrql);
+
+ for (i=0; i < NumberOfPages; i++) {
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Set the pointer to PTE as empty so the page
+ // is deleted when the reference count goes to zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn1->u2.ShareCount == 1);
+ Pfn1->u2.ShareCount = 0;
+ MI_SET_PFN_DELETED (Pfn1);
+#if DBG
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+#endif //DBG
+ MiDecrementReferenceCount (PageFrameIndex);
+
+ (VOID)KeFlushSingleTb (StartingAddress,
+ TRUE,
+ TRUE,
+ (PHARDWARE_PTE)PointerPte,
+ ZeroKernelPte.u.Flush);
+ StartingAddress = (PVOID)((ULONG)StartingAddress +
+ PAGE_SIZE);
+ PointerPte += 1;
+ }
+
+ //
+ // Update the count of available resident pages.
+ //
+
+ MmResidentAvailablePages += NumberOfPages;
+
+ UNLOCK_PFN2(OldIrql);
+
+ PointerPte -= NumberOfPages;
+
+ MiReleaseSystemPtes (PointerPte,
+ NumberOfPages,
+ NonPagedPoolExpansion);
+
+ return NumberOfPages;
+ }
+ }
+
+ //
+ // Add the pages to the list of free pages.
+ //
+
+ MmNumberOfFreeNonPagedPool += NumberOfPages;
+
+ //
+ // Check to see if the next allocation is free.
+ //
+
+ i = NumberOfPages;
+
+ if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) {
+ Pfn1 += 1;
+ } else {
+ PointerPte += 1;
+ if (PointerPte->u.Hard.Valid == 1) {
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ } else {
+ Pfn1 = NULL;
+ }
+ }
+
+ if (Pfn1 != NULL) {
+ if (Pfn1->u3.e1.StartOfAllocation == 0) {
+
+ //
+ // This range of pages is free. Remove this entry
+ // from the list and add these pages to the current
+ // range being freed.
+ //
+
+ Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress
+ + (NumberOfPages << PAGE_SHIFT));
+ ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE);
+ ASSERT (Entry->Owner == Entry);
+#if DBG
+ {
+ PMMPTE DebugPte;
+ PMMPFN DebugPfn;
+ if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) {
+
+ //
+                // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Entry));
+ DebugPfn += Entry->Size;
+ ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1);
+ } else {
+ DebugPte = PointerPte + Entry->Size;
+ if (DebugPte->u.Hard.Valid == 1) {
+ DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber);
+ ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1);
+ }
+ }
+ }
+#endif //DBG
+
+ i += Entry->Size;
+ RemoveEntryList (&Entry->List);
+ }
+ }
+
+ //
+ // Check to see if the previous page is the end of an allocation.
+    // If it is not the end of an allocation, it must be free and
+ // therefore this allocation can be tagged onto the end of
+ // that allocation.
+ //
+
+ Entry = (PMMFREE_POOL_ENTRY)StartingAddress;
+
+ if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) {
+ Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (
+ (PVOID)((PCHAR)Entry - PAGE_SIZE)));
+ } else {
+ PointerPte -= NumberOfPages + 1;
+ if (PointerPte->u.Hard.Valid == 1) {
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ } else {
+ Pfn1 = NULL;
+ }
+ }
+ if (Pfn1 != NULL) {
+ if (Pfn1->u3.e1.EndOfAllocation == 0) {
+
+ //
+ // This range of pages is free, add these pages to
+ // this entry. The owner field points to the address
+ // of the list entry which is linked into the free pool
+ // pages list.
+ //
+
+ Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress - PAGE_SIZE);
+ ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE);
+ Entry = Entry->Owner;
+ ASSERT (Entry->Owner == Entry);
+
+ //
+ // If this entry became larger than MM_SMALL_ALLOCATIONS
+ // pages, move it to the tail of the list. This keeps the
+ // small allocations at the front of the list.
+ //
+
+ if ((Entry->Size < MM_SMALL_ALLOCATIONS) &&
+ (Entry->Size + i) >= MM_SMALL_ALLOCATIONS) {
+
+ RemoveEntryList (&Entry->List);
+ InsertTailList (&MmNonPagedPoolFreeListHead, &Entry->List);
+ }
+
+ //
+ // Add these pages to the previous entry.
+ //
+
+ Entry->Size += i;
+ }
+ }
+
+ if (Entry == (PMMFREE_POOL_ENTRY)StartingAddress) {
+
+ //
+ // This entry was not combined with the previous, insert it
+ // into the list.
+ //
+
+ Entry->Size = i;
+ if (Entry->Size < MM_SMALL_ALLOCATIONS) {
+
+ //
+ // Small number of pages, insert this at the head of the list.
+ //
+
+ InsertHeadList (&MmNonPagedPoolFreeListHead, &Entry->List);
+ } else {
+ InsertTailList (&MmNonPagedPoolFreeListHead, &Entry->List);
+ }
+ }
+
+ //
+ // Set the owner field in all these pages.
+ //
+
+ NextEntry = (PMMFREE_POOL_ENTRY)StartingAddress;
+ while (i > 0) {
+ NextEntry->Owner = Entry;
+#if DBG
+ NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
+#endif
+
+ NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
+ i -= 1;
+ }
+
+#if DBG
+ NextEntry = Entry;
+ for (i=0;i<Entry->Size ;i++ ) {
+ {
+ PMMPTE DebugPte;
+ PMMPFN DebugPfn;
+ if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) {
+
+ //
+            // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (NextEntry));
+ } else {
+
+ DebugPte = MiGetPteAddress (NextEntry);
+ DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber);
+ }
+ ASSERT (DebugPfn->u3.e1.StartOfAllocation == 0);
+ ASSERT (DebugPfn->u3.e1.EndOfAllocation == 0);
+ ASSERT (NextEntry->Owner == Entry);
+ NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE);
+ }
+ }
+#endif
+
+ return NumberOfPages;
+
+ } else {
+
+ //
+ // Paged pool. Need to verify start of allocation using
+ // end of allocation bitmap.
+ //
+
+ ASSERT (RtlCheckBit (MmPagedPoolAllocationMap, StartPosition));
+
+#if DBG
+ if (StartPosition > 0) {
+ if (RtlCheckBit (MmPagedPoolAllocationMap, StartPosition - 1)) {
+ if (!RtlCheckBit (MmEndOfPagedPoolBitmap, StartPosition - 1)) {
+
+ //
+ // In the middle of an allocation... bugcheck.
+ //
+
+ DbgPrint("paged pool in middle of allocation\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+ }
+ }
+#endif
+
+ i = StartPosition;
+ PointerPte = MmFirstPteForPagedPool + i;
+
+ //
+        // Find the last page of the allocation, then delete the
+        // pages, releasing any paging file space they occupy.
+ //
+
+ while (!RtlCheckBit (MmEndOfPagedPoolBitmap, i)) {
+ NumberOfPages++;
+ i++;
+ }
+
+ MiDeleteSystemPagableVm (PointerPte,
+ NumberOfPages,
+ MM_KERNEL_DEMAND_ZERO_PTE,
+ &PageFrameIndex);
+
+ //
+ // Clear the end of allocation bit in the bit map.
+ //
+
+ RtlClearBits (MmEndOfPagedPoolBitmap, i, 1L);
+ MiReturnCommitment (NumberOfPages);
+ MmPagedPoolCommit -= NumberOfPages;
+ MmAllocatedPagedPool -= NumberOfPages;
+
+ //
+ // Clear the allocation bits in the bit map.
+ //
+
+ RtlClearBits (
+ MmPagedPoolAllocationMap,
+ StartPosition,
+ NumberOfPages
+ );
+
+ MmPagedPoolHint = StartPosition;
+
+ return NumberOfPages;
+ }
+}
+
+VOID
+MiInitializeNonPagedPool (
+ PVOID StartOfNonPagedPool
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the NonPaged pool.
+
+ NonPaged Pool is linked together through the pages.
+
+Arguments:
+
+    StartOfNonPagedPool - Supplies the starting virtual address of the
+        nonpaged pool.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, during initialization.
+
+--*/
+
+{
+ ULONG PagesInPool;
+ ULONG Size;
+ PMMFREE_POOL_ENTRY FreeEntry;
+ PMMFREE_POOL_ENTRY FirstEntry;
+ PMMPTE PointerPte;
+ ULONG i;
+ PULONG ThisPage;
+ PULONG NextPage;
+
+ //
+ // Initialize the list head for free pages.
+ //
+
+ InitializeListHead (&MmNonPagedPoolFreeListHead);
+
+    //
+    // Initialize the must succeed pool (this occupies the first
+    // pages of the pool area) and allocate nonpaged pool for the
+    // NonPagedPoolMustSucceed pool.
+    //
+
+ MmNonPagedMustSucceed = (PCHAR)MmNonPagedPoolStart;
+
+ i = MmSizeOfNonPagedMustSucceed - PAGE_SIZE;
+
+ MmMustSucceedPoolBitPosition = BYTES_TO_PAGES(MmSizeOfNonPagedMustSucceed);
+
+ ThisPage = (PULONG)MmNonPagedMustSucceed;
+
+ while (i > 0) {
+ NextPage = (PULONG)((ULONG)ThisPage + PAGE_SIZE);
+ *ThisPage = (ULONG)NextPage;
+ ThisPage = NextPage;
+ i -= PAGE_SIZE;
+ }
+ *ThisPage = 0;
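+
+    //
+    // The must succeed pages now form a NULL terminated chain: the
+    // first ULONG of each page holds the address of the next page,
+    // and the final page holds zero.
+    //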
+
+ //
+ // Set up the remaining pages as non paged pool pages.
+    // NOTE that on MIPS the initial nonpaged pool could be physical,
+    // so use the StartOfNonPagedPool parameter to get the virtual
+ // address for building expanded pool.
+ //
+
+ ASSERT ((MmSizeOfNonPagedMustSucceed & (PAGE_SIZE - 1)) == 0);
+ FreeEntry = (PMMFREE_POOL_ENTRY)((PCHAR)MmNonPagedPoolStart +
+ MmSizeOfNonPagedMustSucceed);
+ FirstEntry = FreeEntry;
+
+ PagesInPool = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes -
+ MmSizeOfNonPagedMustSucceed);
+
+ //
+ // Set the location of expanded pool.
+ //
+
+ MmExpandedPoolBitPosition = BYTES_TO_PAGES (MmSizeOfNonPagedPoolInBytes);
+
+    MmNumberOfFreeNonPagedPool = PagesInPool;
+
+ InsertHeadList (&MmNonPagedPoolFreeListHead, &FreeEntry->List);
+
+ FreeEntry->Size = PagesInPool;
+#if DBG
+ FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
+#endif
+ FreeEntry->Owner = FirstEntry;
+
+ while (PagesInPool > 1) {
+ FreeEntry = (PMMFREE_POOL_ENTRY)((PCHAR)FreeEntry + PAGE_SIZE);
+#if DBG
+ FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
+#endif
+ FreeEntry->Owner = FirstEntry;
+ PagesInPool -= 1;
+ }
+
+ //
+ // Set up the system PTEs for nonpaged pool expansion.
+ //
+
+ PointerPte = MiGetPteAddress (MmNonPagedPoolExpansionStart);
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ Size = BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes -
+ MmSizeOfNonPagedPoolInBytes) - 1;
+
+ MiInitializeSystemPtes (PointerPte,
+ Size,
+ NonPagedPoolExpansion
+ );
+
+ //
+ // Build a guard PTE.
+ //
+
+ PointerPte += Size;
+ *PointerPte = ZeroKernelPte;
+
+ return;
+}
+
+#if DBG || (i386 && !FPO)
+
+//
+// This only works on checked builds, because the TraceLargeAllocs array is
+// kept in that case to keep track of page size pool allocations. Otherwise
+// we will call ExpSnapShotPoolPages with a page size pool allocation containing
+// arbitrary data and it will potentially go off in the weeds trying to interpret
+// it as a suballocated pool page. Ideally, there would be another bit map
+// that identified single page pool allocations so ExpSnapShotPoolPages would NOT
+// be called for those.
+//
+
+NTSTATUS
+MmSnapShotPool(
+ IN POOL_TYPE PoolType,
+ IN PMM_SNAPSHOT_POOL_PAGE SnapShotPoolPage,
+ IN PSYSTEM_POOL_INFORMATION PoolInformation,
+ IN ULONG Length,
+ IN OUT PULONG RequiredLength
+ )
+{
+ NTSTATUS Status;
+ NTSTATUS xStatus;
+ PCHAR p, pStart;
+ PVOID *pp;
+ ULONG Size;
+ ULONG BusyFlag;
+ ULONG CurrentPage, NumberOfPages;
+ PSYSTEM_POOL_ENTRY PoolEntryInfo;
+ PLIST_ENTRY Entry;
+ PMMFREE_POOL_ENTRY FreePageInfo;
+ ULONG StartPosition;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+
+ Status = STATUS_SUCCESS;
+ PoolEntryInfo = &PoolInformation->Entries[ 0 ];
+ if (PoolType == PagedPool) {
+ PoolInformation->TotalSize = (ULONG)MmPagedPoolEnd -
+ (ULONG)MmPagedPoolStart;
+ PoolInformation->FirstEntry = MmPagedPoolStart;
+ p = MmPagedPoolStart;
+ CurrentPage = 0;
+ while (p < (PCHAR)MmPagedPoolEnd) {
+ pStart = p;
+ BusyFlag = RtlCheckBit( MmPagedPoolAllocationMap, CurrentPage );
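+
+            //
+            // Accumulate a run of pages whose allocation state
+            // matches BusyFlag; an end of allocation bit terminates
+            // a busy run.
+            //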
+            while ( !(BusyFlag ^ RtlCheckBit( MmPagedPoolAllocationMap, CurrentPage )) ) {
+ p += PAGE_SIZE;
+ if (RtlCheckBit( MmEndOfPagedPoolBitmap, CurrentPage )) {
+ CurrentPage++;
+ break;
+ }
+
+ CurrentPage++;
+ if (p > (PCHAR)MmPagedPoolEnd) {
+ break;
+ }
+ }
+
+ Size = p - pStart;
+ if (BusyFlag) {
+ xStatus = (*SnapShotPoolPage)( pStart,
+ Size,
+ PoolInformation,
+ &PoolEntryInfo,
+ Length,
+ RequiredLength
+ );
+ if ( xStatus != STATUS_COMMITMENT_LIMIT ) {
+ Status = xStatus;
+ }
+ }
+ else {
+ PoolInformation->NumberOfEntries += 1;
+ *RequiredLength += sizeof( SYSTEM_POOL_ENTRY );
+ if (Length < *RequiredLength) {
+ Status = STATUS_INFO_LENGTH_MISMATCH;
+ }
+ else {
+ PoolEntryInfo->Allocated = FALSE;
+ PoolEntryInfo->Size = Size;
+ PoolEntryInfo->AllocatorBackTraceIndex = 0;
+ PoolEntryInfo->TagUlong = 0;
+ PoolEntryInfo++;
+ Status = STATUS_SUCCESS;
+ }
+ }
+ }
+ }
+ else
+ if (PoolType == NonPagedPool) {
+ PoolInformation->TotalSize = MmSizeOfNonPagedPoolInBytes;
+ PoolInformation->FirstEntry = MmNonPagedPoolStart;
+
+ p = MmNonPagedPoolStart;
+ while (p < (PCHAR)MmNonPagedPoolEnd) {
+
+ //
+ // NonPaged pool is linked together through the pages themselves.
+ //
+
+ pp = (PVOID *)MmNonPagedMustSucceed;
+ while (pp) {
+ if (p == (PCHAR)pp) {
+ PoolInformation->NumberOfEntries += 1;
+ *RequiredLength += sizeof( SYSTEM_POOL_ENTRY );
+ if (Length < *RequiredLength) {
+ Status = STATUS_INFO_LENGTH_MISMATCH;
+ }
+ else {
+ PoolEntryInfo->Allocated = FALSE;
+ PoolEntryInfo->Size = PAGE_SIZE;
+ PoolEntryInfo->AllocatorBackTraceIndex = 0;
+ PoolEntryInfo->TagUlong = 0;
+ PoolEntryInfo++;
+ Status = STATUS_SUCCESS;
+ }
+
+ p += PAGE_SIZE;
+ pp = (PVOID *)MmNonPagedMustSucceed;
+ }
+ else {
+ pp = (PVOID *)*pp;
+ }
+ }
+
+ Entry = MmNonPagedPoolFreeListHead.Flink;
+ while (Entry != &MmNonPagedPoolFreeListHead) {
+ FreePageInfo = CONTAINING_RECORD( Entry,
+ MMFREE_POOL_ENTRY,
+ List
+ );
+
+ ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);
+ if (p == (PCHAR)FreePageInfo) {
+ Size = (FreePageInfo->Size * PAGE_SIZE);
+ PoolInformation->NumberOfEntries += 1;
+ *RequiredLength += sizeof( SYSTEM_POOL_ENTRY );
+ if (Length < *RequiredLength) {
+ Status = STATUS_INFO_LENGTH_MISMATCH;
+ }
+ else {
+ PoolEntryInfo->Allocated = FALSE;
+ PoolEntryInfo->Size = Size;
+ PoolEntryInfo->AllocatorBackTraceIndex = 0;
+ PoolEntryInfo->TagUlong = 0;
+ PoolEntryInfo++;
+ Status = STATUS_SUCCESS;
+ }
+
+ p += Size;
+ break;
+ }
+
+ Entry = FreePageInfo->List.Flink;
+ }
+
+ StartPosition = BYTES_TO_PAGES((ULONG)p -
+ (ULONG)MmPageAlignedPoolBase[NonPagedPool]);
+ if (StartPosition >= MmExpandedPoolBitPosition) {
+ break;
+ }
+
+ if (StartPosition < MmMustSucceedPoolBitPosition) {
+ Size = PAGE_SIZE;
+ xStatus = (*SnapShotPoolPage)( p,
+ Size,
+ PoolInformation,
+ &PoolEntryInfo,
+ Length,
+ RequiredLength
+ );
+ if ( xStatus != STATUS_COMMITMENT_LIMIT ) {
+ Status = xStatus;
+ }
+ }
+ else {
+ if (MI_IS_PHYSICAL_ADDRESS(p)) {
+ //
+                    // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+ PointerPte = NULL;
+ Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (p));
+ } else {
+ PointerPte = MiGetPteAddress (p);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+ ASSERT (Pfn1->u3.e1.StartOfAllocation != 0);
+
+ //
+ // Find end of allocation and determine size.
+ //
+
+ NumberOfPages = 1;
+ while (Pfn1->u3.e1.EndOfAllocation == 0) {
+ NumberOfPages++;
+ if (PointerPte == NULL) {
+ Pfn1 += 1;
+ }
+ else {
+ PointerPte++;
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+ }
+
+ Size = NumberOfPages * PAGE_SIZE;
+ xStatus = (*SnapShotPoolPage)( p,
+ Size,
+ PoolInformation,
+ &PoolEntryInfo,
+ Length,
+ RequiredLength
+ );
+ if ( xStatus != STATUS_COMMITMENT_LIMIT ) {
+ Status = xStatus;
+ }
+ }
+
+ p += Size;
+ }
+ }
+ else {
+ Status = STATUS_NOT_IMPLEMENTED;
+ }
+
+ return( Status );
+}
+
+
+ULONG MmSpecialPoolTag;
+PVOID MmSpecialPoolStart;
+PVOID MmSpecialPoolEnd;
+PMMPTE SpecialPoolFirstPte;
+PMMPTE SpecialPoolLastPte;
+
+VOID
+MmInitializeSpecialPool (
+ VOID
+ )
+
+{
+ KIRQL OldIrql;
+ PMMPTE pte;
+
+ LOCK_PFN (OldIrql);
+ SpecialPoolFirstPte = MiReserveSystemPtes (25000, SystemPteSpace, 0, 0, TRUE);
+ UNLOCK_PFN (OldIrql);
+
+    //
+    // Build the list of PTE pairs.
+    //
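+    // Every other PTE is deliberately left out of the list, so each
+    // special pool allocation is followed by an unmapped guard page;
+    // a reference past the end of an allocation faults immediately.
+    //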
+
+ SpecialPoolLastPte = SpecialPoolFirstPte + 25000;
+ MmSpecialPoolStart = MiGetVirtualAddressMappedByPte (SpecialPoolFirstPte);
+
+ pte = SpecialPoolFirstPte;
+ while (pte < SpecialPoolLastPte) {
+ pte->u.List.NextEntry = ((pte+2) - MmSystemPteBase);
+ pte += 2;
+ }
+ pte -= 2;
+ pte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
+ SpecialPoolLastPte = pte;
+ MmSpecialPoolEnd = MiGetVirtualAddressMappedByPte (SpecialPoolLastPte + 1);
+}
+
+
+PVOID
+MmAllocateSpecialPool (
+ IN ULONG NumberOfBytes,
+ IN ULONG Tag
+ )
+
+{
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PMMPTE PointerPte;
+ KIRQL OldIrql2;
+ PULONG Entry;
+
+
+ TempPte = ValidKernelPte;
+
+ LOCK_PFN2 (OldIrql2);
+ if (MmAvailablePages == 0) {
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+
+ PointerPte = SpecialPoolFirstPte;
+
+ ASSERT (SpecialPoolFirstPte->u.List.NextEntry != MM_EMPTY_PTE_LIST);
+
+ SpecialPoolFirstPte = PointerPte->u.List.NextEntry + MmSystemPteBase;
+
+ PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+ MiInitializePfn (PageFrameIndex, PointerPte, 1);
+ UNLOCK_PFN2 (OldIrql2);
+
+ Entry = (PULONG)MiGetVirtualAddressMappedByPte (PointerPte);
+
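+    //
+    // Place the block at the end of the page (8 byte aligned) so an
+    // overrun runs into the unmapped guard page which follows.  The
+    // two ULONGs preceding the returned pointer hold the pool tag
+    // and the allocation size.
+    //
+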
+ Entry = (PULONG)(PVOID)(((ULONG)Entry + (PAGE_SIZE - (NumberOfBytes + 8))) &
+ 0xfffffff8L);
+
+ *Entry = MmSpecialPoolTag;
+ Entry += 1;
+ *Entry = NumberOfBytes;
+ Entry += 1;
+ return (PVOID)(Entry);
+}
+
+VOID
+MmFreeSpecialPool (
+ IN PVOID P
+ )
+
+{
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PULONG Entry;
+ KIRQL OldIrql;
+
+ Entry = (PULONG)((PCH)P - 8);
+
+ PointerPte = MiGetPteAddress (P);
+
+ if (PointerPte->u.Hard.Valid == 0) {
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+
+ ASSERT (*Entry == MmSpecialPoolTag);
+
+ KeSweepDcache(TRUE);
+
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ MI_SET_PFN_DELETED (Pfn1);
+
+ LOCK_PFN2 (OldIrql);
+ MiDecrementShareCount (PointerPte->u.Hard.PageFrameNumber);
+ KeFlushSingleTb (PAGE_ALIGN(P),
+ TRUE,
+ TRUE,
+ (PHARDWARE_PTE)PointerPte,
+ ZeroKernelPte.u.Flush);
+
+ ASSERT (SpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
+ SpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;
+
+ SpecialPoolLastPte = PointerPte;
+ SpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
+
+ UNLOCK_PFN2 (OldIrql);
+
+ return;
+}
+
+#endif // DBG || (i386 && !FPO)
+
diff --git a/private/ntos/mm/allocvm.c b/private/ntos/mm/allocvm.c
new file mode 100644
index 000000000..380ffd6f8
--- /dev/null
+++ b/private/ntos/mm/allocvm.c
@@ -0,0 +1,1615 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ allocvm.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtAllocateVirtualMemory service.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if DBG
+PEPROCESS MmWatchProcess;
+VOID MmFooBar(VOID);
+#endif // DBG
+
+extern ULONG MmSharedCommit;
+
+ULONG MMVADKEY = ' daV'; //Vad
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtAllocateVirtualMemory)
+#endif
+
+NTSTATUS
+MiResetVirtualMemory (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ );
+
+
+NTSTATUS
+NtAllocateVirtualMemory(
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN ULONG ZeroBits,
+ IN OUT PULONG RegionSize,
+ IN ULONG AllocationType,
+ IN ULONG Protect
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates a region of pages within the virtual address
+ space of a subject process.
+
+Arguments:
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - Supplies a pointer to a variable that will receive
+ the base address of the allocated region of pages.
+ If the initial value of this argument is not null,
+ then the region will be allocated starting at the
+ specified virtual address rounded down to the next
+ host page size address boundary. If the initial
+ value of this argument is null, then the operating
+ system will determine where to allocate the
+ region.
+
+ ZeroBits - Supplies the number of high order address bits that
+ must be zero in the base address of the section
+ view. The value of this argument must be less than
+ 21 and is only used when the operating system
+ determines where to allocate the view (i.e. when
+ BaseAddress is null).
+
+ RegionSize - Supplies a pointer to a variable that will receive
+ the actual size in bytes of the allocated region
+ of pages. The initial value of this argument
+ specifies the size in bytes of the region and is
+ rounded up to the next host page size boundary.
+
+ AllocationType - Supplies a set of flags that describe the type
+ of allocation that is to be performed for the
+ specified region of pages. Flags are:
+
+
+ MEM_COMMIT - The specified region of pages is to
+ be committed.
+
+ MEM_RESERVE - The specified region of pages is to
+ be reserved.
+
+ MEM_TOP_DOWN - The specified region should be created at the
+ highest virtual address possible based on ZeroBits.
+
+ MEM_RESET - Reset the state of the specified region so
+ that if the pages are in page paging file, they
+ are discarded and pages of zeroes are brought in.
+ If the pages are in memory and modified, they are marked
+ as not modified so they will not be written out to
+ the paging file. The contents are NOT zeroed.
+
+ The Protect argument is ignored, but a valid protection
+ must be specified.
+
+ Protect - Supplies the protection desired for the committed
+ region of pages.
+
+ Protect Values:
+
+
+ PAGE_NOACCESS - No access to the committed region
+ of pages is allowed. An attempt to read,
+ write, or execute the committed region
+ results in an access violation (i.e. a GP
+ fault).
+
+ PAGE_EXECUTE - Execute access to the committed
+ region of pages is allowed. An attempt to
+ read or write the committed region results in
+ an access violation.
+
+ PAGE_READONLY - Read only and execute access to the
+ committed region of pages is allowed. An
+ attempt to write the committed region results
+ in an access violation.
+
+ PAGE_READWRITE - Read, write, and execute access to
+ the committed region of pages is allowed. If
+ write access to the underlying section is
+ allowed, then a single copy of the pages are
+ shared. Otherwise the pages are shared read
+ only/copy on write.
+
+ PAGE_NOCACHE - The region of pages should be allocated
+                       as non-cacheable.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
+
+{
+ PMMVAD Vad;
+ PMMVAD FoundVad;
+ PEPROCESS Process;
+ KPROCESSOR_MODE PreviousMode;
+ PVOID StartingAddress;
+ PVOID EndingAddress;
+ NTSTATUS Status;
+ PVOID TopAddress;
+ PVOID CapturedBase;
+ ULONG CapturedRegionSize;
+ PMMPTE PointerPte;
+ PMMPTE CommitLimitPte;
+ ULONG ProtectionMask;
+ PMMPTE LastPte;
+ PMMPTE PointerPde;
+ PMMPTE StartingPte;
+ MMPTE TempPte;
+ ULONG OldProtect;
+ LONG QuotaCharge;
+ ULONG QuotaFree;
+ ULONG CopyOnWriteCharge;
+ BOOLEAN PageFileChargeSucceeded;
+ BOOLEAN Attached = FALSE;
+ MMPTE DecommittedPte;
+ ULONG ChangeProtection;
+
+ PAGED_CODE();
+
+ //
+ // Check the zero bits argument for correctness.
+ //
+
+ if (ZeroBits > 21) {
+ return STATUS_INVALID_PARAMETER_3;
+ }
+
+ //
+ // Check the AllocationType for correctness.
+ //
+
+ if ((AllocationType & ~(MEM_COMMIT | MEM_RESERVE |
+ MEM_TOP_DOWN | MEM_RESET)) != 0) {
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ //
+ // One of MEM_COMMIT, MEM_RESET or MEM_RESERVE must be set.
+ //
+
+ if ((AllocationType & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)) == 0) {
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ if ((AllocationType & MEM_RESET) && (AllocationType != MEM_RESET)) {
+
+ //
+ // MEM_RESET may not be used with any other flag.
+ //
+
+ return STATUS_INVALID_PARAMETER_5;
+ }
+
+ //
+ // Check the protection field. This could raise an exception.
+ //
+
+ try {
+ ProtectionMask = MiMakeProtectionMask (Protect);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ PreviousMode = KeGetPreviousMode();
+ ChangeProtection = FALSE;
+
+ //
+ // Establish an exception handler, probe the specified addresses
+ // for write access and capture the initial values.
+ //
+
+ try {
+
+ if (PreviousMode != KernelMode) {
+
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (RegionSize);
+ }
+
+ //
+ // Capture the base address.
+ //
+
+ CapturedBase = *BaseAddress;
+
+ //
+ // Capture the region size.
+ //
+
+ CapturedRegionSize = *RegionSize;
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( MmWatchProcess ) {
+ ;
+ } else {
+ DbgPrint("allocvm process handle %lx base address %lx zero bits %lx\n",
+ ProcessHandle, CapturedBase, ZeroBits);
+ DbgPrint(" region size %lx alloc type %lx protect %lx\n",
+ CapturedRegionSize, AllocationType, Protect);
+ }
+ }
+#endif
+
+ //
+ // Make sure the specified starting and ending addresses are
+ // within the user part of the virtual address space.
+ //
+
+ if (CapturedBase > MM_HIGHEST_VAD_ADDRESS) {
+
+ //
+ // Invalid base address.
+ //
+
+ return STATUS_INVALID_PARAMETER_2;
+ }
+
+ if ((((ULONG)MM_HIGHEST_VAD_ADDRESS + 1) - (ULONG)CapturedBase) <
+ CapturedRegionSize) {
+
+ //
+ // Invalid region size;
+ //
+
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ if (CapturedRegionSize == 0) {
+
+ //
+ // Region size cannot be 0.
+ //
+
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ //
+ // Reference the specified process handle for VM_OPERATION access.
+ //
+
+ if ( ProcessHandle == NtCurrentProcess() ) {
+ Process = PsGetCurrentProcess();
+ } else {
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+ }
+
+ //
+ // If the specified process is not the current process, attach
+ // to the specified process.
+ //
+
+ if (PsGetCurrentProcess() != Process) {
+ KeAttachProcess (&Process->Pcb);
+ Attached = TRUE;
+ }
+
+ //
+ // Get the address creation mutex to block multiple threads from
+ // creating or deleting address space at the same time and
+ // get the working set mutex so virtual address descriptors can
+ // be inserted and walked. Block APCs so an APC which takes a page
+ // fault does not corrupt various structures.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (Process->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
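+
+    //
+    // Two major paths follow: a NULL base address or a MEM_RESERVE
+    // request creates a new virtual address descriptor, while any
+    // other request commits (or resets) pages within an existing one.
+    //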
+
+ if ((CapturedBase == NULL) || (AllocationType & MEM_RESERVE)) {
+
+ //
+ // PAGE_WRITECOPY is not valid for private pages.
+ //
+
+ if ((Protect & PAGE_WRITECOPY) ||
+ (Protect & PAGE_EXECUTE_WRITECOPY)) {
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto ErrorReturn;
+ }
+
+ //
+ // Reserve the address space.
+ //
+
+ if (CapturedBase == NULL) {
+
+ //
+ // No base address was specified. This MUST be a reserve or
+ // reserve and commit.
+ //
+
+ CapturedRegionSize = ROUND_TO_PAGES (CapturedRegionSize);
+
+ //
+ // If the zero bits is greater than 2, calculate the
+ // proper starting value, for values of 0, 1, and 2, use
+ // the highest address.
+ //
+ // NOTE THIS IS ONLY TRUE FOR MACHINES WITH 2GB USER VA.
+ //
+
+ if (ZeroBits >= 2) {
+ TopAddress = (PVOID)((ULONG)0xFFFFFFFF >> ZeroBits);
+ } else {
+ TopAddress = (PVOID)MM_HIGHEST_VAD_ADDRESS;
+ }
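+
+            //
+            // For example (editor's illustration): a ZeroBits value
+            // of 3 yields a TopAddress of 0x1FFFFFFF, forcing the
+            // high three bits of the returned base address to be zero.
+            //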
+
+ //
+ // Establish exception handler as MiFindEmptyAddressRange
+            // will raise an exception if it fails.
+ //
+
+ try {
+
+ if (AllocationType & MEM_TOP_DOWN) {
+
+ //
+ // Start from the top of memory downward.
+ //
+
+ StartingAddress = MiFindEmptyAddressRangeDown (
+ CapturedRegionSize,
+ TopAddress,
+ X64K);
+
+ } else {
+
+ StartingAddress = MiFindEmptyAddressRange (
+ CapturedRegionSize,
+ X64K,
+ ZeroBits );
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Status = GetExceptionCode();
+ goto ErrorReturn;
+ }
+
+ //
+ // Calculate the ending address based on the top address.
+ //
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ CapturedRegionSize - 1L) | (PAGE_SIZE - 1L));
+
+ if (EndingAddress > TopAddress) {
+
+ //
+ // The allocation does not honor the zero bits argument.
+ //
+
+ Status = STATUS_NO_MEMORY;
+ goto ErrorReturn;
+ }
+
+ } else {
+
+ //
+ // A non-NULL base address was specified. Check to make sure
+ // the specified base address to ending address is currently
+ // unused.
+ //
+
+ EndingAddress = (PVOID)(((ULONG)CapturedBase +
+ CapturedRegionSize - 1L) | (PAGE_SIZE - 1L));
+
+ //
+ // Align the starting address on a 64k boundary.
+ //
+
+ StartingAddress = (PVOID)MI_64K_ALIGN(CapturedBase);
+
+ //
+        // See if a VAD overlaps with this starting/ending address pair.
+ //
+
+ if (MiCheckForConflictingVad (StartingAddress, EndingAddress) !=
+ (PMMVAD)NULL) {
+
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto ErrorReturn;
+ }
+ }
+
+ //
+ // Calculate the page file quota for this address range.
+ //
+
+ if (AllocationType & MEM_COMMIT) {
+ QuotaCharge = (LONG)(BYTES_TO_PAGES ((ULONG)EndingAddress -
+ (ULONG)StartingAddress));
+ } else {
+ QuotaCharge = 0;
+ }
+
+ //
+        // An unoccupied address range has been found, build the virtual
+ // address descriptor to describe this range.
+ //
+
+ //
+ // Establish an exception handler and attempt to allocate
+ // the pool and charge quota. Note that the InsertVad routine
+ // will also charge quota which could raise an exception.
+ //
+
+ try {
+
+ Vad = (PMMVAD)NULL;
+ Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMVAD_SHORT),
+ 'SdaV');
+
+ Vad->StartingVa = StartingAddress;
+ Vad->EndingVa = EndingAddress;
+
+ Vad->u.LongFlags = 0;
+ if (AllocationType & MEM_COMMIT) {
+ Vad->u.VadFlags.MemCommit = 1;
+ }
+
+ Vad->u.VadFlags.Protection = ProtectionMask;
+ Vad->u.VadFlags.PrivateMemory = 1;
+
+ Vad->u.VadFlags.CommitCharge = (ULONG)QuotaCharge;
+
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ if (Vad != (PMMVAD)NULL) {
+
+ //
+                // The pool allocation succeeded, but the quota charge
+                // in InsertVad failed; deallocate the pool and return
+                // an error.
+ //
+
+ ExFreePool (Vad);
+ Status = GetExceptionCode();
+ } else {
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ }
+ goto ErrorReturn;
+ }
+
+ //
+ // Unlock the working set lock, page faults can now be taken.
+ //
+
+ UNLOCK_WS (Process);
+
+ //
+ // Update the current virtual size in the process header, the
+ // address space lock protects this operation.
+ //
+
+ CapturedRegionSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1L;
+ Process->VirtualSize += CapturedRegionSize;
+
+ if (Process->VirtualSize > Process->PeakVirtualSize) {
+ Process->PeakVirtualSize = Process->VirtualSize;
+ }
+
+ //
+ // Release the address space lock, lower IRQL, detach, and dereference
+ // the process object.
+ //
+
+ UNLOCK_ADDRESS_SPACE(Process);
+ if (Attached) {
+ KeDetachProcess();
+ }
+
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (Process);
+ }
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *RegionSize = CapturedRegionSize;
+ *BaseAddress = StartingAddress;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // Return success at this point even if the results
+ // cannot be written.
+ //
+
+ NOTHING;
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( MmWatchProcess ) {
+ if ( MmWatchProcess == PsGetCurrentProcess() ) {
+ DbgPrint("\n+++ ALLOC Type %lx Base %lx Size %lx\n",
+ AllocationType,StartingAddress, CapturedRegionSize);
+ MmFooBar();
+ }
+ } else {
+ DbgPrint("return allocvm status %lx baseaddr %lx size %lx\n",
+ Status, StartingAddress, CapturedRegionSize);
+ }
+ }
+#endif
+
+#if DBG
+ if (RtlAreLogging( RTL_EVENT_CLASS_VM )) {
+ RtlLogEvent( MiAllocVmEventId,
+ RTL_EVENT_CLASS_VM,
+ StartingAddress,
+ CapturedRegionSize,
+ AllocationType,
+                     Protect
+ );
+
+ }
+#endif // DBG
+
+ return STATUS_SUCCESS;
+
+ } else {
+
+ //
+ // Commit previously reserved pages. Note that these pages could
+ // be either private or a section.
+ //
+
+ if (AllocationType == MEM_RESET) {
+
+ //
+            // Round to page boundaries so good data is not reset.
+ //
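+            // The range shrinks inward: the start is rounded up and
+            // the end rounded down, so a partial page at either
+            // boundary is left intact rather than reset.
+            //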
+
+ EndingAddress = (PVOID)((ULONG)PAGE_ALIGN ((ULONG)CapturedBase +
+ CapturedRegionSize) - 1);
+ StartingAddress = (PVOID)PAGE_ALIGN((PUCHAR)CapturedBase + PAGE_SIZE - 1);
+ } else {
+ EndingAddress = (PVOID)(((ULONG)CapturedBase +
+ CapturedRegionSize - 1) | (PAGE_SIZE - 1));
+ StartingAddress = (PVOID)PAGE_ALIGN(CapturedBase);
+ }
+
+ CapturedRegionSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1;
+
+ FoundVad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+
+ if (FoundVad == (PMMVAD)NULL) {
+
+ //
+ // No virtual address is reserved at the specified base address,
+ // return an error.
+ //
+
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto ErrorReturn;
+ }
+
+ //
+ // Ensure that the starting and ending addresses are all within
+ // the same virtual address descriptor.
+ //
+
+ if ((StartingAddress < FoundVad->StartingVa) ||
+ (EndingAddress > FoundVad->EndingVa)) {
+
+ //
+            // Not within the section virtual address descriptor,
+ // return an error.
+ //
+
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto ErrorReturn;
+ }
+
+ if (AllocationType == MEM_RESET) {
+ Status = MiResetVirtualMemory (StartingAddress,
+ EndingAddress,
+ FoundVad,
+ Process);
+ goto done;
+
+ } else if (FoundVad->u.VadFlags.PrivateMemory == 0) {
+
+ if (FoundVad->ControlArea->FilePointer != NULL) {
+
+ //
+ // Only page file backed sections can be committed.
+ //
+
+ Status = STATUS_ALREADY_COMMITTED;
+ goto ErrorReturn;
+ }
+
+ //
+ // The no cache option is not allowed for sections.
+ //
+
+ if (Protect & PAGE_NOCACHE) {
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto ErrorReturn;
+ }
+
+ if (FoundVad->u.VadFlags.NoChange == 1) {
+
+ //
+ // An attempt is made at changing the protection
+ // of a SEC_NO_CHANGE section.
+ //
+
+ Status = MiCheckSecuredVad (FoundVad,
+ CapturedBase,
+ CapturedRegionSize,
+ ProtectionMask);
+
+ if (!NT_SUCCESS (Status)) {
+ goto ErrorReturn;
+ }
+ }
+
+ StartingPte = MiGetProtoPteAddress (FoundVad, StartingAddress);
+ PointerPte = StartingPte;
+ LastPte = MiGetProtoPteAddress (FoundVad, EndingAddress);
+
+ UNLOCK_WS (Process);
+
+ ExAcquireFastMutex (&MmSectionCommitMutex);
+
+#if 0
+ if (AllocationType & MEM_CHECK_COMMIT_STATE) {
+
+ //
+ // Make sure none of the pages are already committed.
+ //
+
+ while (PointerPte <= LastPte) {
+
+ //
+ // Check to see if the prototype PTE is committed.
+                // Note that prototype PTEs cannot be decommitted, so
+                // the PTE only needs to be checked for zeroes.
+                //
+
+ if (PointerPte->u.Long != 0) {
+ ExReleaseFastMutex (&MmSectionCommitMutex);
+ Status = STATUS_ALREADY_COMMITTED;
+ goto ErrorReturn1;
+ }
+ PointerPte += 1;
+ }
+ }
+#endif //0
+
+ PointerPte = StartingPte;
+
+
+ //
+ // Check to ensure these pages can be committed if this
+ // is a page file backed segment. Note that page file quota
+ // has already been charged for this.
+ //
+
+ QuotaCharge = 1 + LastPte - StartingPte;
+
+ CopyOnWriteCharge = 0;
+
+ if (MI_IS_PTE_PROTECTION_COPY_WRITE(ProtectionMask)) {
+
+ //
+ // If the protection is copy on write, charge for
+ // the copy on writes.
+ //
+
+ CopyOnWriteCharge = (ULONG)QuotaCharge;
+ }
+
+ //
+ // Charge commitment for the range. Establish an
+ // exception handler as this could raise an exception.
+ //
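+            // The charge is attempted twice: first for the full range
+            // and then, if that fails, for only the pages which are
+            // not already committed.  A second failure is returned to
+            // the caller.
+            //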
+
+ QuotaFree = 0;
+ Status = STATUS_SUCCESS;
+
+ for (; ; ) {
+ try {
+ PageFileChargeSucceeded = FALSE;
+ MiChargePageFileQuota ((ULONG)CopyOnWriteCharge, Process);
+
+ PageFileChargeSucceeded = TRUE;
+ MiChargeCommitment ((ULONG)QuotaCharge + CopyOnWriteCharge,
+ NULL);
+ break;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // An exception has occurred during the charging
+ // of commitment. Release the held mutexes and return
+ // the exception status to the user.
+ //
+
+ if (PageFileChargeSucceeded) {
+ MiReturnPageFileQuota ((ULONG)CopyOnWriteCharge, Process);
+ }
+
+ if (Status != STATUS_SUCCESS) {
+
+ //
+ // We have already tried for the precise charge,
+ // return an error.
+ //
+
+ ExReleaseFastMutex (&MmSectionCommitMutex);
+ goto ErrorReturn1;
+ }
+
+ //
+ // Quota charge failed, calculate the exact quota
+ // taking into account pages that may already be
+ // committed and retry the operation.
+
+ while (PointerPte <= LastPte) {
+
+ //
+ // Check to see if the prototype PTE is committed.
+                        // Note that prototype PTEs cannot be decommitted, so
+                        // the PTE only needs to be checked for zeroes.
+                        //
+
+ if (PointerPte->u.Long != 0) {
+ QuotaFree -= 1;
+ }
+ PointerPte += 1;
+ }
+
+ PointerPte = StartingPte;
+
+ QuotaCharge += QuotaFree;
+ Status = GetExceptionCode();
+ }
+ }
+
+ FoundVad->ControlArea->Segment->NumberOfCommittedPages +=
+ (ULONG)QuotaCharge;
+
+ FoundVad->u.VadFlags.CommitCharge += CopyOnWriteCharge;
+ Process->CommitCharge += CopyOnWriteCharge;
+ MmSharedCommit += QuotaCharge;
+
+ //
+ // Commit all the pages.
+ //
+
+ TempPte = FoundVad->ControlArea->Segment->SegmentPteTemplate;
+ while (PointerPte <= LastPte) {
+
+ if (PointerPte->u.Long != 0) {
+
+ //
+ // Page is already committed, back out commitment.
+ //
+
+ QuotaFree += 1;
+ } else {
+ *PointerPte = TempPte;
+ }
+ PointerPte += 1;
+ }
+
+ ExReleaseFastMutex (&MmSectionCommitMutex);
+
+ if (QuotaFree != 0) {
+ MiReturnCommitment (
+ (CopyOnWriteCharge ? 2*QuotaFree : QuotaFree));
+ FoundVad->ControlArea->Segment->NumberOfCommittedPages -= QuotaFree;
+ MmSharedCommit -= QuotaFree;
+ ASSERT ((LONG)FoundVad->ControlArea->Segment->NumberOfCommittedPages >= 0);
+
+ if (CopyOnWriteCharge != 0) {
+ FoundVad->u.VadFlags.CommitCharge -= QuotaFree;
+ Process->CommitCharge -= QuotaFree;
+ MiReturnPageFileQuota (
+ QuotaFree,
+ Process);
+ }
+ ASSERT ((LONG)FoundVad->u.VadFlags.CommitCharge >= 0);
+ }
+
+ //
+ // Change all the protection to be protected as specified.
+ //
+
+ LOCK_WS (Process);
+
+ MiSetProtectionOnSection (Process,
+ FoundVad,
+ StartingAddress,
+ EndingAddress,
+ Protect,
+ &OldProtect,
+ TRUE);
+
+ UNLOCK_WS (Process);
+
+ UNLOCK_ADDRESS_SPACE(Process);
+ if (Attached) {
+ KeDetachProcess();
+ }
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (Process);
+ }
+
+ *RegionSize = CapturedRegionSize;
+ *BaseAddress = StartingAddress;
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( MmWatchProcess ) {
+ if ( MmWatchProcess == PsGetCurrentProcess() ) {
+ DbgPrint("\n+++ ALLOC Type %lx Base %lx Size %lx\n",
+ AllocationType,StartingAddress, CapturedRegionSize);
+ MmFooBar();
+ }
+ } else {
+ DbgPrint("return allocvm status %lx baseaddr %lx size %lx\n",
+                Status, StartingAddress, CapturedRegionSize);
+ }
+ }
+#endif
+
+#if DBG
+ if (RtlAreLogging( RTL_EVENT_CLASS_VM )) {
+ RtlLogEvent( MiAllocVmEventId,
+ RTL_EVENT_CLASS_VM,
+ StartingAddress,
+ CapturedRegionSize,
+ AllocationType,
+ Protect
+ );
+
+ }
+#endif // DBG
+
+ return STATUS_SUCCESS;
+
+ } else {
+
+ //
+ // PAGE_WRITECOPY is not valid for private pages.
+ //
+
+ if ((Protect & PAGE_WRITECOPY) ||
+ (Protect & PAGE_EXECUTE_WRITECOPY)) {
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto ErrorReturn;
+ }
+
+ //
+ // Ensure none of the pages are already committed as described
+ // in the virtual address descriptor.
+ //
+#if 0
+ if (AllocationType & MEM_CHECK_COMMIT_STATE) {
+ if ( !MiIsEntireRangeDecommitted(StartingAddress,
+ EndingAddress,
+ FoundVad,
+ Process)) {
+
+ //
+ // Previously reserved pages have been committed, or
+ // an error occurred, release mutex and return status.
+ //
+
+ Status = STATUS_ALREADY_COMMITTED;
+ goto ErrorReturn;
+ }
+ }
+#endif //0
+
+ //
+ // The address range has not been committed, commit it now.
+ // Note, that for private pages, commitment is handled by
+ // explicitly updating PTEs to contain Demand Zero entries.
+ //
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ //
+ // Check to ensure these pages can be committed.
+ //
+
+ QuotaCharge = 1 + LastPte - PointerPte;
+
+ //
+ // Charge quota and commitment for the range. Establish an
+ // exception handler as this could raise an exception.
+ //
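+            // As in the section backed case above, the charge is
+            // retried once with the precise count;
+            // MiCalculatePageCommitment reports how many pages in the
+            // range are already committed.
+            //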
+
+ QuotaFree = 0;
+ Status = STATUS_SUCCESS;
+
+ for (; ; ) {
+ try {
+ PageFileChargeSucceeded = FALSE;
+
+ MiChargeCommitment ((ULONG)QuotaCharge, Process);
+ PageFileChargeSucceeded = TRUE;
+ MiChargePageFileQuota ((ULONG)QuotaCharge, Process);
+
+ FoundVad->u.VadFlags.CommitCharge += (ULONG)QuotaCharge;
+ Process->CommitCharge += QuotaCharge;
+ break;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // An exception has occurred during the charging
+ // of commitment. Release the held mutexes and return
+ // the exception status to the user.
+ //
+
+ if (PageFileChargeSucceeded) {
+ MiReturnCommitment ((ULONG)QuotaCharge);
+ }
+
+ if (Status != STATUS_SUCCESS) {
+
+ //
+ // We have already tried for the precise charge,
+ // return an error.
+ //
+
+ goto ErrorReturn;
+ }
+
+ Status = GetExceptionCode();
+
+ //
+ // Quota charge failed, calculate the exact quota
+ // taking into account pages that may already be
+                    // committed and retry the operation.
+                    //
+
+ QuotaFree = -(LONG)MiCalculatePageCommitment (
+ StartingAddress,
+ EndingAddress,
+ FoundVad,
+ Process);
+
+ QuotaCharge += QuotaFree;
+ ASSERT (QuotaCharge >= 0);
+ }
+ }
+
+
+ //
+ // Build a demand zero PTE with the proper protection.
+ //
+
+ TempPte = ZeroPte;
+ TempPte.u.Soft.Protection = ProtectionMask;
+
+ DecommittedPte = ZeroPte;
+ DecommittedPte.u.Soft.Protection = MM_DECOMMIT;
+
+ //
+ // Fill in all the page table pages with the demand zero PTE.
+ //
+
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ if (FoundVad->u.VadFlags.MemCommit) {
+ CommitLimitPte = MiGetPteAddress (FoundVad->EndingVa);
+ } else {
+ CommitLimitPte = NULL;
+ }
+
+ while (PointerPte <= LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ //
+ // Pointing to the next page table page, make
+ // a page table page exist and make it valid.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+ }
+
+ if (PointerPte->u.Long == 0) {
+
+ if (PointerPte <= CommitLimitPte) {
+
+ //
+ // This page is implicitly committed.
+ //
+
+ QuotaFree += 1;
+
+ }
+
+ *PointerPte = TempPte;
+
+ //
+                    // Increment the count of non-zero page table entries
+ // for this page table and the number of private pages
+ // for the process.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries
+ [MiGetPteOffset(PointerPte)] += 1;
+ } else {
+ if (PointerPte->u.Long == DecommittedPte.u.Long) {
+
+ //
+ // Only commit the page if it is already decommitted.
+ //
+
+ *PointerPte = TempPte;
+ } else {
+ QuotaFree += 1;
+
+ //
+ // Make sure the protection for the page is
+ // right.
+ //
+
+ if (!ChangeProtection &&
+ (Protect != MiGetPageProtection (PointerPte,
+ Process))) {
+ ChangeProtection = TRUE;
+ }
+ }
+ }
+ PointerPte += 1;
+ }
+ }
+
+ if (QuotaFree != 0) {
+ ASSERT (QuotaFree >= 0);
+ MiReturnCommitment (QuotaFree);
+ MiReturnPageFileQuota (QuotaFree, Process);
+ FoundVad->u.VadFlags.CommitCharge -= QuotaFree;
+ Process->CommitCharge -= QuotaFree;
+ ASSERT ((LONG)FoundVad->u.VadFlags.CommitCharge >= 0);
+ }
+
+ //
+ // Previously reserved pages have been committed, or an error occurred,
+ // release working set lock, address creation lock, detach,
+    // dereference the process, and return status.
+ //
+
+done:
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE(Process);
+
+ if (ChangeProtection) {
+ PVOID Start;
+ ULONG Size;
+ Start = StartingAddress;
+ Size = CapturedRegionSize;
+ MiProtectVirtualMemory (Process,
+ &Start,
+ &Size,
+ Protect,
+ &Size);
+ }
+
+ if (Attached) {
+ KeDetachProcess();
+ }
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (Process);
+ }
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *RegionSize = CapturedRegionSize;
+ *BaseAddress = StartingAddress;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( MmWatchProcess ) {
+ if ( MmWatchProcess == PsGetCurrentProcess() ) {
+ DbgPrint("\n+++ ALLOC Type %lx Base %lx Size %lx\n",
+ AllocationType,StartingAddress, CapturedRegionSize);
+ MmFooBar();
+ }
+ } else {
+ DbgPrint("return allocvm status %lx baseaddr %lx size %lx\n",
+                Status, StartingAddress, CapturedRegionSize);
+ }
+ }
+#endif
+#if DBG
+ if (RtlAreLogging( RTL_EVENT_CLASS_VM )) {
+ RtlLogEvent( MiAllocVmEventId,
+ RTL_EVENT_CLASS_VM,
+ StartingAddress,
+ CapturedRegionSize,
+ AllocationType,
+ Protect
+ );
+
+ }
+#endif // DBG
+
+ return STATUS_SUCCESS;
+ }
+
+ErrorReturn:
+ UNLOCK_WS (Process);
+
+ErrorReturn1:
+
+ UNLOCK_ADDRESS_SPACE (Process);
+ if (Attached) {
+ KeDetachProcess();
+ }
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (Process);
+ }
+ return Status;
+}
+
+NTSTATUS
+MiResetVirtualMemory (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+    This routine resets the state of the specified region: modified
+    pages are marked clean so they will not be written to the paging
+    file, and any paging file space backing the range is released.
+    The contents of the pages are NOT zeroed.
+
+Arguments:
+
+    StartingAddress - Supplies the starting address of the range.
+
+    EndingAddress - Supplies the ending address of the range.
+
+    Vad - Supplies the virtual address descriptor describing the range.
+
+    Process - Supplies the current process.
+
+Return Value:
+
+    Returns the status of the operation.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE ProtoPte;
+ PMMPTE PointerPde;
+ PMMPTE LastPte;
+ MMPTE PteContents;
+ ULONG PfnHeld = FALSE;
+ ULONG First;
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+
+ if (Vad->u.VadFlags.PrivateMemory == 0) {
+
+ if (Vad->ControlArea->FilePointer != NULL) {
+
+ //
+            // Only page file backed sections can be reset.
+ //
+
+ return STATUS_USER_MAPPED_FILE;
+ }
+ }
+
+ First = TRUE;
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ //
+ // Examine all the PTEs in the range.
+ //
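+    // In outline: a valid page is marked clean and its paging file
+    // space released (when this is the only reference to the page);
+    // a transition page on the modified list is moved to the standby
+    // list; and a PTE which points only into the paging file has that
+    // space released.
+    //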
+
+ while (PointerPte <= LastPte) {
+
+ if ((((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) ||
+ (First)) {
+
+ //
+ // Pointing to the next page table page, make
+ // a page table page exist and make it valid.
+ //
+
+ First = FALSE;
+ PointerPde = MiGetPteAddress (PointerPte);
+ if (!MiDoesPdeExistAndMakeValid(PointerPde,
+ Process,
+ (BOOLEAN)PfnHeld)) {
+
+ //
+ // This page directory entry is empty, go to the next one.
+ //
+
+ PointerPde += 1;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ continue;
+ }
+ }
+
+ PteContents = *PointerPte;
+ ProtoPte = NULL;
+
+ if ((PteContents.u.Hard.Valid == 0) &&
+ (PteContents.u.Soft.Prototype == 1)) {
+
+ //
+ // This is a prototype PTE, evaluate the
+ // prototype PTE.
+ //
+
+ ProtoPte = MiGetProtoPteAddress(Vad,
+ MiGetVirtualAddressMappedByPte(PointerPte));
+ if (!PfnHeld) {
+ PfnHeld = TRUE;
+ LOCK_PFN (OldIrql);
+ }
+ MiMakeSystemAddressValidPfnWs (ProtoPte, Process);
+ PteContents = *ProtoPte;
+ }
+ if (PteContents.u.Hard.Valid == 1) {
+ if (!PfnHeld) {
+ LOCK_PFN (OldIrql);
+ PfnHeld = TRUE;
+ continue;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+ if (Pfn1->u3.e2.ReferenceCount == 1) {
+
+ //
+ // Only this process has the page mapped.
+ //
+
+ Pfn1->u3.e1.Modified = 0;
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+ if ((!ProtoPte) && (MI_IS_PTE_DIRTY (PteContents))) {
+
+ //
+ // Clear the dirty bit and flush tb if it is NOT a prototype
+ // PTE.
+ //
+
+ MI_SET_PTE_CLEAN (PteContents);
+ KeFlushSingleTb (MiGetVirtualAddressMappedByPte (PointerPte),
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ PteContents.u.Flush);
+ }
+
+ } else if (PteContents.u.Soft.Transition == 1) {
+ if (!PfnHeld) {
+ LOCK_PFN (OldIrql);
+ PfnHeld = TRUE;
+ continue;
+ }
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+ if ((Pfn1->u3.e1.PageLocation == ModifiedPageList) &&
+ (Pfn1->u3.e2.ReferenceCount == 0)) {
+
+ //
+ // Remove from the modified list, release the page
+ // file space and insert on the standby list.
+ //
+
+ Pfn1->u3.e1.Modified = 0;
+ MiUnlinkPageFromList (Pfn1);
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ MiInsertPageInList (MmPageLocationList[StandbyPageList],
+ PteContents.u.Trans.PageFrameNumber);
+ }
+ } else {
+ if (PteContents.u.Soft.PageFileHigh != 0) {
+ if (!PfnHeld) {
+ LOCK_PFN (OldIrql);
+ }
+ PfnHeld = FALSE;
+ MiReleasePageFileSpace (PteContents);
+ UNLOCK_PFN (OldIrql);
+ if (ProtoPte) {
+ ProtoPte->u.Soft.PageFileHigh = 0;
+ } else {
+ PointerPte->u.Soft.PageFileHigh = 0;
+ }
+ } else {
+ if (PfnHeld) {
+ UNLOCK_PFN (OldIrql);
+ }
+ PfnHeld = FALSE;
+ }
+ }
+ PointerPte += 1;
+ }
+ if (PfnHeld) {
+ UNLOCK_PFN (OldIrql);
+ }
+ return STATUS_SUCCESS;
+}
+
+
+//
+// Commented out, no longer used.
+//
+#if 0
+BOOLEAN
+MiIsEntireRangeDecommitted (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the range of pages from the starting address
+ up to and including the ending address and returns TRUE if every
+ page in the range is either not committed or decommitted, FALSE otherwise.
+
+Arguments:
+
+ StartingAddress - Supplies the starting address of the range.
+
+ EndingAddress - Supplies the ending address of the range.
+
+ Vad - Supplies the virtual address descriptor which describes the range.
+
+ Process - Supplies the current process.
+
+Return Value:
+
+ TRUE if the entire range is either decommitted or not committed.
+ FALSE if any page within the range is committed.
+
+Environment:
+
+    Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE PointerPde;
+ ULONG FirstTime = TRUE;
+ PVOID Va;
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ //
+ // Set the Va to the starting address + 8, this solves problems
+ // associated with address 0 (NULL) being used as a valid virtual
+ // address and NULL in the VAD commitment field indicating no pages
+ // are committed.
+ //
+
+ Va = (PVOID)((PCHAR)StartingAddress + 8);
+
+ //
+ // A page table page exists, examine the individual PTEs to ensure
+ // none are in the committed state.
+ //
+
+ while (PointerPte <= LastPte) {
+
+ //
+ // Check to see if a page table page (PDE) exists if the PointerPte
+ // address is on a page boundary or this is the first time through
+ // the loop.
+ //
+
+ if ((((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) ||
+ (FirstTime)) {
+
+ //
+ // This is a PDE boundary, check to see if the entire
+ // PDE page exists.
+ //
+
+ FirstTime = FALSE;
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No PDE exists for the starting address, check the VAD
+ // to see whether the pages are committed or not.
+ //
+
+ PointerPde += 1;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ if (PointerPte > LastPte) {
+
+ //
+                    // No page table page exists, if explicit commitment
+ // via VAD indicates PTEs of zero should be committed,
+ // return an error.
+ //
+
+ if (EndingAddress <= Vad->CommittedAddress) {
+
+ //
+                        // The entire range is committed, return an error.
+ //
+
+ return FALSE;
+ } else {
+
+ //
+ // All pages are decommitted, return TRUE.
+ //
+
+ return TRUE;
+ }
+ }
+
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ //
+ // Make sure the range thus far is not committed.
+ //
+
+ if (Va <= Vad->CommittedAddress) {
+
+ //
+                    // This range is committed, return an error.
+ //
+
+ return FALSE;
+ }
+ }
+ }
+
+ //
+ // The page table page exists, check each PTE for commitment.
+ //
+
+ if (PointerPte->u.Long == 0) {
+
+ //
+ // This PTE for the page is zero, check the VAD.
+ //
+
+ if (Va <= Vad->CommittedAddress) {
+
+ //
+                // The entire range is committed, return an error.
+ //
+
+ return FALSE;
+ }
+ } else {
+
+ //
+ // Has this page been explicitly decommited?
+ //
+
+ if (!MiIsPteDecommittedPage (PointerPte)) {
+
+ //
+ // This page is committed, return an error.
+ //
+
+ return FALSE;
+ }
+ }
+ PointerPte += 1;
+ Va = (PVOID)((PCHAR)(Va) + PAGE_SIZE);
+ }
+ return TRUE;
+}
+#endif //0
+
+#if DBG
+VOID
+MmFooBar(VOID){}
+#endif
diff --git a/private/ntos/mm/alpha/datalpha.c b/private/ntos/mm/alpha/datalpha.c
new file mode 100644
index 000000000..15efd58e4
--- /dev/null
+++ b/private/ntos/mm/alpha/datalpha.c
@@ -0,0 +1,116 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+    datalpha.c
+
+Abstract:
+
+ This module contains the private hardware specific global storage for
+ the memory management subsystem.
+
+Author:
+
+ Lou Perazzoli (loup) 27-Mar-1990
+ Joe Notarangelo 23-April-1992
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//
+// A zero Pte.
+//
+
+MMPTE ZeroPte = { 0 };
+
+//
+// A kernel zero PTE.
+//
+
+MMPTE ZeroKernelPte = { 0 };
+
+MMPTE ValidKernelPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_GLOBAL_MASK };
+
+MMPTE ValidUserPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_OWNER_MASK |
+ MM_PTE_DIRTY_MASK };
+
+MMPTE ValidPtePte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK };
+
+MMPTE ValidPdePde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK };
+
+MMPTE ValidKernelPde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_GLOBAL_MASK };
+
+MMPTE DemandZeroPde = { MM_READWRITE << 3 };
+
+MMPTE DemandZeroPte = { MM_READWRITE << 3 };
+
+MMPTE TransitionPde = { MM_PTE_TRANSITION_MASK | (MM_READWRITE << 3) };
+
+MMPTE PrototypePte = { 0xFFFFF000 | (MM_READWRITE << 3) | MM_PTE_PROTOTYPE_MASK };
+
+//
+// PTE which generates an access violation when referenced.
+//
+
+MMPTE NoAccessPte = {MM_NOACCESS << 3};
+
+//
+// Pool start and end.
+//
+
+PVOID MmNonPagedPoolStart;
+
+PVOID MmNonPagedPoolEnd = (PVOID)(MM_NONPAGED_POOL_END);
+
+PVOID MmPagedPoolStart = (PVOID)(MM_PAGED_POOL_START);
+
+PVOID MmPagedPoolEnd;
+
+//
+// PTE reserved for mapping physical data for debugger.
+//
+
+PMMPTE MmDebugPte = MiGetPteAddress( 0xfffdf000 );
+
+//
+// 16 PTEs reserved for mapping MDLs (128k max).
+//
+
+PMMPTE MmCrashDumpPte = (MiGetPteAddress(MM_NONPAGED_POOL_END));
+
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+#endif
+
+PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+MMPFNLIST MmModifiedPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST};
+
+ULONG MmSecondaryColorMask;
+
+//
+// Color tables for modified pages destined for the paging file.
+//
+
+ULONG MmTotalPagesForPagingFile;
+
diff --git a/private/ntos/mm/alpha/debugsup.c b/private/ntos/mm/alpha/debugsup.c
new file mode 100644
index 000000000..bab567a74
--- /dev/null
+++ b/private/ntos/mm/alpha/debugsup.c
@@ -0,0 +1,193 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+ debugsup.c
+
+Abstract:
+
+ This module contains routines which provide support for the
+ kernel debugger.
+
+Author:
+
+ Lou Perazzoli (loup) 02-Aug-90
+ Joe Notarangelo 23-Apr-1992
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+PVOID
+MmDbgReadCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+
+ ALPHA implementation specific:
+
+ This routine returns the virtual address which is valid (mapped)
+ for read access.
+
+    If the address is within KSEG0, or is otherwise valid (mapped)
+    and readable, the address itself is returned.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+    Returns NULL if the address is not valid or readable, otherwise
+    returns the address through which the read may be performed.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
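+
+//
+// Illustrative sketch (not part of the build): a caller such as the
+// kernel debugger probes an address with MmDbgReadCheck before
+// dereferencing it.  The helper below is hypothetical and only shows
+// the intended calling pattern.
+//
+
+#if 0
+ULONG
+MiDbgSafeReadUlong (
+    IN PVOID VirtualAddress,
+    OUT PBOOLEAN Readable
+    )
+{
+    PVOID Checked;
+
+    Checked = MmDbgReadCheck (VirtualAddress);
+    if (Checked == NULL) {
+        *Readable = FALSE;
+        return 0;
+    }
+    *Readable = TRUE;
+    return *(volatile ULONG *)Checked;
+}
+#endif // 0 - illustrative only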
+
+PVOID
+MmDbgWriteCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ ALPHA implementation specific:
+
+    This routine returns the physical address for a virtual address
+ which is valid (mapped) for write access.
+
+    If the address is a valid user-mode address whose page lies below
+    1GB physical, the KSEG0 alias of its physical page is returned.
+    If the address is within KSEG0, or is a valid writable system
+    address, the address itself is returned.
+
+ NOTE: The physical address must only be used while the interrupt
+ level on ALL processors is above DISPATCH_LEVEL, otherwise the
+ binding between the virtual address and the physical address can
+ change due to paging.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+    Returns NULL if the address is not valid or writable, otherwise
+    returns the address through which the write may be performed.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ PointerPte = MiGetPteAddress (VirtualAddress);
+ if ((VirtualAddress <= MM_HIGHEST_USER_ADDRESS) &&
+ (PointerPte->u.Hard.PageFrameNumber < MM_PAGES_IN_KSEG0)) {
+
+ //
+        // User mode - return the physical address. This prevents
+ // copy on write faults for breakpoints on user-mode pages.
+ // IGNORE write protection.
+ //
+ // N.B. - The physical address must be less than 1GB to allow this
+ // short-cut mapping.
+ //
+
+ return (PVOID)
+ ((ULONG)MmGetPhysicalAddress(VirtualAddress).LowPart + KSEG0_BASE);
+ }
+
+ if (PointerPte->u.Hard.Write == 0) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
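+
+//
+// Worked example of the KSEG0 short-cut above (numbers are illustrative):
+// a user-mode breakpoint target at physical address 0x003E645C (below
+// 1GB) is returned as 0x003E645C + KSEG0_BASE = 0x803E645C, so the
+// debugger writes through the super-page alias and never takes a
+// copy-on-write fault on the user mapping.
+//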
+
+PVOID
+MmDbgTranslatePhysicalAddress (
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ ALPHA implementation specific:
+
+ This routine maps the specified physical address and returns
+ the virtual address which maps the physical address.
+
+    The next call to MmDbgTranslatePhysicalAddress removes the
+    previous physical address translation, hence only a single
+    physical address can be examined at a time (and a translation
+    cannot cross a page boundary).
+
+Arguments:
+
+    PhysicalAddress - Supplies the physical address to map and translate.
+
+Return Value:
+
+    The virtual address which corresponds to the physical address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PVOID BaseAddress;
+ LARGE_INTEGER LiTmp;
+
+ BaseAddress = MiGetVirtualAddressMappedByPte (MmDebugPte);
+
+ KiFlushSingleTb (TRUE, BaseAddress);
+
+ *MmDebugPte = ValidKernelPte;
+ LiTmp.QuadPart = PhysicalAddress.QuadPart >> PAGE_SHIFT;
+ MmDebugPte->u.Hard.PageFrameNumber = LiTmp.LowPart;
+
+ return (PVOID)((ULONG)BaseAddress + BYTE_OFFSET(PhysicalAddress.LowPart));
+}
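+
+//
+// Illustrative sketch (not part of the build): reading one ULONG at an
+// arbitrary physical address through the single debug PTE.  Each call
+// invalidates the previous translation, and a read must not cross a
+// page boundary.  The helper name is hypothetical.
+//
+
+#if 0
+ULONG
+MiDbgReadPhysicalUlong (
+    IN PHYSICAL_ADDRESS PhysicalAddress
+    )
+{
+    PVOID Mapped;
+
+    Mapped = MmDbgTranslatePhysicalAddress (PhysicalAddress);
+    return *(volatile ULONG *)Mapped;
+}
+#endif // 0 - illustrative only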
diff --git a/private/ntos/mm/alpha/hypermap.c b/private/ntos/mm/alpha/hypermap.c
new file mode 100644
index 000000000..803bf6ff3
--- /dev/null
+++ b/private/ntos/mm/alpha/hypermap.c
@@ -0,0 +1,382 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+ hypermap.c
+
+Abstract:
+
+ This module contains the routines which map physical pages into
+ reserved PTEs within hyper space.
+
+    This module is machine dependent.  This version is targeted
+    for ALPHA and uses KSEG0 (the 32-bit super-page) to map pages at
+    their physical addresses.
+
+Author:
+
+ Lou Perazzoli (loup) 5-Apr-1989
+ Joe Notarangelo 23-Apr-1992 ALPHA version from MIPS version
+
+Revision History:
+
+    Chao Chen 21-Aug-1995  Fixed a problem accessing pages above 1GB
+    through hyperspace.
+
+--*/
+
+#include "mi.h"
+
+
+PVOID
+MiMapPageInHyperSpace (
+ IN ULONG PageFrameIndex,
+ IN PKIRQL OldIrql
+ )
+
+/*++
+
+Routine Description:
+
+    This routine maps the specified physical page into hyper space
+    (or returns its KSEG0 alias if the page lies below 1GB) and
+    returns the virtual address which maps the page.
+
+ ************************************
+ * *
+ * Returns with a spin lock held!!! *
+ * *
+ ************************************
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the address where the requested page was mapped.
+
+ RETURNS WITH THE HYPERSPACE SPIN LOCK HELD!!!!
+
+ The routine MiUnmapHyperSpaceMap MUST be called to release the lock!!!!
+
+Environment:
+
+ kernel mode.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ ULONG offset;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ //
+ // If the page is below 1GB physical, then it can be mapped via
+ // KSEG0.
+ //
+
+ LOCK_HYPERSPACE (OldIrql);
+
+ if (PageFrameIndex < MM_PAGES_IN_KSEG0) {
+ return (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+
+ PointerPte = MmFirstReservedMappingPte;
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+ // All the reserved PTEs have been used, make
+ // them all invalid.
+ //
+
+ MI_MAKING_MULTIPLE_PTES_INVALID (FALSE);
+
+ RtlZeroMemory (MmFirstReservedMappingPte,
+ (NUMBER_OF_MAPPING_PTES + 1) * sizeof(MMPTE));
+
+ //
+ // Use the page frame number field of the first PTE as an
+ // offset into the available mapping PTEs.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
+
+ //
+ // Flush entire TB only on this processor.
+ //
+
+ KeFlushEntireTb (TRUE, FALSE);
+ }
+
+ //
+ // Get offset to first free PTE.
+ //
+
+ offset = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Change offset for next time through.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = offset - 1;
+
+ //
+ // Point to free entry and make it valid.
+ //
+
+ PointerPte += offset;
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+
+ //
+    // Return the VA that maps the page.
+ //
+
+ return MiGetVirtualAddressMappedByPte (PointerPte);
+}
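+
+//
+// Illustrative sketch (not part of the build): the expected map/use/unmap
+// pattern.  MiMapPageInHyperSpace returns with the hyperspace lock held;
+// the unmap counterpart (called MiUnmapHyperSpaceMap above) must release
+// it.  The helper below and the unmap name it uses are assumptions.
+//
+
+#if 0
+VOID
+MiCopyFromPhysicalPage (
+    IN ULONG SourceFrame,
+    IN PVOID Destination
+    )
+{
+    PVOID Source;
+    KIRQL OldIrql;
+
+    Source = MiMapPageInHyperSpace (SourceFrame, &OldIrql);
+    RtlCopyMemory (Destination, Source, PAGE_SIZE);
+    MiUnmapPageInHyperSpace (OldIrql);  // assumed unmap; releases the lock
+}
+#endif // 0 - illustrative only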
+
+
+PVOID
+MiMapImageHeaderInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+    The specified physical page is mapped into hyper space via the
+    reserved image mapping PTE and the virtual address of the mapping
+    is returned.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the virtual address where the specified physical page was
+ mapped.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+
+#if DBG
+
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+
+#endif //DBG
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ LOCK_PFN (OldIrql);
+
+ while (PointerPte->u.Long != 0) {
+
+ //
+ // If there is no event specified, set one up.
+ //
+
+ if (MmWorkingSetList->WaitingForImageMapping == (PKEVENT)NULL) {
+
+ //
+ // Set the global event into the field and wait for it.
+ //
+
+ MmWorkingSetList->WaitingForImageMapping = &MmImageMappingPteEvent;
+ }
+
+ //
+ // Release the PFN lock and wait on the event in an
+ // atomic operation.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(MmWorkingSetList->WaitingForImageMapping,
+ Executive,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ LOCK_PFN (OldIrql);
+ }
+
+ ASSERT (PointerPte->u.Long == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ UNLOCK_PFN (OldIrql);
+
+ return (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+}
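+
+//
+// Illustrative sketch (not part of the build): the image-mapping PTE is a
+// single shared slot, so callers serialize through the event logic above.
+// The hypothetical helper shows the map / examine / unmap sequence.
+//
+
+#if 0
+BOOLEAN
+MiImageHasMzSignature (
+    IN ULONG HeaderFrame
+    )
+{
+    PUSHORT Header;
+    BOOLEAN Match;
+
+    Header = (PUSHORT)MiMapImageHeaderInHyperSpace (HeaderFrame);
+    Match = (BOOLEAN)(*Header == 0x5A4D);       // 'MZ' signature
+    MiUnmapImageHeaderInHyperSpace ();
+    return Match;
+}
+#endif // 0 - illustrative only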
+
+
+VOID
+MiUnmapImageHeaderInHyperSpace (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure unmaps the PTE reserved for mapping the image
+ header, flushes the TB, and, if the WaitingForImageMapping field
+ is not NULL, sets the specified event.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+ PKEVENT Event;
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ TempPte.u.Long = 0;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Capture the current state of the event field and clear it out.
+ //
+
+ Event = MmWorkingSetList->WaitingForImageMapping;
+
+ MmWorkingSetList->WaitingForImageMapping = (PKEVENT)NULL;
+
+ ASSERT (PointerPte->u.Long != 0);
+
+ KeFlushSingleTb (IMAGE_MAPPING_PTE, TRUE, FALSE,
+ (PHARDWARE_PTE)PointerPte, TempPte.u.Hard);
+
+ UNLOCK_PFN (OldIrql);
+
+ if (Event != (PKEVENT)NULL) {
+
+ //
+ // If there was an event specified, set the event.
+ //
+
+ KePulseEvent (Event, 0, FALSE);
+ }
+
+ return;
+}
+
+
+PVOID
+MiMapPageToZeroInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure maps the specified physical page into hyper space
+ and returns the virtual address which maps the page.
+
+ NOTE: it maps it into the same location reserved for fork operations!!
+ This is only to be used by the zeroing page thread.
+
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the virtual address where the specified physical page was
+ mapped.
+
+Environment:
+
+ Must be holding the PFN lock.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ PVOID MappedAddress;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ //
+ // If the page is below 1GB physical then it can be mapped via
+ // KSEG0.
+ //
+
+ if (PageFrameIndex < MM_PAGES_IN_KSEG0) {
+ return (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+
+ MM_PFN_LOCK_ASSERT();
+
+ PointerPte = MiGetPteAddress (ZEROING_PAGE_PTE);
+ MappedAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ TempPte.u.Long = 0;
+
+ KeFlushSingleTb (MappedAddress,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte, TempPte.u.Hard);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ return MappedAddress;
+}
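+
+//
+// Illustrative sketch (not part of the build): the zero-page thread's use
+// of the routine above.  The PFN lock is held per the Environment note;
+// no unmap is needed because the next call flushes and re-points the
+// single ZEROING_PAGE_PTE.  The helper name is hypothetical.
+//
+
+#if 0
+VOID
+MiZeroOnePage (
+    IN ULONG PageFrameIndex
+    )
+{
+    PVOID Va;
+
+    MM_PFN_LOCK_ASSERT();
+    Va = MiMapPageToZeroInHyperSpace (PageFrameIndex);
+    RtlZeroMemory (Va, PAGE_SIZE);
+}
+#endif // 0 - illustrative only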
diff --git a/private/ntos/mm/alpha/inialpha.c b/private/ntos/mm/alpha/inialpha.c
new file mode 100644
index 000000000..79e5fe6fa
--- /dev/null
+++ b/private/ntos/mm/alpha/inialpha.c
@@ -0,0 +1,1183 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+    inialpha.c
+
+Abstract:
+
+ This module contains the machine dependent initialization for the
+ memory management component. It is specifically tailored to the
+ ALPHA architecture.
+
+Author:
+
+ Lou Perazzoli (loup) 3-Apr-1990
+ Joe Notarangelo 23-Apr-1992 ALPHA version
+
+Revision History:
+
+ Chao Chen 21-Aug-1995 Use a backup memory descriptor for building of
+ non-paged pool and other data structures.
+
+--*/
+
+#include "mi.h"
+
+//
+// Local definitions
+//
+
+#define _1MB (0x100000)
+#define _16MB (0x1000000)
+#define _24MB (0x1800000)
+#define _32MB (0x2000000)
+
+
+VOID
+MiInitMachineDependent (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This routine performs the necessary operations to enable virtual
+ memory. This includes building the page directory page, building
+    page table pages to map the code section, the data section, the
+ stack section and the trap handler.
+
+ It also initializes the PFN database and populates the free list.
+
+
+Arguments:
+
+    LoaderBlock - Supplies a pointer to the loader parameter block.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPFN BasePfn;
+ PMMPFN BottomPfn;
+ PMMPFN TopPfn;
+ BOOLEAN PfnInKseg0;
+ BOOLEAN LowMemoryReserved=FALSE;
+ ULONG i, j;
+ ULONG HighPage;
+ ULONG PagesLeft;
+ ULONG PageNumber;
+ ULONG PdePageNumber;
+ ULONG PdePage;
+ ULONG PageFrameIndex;
+ ULONG NextPhysicalPage;
+ ULONG PfnAllocation;
+ ULONG NumberOfPages;
+ PEPROCESS CurrentProcess;
+ PVOID SpinLockPage;
+ ULONG MostFreePage = 0;
+ ULONG MostFreeLowMem = 0;
+ PLIST_ENTRY NextMd;
+ ULONG MaxPool;
+ KIRQL OldIrql;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor = NULL;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem = NULL;
+ PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
+ MMPTE TempPte;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE CacheStackPage;
+ PMMPTE Pde;
+ PMMPTE StartPde;
+ PMMPTE EndPde;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PULONG PointerLong;
+ CHAR Buffer[256];
+ PMMFREE_POOL_ENTRY Entry;
+ PVOID NonPagedPoolStartVirtual;
+ ULONG Range;
+
+
+ PointerPte = MiGetPdeAddress (PDE_BASE);
+
+ PdePageNumber = PointerPte->u.Hard.PageFrameNumber;
+
+ PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PointerPte->u.Long;
+
+ KeSweepDcache( FALSE );
+
+ //
+ // Get the lower bound of the free physical memory and the
+ // number of physical pages by walking the memory descriptor lists.
+ // In addition, find the memory descriptor with the most free pages
+ // that begins at a physical address less than 16MB. The 16 MB
+ // boundary is necessary for allocating common buffers for use by
+ // ISA devices that cannot address more than 24 bits.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount-1;
+ MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
+
+ if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
+ MmLowestPhysicalPage = MemoryDescriptor->BasePage;
+ }
+
+ if (HighPage > MmHighestPhysicalPage) {
+ MmHighestPhysicalPage = HighPage;
+ }
+
+ //
+ // Locate the largest free block starting below 16 megs
+ // and the largest free block.
+ //
+
+ if ((MemoryDescriptor->MemoryType == LoaderFree) ||
+ (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
+ (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
+ (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
+
+ if ((MemoryDescriptor->PageCount > MostFreeLowMem) &&
+ (MemoryDescriptor->BasePage < (_16MB >> PAGE_SHIFT)) &&
+ (HighPage < MM_PAGES_IN_KSEG0)) {
+
+ MostFreeLowMem = MemoryDescriptor->PageCount;
+ FreeDescriptorLowMem = MemoryDescriptor;
+
+ } else if (MemoryDescriptor->PageCount > MostFreePage) {
+
+ MostFreePage = MemoryDescriptor->PageCount;
+ FreeDescriptor = MemoryDescriptor;
+ }
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+ // Perform sanity checks on the results of walking the memory
+ // descriptors.
+ //
+
+ if (MmNumberOfPhysicalPages < 1024) {
+ KeBugCheckEx (INSTALL_MORE_MEMORY,
+ MmNumberOfPhysicalPages,
+ MmLowestPhysicalPage,
+ MmHighestPhysicalPage,
+ 0);
+ }
+
+ if (FreeDescriptorLowMem == NULL){
+ HalDisplayString("MmInit *** FATAL ERROR *** no free descriptors that begin below physical address 16MB\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+
+ //
+ // Used later to build nonpaged pool.
+ //
+
+ NextPhysicalPage = FreeDescriptorLowMem->BasePage;
+ NumberOfPages = FreeDescriptorLowMem->PageCount;
+
+ //
+    // Build nonpaged pool with the physical pages following the
+    // data page.  Nonpaged pool is carved from the high end of the
+    // virtual address space and grows downward.
+ //
+ // At this time non-paged pool is constructed so virtual addresses
+ // are also physically contiguous.
+ //
+
+ if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
+                        (7 * (MmNumberOfPhysicalPages >> 3))) {
+
+ //
+        // More than 7/8 of memory would go to nonpaged pool; reset to 0.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = 0;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
+
+ //
+        // Calculate the size of nonpaged pool.  Use the minimum size
+        // for the first 8MB of memory, then add extra pages for every
+        // MB beyond 8MB.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
+
+ if (MmNumberOfPhysicalPages > 1024) {
+ MmSizeOfNonPagedPoolInBytes +=
+ ( (MmNumberOfPhysicalPages - 1024) /
+ (_1MB >> PAGE_SHIFT) ) *
+ MmMinAdditionNonPagedPoolPerMb;
+ }
+ }
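+
+    //
+    // Worked example of the sizing above (figures are illustrative): with
+    // 8KB pages a 32MB machine has 4096 physical pages.  The 3072 pages
+    // beyond the first 1024 span 3072 / (_1MB >> PAGE_SHIFT) = 3072 / 128
+    // = 24 additional megabytes, so 24 * MmMinAdditionNonPagedPoolPerMb
+    // bytes are added to the minimum pool size.
+    //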
+
+ //
+ // Align to page size boundary.
+ //
+
+ MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
+
+ //
+ // Limit initial nonpaged pool size to MM_MAX_INITIAL_NONPAGED_POOL
+ //
+
+ if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL ){
+ MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
+ }
+
+ //
+ // If the non-paged pool that we want to allocate will not fit in
+ // the free memory descriptor that we have available then recompute
+ // the size of non-paged pool to be the size of the free memory
+ // descriptor. If the free memory descriptor cannot fit the
+ // minimum non-paged pool size (MmMinimumNonPagedPoolSize) then we
+ // cannot boot the operating system.
+ //
+
+ if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
+ NumberOfPages ) {
+
+ //
+ // Reserve all of low memory for nonpaged pool.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = NumberOfPages << PAGE_SHIFT;
+ LowMemoryReserved = TRUE;
+
+ //
+ // Switch to backup descriptor for all other allocations.
+ //
+
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+
+ if( MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize ){
+ HalDisplayString("MmInit *** FATAL ERROR *** cannot allocate non-paged pool\n");
+ sprintf(Buffer,
+                "Largest descriptor = %d pages, require %d pages\n",
+ NumberOfPages,
+ MmMinimumNonPagedPoolSize >> PAGE_SHIFT);
+ HalDisplayString( Buffer );
+ KeBugCheck (MEMORY_MANAGEMENT);
+
+ }
+ }
+
+ //
+ // Calculate the maximum size of pool.
+ //
+
+ if (MmMaximumNonPagedPoolInBytes == 0) {
+
+ //
+        // Calculate the maximum size of nonpaged pool.  Start with the
+        // default maximum, then add extra pages for every MB beyond
+        // 8MB.
+ //
+
+ MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
+
+ //
+        // Make sure enough expansion room for the PFN database exists.
+ //
+
+ MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ if (MmNumberOfPhysicalPages > 1024) {
+ MmMaximumNonPagedPoolInBytes +=
+ ( (MmNumberOfPhysicalPages - 1024) /
+ (_1MB >> PAGE_SHIFT) ) *
+ MmMaxAdditionNonPagedPoolPerMb;
+ }
+ if (MmMaximumNonPagedPoolInBytes > MM_MAX_DEFAULT_NONPAGED_POOL) {
+ MmMaximumNonPagedPoolInBytes = MM_MAX_DEFAULT_NONPAGED_POOL;
+ }
+ }
+
+ MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
+ (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ if (MmMaximumNonPagedPoolInBytes < MaxPool) {
+ MmMaximumNonPagedPoolInBytes = MaxPool;
+ }
+
+ //
+ // Limit maximum nonpaged pool to MM_MAX_ADDITIONAL_NONPAGED_POOL.
+ //
+
+ if( MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL ){
+ MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
+ }
+
+ MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
+ - (MmMaximumNonPagedPoolInBytes - 1));
+
+ MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
+ NonPagedPoolStartVirtual = MmNonPagedPoolStart;
+
+
+ //
+ // Calculate the starting PDE for the system PTE pool which is
+ // right below the nonpaged pool.
+ //
+
+ MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
+ (~PAGE_DIRECTORY_MASK));
+
+ if( MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START ){
+ MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
+ MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart -
+ (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
+ ASSERT (MmNumberOfSystemPtes > 1000);
+ }
+
+ //
+ // Set the global bit for all PDEs in system space.
+ //
+
+ StartPde = MiGetPdeAddress( MM_SYSTEM_SPACE_START );
+ EndPde = MiGetPdeAddress( MM_SYSTEM_SPACE_END );
+
+ while( StartPde <= EndPde ){
+ if( StartPde->u.Hard.Global == 0 ){
+
+ //
+ // Set the Global bit.
+ //
+
+ TempPte = *StartPde;
+ TempPte.u.Hard.Global = 1;
+ *StartPde = TempPte;
+
+ }
+ StartPde += 1;
+ }
+
+ StartPde = MiGetPdeAddress (MmNonPagedSystemStart);
+
+ EndPde = MiGetPdeAddress (MmNonPagedPoolEnd);
+
+ ASSERT ((EndPde - StartPde) < (LONG)NumberOfPages);
+
+ TempPte = ValidKernelPte;
+
+ while (StartPde <= EndPde) {
+ if (StartPde->u.Hard.Valid == 0) {
+
+ //
+ // Map in a page directory page.
+ //
+
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NumberOfPages -= 1;
+ NextPhysicalPage += 1;
+ *StartPde = TempPte;
+
+ }
+ StartPde += 1;
+ }
+
+ //
+ // Zero the PTEs before non-paged pool.
+ //
+
+ StartPde = MiGetPteAddress( MmNonPagedSystemStart );
+ PointerPte = MiGetPteAddress( MmNonPagedPoolStart );
+
+ RtlZeroMemory( StartPde, (ULONG)PointerPte - (ULONG)StartPde );
+
+ //
+ // Fill in the PTEs for non-paged pool.
+ //
+
+ PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
+ LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes - 1);
+
+ if (!LowMemoryReserved) {
+
+ while (PointerPte <= LastPte) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ PointerPte++;
+ }
+
+ } else {
+
+ ULONG ReservedPage = FreeDescriptorLowMem->BasePage;
+
+ while (PointerPte <= LastPte) {
+ TempPte.u.Hard.PageFrameNumber = ReservedPage;
+ ReservedPage += 1;
+ *PointerPte = TempPte;
+ PointerPte++;
+ }
+ }
+
+ //
+ // Zero the remaining PTEs for non-paged pool maximum.
+ //
+
+ LastPte = MiGetPteAddress( (ULONG)MmNonPagedPoolStart +
+ MmMaximumNonPagedPoolInBytes - 1);
+
+ while( PointerPte <= LastPte ){
+ *PointerPte = ZeroKernelPte;
+ PointerPte++;
+ }
+
+ //
+ // Zero the remaining PTEs (if any).
+ //
+
+ while (((ULONG)PointerPte & (PAGE_SIZE - 1)) != 0) {
+ *PointerPte = ZeroKernelPte;
+ PointerPte++;
+ }
+
+ PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
+ MmNonPagedPoolStart = (PVOID)((PointerPte->u.Hard.PageFrameNumber << PAGE_SHIFT) |
+ KSEG0_BASE);
+ MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
+
+ MmSubsectionBase = (ULONG)MmNonPagedPoolStart;
+ if (NextPhysicalPage < (MM_SUBSECTION_MAP >> PAGE_SHIFT)) {
+ MmSubsectionBase = KSEG0_BASE;
+ MmSubsectionTopPage = MM_SUBSECTION_MAP >> PAGE_SHIFT;
+ }
+
+ //
+ // Non-paged pages now exist, build the pool structures.
+ //
+
+ MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
+ MmSizeOfNonPagedPoolInBytes);
+
+ MiInitializeNonPagedPool (NonPagedPoolStartVirtual);
+
+ //
+ // Before Non-paged pool can be used, the PFN database must
+ // be built. This is due to the fact that the start and end of
+ // allocation bits for nonpaged pool are maintained in the
+ // PFN elements for the corresponding pages.
+ //
+
+ //
+ // Calculate the number of pages required from page zero to
+ // the highest page.
+ //
+    // Get the number of secondary colors and add the array for tracking
+ // secondary colors to the end of the PFN database.
+ //
+
+ if (MmSecondaryColors == 0) {
+ MmSecondaryColors = PCR->SecondLevelCacheSize;
+ }
+
+ MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
+
+ //
+ // Make sure value is power of two and within limits.
+ //
+
+ if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) ||
+ (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
+ (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
+ MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
+ }
+
+ MmSecondaryColorMask = MmSecondaryColors - 1;
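+
+    //
+    // Worked example (figures are illustrative): a 2MB second-level cache
+    // with 8KB pages yields MmSecondaryColors = 2MB >> PAGE_SHIFT = 256
+    // colors and a MmSecondaryColorMask of 0xFF, within the allowed
+    // [MM_SECONDARY_COLORS_MIN, MM_SECONDARY_COLORS_MAX] range.
+    //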
+
+ PfnAllocation = 1 + ((((MmHighestPhysicalPage + 1) * sizeof(MMPFN)) +
+ (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
+ >> PAGE_SHIFT);
+
+ //
+ // If the number of pages remaining in the current descriptor is
+ // greater than the number of pages needed for the PFN database,
+ // and the descriptor is for memory below 1 gig, then allocate the
+ // PFN database from the current free descriptor.
+ // Note: FW creates a new memory descriptor for any memory above 1GB.
+ // Thus we don't need to worry if the highest page will go beyond 1GB for
+ // this memory descriptor.
+ //
+
+ if ((NumberOfPages >= PfnAllocation) &&
+ (NextPhysicalPage < MM_PAGES_IN_KSEG0)) {
+
+ //
+ // Allocate the PFN database in kseg0.
+ //
+ // Compute the address of the PFN by allocating the appropriate
+ // number of pages from the end of the free descriptor.
+ //
+
+ PfnInKseg0 = TRUE;
+ HighPage = NextPhysicalPage + NumberOfPages;
+ MmPfnDatabase = (PMMPFN)(KSEG0_BASE |
+ ((HighPage - PfnAllocation) << PAGE_SHIFT));
+ RtlZeroMemory(MmPfnDatabase, PfnAllocation * PAGE_SIZE);
+
+ //
+ // Mark off the chunk of memory used for the PFN database.
+ //
+
+ NumberOfPages -= PfnAllocation;
+
+ if (NextPhysicalPage >= FreeDescriptorLowMem->BasePage &&
+ NextPhysicalPage < (FreeDescriptorLowMem->BasePage +
+ FreeDescriptorLowMem->PageCount)) {
+
+ //
+ // We haven't used the other descriptor.
+ //
+
+ FreeDescriptorLowMem->PageCount -= PfnAllocation;
+
+ } else {
+
+ FreeDescriptor->PageCount -= PfnAllocation;
+ }
+
+ } else {
+
+ //
+        // Calculate the start of the PFN database (it starts at physical
+        // page zero, even if the lowest physical page is not zero).
+ //
+
+ PfnInKseg0 = FALSE;
+ PointerPte = MiReserveSystemPtes (PfnAllocation,
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ TRUE);
+
+ MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
+
+ //
+ // Go through the memory descriptors and for each physical page
+        // make sure the PFN database has a valid PTE to map it. This allows
+ // machines with sparse physical memory to have a minimal PFN
+ // database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage));
+
+ LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage +
+ MemoryDescriptor->PageCount))) - 1);
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+ }
+
+ //
+ // Initialize support for colored pages.
+ //
+
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
+ &MmPfnDatabase[MmHighestPhysicalPage + 1];
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ //
+ // Make sure the PTEs are mapped.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0])) {
+
+ PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
+
+ LastPte = MiGetPteAddress (
+ (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors]-1));
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ }
+
+ for (i = 0; i < MmSecondaryColors; i++) {
+ MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ }
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList;
+ MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST;
+ }
+#endif
+
+ //
+ // Go through the page table entries and for any page which is
+ // valid, update the corresponding PFN database element.
+ //
+
+ PointerPde = MiGetPdeAddress (PTE_BASE);
+
+ PdePage = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PdePage);
+ Pfn1->PteFrame = PdePage;
+ Pfn1->PteAddress = PointerPde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor =
+             MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (PointerPde));
+
+ //
+ // Add the pages which were used to construct nonpaged pool to
+ // the pfn database.
+ //
+
+ Pde = MiGetPdeAddress ((ULONG)NonPagedPoolStartVirtual -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE));
+
+ EndPde = MiGetPdeAddress(NON_PAGED_SYSTEM_END);
+
+ while (Pde <= EndPde) {
+ if (Pde->u.Hard.Valid == 1) {
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PdePage);
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor =
+ MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pde));
+
+ PointerPte = MiGetVirtualAddressMappedByPte (Pde);
+ for (j = 0 ; j < PTE_PER_PAGE; j++) {
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn2->PteFrame = PdePage;
+ Pfn2->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
+ Pfn2->u2.ShareCount += 1;
+ Pfn2->u3.e2.ReferenceCount = 1;
+ Pfn2->u3.e1.PageLocation = ActiveAndValid;
+
+ Pfn2->PteAddress =
+ (PMMPTE)(KSEG0_BASE | (PageFrameIndex << PTE_SHIFT));
+
+ Pfn2->u3.e1.PageColor =
+ MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn2->PteAddress));
+ }
+ PointerPte++;
+ }
+ }
+ Pde++;
+ }
+
+ //
+ // If page zero is still unused, mark it as in use. This is
+ // temporary as we want to find bugs where a physical page
+ // is specified as zero.
+ //
+
+ Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Make the reference count non-zero and point it into a
+ // page directory.
+ //
+
+ Pde = MiGetPdeAddress (0xb0000000);
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor =
+ MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pde));
+ }
+
+    // End of temporary handling for physical page zero.
+
+ //
+ // Walk through the memory descriptors and add pages to the
+ // free list in the PFN database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ i = MemoryDescriptor->PageCount;
+ NextPhysicalPage = MemoryDescriptor->BasePage;
+
+ switch (MemoryDescriptor->MemoryType) {
+ case LoaderBad:
+ while (i != 0) {
+ MiInsertPageInList (MmPageLocationList[BadPageList],
+ NextPhysicalPage);
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ case LoaderFree:
+ case LoaderLoadedProgram:
+ case LoaderFirmwareTemporary:
+ case LoaderOsloaderStack:
+
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+                    // Set the PTE address to the physical page for
+ // virtual address alignment checking.
+ //
+
+ Pfn1->PteAddress =
+ (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
+
+ Pfn1->u3.e1.PageColor =
+ MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress));
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ NextPhysicalPage);
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ default:
+
+ PointerPte = MiGetPteAddress (KSEG0_BASE |
+ (NextPhysicalPage << PAGE_SHIFT));
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+
+ //
+ // Set page as in use.
+ //
+
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor =
+ MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (PointerPte));
+
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ PointerPte += 1;
+ }
+
+ break;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+ // Indicate that the PFN database is allocated in NonPaged pool.
+ //
+ if (PfnInKseg0 == FALSE) {
+
+ //
+ // The PFN database is allocated in virtual memory
+ //
+ // Set the start and end of allocation.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmLowestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmHighestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ } else {
+
+ //
+ // The PFN database is allocated in KSEG0.
+ //
+ // Mark all pfn entries for the pfn pages in use.
+ //
+
+ PageNumber = ((ULONG)MmPfnDatabase - KSEG0_BASE) >> PAGE_SHIFT;
+ Pfn1 = MI_PFN_ELEMENT(PageNumber);
+ do {
+ Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT);
+ Pfn1->u3.e1.PageColor =
+ MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress));
+ Pfn1 += 1;
+ PfnAllocation -= 1;
+ } while ( PfnAllocation != 0 );
+
+ //
+ // Scan the PFN database backward for pages that are completely zero.
+ // These pages are unused and can be added to the free list
+        // These pages are unused and can be added to the free list.
+
+ BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
+ do {
+
+ //
+ // Compute the address of the start of the page that is next
+ // lower in memory and scan backwards until that page address
+ // is reached or just crossed.
+ //
+
+ if (((ULONG)BottomPfn & (PAGE_SIZE - 1)) != 0) {
+ BasePfn = (PMMPFN)((ULONG)BottomPfn & ~(PAGE_SIZE - 1));
+ TopPfn = BottomPfn + 1;
+
+ } else {
+ BasePfn = (PMMPFN)((ULONG)BottomPfn - PAGE_SIZE);
+ TopPfn = BottomPfn;
+ }
+
+ while (BottomPfn > BasePfn) {
+ BottomPfn -= 1;
+ }
+
+ //
+ // If the entire range over which the PFN entries span is
+ // completely zero and the PFN entry that maps the page is
+ // not in the range, then add the page to the appropriate
+ // free list.
+ //
+
+ Range = (ULONG)TopPfn - (ULONG)BottomPfn;
+ if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
+
+ //
+ // Set the PTE address to the physical page for
+ // virtual address alignment checking.
+ //
+
+ PageNumber = ((ULONG)BasePfn - KSEG0_BASE) >> PAGE_SHIFT;
+ Pfn1 = MI_PFN_ELEMENT(PageNumber);
+
+ ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
+
+ PfnAllocation += 1;
+
+ Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT);
+ Pfn1->u3.e1.PageColor =
+ MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress));
+
+ MiInsertPageInList(MmPageLocationList[FreePageList],
+ PageNumber);
+ }
+
+ } while ( BottomPfn > MmPfnDatabase );
+ }
+
+ //
+    // Indicate that the nonpaged must-succeed pool is allocated within
+    // nonpaged pool.
+ //
+
+ i = MmSizeOfNonPagedMustSucceed;
+ Pfn1 = MI_PFN_ELEMENT(MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedMustSucceed));
+
+ while ((LONG)i > 0) {
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ i -= PAGE_SIZE;
+ Pfn1 += 1;
+ }
+
+ KeInitializeSpinLock (&MmSystemSpaceLock);
+ KeInitializeSpinLock (&MmPfnLock);
+
+ //
+ // Initialize the nonpaged available PTEs for mapping I/O space
+ // and kernel stacks.
+ //
+
+ PointerPte = MiGetPteAddress ((ULONG)NonPagedPoolStartVirtual -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE));
+
+ PointerPte = (PMMPTE)PAGE_ALIGN (PointerPte);
+
+ if (PfnInKseg0) {
+ MmNumberOfSystemPtes = MiGetPteAddress(MmNonPagedPoolExpansionStart) - PointerPte - 1;
+ } else {
+ MmNumberOfSystemPtes = MiGetPteAddress(NonPagedPoolStartVirtual) - PointerPte - 1;
+ }
+
+ MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
+
+ //
+ // Initialize the nonpaged pool.
+ //
+
+ InitializePool (NonPagedPool, 0);
+
+ //
+ // Initialize memory management structures for this process.
+ //
+
+ //
+ // Build working set list. System initialization has created
+ // a PTE for hyperspace.
+ //
+ // Note, we can't remove a zeroed page as hyper space does not
+ // exist and we map non-zeroed pages into hyper space to zero.
+ //
+
+ PointerPte = MiGetPdeAddress(HYPER_SPACE);
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PointerPte->u.Hard.Global = 0;
+ PointerPte->u.Hard.Write = 1;
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Point to the page table page we just created and zero it.
+ //
+
+
+// KeFillEntryTb ((PHARDWARE_PTE)PointerPte,
+// MiGetPteAddress(HYPER_SPACE),
+// TRUE);
+
+ PointerPte = MiGetPteAddress(HYPER_SPACE);
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ //
+ // Hyper space now exists, set the necessary variables.
+ //
+
+ MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
+ MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
+
+ MmWorkingSetList = WORKING_SET_LIST;
+ MmWsle = (PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL));
+
+ //
+ // Initialize this process's memory management structures including
+ // the working set list.
+ //
+
+ //
+ // The pfn element for the page directory has already been initialized,
+ // zero the reference count and the share count so they won't be
+ // wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+
+ //
+ // The pfn element for the PDE which maps hyperspace has already
+ // been initialized, zero the reference count and the share count
+ // so they won't be wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+ //
+ // Get a page for the working set list and map it into the Page
+ // directory at the page after hyperspace.
+ //
+
+ PointerPte = MiGetPteAddress (HYPER_SPACE);
+ PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE(PointerPte));
+
+ CurrentProcess->WorkingSetPage = PageFrameIndex;
+
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ PointerPde = MiGetPdeAddress (HYPER_SPACE) + 1;
+
+ //
+ // Assert that the double mapped pages have the same alignment.
+ //
+
+ ASSERT ((PointerPte->u.Long & (0xF << PTE_SHIFT)) ==
+ (PointerPde->u.Long & (0xF << PTE_SHIFT)));
+
+ *PointerPde = TempPte;
+ PointerPde->u.Hard.Global = 0;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPde,
+ PointerPte,
+ TRUE);
+
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ TempPte = *PointerPde;
+ TempPte.u.Hard.Valid = 0;
+ TempPte.u.Hard.Global = 0;
+
+ KeFlushSingleTb (PointerPte,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPde,
+ TempPte.u.Hard);
+
+ KeLowerIrql(OldIrql);
+
+ //
+ // Initialize hyperspace for this process.
+ //
+
+ PointerPte = MmFirstReservedMappingPte;
+ PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
+
+ CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
+ CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
+
+ MmInitializeProcessAddressSpace (CurrentProcess,
+ (PEPROCESS)NULL,
+ (PVOID)NULL);
+
+ *PointerPde = ZeroKernelPte;
+
+ //
+    // Check to see if moving the secondary page structures to the end
+    // of the PFN database is a waste of memory, and if so, copy them
+    // into pool allocated from nonpaged pool.
+    //
+    // If the PFN database ends on a page aligned boundary and the
+    // size of the two arrays is less than a page, free the page
+    // and allocate nonpaged pool for this.
+ //
+
+ if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
+ ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
+
+ PMMCOLOR_TABLES c;
+
+ c = MmFreePagesByColor[0];
+
+ MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
+ ' mM');
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ RtlMoveMemory (MmFreePagesByColor[0],
+ c,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
+
+ //
+ // Free the page.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(c)) {
+ PointerPte = MiGetPteAddress(c);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ *PointerPte = ZeroKernelPte;
+ } else {
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c);
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT ((Pfn1->u3.e2.ReferenceCount <= 1) && (Pfn1->u2.ShareCount <= 1));
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ MI_SET_PFN_DELETED (Pfn1);
+#if DBG
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+#endif //DBG
+ MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex);
+ }
+
+ return;
+}
diff --git a/private/ntos/mm/alpha/mialpha.h b/private/ntos/mm/alpha/mialpha.h
new file mode 100644
index 000000000..0ba5addfe
--- /dev/null
+++ b/private/ntos/mm/alpha/mialpha.h
@@ -0,0 +1,2048 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+ mialpha.h
+
+Abstract:
+
+ This module contains the private data structures and procedure
+ prototypes for the hardware dependent portion of the
+ memory management system.
+
+ It is specifically tailored for the DEC ALPHA architecture.
+
+Author:
+ Lou Perazzoli (loup) 12-Mar-1990
+ Joe Notarangelo 23-Apr-1992 ALPHA version
+
+Revision History:
+
+--*/
+
+/*++
+
+ Virtual Memory Layout on an ALPHA is:
+
+ +------------------------------------+
+ 00000000 | |
+ | |
+ | |
+ | User Mode Addresses |
+ | |
+ | All pages within this range |
+                  |   are potentially accessible while |
+ | the CPU is in USER mode. |
+ | |
+ | |
+ +------------------------------------+
+ 7ffff000 | 64k No Access Area |
+ +------------------------------------+
+ 80000000 | | KSEG_0
+ | HAL loads kernel and initial |
+ | boot drivers in first 16mb |
+ | of this region. |
+ | Kernel mode access only. |
+ | |
+ | Initial NonPaged Pool is within |
+                  |   KSEG_0                           |
+ | |
+ +------------------------------------+
+ C0000000 | Page Table Pages mapped through |
+ | this 16mb region |
+ | Kernel mode access only. |
+ | (only using 2MB) |
+ +------------------------------------+
+ C1000000 | HyperSpace - working set lists |
+                  |   and per process memory management|
+ | structures mapped in this 16mb |
+ | region. |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C2000000 | No Access Region (16MB) |
+ | |
+ | |
+ +------------------------------------+
+ C3000000 | System Cache Structures |
+ | reside in this 16mb region |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C4000000 | System cache resides here. |
+ | Kernel mode access only. |
+ | |
+ | |
+ +------------------------------------+
+ DE000000 | System mapped views |
+ | |
+ | |
+ +------------------------------------+
+ E1000000 | Start of paged system area |
+ | Kernel mode access only. |
+ | |
+ | |
+ | |
+ F0000000 +------------------------------------+
+ | |
+ | Kernel mode access only. |
+ | |
+ | |
+ | NonPaged System area |
+ +------------------------------------+
+ FE000000 | |
+ | Reserved for the HAL. |
+ | |
+ | |
+ FFFFFFFF | |
+ +------------------------------------+
+
+--*/
+
+//
+// Address space definitions.
+//
+
+#define MmProtopte_Base ((ULONG)0xE1000000)
+
+#define PDE_TOP (0xC01FFFFF)
+
+#define MM_PAGES_IN_KSEG0 (((ULONG)KSEG2_BASE - (ULONG)KSEG0_BASE) >> PAGE_SHIFT)
+
+#define MM_SYSTEM_RANGE_START (0x80000000)
+
+#define MM_SYSTEM_SPACE_START (0xC3000000)
+
+#define MM_SYSTEM_CACHE_START (0xC4000000)
+
+#define MM_SYSTEM_CACHE_END (0xDE000000)
+
+#define MM_MAXIMUM_SYSTEM_CACHE_SIZE \
+ ( ((ULONG)MM_SYSTEM_CACHE_END - (ULONG)MM_SYSTEM_CACHE_START) >> PAGE_SHIFT )
+
+#define MM_SYSTEM_CACHE_WORKING_SET (0xC3000000)
+
+//
+// Define area for mapping views into system space.
+//
+
+#define MM_SYSTEM_VIEW_START (0xDE000000)
+
+#define MM_SYSTEM_VIEW_SIZE (48*1024*1024)
+
+#define MM_PAGED_POOL_START ((PVOID)0xE1000000)
+
+#define MM_LOWEST_NONPAGED_SYSTEM_START ((PVOID)0xEB000000)
+
+#define MM_NONPAGED_POOL_END ((PVOID)(0xFE000000-(16*PAGE_SIZE)))
+
+#define NON_PAGED_SYSTEM_END ((PVOID)0xFFFFFFF0) //quadword aligned.
+
+#define MM_SYSTEM_SPACE_END (0xFFFFFFFF)
+
+#define HYPER_SPACE_END (0xC1FFFFFF)
+
+//
+// Define absolute minimum and maximum count for system PTEs.
+//
+
+#define MM_MINIMUM_SYSTEM_PTES 5000
+
+#define MM_MAXIMUM_SYSTEM_PTES 20000
+
+#define MM_DEFAULT_SYSTEM_PTES 11000
+
+//
+// Pool limits.
+//
+
+//
+// The maximum amount of nonpaged pool that can be initially created.
+//
+
+#define MM_MAX_INITIAL_NONPAGED_POOL ((ULONG)(96*1024*1024))
+
+//
+// The total amount of nonpaged pool.
+//
+
+#define MM_MAX_ADDITIONAL_NONPAGED_POOL ((ULONG)((256*1024*1024)-16))
+
+//
+// The maximum amount of paged pool that can be created.
+//
+
+#define MM_MAX_PAGED_POOL ((ULONG)(240*1024*1024))
+
+//
+// Define the maximum default for pool (user specified 0 in registry).
+//
+
+#define MM_MAX_DEFAULT_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+#define MM_MAX_DEFAULT_PAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The maximum total pool.
+//
+
+#define MM_MAX_TOTAL_POOL \
+ (((ULONG)MM_NONPAGED_POOL_END) - ((ULONG)MM_PAGED_POOL_START))
+
+//
+// Granularity Hint definitions
+//
+
+//
+// Granularity Hint = 3, page size = 8**3 * PAGE_SIZE
+//
+
+#define GH3 (3)
+#define GH3_PAGE_SIZE (PAGE_SIZE << 9)
+
+//
+// Granularity Hint = 2, page size = 8**2 * PAGE_SIZE
+//
+
+#define GH2 (2)
+#define GH2_PAGE_SIZE (PAGE_SIZE << 6)
+
+//
+// Granularity Hint = 1, page size = 8**1 * PAGE_SIZE
+//
+
+#define GH1 (1)
+#define GH1_PAGE_SIZE (PAGE_SIZE << 3)
+
+//
+// Granularity Hint = 0, page size = PAGE_SIZE
+//
+
+#define GH0 (0)
+#define GH0_PAGE_SIZE PAGE_SIZE
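+
+//
+// With the 8KB ALPHA page size the granularity hints above evaluate to
+// (illustrative arithmetic): GH1 maps 8KB << 3 = 64KB, GH2 maps
+// 8KB << 6 = 512KB, and GH3 maps 8KB << 9 = 4MB per TB entry.
+//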
+
+
+//
+// Physical memory size and boundary constants.
+//
+
+#define __1GB (0x40000000)
+
+//
+// PAGE_SIZE for ALPHA (at least the current implementation) is 8KB;
+// PAGE_SHIFT (13) bits of byte offset leave 19 bits of virtual page number.
+//
+
+#define MM_VIRTUAL_PAGE_SHIFT (19)
+
+
+#define MM_PROTO_PTE_ALIGNMENT ((ULONG)MM_MAXIMUM_NUMBER_OF_COLORS * (ULONG)PAGE_SIZE)
+
+//
+// Define maximum number of paging files
+//
+
+#define MAX_PAGE_FILES (8)
+
+
+#define PAGE_DIRECTORY_MASK ((ULONG)0x00FFFFFF)
+
+#define MM_VA_MAPPED_BY_PDE (0x1000000)
+
+#define LOWEST_IO_ADDRESS (0)
+
+#define PTE_SHIFT (2)
+
+//
+// Number of physical address bits, maximum for ALPHA architecture = 48.
+//
+
+#define PHYSICAL_ADDRESS_BITS (48)
+
+#define MM_MAXIMUM_NUMBER_OF_COLORS (1)
+
+//
+// ALPHA does not require support for colored pages.
+//
+
+#define MM_NUMBER_OF_COLORS (1)
+
+//
+// Mask for obtaining color from a physical page number.
+//
+
+#define MM_COLOR_MASK (0)
+
+//
+// Boundary upon which to align pages of like color.
+//
+
+#define MM_COLOR_ALIGNMENT (0)
+
+//
+// Mask for isolating color from virtual address.
+//
+
+#define MM_COLOR_MASK_VIRTUAL (0)
+
+//
+// Define 1mb worth of secondary colors.
+//
+
+#define MM_SECONDARY_COLORS_DEFAULT ((1024*1024) >> PAGE_SHIFT)
+
+#define MM_SECONDARY_COLORS_MIN (2)
+
+#define MM_SECONDARY_COLORS_MAX (2048)
+
+//
+// Mask for isolating secondary color from physical page number.
+//
+
+extern ULONG MmSecondaryColorMask;
+
+//
+// Hyper space definitions.
+//
+
+#define HYPER_SPACE ((PVOID)0xC1000000)
+
+#define FIRST_MAPPING_PTE ((ULONG)0xC1000000)
+
+#define NUMBER_OF_MAPPING_PTES (1023)
+
+#define LAST_MAPPING_PTE \
+ ((ULONG)((ULONG)FIRST_MAPPING_PTE + (NUMBER_OF_MAPPING_PTES * PAGE_SIZE)))
+
+#define IMAGE_MAPPING_PTE ((PMMPTE)((ULONG)LAST_MAPPING_PTE + PAGE_SIZE))
+
+#define ZEROING_PAGE_PTE ((PMMPTE)((ULONG)IMAGE_MAPPING_PTE + PAGE_SIZE))
+
+#define WORKING_SET_LIST ((PVOID)((ULONG)ZEROING_PAGE_PTE + PAGE_SIZE))
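+
+//
+// Worked layout of the hyper space definitions above (illustrative,
+// assuming the 8KB page size):
+//
+//   FIRST_MAPPING_PTE = 0xC1000000
+//   LAST_MAPPING_PTE  = 0xC1000000 + 1023 * 0x2000 = 0xC17FE000
+//   IMAGE_MAPPING_PTE = 0xC1800000
+//   ZEROING_PAGE_PTE  = 0xC1802000
+//   WORKING_SET_LIST  = 0xC1804000
+//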
+
+#define MM_MAXIMUM_WORKING_SET \
+ ((ULONG)((ULONG)2*1024*1024*1024 - 64*1024*1024) >> PAGE_SHIFT) //2Gb-64Mb
+
+#define MM_WORKING_SET_END ((ULONG)0xC2000000)
+
+#define MM_PTE_VALID_MASK (0x1)
+#define MM_PTE_PROTOTYPE_MASK (0x2)
+#define MM_PTE_DIRTY_MASK (0x4)
+#define MM_PTE_TRANSITION_MASK (0x4)
+#define MM_PTE_GLOBAL_MASK (0x10)
+#define MM_PTE_WRITE_MASK (0x80)
+#define MM_PTE_COPY_ON_WRITE_MASK (0x100)
+#define MM_PTE_OWNER_MASK (0x2)
+//
+// Bit fields to or into PTE to make a PTE valid based on the
+// protection field of the invalid PTE.
+//
+
+#define MM_PTE_NOACCESS          (0x0)   // not expressible on ALPHA
+#define MM_PTE_READONLY (0x0)
+#define MM_PTE_READWRITE (MM_PTE_WRITE_MASK)
+#define MM_PTE_WRITECOPY (MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_EXECUTE (0x0) // read-only on ALPHA
+#define MM_PTE_EXECUTE_READ (0x0)
+#define MM_PTE_EXECUTE_READWRITE (MM_PTE_WRITE_MASK)
+#define MM_PTE_EXECUTE_WRITECOPY (MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_NOCACHE           (0x0)   // not expressible on ALPHA
+#define MM_PTE_GUARD             (0x0)   // not expressible on ALPHA
+#define MM_PTE_CACHE (0x0)
+
+#define MM_PROTECT_FIELD_SHIFT 3
+
+//
+// Zero PTE
+//
+
+#define MM_ZERO_PTE 0
+
+//
+// Zero Kernel PTE
+//
+
+#define MM_ZERO_KERNEL_PTE 0
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE.
+//
+
+#define MM_DEMAND_ZERO_WRITE_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE for system space.
+//
+
+#define MM_KERNEL_DEMAND_ZERO_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+//
+// A no access PTE for system space.
+//
+
+#define MM_KERNEL_NOACCESS_PTE (MM_NOACCESS << MM_PROTECT_FIELD_SHIFT)
+
+//
+// Dirty bit definitions for clean and dirty.
+//
+
+#define MM_PTE_CLEAN 0
+
+#define MM_PTE_DIRTY 1
+
+
+//
+// Kernel stack alignment requirements.
+//
+
+#define MM_STACK_ALIGNMENT (0x0)
+#define MM_STACK_OFFSET (0x0)
+
+//
+// System process definitions
+//
+
+#define PDE_PER_PAGE ((ULONG)256)
+
+#define PTE_PER_PAGE ((ULONG)2048)
+
+//
+// Number of page table pages for user addresses.
+//
+
+#define MM_USER_PAGE_TABLE_PAGES (128)
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE (
+// OUT OUTPTE,
+// IN FRAME,
+// IN PMASK,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro makes a valid PTE from a page frame number, protection mask,
+// and owner.
+//
+// Arguments:
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// FRAME - Supplies the page frame number for the PTE.
+//
+// PMASK - Supplies the protection to set in the transition PTE.
+//
+// PPTE - Supplies a pointer to the PTE which is being made valid.
+// For prototype PTEs NULL should be specified.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE(OUTPTE,FRAME,PMASK,PPTE) \
+ { \
+ (OUTPTE).u.Long = ( (FRAME << 9) | \
+ (MmProtectToPteMask[PMASK]) | \
+ MM_PTE_VALID_MASK ); \
+ (OUTPTE).u.Hard.Owner = MI_DETERMINE_OWNER(PPTE); \
+ if (((PMMPTE)PPTE) >= MiGetPteAddress(MM_SYSTEM_SPACE_START)) { \
+ (OUTPTE).u.Hard.Global = 1; \
+ } else { \
+ (OUTPTE).u.Hard.Global = 0; \
+ } \
+ }
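+
+//
+// Illustrative usage sketch (not part of the build): building and
+// installing a valid read/write PTE for a newly allocated page frame.
+// The variable names are hypothetical.
+//
+
+#if 0
+    MMPTE TempPte;
+
+    MI_MAKE_VALID_PTE (TempPte, PageFrameIndex, MM_READWRITE, PointerPte);
+    MI_SET_PTE_DIRTY (TempPte);         // optionally pre-dirty the PTE
+    *PointerPte = TempPte;
+#endif // 0 - illustrative only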
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_TRANSITION (
+// IN OUT OUTPTE
+// IN PROTECT
+// );
+//
+// Routine Description:
+//
+//    This macro takes a valid PTE and turns it into a transition PTE.
+//
+// Arguments:
+//
+// OUTPTE - Supplies the current valid PTE. This PTE is then
+// modified to become a transition PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
+ (OUTPTE).u.Soft.Transition = 1; \
+ (OUTPTE).u.Soft.Valid = 0; \
+ (OUTPTE).u.Soft.Prototype = 0; \
+ (OUTPTE).u.Soft.Protection = PROTECT;
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE (
+// OUT OUTPTE,
+// IN PAGE,
+// IN PROTECT,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+//    This macro builds a transition PTE from a page frame number and
+//    protection.
+//
+// Arguments:
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// PAGE - Supplies the page frame number for the PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+//    PPTE - Supplies a pointer to the PTE; this is used to determine
+// the owner of the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE(OUTPTE,PAGE,PROTECT,PPTE) \
+ (OUTPTE).u.Long = 0; \
+ (OUTPTE).u.Trans.PageFrameNumber = PAGE; \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Protection = PROTECT;
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE_VALID (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+//    This macro takes a transition PTE and makes it a valid PTE.
+//
+// Arguments:
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE) \
+ (OUTPTE).u.Long = (((PPTE)->u.Long & 0xFFFFFE00) | \
+ (MmProtectToPteMask[(PPTE)->u.Trans.Protection]) | \
+ MM_PTE_VALID_MASK); \
+ (OUTPTE).u.Hard.Owner = MI_DETERMINE_OWNER( PPTE ); \
+ if (((PMMPTE)PPTE) >= MiGetPteAddress(MM_SYSTEM_SPACE_START)) { \
+ (OUTPTE).u.Hard.Global = 1; \
+ } else { \
+ (OUTPTE).u.Hard.Global = 0; \
+ }
+
+
+//++
+//VOID
+//MI_SET_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit(s) in the specified PTE.
+//
+// Arguments:
+//
+// PTE - Supplies the PTE to set dirty.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_DIRTY(PTE) (PTE).u.Hard.Dirty = MM_PTE_DIRTY
+
+
+//++
+//VOID
+//MI_SET_PTE_CLEAN (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro clears the dirty bit(s) in the specified PTE.
+//
+// Arguments:
+//
+//    PTE - Supplies the PTE to set clean.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_CLEAN(PTE) (PTE).u.Hard.Dirty = MM_PTE_CLEAN
+
+
+
+//++
+//VOID
+//MI_IS_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks the dirty bit(s) in the specified PTE.
+//
+// Arguments:
+//
+// PTE - Supplies the PTE to check.
+//
+// Return Value:
+//
+// TRUE if the page is dirty (modified), FALSE otherwise.
+//
+//--
+
+#define MI_IS_PTE_DIRTY(PTE) ((PTE).u.Hard.Dirty != MM_PTE_CLEAN)
+
+//++
+//VOID
+//MI_SET_GLOBAL_BIT_IF_SYSTEM (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global bit if the pointer PTE is within
+// system space.
+//
+// Arguments:
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE becoming valid.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ // Global not implemented in software PTE for Alpha
+#define MI_SET_GLOBAL_BIT_IF_SYSTEM(OUTPTE,PPTE)
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_STATE (
+// IN MMPTE PTE,
+// IN ULONG STATE
+// );
+//
+// Routine Description:
+//
+//    This macro sets the global bit state in the PTE.
+//
+// Arguments:
+//
+//    PTE - Supplies the PTE to set the global state into.
+//
+//    STATE - Supplies the new state of the global bit.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_STATE(PTE,STATE) \
+ (PTE).u.Hard.Global = STATE;
+
+
+
+
+
+//++
+//VOID
+//MI_ENABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// enabled.
+//
+// Arguments:
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ // not implemented on ALPHA
+#define MI_ENABLE_CACHING(PTE)
+
+//++
+//VOID
+//MI_DISABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// disabled.
+//
+// Arguments:
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ // not implemented on ALPHA
+#define MI_DISABLE_CACHING(PTE)
+
+//++
+//BOOLEAN
+//MI_IS_CACHING_DISABLED (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and returns TRUE if caching is
+// disabled.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the valid PTE.
+//
+// Return Value:
+//
+// TRUE if caching is disabled, FALSE if it is enabled.
+//
+//--
+ // caching is always on for ALPHA
+#define MI_IS_CACHING_DISABLED(PPTE) (FALSE)
+
+
+
+//++
+//VOID
+//MI_SET_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and indicates that
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPFN - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_SET_PFN_DELETED(PPFN) \
+ (((ULONG)(PPFN)->PteAddress &= (ULONG)0x7FFFFFFF))
+
+
+//++
+//BOOLEAN
+//MI_IS_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and determines if
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPFN - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// TRUE if PFN is no longer used, FALSE if it is still being used.
+//
+//--
+
+#define MI_IS_PFN_DELETED(PPFN) \
+ ( ( (ULONG)((PPFN)->PteAddress) & 0x80000000 ) == 0 )
+
+
+//++
+//VOID
+//MI_CHECK_PAGE_ALIGNMENT (
+// IN ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro takes a PFN element number (Page) and checks to see
+// if the virtual alignment for the previous address of the page
+// is compatible with the new address of the page. If they are
+// not compatible, the D cache is flushed.
+//
+// Arguments
+//
+// PAGE - Supplies the PFN element number.
+// COLOR - Supplies the new page color of the page.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+
+#define MI_CHECK_PAGE_ALIGNMENT(PAGE,COLOR)
+
+
+//++
+//VOID
+//MI_INITIALIZE_HYPERSPACE_MAP (
+// VOID
+// );
+//
+// Routine Description:
+//
+// This macro initializes the PTEs reserved for double mapping within
+// hyperspace.
+//
+// Arguments
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ // not implemented for ALPHA, we use super-pages
+#define MI_INITIALIZE_HYPERSPACE_MAP(HYPER_PAGE)
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_PTE (
+// IN PMMPTE PTEADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+// PTEADDRESS - Supplies the PTE address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+
+#define MI_GET_PAGE_COLOR_FROM_PTE(PTEADDRESS) \
+ ((ULONG)((MmSystemPageColor++) & MmSecondaryColorMask))
+
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_VA (
+// IN PVOID ADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the virtual
+// address at which the page is (or was) mapped.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_VA(ADDRESS) \
+ ((ULONG)((MmSystemPageColor++) & MmSecondaryColorMask))
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_PTE_PROCESS (
+// IN PCHAR COLOR,
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+// PTE - Supplies the PTE address the page is (or was) mapped at.
+//
+// COLOR - Supplies a pointer to the current color counter.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+
+#define MI_PAGE_COLOR_PTE_PROCESS(PTE,COLOR) \
+ ((ULONG)((*(COLOR))++) & MmSecondaryColorMask)
+
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_VA_PROCESS (
+// IN PVOID ADDRESS,
+// IN PEPROCESS COLOR
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the virtual
+// address at which the page is (or was) mapped.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// COLOR - Supplies a pointer to the current color counter.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_PAGE_COLOR_VA_PROCESS(ADDRESS,COLOR) \
+ ((ULONG)((*(COLOR))++) & MmSecondaryColorMask)
+
+
+
+//++
+//ULONG
+//MI_GET_NEXT_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the next color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the next of.
+//
+// Return Value:
+//
+// Next color in sequence.
+//
+//--
+
+#define MI_GET_NEXT_COLOR(COLOR) (((COLOR) + 1) & MM_COLOR_MASK)
+
+//++
+//ULONG
+//MI_GET_PREVIOUS_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the previous color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the previous of.
+//
+// Return Value:
+//
+// Previous color in sequence.
+//
+//--
+
+#define MI_GET_PREVIOUS_COLOR(COLOR) (((COLOR) - 1) & MM_COLOR_MASK)
+
+#define MI_GET_SECONDARY_COLOR(PAGE,PFN) ((PAGE) & MmSecondaryColorMask)
+
+#define MI_GET_COLOR_FROM_SECONDARY(SECONDARY_COLOR) (0)
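+
+//
+// Note that the color arithmetic above wraps modulo the number of
+// colors; for example, assuming MM_COLOR_MASK were 3 (four colors),
+// MI_GET_NEXT_COLOR (3) would yield 0 and MI_GET_PREVIOUS_COLOR (0)
+// would yield 3.
+//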
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_BY_COLOR (
+// OUT ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. It does NOT remove the page
+// from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_BY_COLOR(PAGE,COLOR) \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_ANY_COLOR (
+// OUT ULONG PAGE,
+// IN OUT ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. If no page of the desired
+// color exists, all colored lists are searched for a page.
+// It does NOT remove the page from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of the page to locate and returns the
+// color of the page located.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_ANY_COLOR(PAGE,COLOR) \
+{ \
+ if( MmTotalPagesForPagingFile == 0 ){ \
+ PAGE = MM_EMPTY_LIST; \
+ } else { \
+ while( MmModifiedPageListByColor[COLOR].Flink == MM_EMPTY_LIST ){ \
+ COLOR = MI_GET_NEXT_COLOR(COLOR); \
+ } \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink; \
+ } \
+}
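+
+//
+// A usage sketch with assumed names (LastColor is hypothetical): a
+// caller such as the modified page writer can combine the color macros
+// to locate a page and advance the search color for the next request.
+//
+//      ULONG Color;
+//      ULONG Page;
+//
+//      Color = MI_GET_NEXT_COLOR (LastColor);
+//      MI_GET_MODIFIED_PAGE_ANY_COLOR (Page, Color);
+//      if (Page != MM_EMPTY_LIST) {
+//          // remove the page from its colored list and write it out
+//      }
+//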
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_WRITE_COPY (
+// IN OUT PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if the PTE indicates that the
+// page is writable and if so it clears the write bit and
+// sets the copy-on-write bit.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_WRITE_COPY(PPTE) \
+ if ((PPTE)->u.Hard.Write == 1) { \
+ (PPTE)->u.Hard.CopyOnWrite = 1; \
+ (PPTE)->u.Hard.Dirty = MM_PTE_CLEAN; \
+ }
+
+
+//++
+//ULONG
+//MI_DETERMINE_OWNER (
+// IN MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro examines the virtual address of the PTE and determines
+// if the PTE resides in system space or user space.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// 1 if the owner is USER_MODE, 0 if the owner is KERNEL_MODE.
+//
+//--
+
+#define MI_DETERMINE_OWNER(PPTE) \
+ (((ULONG)(PPTE) <= (ULONG)MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) ? 1 : 0)
+
+
+//++
+//VOID
+//MI_SET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// ACCESSED - Supplies the new state of the ACCESSED field.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED)
+
+
+//++
+//ULONG
+//MI_GET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro returns the state of the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the ACCESSED field.
+//
+//--
+
+#define MI_GET_ACCESSED_IN_PTE(PPTE) 0
+
+
+//++
+//VOID
+//MI_SET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// IN ULONG OWNER
+// );
+//
+// Routine Description:
+//
+// This macro sets the owner field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// OWNER - Supplies the new owner value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_OWNER_IN_PTE(PPTE,OWNER) \
+ ( (PPTE)->u.Hard.Owner = OWNER )
+
+
+//++
+//ULONG
+//MI_GET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro gets the owner field from the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the OWNER field.
+//
+//--
+
+#define MI_GET_OWNER_IN_PTE(PPTE) \
+ ( (PPTE)->u.Hard.Owner )
+
+//
+// Bit mask used to clear fields in a PTE before ORing in the
+// prototype PTE offset.
+//
+
+#define CLEAR_FOR_PROTO_PTE_ADDRESS ((ULONG)0x7)
+
+
+//
+// Bit mask used to clear fields in a PTE before ORing in the
+// paging file location.
+//
+
+#define CLEAR_FOR_PAGE_FILE 0x000000F8
+
+//++
+//VOID
+//SET_PAGING_FILE_INFO (
+// IN OUT MMPTE PPTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// FILEINFO - Supplies the number of the paging file.
+//
+// OFFSET - Supplies the offset into the paging file.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define SET_PAGING_FILE_INFO(PTE,FILEINFO,OFFSET) ((((PTE).u.Long & \
+ CLEAR_FOR_PAGE_FILE) | \
+ (((FILEINFO & 0xF) << 8)) | \
+ (OFFSET << 12)))
+
+
+
+//++
+//PMMPTE
+//MiPteToProto (
+// IN PMMPTE lpte
+// );
+//
+// Routine Description:
+//
+// This macro returns the address of the corresponding prototype PTE
+// which was encoded earlier into the supplied PTE.
+//
+// NOTE THAT A PROTOPTE CAN ONLY RESIDE IN PAGED POOL!
+//
+// MAX SIZE = 2^(2+7+21) = 2^30 = 1GB.
+//
+// NOTE that the valid bit must be zero!
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Pointer to the prototype PTE that backs this PTE.
+//
+//--
+//
+//
+
+#define MiPteToProto(lpte) \
+ ( (PMMPTE)( ( ((lpte)->u.Long >> 4 ) << 2 ) + \
+ MmProtopte_Base ) )
+
+
+//++
+//ULONG
+//MiProtoAddressForPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// MiProtoAddressForPte returns the bit field to OR into a PTE to
+// make it reference the supplied prototype PTE, with the prototype
+// PTE bit, MM_PTE_PROTOTYPE_MASK, set.
+//
+// Argments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForPte(proto_va) \
+ (((((ULONG)proto_va - MmProtopte_Base) << 2) & 0xfffffff0) | \
+ MM_PTE_PROTOTYPE_MASK )
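+
+//
+// A round-trip sketch (ProtoPte and TempPte are assumed names):
+// encoding a prototype PTE address and decoding it again recovers the
+// original pointer, provided the prototype PTE lies in paged pool
+// within 1GB of MmProtopte_Base and is 4-byte aligned.
+//
+//      TempPte.u.Long = MiProtoAddressForPte (ProtoPte);
+//      ASSERT (MiPteToProto (&TempPte) == ProtoPte);
+//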
+
+//++
+//ULONG
+//MiProtoAddressForKernelPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// MiProtoAddressForKernelPte returns the bit field to OR into a PTE
+// to make it reference the supplied prototype PTE, with the prototype
+// PTE bit, MM_PTE_PROTOTYPE_MASK, set.
+//
+// This macro also sets any other information (such as global bits)
+// required for kernel mode PTEs.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+ // not different on alpha.
+#define MiProtoAddressForKernelPte(proto_va) MiProtoAddressForPte(proto_va)
+
+
+
+#define MM_SUBSECTION_MAP (128*1024*1024)
+
+
+//++
+//PSUBSECTION
+//MiGetSubsectionAddress (
+// IN PMMPTE lpte
+// );
+//
+// Routine Description:
+//
+// This macro takes a PTE and returns the address of the subsection that
+// the PTE refers to. Subsections are quadword structures allocated
+// from nonpaged pool.
+//
+// NOTE THIS MACRO LIMITS THE SIZE OF NON-PAGED POOL!
+// MAXIMUM NONPAGED POOL = 2^(24+3) = 2^27 = 128 MB in both pools.
+//
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// A pointer to the subsection referred to by the supplied PTE.
+//
+//--
+#define MiGetSubsectionAddress(lpte) \
+ ( ((lpte)->u.Subsect.WhichPool == 1) ? \
+ ((PSUBSECTION)((ULONG)MmSubsectionBase + \
+ (((lpte)->u.Long >> 8) << 3) )) \
+ : ((PSUBSECTION)((ULONG)MM_NONPAGED_POOL_END - \
+ (((lpte)->u.Long >> 8) << 3))) )
+
+
+//++
+//ULONG
+//MiGetSubsectionAddressForPte (
+// IN PSUBSECTION VA
+// );
+//
+// Routine Description:
+//
+// This macro takes the address of a subsection and encodes it for use
+// in a PTE.
+//
+// NOTE - THE SUBSECTION ADDRESS MUST BE QUADWORD ALIGNED!
+//
+// Arguments
+//
+// VA - Supplies a pointer to the subsection to encode.
+//
+// Return Value:
+//
+// The mask to set into the PTE to make it reference the supplied
+// subsection.
+//
+//--
+
+#define MiGetSubsectionAddressForPte(VA) \
+ ( ((ULONG)VA < (ULONG)KSEG2_BASE) ? \
+ ( (((ULONG)VA - (ULONG)MmSubsectionBase) << 5) | 0x4 ) \
+ : ( (((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA) << 5 ) ) )
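+
+//
+// A round-trip sketch (Subsection and TempPte are assumed names): for
+// a quadword-aligned subsection allocated from nonpaged pool, encoding
+// and then decoding recovers the original pointer.
+//
+//      TempPte.u.Long = MiGetSubsectionAddressForPte (Subsection);
+//      ASSERT (MiGetSubsectionAddress (&TempPte) == Subsection);
+//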
+
+
+//++
+//PMMPTE
+//MiGetPdeAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeAddress returns the address of the PDE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PDE for.
+//
+// Return Value:
+//
+// The address of the PDE.
+//
+//--
+
+#define MiGetPdeAddress(va) \
+ ((PMMPTE)(((((ULONG)(va)) >> PDI_SHIFT) << 2) + PDE_BASE))
+
+
+//++
+//PMMPTE
+//MiGetPteAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteAddress returns the address of the PTE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PTE for.
+//
+// Return Value:
+//
+// The address of the PTE.
+//
+//--
+
+#define MiGetPteAddress(va) \
+ ((PMMPTE)(((((ULONG)(va)) >> PTI_SHIFT) << 2) + PTE_BASE))
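+
+//
+// A consistency sketch (assuming the self-mapped 32-bit page tables
+// these macros encode): mapping an address to its PTE and back yields
+// the page-aligned form of the original address.
+//
+//      PMMPTE PointerPte;
+//
+//      PointerPte = MiGetPteAddress (Va);
+//      ASSERT (MiGetVirtualAddressMappedByPte (PointerPte) ==
+//                                                     PAGE_ALIGN (Va));
+//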
+
+
+//++
+//ULONG
+//MiGetPdeOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeOffset returns the offset into a page directory
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page directory table the corresponding PDE is at.
+//
+//--
+
+#define MiGetPdeOffset(va) (((ULONG)(va)) >> PDI_SHIFT)
+
+
+
+//++
+//ULONG
+//MiGetPteOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteOffset returns the offset into a page table page
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page table page the corresponding PTE is at.
+//
+//--
+
+#define MiGetPteOffset(va) \
+ ( (((ULONG)(va)) << (32-PDI_SHIFT)) >> ((32-PDI_SHIFT) + PTI_SHIFT) )
+
+
+//++
+//PMMPTE
+//MiGetProtoPteAddress (
+// IN PMMPTE VAD,
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// MiGetProtoPteAddress returns a pointer to the prototype PTE which
+// is mapped by the given virtual address descriptor and address within
+// the virtual address descriptor.
+//
+// Arguments
+//
+// VAD - Supplies a pointer to the virtual address descriptor that contains
+// the VA.
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// A pointer to the proto PTE which corresponds to the VA.
+//
+//--
+
+#define MiGetProtoPteAddress(VAD,VA) \
+ (((((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte) <= (ULONG)(VAD)->LastContiguousPte) ? \
+ ((PMMPTE)(((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte)) : \
+ MiGetProtoPteAddressExtended ((VAD),(VA)))
+
+
+//++
+//PVOID
+//MiGetVirtualAddressMappedByPte (
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// MiGetVirtualAddressMappedByPte returns the virtual address
+// which is mapped by a given PTE address.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to get the virtual address for.
+//
+// Return Value:
+//
+// Virtual address mapped by the PTE.
+//
+//--
+
+#define MiGetVirtualAddressMappedByPte(va) \
+ ((PVOID)((ULONG)(va) << (PAGE_SHIFT-2)))
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_NUMBER (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the paging file number from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file number.
+//
+//--
+
+#define GET_PAGING_FILE_NUMBER(PTE) ( ((PTE).u.Long << 20) >> 28 )
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_OFFSET (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the offset into the paging file from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file offset.
+//
+//--
+
+#define GET_PAGING_FILE_OFFSET(PTE) ((((PTE).u.Long) >> 12) & 0x000FFFFF)
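+
+//
+// A worked example under the encoding above: for a software PTE whose
+// u.Long is 0x00012345, the paging file number occupies bits 8..11 and
+// the offset occupies bits 12..31, so
+//
+//      GET_PAGING_FILE_NUMBER yields (0x00012345 << 20) >> 28 == 0x3
+//      GET_PAGING_FILE_OFFSET yields (0x00012345 >> 12) & 0xFFFFF == 0x12
+//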
+
+
+
+//++
+//ULONG
+//IS_PTE_NOT_DEMAND_ZERO (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if a given PTE is NOT a demand zero PTE.
+//
+// Argments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Returns 0 if the PTE is demand zero, non-zero otherwise.
+//
+//--
+
+#define IS_PTE_NOT_DEMAND_ZERO(PTE) ((PTE).u.Long & (ULONG)0xFFFFFF01)
+
+//++
+//VOID
+//MI_MAKING_VALID_PTE_INVALID(
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// Prepare to make a single valid PTE invalid.
+// No action is required on the Alpha.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ // No action is required.
+#define MI_MAKING_VALID_PTE_INVALID(SYSTEM_WIDE)
+
+
+//++
+//VOID
+//MI_MAKING_MULTIPLE_PTES_INVALID(
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// Prepare to make multiple valid PTEs invalid.
+// No action is required on the Alpha.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ // No action is required.
+#define MI_MAKING_MULTIPLE_PTES_INVALID(SYSTEM_WIDE)
+
+
+
+//++
+//VOID
+//MI_MAKE_PROTECT_WRITE_COPY (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro makes a writable PTE a writable-copy PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_PROTECT_WRITE_COPY(PTE) \
+ if ((PTE).u.Long & 0x20) { \
+ ((PTE).u.Long |= 0x8); \
+ }
+
+
+//++
+//VOID
+//MI_SET_PAGE_DIRTY(
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit (and releases page file space).
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PAGE_DIRTY(PPTE,VA,PFNHELD) \
+ if ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ }
+
+//++
+//VOID
+//MI_NO_FAULT_FOUND(
+// IN TEMP,
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro handles the case when a page fault is taken and no
+// PTE with the valid bit clear is found.
+//
+// Arguments
+//
+// TEMP - Supplies a temporary for usage.
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_NO_FAULT_FOUND(TEMP,PPTE,VA,PFNHELD) \
+ if (StoreInstruction && ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN)) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ } else { \
+ KiFlushSingleTb( 1, VA ); \
+ }
+
+
+//++
+//VOID
+//MI_CAPTURE_DIRTY_BIT_TO_PFN (
+// IN PMMPTE PPTE,
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro captures the state of the dirty bit into the PFN
+// element and frees any associated page file space if the PTE
+// has been modified.
+//
+// NOTE - THE PFN LOCK MUST BE HELD!
+//
+// Arguments
+//
+// PPTE - Supplies the PTE to operate upon.
+//
+// PPFN - Supplies a pointer to the PFN database element that corresponds
+// to the page mapped by the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_CAPTURE_DIRTY_BIT_TO_PFN(PPTE,PPFN) \
+ if (((PPFN)->u3.e1.Modified == 0) && \
+ ((PPTE)->u.Hard.Dirty == MM_PTE_DIRTY)) { \
+ (PPFN)->u3.e1.Modified = 1; \
+ if (((PPFN)->OriginalPte.u.Soft.Prototype == 0) && \
+ ((PPFN)->u3.e1.WriteInProgress == 0)) { \
+ MiReleasePageFileSpace ((PPFN)->OriginalPte); \
+ (PPFN)->OriginalPte.u.Soft.PageFileHigh = 0; \
+ } \
+ }
+
+
+
+//++
+//BOOLEAN
+//MI_IS_PHYSICAL_ADDRESS (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro determines if a given virtual address is really a
+// physical address.
+//
+// Arguments
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// FALSE if it is not a physical address, TRUE if it is.
+//
+//--
+
+#define MI_IS_PHYSICAL_ADDRESS(Va) \
+ ( ((ULONG)Va >= KSEG0_BASE) && ((ULONG)Va < KSEG2_BASE) )
+
+
+//++
+//ULONG
+//MI_CONVERT_PHYSICAL_TO_PFN (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro converts a physical address (see MI_IS_PHYSICAL_ADDRESS)
+// to its corresponding physical frame number.
+//
+// Arguments
+//
+// VA - Supplies a pointer to the physical address.
+//
+// Return Value:
+//
+// Returns the PFN for the page.
+//
+//--
+
+#define MI_CONVERT_PHYSICAL_TO_PFN(Va) \
+ (((ULONG)Va << 2) >> (PAGE_SHIFT + 2))
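+
+//
+// A worked example (assuming KSEG0_BASE is 0x80000000 and an 8KB page,
+// so PAGE_SHIFT is 13): the left shift by 2 discards the two KSEG tag
+// bits and the right shift extracts the frame number.
+//
+//      MI_CONVERT_PHYSICAL_TO_PFN (0x80004000)
+//          == (0x80004000 << 2) >> 15 == 0x10000 >> 15 == 2
+//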
+
+
+//++
+// ULONG
+// MI_CONVERT_PHYSICAL_BUS_TO_PFN(
+// PHYSICAL_ADDRESS Pa,
+// )
+//
+// Routine Description:
+//
+// This macro takes a physical address and returns the pfn to which
+// it corresponds.
+//
+// Arguments
+//
+// Pa - Supplies the physical address to convert.
+//
+// Return Value:
+//
+// The Pfn that corresponds to the physical address is returned.
+//
+//--
+
+#define MI_CONVERT_PHYSICAL_BUS_TO_PFN(Pa) \
+ ((ULONG)( (Pa).QuadPart >> ((CCHAR)PAGE_SHIFT)))
+
+
+
+
+typedef struct _MMCOLOR_TABLES {
+ ULONG Flink;
+ PVOID Blink;
+} MMCOLOR_TABLES, *PMMCOLOR_TABLES;
+
+typedef struct _MMPRIMARY_COLOR_TABLES {
+ LIST_ENTRY ListHead;
+} MMPRIMARY_COLOR_TABLES, *PMMPRIMARY_COLOR_TABLES;
+
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+extern MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+#endif
+
+extern PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+extern ULONG MmTotalPagesForPagingFile;
+
+
+//
+// The hardware PTE is defined in ...sdk/inc/ntalpha.h
+//
+
+//
+// Invalid PTEs have the following definition.
+//
+
+
+typedef struct _MMPTE_SOFTWARE {
+ ULONG Valid: 1;
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG Protection : 5;
+ ULONG PageFileLow : 4;
+ ULONG PageFileHigh : 20;
+} MMPTE_SOFTWARE;
+
+
+typedef struct _MMPTE_TRANSITION {
+ ULONG Valid : 1;
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG Protection : 5;
+ ULONG filler01 : 1;
+ ULONG PageFrameNumber : 23;
+} MMPTE_TRANSITION;
+
+
+typedef struct _MMPTE_PROTOTYPE {
+ ULONG Valid : 1;
+ ULONG Prototype : 1;
+ ULONG ReadOnly : 1;
+ ULONG filler02 : 1;
+ ULONG ProtoAddress : 28;
+} MMPTE_PROTOTYPE;
+
+typedef struct _MMPTE_LIST {
+ ULONG Valid : 1;
+ ULONG filler07 : 7;
+ ULONG OneEntry : 1;
+ ULONG filler03 : 3;
+ ULONG NextEntry : 20;
+} MMPTE_LIST;
+
+typedef struct _MMPTE_SUBSECTION {
+ ULONG Valid : 1;
+ ULONG Prototype : 1;
+ ULONG WhichPool : 1;
+ ULONG Protection : 5;
+ ULONG SubsectionAddress : 24;
+} MMPTE_SUBSECTION;
+
+
+//
+// A Valid Page Table Entry on a DEC ALPHA (ev4) has the following definition.
+//
+//
+//
+//typedef struct _HARDWARE_PTE {
+// ULONG Valid: 1;
+// ULONG Owner: 1;
+// ULONG Dirty: 1;
+// ULONG reserved: 1;
+// ULONG Global: 1;
+// ULONG filler2: 2;
+// ULONG Write: 1;
+// ULONG CopyOnWrite: 1;
+// ULONG PageFrameNumber: 23;
+//} HARDWARE_PTE, *PHARDWARE_PTE;
+//
+
+
+//
+// A Page Table Entry on a DEC ALPHA (ev4) has the following definition.
+//
+
+typedef struct _MMPTE {
+ union {
+ ULONG Long;
+ HARDWARE_PTE Hard;
+ HARDWARE_PTE Flush;
+ MMPTE_PROTOTYPE Proto;
+ MMPTE_SOFTWARE Soft;
+ MMPTE_TRANSITION Trans;
+ MMPTE_LIST List;
+ MMPTE_SUBSECTION Subsect;
+ } u;
+} MMPTE;
+
+typedef MMPTE *PMMPTE;
+
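+//
+// A classification sketch (illustrative, TempPte is an assumed name):
+// the union allows one 32-bit value to be viewed in whichever format
+// its low-order bits select.
+//
+//      if (TempPte.u.Hard.Valid) {
+//          // hardware format, page is resident
+//      } else if (TempPte.u.Soft.Prototype) {
+//          // prototype (or subsection) format
+//      } else if (TempPte.u.Soft.Transition) {
+//          // transition format, physical page still attached
+//      } else {
+//          // paging file or demand zero format
+//      }
+//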
+
diff --git a/private/ntos/mm/alpha/physsect.c b/private/ntos/mm/alpha/physsect.c
new file mode 100644
index 000000000..bde6eae19
--- /dev/null
+++ b/private/ntos/mm/alpha/physsect.c
@@ -0,0 +1,562 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+ physsect.c
+
+Abstract:
+
+ This module contains the routine for mapping physical sections for
+ ALPHA machines.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+ Joe Notarangelo 21-Sep-1992
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//#define FIRSTDBG 1
+//#define AGGREGATE_DBG FIRSTDBG
+
+
+static
+ULONG
+MaximumAlignment( ULONG );
+
+static
+ULONG
+AggregatePages( PMMPTE, ULONG, ULONG, PULONG );
+
+
+
+NTSTATUS
+MiMapViewOfPhysicalSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PLARGE_INTEGER SectionOffset,
+ IN PULONG CapturedViewSize,
+ IN ULONG ProtectionMask,
+ IN ULONG ZeroBits,
+ IN ULONG AllocationType,
+ OUT PBOOLEAN ReleasedWsMutex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps the specified physical section into the
+ specified process's address space.
+
+Arguments:
+
+ see MmMapViewOfSection above...
+
+ ControlArea - Supplies the control area for the section.
+
+ Process - Supplies the process pointer which is receiving the section.
+
+ ProtectionMask - Supplies the initial page protection-mask.
+
+ ReleasedWsMutex - Supplies FALSE, receives TRUE if the working set
+ mutex is released.
+
+Return Value:
+
+ Status of the map view operation.
+
+Environment:
+
+ Kernel Mode, working set mutex and address creation mutex held.
+
+--*/
+
+{
+ PMMVAD Vad;
+ PVOID StartingAddress;
+ PVOID EndingAddress;
+ KIRQL OldIrql;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ MMPTE TempPte;
+ PMMPFN Pfn2;
+ ULONG PhysicalViewSize;
+ ULONG Alignment;
+ ULONG PagesToMap;
+ ULONG NextPfn;
+
+ //
+ // Physical memory section.
+ //
+
+#ifdef FIRSTDBG
+
+ DbgPrint( "MM: Physsect CaptureBase = %x SectionOffset = %x\n",
+ CapturedBase, SectionOffset->LowPart );
+ DbgPrint( "MM: Physsect Allocation Type = %x, MEM_LARGE_PAGES = %x\n",
+ AllocationType, MEM_LARGE_PAGES );
+
+#endif //FIRSTDBG
+
+ //
+ // Compute the alignment we require for the virtual mapping.
+ // The default is 64K to match protection boundaries.
+ // Larger page sizes are used if MEM_LARGE_PAGES is requested.
+ // The Alpha AXP architecture supports granularity hints so that
+ // larger pages can be defined in the following multiples of
+ // PAGE_SIZE:
+ // 8**(GH) * PAGE_SIZE, where GH element of {0,1,2,3}
+ //
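+ // For example, assuming the 8KB page size used by this port, the
+ // large page sizes are: GH0 = 8KB, GH1 = 64KB, GH2 = 512KB, and
+ // GH3 = 4MB.
+ //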
+
+ Alignment = X64K;
+
+ if( AllocationType & MEM_LARGE_PAGES ){
+
+ //
+ // MaxAlignment is the maximum boundary alignment of the
+ // SectionOffset (where the maximum boundary is one of the possible
+ // granularity hints boundaries)
+ //
+
+ ULONG MaxAlignment = MaximumAlignment( SectionOffset->LowPart );
+
+ Alignment = (MaxAlignment > Alignment) ? MaxAlignment : Alignment;
+
+#ifdef FIRSTDBG
+
+ DbgPrint( "MM: Alignment = %x, SectionOffset = %x\n",
+ Alignment, SectionOffset->LowPart );
+
+#endif //FIRSTDBG
+
+ }
+
+
+ LOCK_WS (Process);
+
+ if (*CapturedBase == NULL) {
+
+ //
+ // Attempt to locate address space. This could raise an
+ // exception.
+ //
+
+ try {
+
+ //
+ // Find a starting address on an Alignment boundary.
+ //
+
+
+ PhysicalViewSize = (SectionOffset->LowPart + *CapturedViewSize) -
+ (ULONG)MI_64K_ALIGN(SectionOffset->LowPart);
+ StartingAddress = MiFindEmptyAddressRange (PhysicalViewSize,
+ Alignment,
+ ZeroBits);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ return GetExceptionCode();
+ }
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ PhysicalViewSize - 1L) | (PAGE_SIZE - 1L));
+ StartingAddress = (PVOID)((ULONG)StartingAddress +
+ (SectionOffset->LowPart & (X64K - 1)));
+
+ if (ZeroBits > 0) {
+ if (EndingAddress > (PVOID)((ULONG)0xFFFFFFFF >> ZeroBits)) {
+ return STATUS_NO_MEMORY;
+ }
+ }
+
+ } else {
+
+ //
+ // Check to make sure the specified base address to ending address
+ // is currently unused.
+ //
+
+ PhysicalViewSize = (SectionOffset->LowPart + *CapturedViewSize) -
+ (ULONG)MI_64K_ALIGN(SectionOffset->LowPart);
+ StartingAddress = (PVOID)((ULONG)MI_64K_ALIGN(*CapturedBase) +
+ (SectionOffset->LowPart & (X64K - 1)));
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+
+ Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+ if (Vad != (PMMVAD)NULL) {
+#if 0
+ MiDumpConflictingVad (StartingAddress, EndingAddress, Vad);
+#endif
+
+ return STATUS_CONFLICTING_ADDRESSES;
+ }
+ }
+
+ //
+ // An unoccupied address range has been found; build the virtual
+ // address descriptor to describe this range.
+ //
+
+ //
+ // Establish an exception handler and attempt to allocate
+ // the pool and charge quota. Note that the InsertVad routine
+ // will also charge quota which could raise an exception.
+ //
+
+ try {
+
+ Vad = (PMMVAD)NULL;
+ Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD), ' daV');
+ Vad->StartingVa = StartingAddress;
+ Vad->EndingVa = EndingAddress;
+ Vad->ControlArea = ControlArea;
+ Vad->u.LongFlags = 0;
+ Vad->u.VadFlags.Inherit = ViewUnmap;
+ Vad->u.VadFlags.PhysicalMapping = 1;
+ Vad->Banked = NULL;
+ // Vad->u.VadFlags.ImageMap = 0;
+ Vad->u.VadFlags.Protection = ProtectionMask;
+ // Vad->u.VadFlags.CopyOnWrite = 0;
+ // Vad->u.VadFlags.LargePages = 0;
+ Vad->FirstPrototypePte =
+ (PMMPTE)(MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset));
+
+ //
+ // Set the first prototype PTE field in the Vad.
+ //
+
+ Vad->LastContiguousPte =
+ (PMMPTE)(MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset));
+
+ //
+ // Insert the VAD. This could get an exception.
+ //
+
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ if (Vad != (PMMVAD)NULL) {
+
+ //
+ // The pool allocation succeeded, but the quota charge
+ // in InsertVad failed; deallocate the pool and return
+ // an error.
+ //
+
+ ExFreePool (Vad);
+ return GetExceptionCode();
+ }
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ //
+ // Increment the count of the number of views for the
+ // section object. This requires the PFN mutex to be held.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ ControlArea->NumberOfMappedViews += 1;
+ ControlArea->NumberOfUserReferences += 1;
+ ASSERT (ControlArea->NumberOfSectionReferences != 0);
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Build the PTEs in the address space.
+ //
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ Pfn2 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
+
+ PagesToMap = ( ((ULONG)EndingAddress - (ULONG)StartingAddress)
+ + (PAGE_SIZE-1) ) >> PAGE_SHIFT;
+
+ NextPfn = MI_CONVERT_PHYSICAL_BUS_TO_PFN(*SectionOffset);
+
+#ifdef FIRSTDBG
+
+ DbgPrint( "MM: Physsect, PagesToMap = %x NextPfn = %x\n",
+ PagesToMap, NextPfn );
+
+#endif //FIRSTDBG
+
+ MI_MAKE_VALID_PTE (TempPte,
+ NextPfn,
+ ProtectionMask,
+ PointerPte);
+
+ if (TempPte.u.Hard.Write) {
+ TempPte.u.Hard.Dirty = 1;
+ }
+
+
+
+ while (PointerPte <= LastPte) {
+
+ ULONG PagesTogether;
+ ULONG GranularityHint;
+
+ //
+ // Compute the number of pages that can be mapped together
+ //
+
+ if( AllocationType & MEM_LARGE_PAGES ){
+ PagesTogether = AggregatePages( PointerPte,
+ NextPfn,
+ PagesToMap,
+ &GranularityHint );
+ } else {
+ PagesTogether = 1;
+ GranularityHint = 0;
+ }
+
+#ifdef FIRSTDBG
+
+ DbgPrint( "MM: Physsect PointerPte = %x, NextPfn = %x\n",
+ PointerPte, NextPfn );
+ DbgPrint( "MM: Va = %x TempPte.Pfn = %x\n",
+ MiGetVirtualAddressMappedByPte( PointerPte ),
+ TempPte.u.Hard.PageFrameNumber );
+ DbgPrint( "MM: PagesToMap = %x\n", PagesToMap );
+ DbgPrint( "MM: PagesTogether = %x, GH = %x\n",
+ PagesTogether, GranularityHint );
+
+#endif //FIRSTDBG
+
+ TempPte.u.Hard.GranularityHint = GranularityHint;
+
+ NextPfn += PagesTogether;
+ PagesToMap -= PagesTogether;
+
+ while( PagesTogether-- ){
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+ Pfn2 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
+ }
+
+ ASSERT( PointerPte->u.Long == 0 );
+
+ *PointerPte = TempPte;
+ Pfn2->u2.ShareCount += 1;
+
+ //
+ // Increment the count of non-zero page table entries for this
+ // page table and the number of private pages for the process.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries
+ [MiGetPteOffset(PointerPte)] += 1;
+
+ PointerPte += 1;
+
+ TempPte.u.Hard.PageFrameNumber += 1;
+
+ } // while (PagesTogether-- )
+
+ } // while (PointerPte <= LastPte)
+
+ UNLOCK_WS (Process);
+ *ReleasedWsMutex = TRUE;
+
+ //
+ // Update the current virtual size in the process header.
+ //
+
+ *CapturedViewSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1L;
+ Process->VirtualSize += *CapturedViewSize;
+
+ if (Process->VirtualSize > Process->PeakVirtualSize) {
+ Process->PeakVirtualSize = Process->VirtualSize;
+ }
+
+ //
+ // Translate the virtual address to a quasi-virtual address for
+ // use by drivers that touch mapped devices. Note: the routine
+ // HalCreateQva will not translate the StartingAddress if the
+ // StartingAddress is within system memory address space.
+ //
+ // N.B. - It will not work to attempt to map addresses that begin in
+ // system memory and extend through i/o space.
+ //
+
+ *CapturedBase = HalCreateQva( *SectionOffset, StartingAddress );
+
+ return STATUS_SUCCESS;
+}
+
+
+ULONG
+MaximumAlignment(
+ IN ULONG Offset
+ )
+/*++
+
+Routine Description:
+
+ This routine returns the maximum granularity hint alignment boundary
+ to which Offset is naturally aligned.
+
+Arguments:
+
+ Offset - Supplies the address offset to check for alignment.
+
+Return Value:
+
+ The number which represents the largest natural alignment of Offset.
+
+Environment:
+
+--*/
+{
+
+ if( (Offset & (GH3_PAGE_SIZE - 1)) == 0 ){
+ return GH3_PAGE_SIZE;
+ }
+
+ if( (Offset & (GH2_PAGE_SIZE - 1)) == 0 ){
+ return GH2_PAGE_SIZE;
+ }
+
+ if( (Offset & (GH1_PAGE_SIZE - 1)) == 0 ){
+ return GH1_PAGE_SIZE;
+ }
+
+ if( (Offset & (PAGE_SIZE - 1)) == 0 ){
+ return PAGE_SIZE;
+ }
+
+ return 0;
+}
+
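+// A worked example, assuming the 8KB page size used by this port
+// (so GH1_PAGE_SIZE = 64KB, GH2_PAGE_SIZE = 512KB, GH3_PAGE_SIZE = 4MB):
+// an offset of 0x30000 is 64KB-aligned but not 512KB-aligned, so
+// MaximumAlignment( 0x30000 ) returns GH1_PAGE_SIZE.
+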
+
+ULONG
+AggregatePages(
+ IN PMMPTE PointerPte,
+ IN ULONG Pfn,
+ IN ULONG Pages,
+ OUT PULONG GranularityHint
+ )
+/*++
+
+Routine Description:
+
+ This routine computes the number of standard size pages that can be
+ aggregated into a single large page and returns the granularity hint
+ for that size large page.
+
+Arguments:
+
+ PointerPte - Supplies the PTE pointer for the starting virtual address
+ of the mapping.
+ Pfn - Supplies the starting page frame number of the memory to be
+ mapped.
+ Pages - Supplies the number of pages to map.
+
+ GranularityHint - Receives the granularity hint for the large page used
+ to aggregate the standard pages.
+
+Return Value:
+
+ The number of pages that can be aggregated together.
+
+Environment:
+
+--*/
+{
+
+ ULONG MaxVirtualAlignment;
+ ULONG MaxPhysicalAlignment;
+ ULONG MaxPageAlignment;
+ ULONG MaxAlignment;
+
+ //
+ // Determine the largest page that will map a maximum of Pages.
+ // The largest page must be both virtually and physically aligned
+ // to the large page size boundary.
+ // Determine the largest common alignment for the virtual and
+ // physical addresses, factor in Pages, and then match to the
+ // largest page size possible via the granularity hints.
+ //
+
+ MaxVirtualAlignment = MaximumAlignment((ULONG)
+ MiGetVirtualAddressMappedByPte( PointerPte ) );
+ MaxPhysicalAlignment = MaximumAlignment( (ULONG)(Pfn << PAGE_SHIFT) );
+
+ MaxPageAlignment = (ULONG)(Pages << PAGE_SHIFT);
+
+#ifdef AGGREGATE_DBG
+
+ DbgPrint( "MM: Aggregate MaxVirtualAlign = %x\n", MaxVirtualAlignment );
+ DbgPrint( "MM: Aggregate MaxPhysicalAlign = %x\n", MaxPhysicalAlignment );
+ DbgPrint( "MM: Aggregate MaxPageAlign = %x\n", MaxPageAlignment );
+
+#endif //AGGREGATE_DBG
+ //
+ // Maximum alignment is the minimum of the virtual and physical alignments.
+ //
+
+ MaxAlignment = (MaxVirtualAlignment > MaxPhysicalAlignment) ?
+ MaxPhysicalAlignment : MaxVirtualAlignment;
+ MaxAlignment = (MaxAlignment > MaxPageAlignment) ?
+ MaxPageAlignment : MaxAlignment;
+
+ //
+ // Convert MaxAlignment to granularity hint value
+ //
+
+ if( (MaxAlignment & (GH3_PAGE_SIZE - 1)) == 0 ){
+
+ *GranularityHint = GH3;
+
+ } else if( (MaxAlignment & (GH2_PAGE_SIZE - 1)) == 0 ){
+
+ *GranularityHint = GH2;
+
+ } else if( (MaxAlignment & (GH1_PAGE_SIZE - 1)) == 0 ){
+
+ *GranularityHint = GH1;
+
+ } else if( (MaxAlignment & (PAGE_SIZE - 1)) == 0 ){
+
+ *GranularityHint = GH0;
+
+ } else {
+
+ *GranularityHint = GH0;
+
+#if DBG
+
+ DbgPrint( "MM: Aggregate Physical pages - not page aligned\n" );
+
+#endif //DBG
+
+ } // end, if then elseif
+
+ //
+ // Return number of pages aggregated.
+ //
+
+ return( MaxAlignment >> PAGE_SHIFT );
+
+}
diff --git a/private/ntos/mm/alpha/setdirty.c b/private/ntos/mm/alpha/setdirty.c
new file mode 100644
index 000000000..df73963a7
--- /dev/null
+++ b/private/ntos/mm/alpha/setdirty.c
@@ -0,0 +1,126 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+ setdirty.c
+
+Abstract:
+
+ This module contains the setting dirty bit routine for memory management.
+
+ ALPHA specific.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1990.
+ Joe Notarangelo 23-Apr-1992 ALPHA version
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+VOID
+MiSetDirtyBit (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN ULONG PfnHeld
+ )
+
+/*++
+
+Routine Description:
+
+ This routine sets the dirty bit in the specified PTE and the modified
+ bit in the corresponding PFN element. If any page file space is
+ allocated, it is deallocated.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies a pointer to the corresponding valid PTE.
+
+ PfnHeld - Supplies TRUE if the PFN mutex is already held.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working set mutex held.
+
+--*/
+
+{
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ //
+ // The TB entry must be flushed as the valid PTE with the dirty bit clear
+ // has been fetched into the TB. If it isn't flushed, another fault
+ // is generated as the dirty bit is not set in the cached TB entry.
+ //
+
+ // KiFlushSingleDataTb( FaultingAddress );
+ __dtbis( FaultingAddress );
+
+ //
+ // The page is NOT copy on write, update the PTE setting both the
+ // dirty bit and the accessed bit. Note, that as this PTE is in
+ // the TB, the TB must be flushed.
+ //
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ TempPte = *PointerPte;
+ TempPte.u.Hard.Dirty = 1;
+ MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
+ *PointerPte = TempPte;
+
+ //
+ // If PFN database lock is not held, then do not update the
+ // PFN database.
+ //
+
+ if( PfnHeld ){
+
+ //
+ // Set the modified field in the PFN database, also, if the physical
+ // page is currently in a paging file, free up the page file space
+ // as the contents are now worthless.
+ //
+
+ if ( (Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0) ) {
+
+ //
+ // This page is in page file format, deallocate the page file space.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+
+ //
+ // Change original PTE to indicate no page file space is reserved,
+ // otherwise the space will be deallocated when the PTE is
+ // deleted.
+ //
+
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+ Pfn1->u3.e1.Modified = 1;
+
+ }
+
+
+ return;
+}
diff --git a/private/ntos/mm/alpha/sources b/private/ntos/mm/alpha/sources
new file mode 100644
index 000000000..769eff4b3
--- /dev/null
+++ b/private/ntos/mm/alpha/sources
@@ -0,0 +1,6 @@
+ALPHA_SOURCES=..\alpha\inialpha.c \
+ ..\alpha\datalpha.c \
+ ..\alpha\debugsup.c \
+ ..\alpha\hypermap.c \
+ ..\alpha\setdirty.c \
+ ..\alpha\physsect.c
diff --git a/private/ntos/mm/checkpfn.c b/private/ntos/mm/checkpfn.c
new file mode 100644
index 000000000..332b373ec
--- /dev/null
+++ b/private/ntos/mm/checkpfn.c
@@ -0,0 +1,538 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ checkpfn.c
+
+Abstract:
+
+ This module contains routines for sanity checking the PFN database.
+
+Author:
+
+ Lou Perazzoli (loup) 25-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if DBG
+
+PRTL_BITMAP CheckPfnBitMap;
+
+
+VOID
+MiCheckPfn (
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks each physical page in the PFN database to ensure
+ it is in the proper state.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ ULONG Link, Previous;
+ ULONG i;
+ PMMPTE PointerPte;
+ KIRQL PreviousIrql;
+ KIRQL OldIrql;
+ USHORT ValidCheck[4];
+ USHORT ValidPage[4];
+ PMMPFN PfnX;
+
+ ValidCheck[0] = ValidCheck[1] = ValidCheck[2] = ValidCheck[3] = 0;
+ ValidPage[0] = ValidPage[1] = ValidPage[2] = ValidPage[3] = 0;
+
+ if (CheckPfnBitMap == NULL) {
+ MiCreateBitMap ( &CheckPfnBitMap, MmNumberOfPhysicalPages, NonPagedPool);
+ }
+ RtlClearAllBits (CheckPfnBitMap);
+
+ //
+ // Walk free list.
+ //
+
+ KeRaiseIrql (APC_LEVEL, &PreviousIrql);
+ LOCK_PFN (OldIrql);
+
+ Previous = MM_EMPTY_LIST;
+ Link = MmFreePageListHead.Flink;
+ for (i=0; i < MmFreePageListHead.Total; i++) {
+ if (Link == MM_EMPTY_LIST) {
+ DbgPrint("free list total count wrong\n");
+ UNLOCK_PFN (OldIrql);
+ KeLowerIrql (PreviousIrql);
+ return;
+ }
+ RtlSetBits (CheckPfnBitMap, Link, 1L);
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+ DbgPrint("non zero reference count on free list\n");
+ MiFormatPfn(Pfn1);
+
+ }
+ if (Pfn1->u3.e1.PageLocation != FreePageList) {
+ DbgPrint("page location not freelist\n");
+ MiFormatPfn(Pfn1);
+ }
+ if (Pfn1->u2.Blink != Previous) {
+ DbgPrint("bad blink on free list\n");
+ MiFormatPfn(Pfn1);
+ }
+ Previous = Link;
+ Link = Pfn1->u1.Flink;
+
+ }
+ if (Link != MM_EMPTY_LIST) {
+ DbgPrint("free list total count wrong\n");
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ MiFormatPfn(Pfn1);
+ }
+
+ //
+ // Walk zeroed list.
+ //
+
+ Previous = MM_EMPTY_LIST;
+ Link = MmZeroedPageListHead.Flink;
+ for (i=0; i < MmZeroedPageListHead.Total; i++) {
+ if (Link == MM_EMPTY_LIST) {
+ DbgPrint("zero list total count wrong\n");
+ UNLOCK_PFN (OldIrql);
+ KeLowerIrql (PreviousIrql);
+ return;
+ }
+ RtlSetBits (CheckPfnBitMap, Link, 1L);
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+ DbgPrint("non zero reference count on zero list\n");
+ MiFormatPfn(Pfn1);
+
+ }
+ if (Pfn1->u3.e1.PageLocation != ZeroedPageList) {
+ DbgPrint("page location not zerolist\n");
+ MiFormatPfn(Pfn1);
+ }
+ if (Pfn1->u2.Blink != Previous) {
+ DbgPrint("bad blink on zero list\n");
+ MiFormatPfn(Pfn1);
+ }
+ Previous = Link;
+ Link = Pfn1->u1.Flink;
+
+ }
+ if (Link != MM_EMPTY_LIST) {
+ DbgPrint("zero list total count wrong\n");
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ MiFormatPfn(Pfn1);
+ }
+
+ //
+ // Walk Bad list.
+ //
+ Previous = MM_EMPTY_LIST;
+ Link = MmBadPageListHead.Flink;
+ for (i=0; i < MmBadPageListHead.Total; i++) {
+ if (Link == MM_EMPTY_LIST) {
+ DbgPrint("Bad list total count wrong\n");
+ UNLOCK_PFN (OldIrql);
+ KeLowerIrql (PreviousIrql);
+ return;
+ }
+ RtlSetBits (CheckPfnBitMap, Link, 1L);
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+ DbgPrint("non zero reference count on Bad list\n");
+ MiFormatPfn(Pfn1);
+
+ }
+ if (Pfn1->u3.e1.PageLocation != BadPageList) {
+ DbgPrint("page location not Badlist\n");
+ MiFormatPfn(Pfn1);
+ }
+ if (Pfn1->u2.Blink != Previous) {
+ DbgPrint("bad blink on Bad list\n");
+ MiFormatPfn(Pfn1);
+ }
+ Previous = Link;
+ Link = Pfn1->u1.Flink;
+
+ }
+ if (Link != MM_EMPTY_LIST) {
+ DbgPrint("Bad list total count wrong\n");
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ MiFormatPfn(Pfn1);
+ }
+
+ //
+ // Walk Standby list.
+ //
+
+ Previous = MM_EMPTY_LIST;
+ Link = MmStandbyPageListHead.Flink;
+ for (i=0; i < MmStandbyPageListHead.Total; i++) {
+ if (Link == MM_EMPTY_LIST) {
+ DbgPrint("Standby list total count wrong\n");
+ UNLOCK_PFN (OldIrql);
+ KeLowerIrql (PreviousIrql);
+ return;
+ }
+ RtlSetBits (CheckPfnBitMap, Link, 1L);
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+ DbgPrint("non zero reference count on Standby list\n");
+ MiFormatPfn(Pfn1);
+
+ }
+ if (Pfn1->u3.e1.PageLocation != StandbyPageList) {
+ DbgPrint("page location not Standbylist\n");
+ MiFormatPfn(Pfn1);
+ }
+ if (Pfn1->u2.Blink != Previous) {
+ DbgPrint("bad blink on Standby list\n");
+ MiFormatPfn(Pfn1);
+ }
+
+ //
+ // Check to see if referenced PTE is okay.
+ //
+ if (MI_IS_PFN_DELETED (Pfn1)) {
+ DbgPrint("Invalid pteaddress in standby list\n");
+ MiFormatPfn(Pfn1);
+
+ } else {
+
+ OldIrql = 99;
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (MmIsAddressValid (Pfn1->PteAddress))) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+ PointerPte = MiMapPageInHyperSpace(Pfn1->PteFrame,
+ &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+ if (PointerPte->u.Trans.PageFrameNumber != Link) {
+ DbgPrint("Invalid PFN - PTE address is wrong in standby list\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+ if (PointerPte->u.Soft.Transition == 0) {
+ DbgPrint("Pte not in transition for page on standby list\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ OldIrql = 99;
+ }
+
+ }
+
+ Previous = Link;
+ Link = Pfn1->u1.Flink;
+
+ }
+ if (Link != MM_EMPTY_LIST) {
+ DbgPrint("Standby list total count wrong\n");
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ MiFormatPfn(Pfn1);
+ }
+
+ //
+ // Walk Modified list.
+ //
+
+ Previous = MM_EMPTY_LIST;
+ Link = MmModifiedPageListHead.Flink;
+ for (i=0; i < MmModifiedPageListHead.Total; i++) {
+ if (Link == MM_EMPTY_LIST) {
+ DbgPrint("Modified list total count wrong\n");
+ UNLOCK_PFN (OldIrql);
+ KeLowerIrql (PreviousIrql);
+ return;
+ }
+ RtlSetBits (CheckPfnBitMap, Link, 1L);
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+ DbgPrint("non zero reference count on Modified list\n");
+ MiFormatPfn(Pfn1);
+
+ }
+ if (Pfn1->u3.e1.PageLocation != ModifiedPageList) {
+ DbgPrint("page location not Modifiedlist\n");
+ MiFormatPfn(Pfn1);
+ }
+ if (Pfn1->u2.Blink != Previous) {
+ DbgPrint("bad blink on Modified list\n");
+ MiFormatPfn(Pfn1);
+ }
+ //
+ // Check to see if referenced PTE is okay.
+ //
+ if (MI_IS_PFN_DELETED (Pfn1)) {
+ DbgPrint("Invalid pteaddress in modified list\n");
+ MiFormatPfn(Pfn1);
+
+ } else {
+
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (MmIsAddressValid (Pfn1->PteAddress))) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+ PointerPte = MiMapPageInHyperSpace(Pfn1->PteFrame, &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ if (PointerPte->u.Trans.PageFrameNumber != Link) {
+ DbgPrint("Invalid PFN - PTE address is wrong in modified list\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+ if (PointerPte->u.Soft.Transition == 0) {
+ DbgPrint("Pte not in transition for page on modified list\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ OldIrql = 99;
+ }
+ }
+
+ Previous = Link;
+ Link = Pfn1->u1.Flink;
+
+ }
+ if (Link != MM_EMPTY_LIST) {
+ DbgPrint("Modified list total count wrong\n");
+ Pfn1 = MI_PFN_ELEMENT(Link);
+ MiFormatPfn(Pfn1);
+ }
+ //
+ // All non active pages have been scanned. Locate the
+ // active pages and make sure they are consistent.
+ //
+
+ //
+ // set bit zero as page zero is reserved for now
+ //
+
+ RtlSetBits (CheckPfnBitMap, 0L, 1L);
+
+ Link = RtlFindClearBitsAndSet (CheckPfnBitMap, 1L, 0);
+ while (Link != 0xFFFFFFFF) {
+ Pfn1 = MI_PFN_ELEMENT (Link);
+
+ //
+ // Make sure the PTE address is okay
+ //
+
+ if ((Pfn1->PteAddress >= (PMMPTE)HYPER_SPACE)
+ && (Pfn1->u3.e1.PrototypePte == 0)) {
+ DbgPrint("pfn with illegal pte address\n");
+ MiFormatPfn(Pfn1);
+ break;
+ }
+
+ if (Pfn1->PteAddress < (PMMPTE)PTE_BASE) {
+ DbgPrint("pfn with illegal pte address\n");
+ MiFormatPfn(Pfn1);
+ break;
+ }
+
+#ifdef _MIPS_
+
+ //
+ // ignore ptes mapped to kseg0 or kseg1.
+ //
+
+ if ((Pfn1->PteAddress > (PMMPTE)0xc0200000) &&
+ (Pfn1->PteAddress < (PMMPTE)0xc0300000)) {
+
+ goto NoCheck;
+ }
+#endif //MIPS
+
+#ifdef _PPC_
+
+ //
+ // ignore ptes mapped to PowerPC kernel BAT.
+ //
+
+ if (MI_IS_PHYSICAL_ADDRESS(MiGetVirtualAddressMappedByPte(Pfn1->PteAddress))) {
+
+ goto NoCheck;
+ }
+#endif // _PPC_
+
+#ifdef _ALPHA_
+
+ //
+ // ignore ptes mapped to ALPHA's 32-bit superpage.
+ //
+
+ if ((Pfn1->PteAddress > (PMMPTE)0xc0100000) &&
+ (Pfn1->PteAddress < (PMMPTE)0xc0180000)) {
+
+ goto NoCheck;
+ }
+#endif //ALPHA
+
+ //
+ // Check to make sure the referenced PTE is for this page.
+ //
+
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (MmIsAddressValid (Pfn1->PteAddress))) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+ PointerPte = MiMapPageInHyperSpace(Pfn1->PteFrame, &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ if (PointerPte->u.Hard.PageFrameNumber != Link) {
+ DbgPrint("Invalid PFN - PTE address is wrong in active list\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+ if (PointerPte->u.Hard.Valid == 0) {
+ //
+ // if the page is a page table page it could be out of
+ // the working set yet a transition page is keeping it
+ // around in memory (ups the share count).
+ //
+
+ if ((Pfn1->PteAddress < (PMMPTE)PDE_BASE) ||
+ (Pfn1->PteAddress > (PMMPTE)PDE_TOP)) {
+
+ DbgPrint("Pte not valid for page on active list\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+ }
+
+ if (Pfn1->u3.e2.ReferenceCount != 1) {
+ DbgPrint("refcount not 1\n");
+ MiFormatPfn(Pfn1);
+ }
+
+
+ //
+ // Check to make sure the PTE count for the frame is okay.
+ //
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+ PfnX = MI_PFN_ELEMENT(Pfn1->PteFrame);
+ for (i = 0; i < 4; i++) {
+ if (ValidPage[i] == 0) {
+ ValidPage[i] = (USHORT)Pfn1->PteFrame;
+ }
+ if (ValidPage[i] == (USHORT)Pfn1->PteFrame) {
+ ValidCheck[i] += 1;
+ break;
+ }
+ }
+ }
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ OldIrql = 99;
+ }
+
+#if defined(_MIPS_) || defined(_ALPHA_) || defined(_PPC_)
+NoCheck:
+#endif
+ Link = RtlFindClearBitsAndSet (CheckPfnBitMap, 1L, 0);
+
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (ValidPage[i] == 0) {
+ break;
+ }
+ PfnX = MI_PFN_ELEMENT(ValidPage[i]);
+ }
+
+ UNLOCK_PFN (OldIrql);
+ KeLowerIrql (PreviousIrql);
+ return;
+
+}
+
+VOID
+MiDumpPfn ( )
+
+{
+ ULONG i;
+ PMMPFN Pfn1;
+
+ Pfn1 = MI_PFN_ELEMENT (MmLowestPhysicalPage);
+
+ for (i=0; i < MmNumberOfPhysicalPages; i++) {
+ MiFormatPfn (Pfn1);
+ Pfn1++;
+ }
+ return;
+}
+
+VOID
+MiFormatPfn (
+ IN PMMPFN PointerPfn
+ )
+
+{
+ struct longs {
+ ULONG Flink;
+ ULONG Pteadd;
+ ULONG Blink;
+ ULONG Refcount;
+ ULONG Origpte;
+ ULONG Flags;
+ };
+
+ struct longs *Pfake;
+ ULONG i;
+
+ i = PointerPfn - MmPfnDatabase;
+
+ Pfake = (struct longs *)PointerPfn;
+
+ DbgPrint("***PFN %lx flink %lx blink %lx ptecout-refcnt %lx\n",
+ i,
+ Pfake->Flink,
+ Pfake->Blink,
+ Pfake->Refcount);
+
+ DbgPrint(" pteaddr %lx originalPTE %lx flags %lx \n",
+ Pfake->Pteadd,
+ Pfake->Origpte,
+ Pfake->Flags);
+
+ return;
+
+}
+#endif //DBG
diff --git a/private/ntos/mm/checkpte.c b/private/ntos/mm/checkpte.c
new file mode 100644
index 000000000..a2133604b
--- /dev/null
+++ b/private/ntos/mm/checkpte.c
@@ -0,0 +1,235 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ checkpte.c
+
+Abstract:
+
+ This module contains routines for sanity checking the page directory.
+
+Author:
+
+ Lou Perazzoli (loup) 25-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if DBG
+
+VOID
+CheckValidPte (
+ IN PMMPTE PointerPte
+ );
+
+VOID
+CheckInvalidPte (
+ IN PMMPTE PointerPte
+ );
+
+
+VOID
+MiCheckPte (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks each page table page in an address space to
+ ensure it is in the proper state.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled.
+
+--*/
+
+{
+ ULONG i,j;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPFN Pfn1;
+ ULONG ValidCount;
+ ULONG TransitionCount;
+ KIRQL PreviousIrql;
+ KIRQL OldIrql;
+ PEPROCESS TargetProcess;
+ USHORT UsedPages;
+ ULONG PdeValidCount;
+
+ TargetProcess = PsGetCurrentProcess ();
+
+ KeRaiseIrql (APC_LEVEL, &PreviousIrql);
+
+ LOCK_WS (TargetProcess);
+ LOCK_PFN (OldIrql);
+
+ PointerPde = MiGetPdeAddress(0);
+
+ UsedPages = 0;
+ PdeValidCount = 1;
+
+ for (i = 0; i < PDE_PER_PAGE; i++) {
+ if (PointerPde->u.Hard.Valid) {
+
+ if ((i < 512) || (i == 769) || (i == 896)) {
+ PdeValidCount += 1;
+ }
+
+ ValidCount = 0;
+ TransitionCount = 0;
+ CheckValidPte (PointerPde);
+
+ PointerPte = MiGetPteAddress (i<<22);
+
+ for (j=0; j < PTE_PER_PAGE; j++) {
+
+ if ((PointerPte >= MiGetPteAddress(HYPER_SPACE)) &&
+ (PointerPte < MiGetPteAddress(WORKING_SET_LIST))) {
+ goto endloop;
+ }
+
+ if (PointerPte->u.Hard.Valid) {
+ ValidCount += 1;
+ CheckValidPte (PointerPte);
+
+ } else {
+ CheckInvalidPte (PointerPte);
+
+ if ((PointerPte->u.Soft.Transition == 1) &&
+ (PointerPte->u.Soft.Prototype == 0)) {
+
+ //
+ // Transition PTE, up the transition count.
+ //
+
+ TransitionCount += 1;
+
+ }
+ }
+
+ if (PointerPte->u.Long != 0) {
+ UsedPages += 1;
+ }
+endloop:
+ PointerPte++;
+
+ }
+ if ((i < 512) || (i == 896)) {
+ if (MmWorkingSetList->UsedPageTableEntries[i] != UsedPages) {
+ DbgPrint("used pages and page table used not equal %lx %lx %lx\n",
+ i,MmWorkingSetList->UsedPageTableEntries[i], UsedPages);
+ }
+ }
+
+ //
+ // Check the share count for the page table page.
+ //
+ if ((i < 511) || (i == 896)) {
+ Pfn1 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
+ if (Pfn1->u2.ShareCount != ((ULONG)1+ValidCount+TransitionCount)) {
+ DbgPrint("share count for page table page bad - %lx %lx %lx\n",
+ i,ValidCount, TransitionCount);
+ MiFormatPfn(Pfn1);
+ }
+ }
+ }
+ PointerPde++;
+ UsedPages = 0;
+ }
+
+ PointerPde = (PMMPTE)0xc0300c00;
+ Pfn1 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_WS (TargetProcess);
+ KeLowerIrql (PreviousIrql);
+ return;
+
+}
+
+VOID
+CheckValidPte (
+ IN PMMPTE PointerPte
+ )
+
+{
+ PMMPFN Pfn1;
+ PMMPTE PointerPde;
+
+ if (PointerPte->u.Hard.PageFrameNumber > MmNumberOfPhysicalPages) {
+ return;
+ }
+
+
+ Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
+
+ if (PointerPte->u.Hard.PageFrameNumber == 0) {
+ DbgPrint("physical page zero mapped\n");
+ MiFormatPte(PointerPte);
+ MiFormatPfn(Pfn1);
+ }
+
+ if (Pfn1->u3.e1.PageLocation != ActiveAndValid) {
+ DbgPrint("valid PTE with page frame not active and valid\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+
+ if (Pfn1->u3.e1.PrototypePte == 0) {
+ //
+ // This is not a prototype PTE.
+ //
+ if (Pfn1->PteAddress != PointerPte) {
+ DbgPrint("checkpte - pfn pte address and pte address not equal\n");
+ MiFormatPte(PointerPte);
+ MiFormatPfn(Pfn1);
+ return;
+ }
+
+ }
+
+ if (!MmIsAddressValid(Pfn1->PteAddress)) {
+ return;
+ }
+
+ PointerPde = MiGetPteAddress (Pfn1->PteAddress);
+ if (PointerPde->u.Hard.Valid == 1) {
+
+ if (PointerPde->u.Hard.PageFrameNumber != Pfn1->PteFrame) {
+ DbgPrint("checkpte - pteframe not right\n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ MiFormatPte(PointerPde);
+ }
+ }
+ return;
+
+}
+
+VOID
+CheckInvalidPte (
+ IN PMMPTE PointerPte
+ )
+
+
+{
+ PointerPte;
+ return;
+
+}
+#endif //DBG
diff --git a/private/ntos/mm/creasect.c b/private/ntos/mm/creasect.c
new file mode 100644
index 000000000..504d77b26
--- /dev/null
+++ b/private/ntos/mm/creasect.c
@@ -0,0 +1,3638 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ creasect.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtCreateSection and NtOpenSection.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+ULONG MMCONTROL = 'aCmM';
+ULONG MMTEMPORARY = 'xxmM';
+ULONG MMSECT = 'tSmM';
+
+#define MM_SIZE_OF_LARGEST_IMAGE ((ULONG)0x10000000)
+
+#define MM_MAXIMUM_IMAGE_HEADER (2 * PAGE_SIZE)
+
+#define MM_ALLOCATION_FRAGMENT (64 * 1024)
+
+extern ULONG MmSharedCommit;
+
+//
+// The maximum number of image objects (object table entries) is
+// the number which will fit into MM_MAXIMUM_IMAGE_HEADER with
+// the start of the PE image header in the last word of the first
+// page.
+//
+
+#define MM_MAXIMUM_IMAGE_SECTIONS \
+ ((MM_MAXIMUM_IMAGE_HEADER - (PAGE_SIZE + sizeof(IMAGE_NT_HEADERS))) / \
+ sizeof(IMAGE_SECTION_HEADER))
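+
+//
+// Illustrative arithmetic, assuming 4K pages and 32-bit PE sizes
+// (sizeof(IMAGE_NT_HEADERS) == 248, sizeof(IMAGE_SECTION_HEADER) == 40):
+// (8192 - (4096 + 248)) / 40 = 96 section headers.
+//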
+
+#if DBG
+extern PEPROCESS MmWatchProcess;
+VOID MmFooBar(VOID);
+#endif // DBG
+
+
+extern POBJECT_TYPE IoFileObjectType;
+
+CCHAR MmImageProtectionArray[16] = {
+ MM_NOACCESS,
+ MM_EXECUTE,
+ MM_READONLY,
+ MM_EXECUTE_READ,
+ MM_WRITECOPY,
+ MM_EXECUTE_WRITECOPY,
+ MM_WRITECOPY,
+ MM_EXECUTE_WRITECOPY,
+ MM_NOACCESS,
+ MM_EXECUTE,
+ MM_READONLY,
+ MM_EXECUTE_READ,
+ MM_READWRITE,
+ MM_EXECUTE_READWRITE,
+ MM_READWRITE,
+ MM_EXECUTE_READWRITE };
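+
+//
+// The table above is indexed by a four-bit key derived from the image
+// section characteristics (execute, read, write, shared). Writable
+// sections map to WRITECOPY protection unless they are also marked
+// shared, in which case they map to READWRITE.
+//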
+
+
+CCHAR
+MiGetImageProtection (
+ IN ULONG SectionCharacteristics
+ );
+
+NTSTATUS
+MiVerifyImageHeader (
+ IN PIMAGE_NT_HEADERS NtHeader,
+ IN PIMAGE_DOS_HEADER DosHeader,
+ IN ULONG NtHeaderSize
+ );
+
+BOOLEAN
+MiCheckDosCalls (
+ IN PIMAGE_OS2_HEADER Os2Header,
+ IN ULONG HeaderSize
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,MiCreateImageFileMap)
+#pragma alloc_text(PAGE,NtCreateSection)
+#pragma alloc_text(PAGE,NtOpenSection)
+#pragma alloc_text(PAGE,MiGetImageProtection)
+#pragma alloc_text(PAGE,MiVerifyImageHeader)
+#pragma alloc_text(PAGE,MiCheckDosCalls)
+#pragma alloc_text(PAGE,MiCreatePagingFileMap)
+#pragma alloc_text(PAGE,MiCreateDataFileMap)
+#endif
+
+#pragma pack (1)
+typedef struct _PHARLAP_CONFIG {
+ UCHAR uchCopyRight[0x32];
+ USHORT usType;
+ USHORT usRsv1;
+ USHORT usRsv2;
+ USHORT usSign;
+} CONFIGPHARLAP, *PCONFIGPHARLAP;
+#pragma pack ()
+
+
+NTSTATUS
+NtCreateSection (
+ OUT PHANDLE SectionHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL,
+ IN PLARGE_INTEGER MaximumSize OPTIONAL,
+ IN ULONG SectionPageProtection,
+ IN ULONG AllocationAttributes,
+ IN HANDLE FileHandle OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates a section object and opens a handle to the object
+ with the specified desired access.
+
+Arguments:
+
+ SectionHandle - A pointer to a variable that will
+ receive the section object handle value.
+
+ DesiredAccess - The desired types of access for the section.
+
+ DesiredAccess Flags
+
+ EXECUTE - Execute access to the section is
+ desired.
+
+ READ - Read access to the section is desired.
+
+ WRITE - Write access to the section is desired.
+
+
+ ObjectAttributes - Supplies a pointer to an object attributes structure.
+
+ MaximumSize - Supplies the maximum size of the section in bytes.
+ This value is rounded up to the host page size and
+ specifies the size of the section (page file
+ backed section) or the maximum size to which a
+ file can be extended or mapped (file backed
+ section).
+
+ SectionPageProtection - Supplies the protection to place on each page
+         in the section. One of PAGE_READONLY, PAGE_READWRITE, PAGE_EXECUTE,
+ or PAGE_WRITECOPY and, optionally, PAGE_NOCACHE may be specified.
+
+ AllocationAttributes - Supplies a set of flags that describe the
+ allocation attributes of the section.
+
+ AllocationAttributes Flags
+
+ SEC_BASED - The section is a based section and will be
+ allocated at the same virtual address in each process
+ address space that receives the section. This does not
+ imply that addresses are reserved for based sections.
+ Rather if the section cannot be mapped at the based address
+ an error is returned.
+
+
+ SEC_RESERVE - All pages of the section are set to the
+ reserved state.
+
+ SEC_COMMIT - All pages of the section are set to the commit
+ state.
+
+ SEC_IMAGE - The file specified by the file handle is an
+ executable image file.
+
+ SEC_FILE - The file specified by the file handle is a mapped
+ file. If a file handle is supplied and neither
+                     SEC_IMAGE nor SEC_FILE is supplied, SEC_FILE is
+ assumed.
+
+ SEC_NO_CHANGE - Once the file is mapped, the protection cannot
+ be changed nor can the view be unmapped. The
+ view is unmapped when the process is deleted.
+ Cannot be used with SEC_IMAGE.
+
+ FileHandle - Supplies an optional handle of an open file object.
+ If the value of this handle is null, then the
+ section will be backed by a paging file. Otherwise
+ the section is backed by the specified data file.
+
+Return Value:
+
+    Returns the status of the operation.
+
+--*/
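+
+//
+// A minimal usage sketch (user-mode caller; the values are illustrative):
+// create a 64KB page-file-backed section with read/write access.
+//
+//     LARGE_INTEGER Size;
+//     HANDLE Section;
+//     Size.QuadPart = 0x10000;
+//     Status = NtCreateSection (&Section,
+//                               SECTION_MAP_READ | SECTION_MAP_WRITE,
+//                               NULL,           // ObjectAttributes
+//                               &Size,
+//                               PAGE_READWRITE,
+//                               SEC_COMMIT,
+//                               NULL);          // NULL handle - paging file
+//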
+
+{
+ NTSTATUS Status;
+ PVOID Section;
+ HANDLE Handle;
+ LARGE_INTEGER LargeSize;
+ LARGE_INTEGER CapturedSize;
+ ULONG RetryCount;
+
+ if ((AllocationAttributes & ~(SEC_COMMIT | SEC_RESERVE | SEC_BASED |
+ SEC_IMAGE | SEC_NOCACHE | SEC_NO_CHANGE)) != 0) {
+ return STATUS_INVALID_PARAMETER_6;
+ }
+
+ if ((AllocationAttributes & (SEC_COMMIT | SEC_RESERVE | SEC_IMAGE)) == 0) {
+ return STATUS_INVALID_PARAMETER_6;
+ }
+
+ if ((AllocationAttributes & SEC_IMAGE) &&
+ (AllocationAttributes & (SEC_COMMIT | SEC_RESERVE | SEC_NOCACHE | SEC_NO_CHANGE))) {
+
+ return STATUS_INVALID_PARAMETER_6;
+ }
+
+ if ((AllocationAttributes & SEC_COMMIT) &&
+ (AllocationAttributes & SEC_RESERVE)) {
+ return STATUS_INVALID_PARAMETER_6;
+ }
+
+ //
+    // Check the SectionPageProtection flags.
+ //
+
+ if ((SectionPageProtection & PAGE_NOCACHE) ||
+ (SectionPageProtection & PAGE_GUARD) ||
+ (SectionPageProtection & PAGE_NOACCESS)) {
+
+ //
+        // No-cache can only be specified through the SEC_NOCACHE option in the
+ // allocation attributes.
+ //
+
+ return STATUS_INVALID_PAGE_PROTECTION;
+ }
+
+
+ if (KeGetPreviousMode() != KernelMode) {
+ try {
+ ProbeForWriteHandle(SectionHandle);
+ if (ARGUMENT_PRESENT (MaximumSize)) {
+ LargeSize = *MaximumSize;
+ } else {
+ ZERO_LARGE (LargeSize);
+ }
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+ } else {
+ if (ARGUMENT_PRESENT (MaximumSize)) {
+ LargeSize = *MaximumSize;
+ } else {
+ ZERO_LARGE (LargeSize);
+ }
+ }
+
+ RetryCount = 0;
+
+retry:
+
+ CapturedSize = LargeSize;
+
+ ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
+ Status = MmCreateSection ( &Section,
+ DesiredAccess,
+ ObjectAttributes,
+ &CapturedSize,
+ SectionPageProtection,
+ AllocationAttributes,
+ FileHandle,
+ NULL );
+
+
+ ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
+ if (!NT_SUCCESS(Status)) {
+ if ((Status == STATUS_FILE_LOCK_CONFLICT) &&
+ (RetryCount < 3)) {
+
+ //
+ // The file system may have prevented this from working
+ // due to log file flushing. Delay and try again.
+ //
+
+ RetryCount += 1;
+
+ KeDelayExecutionThread (KernelMode,
+ FALSE,
+ &MmHalfSecond);
+
+ goto retry;
+
+ }
+ return Status;
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SECTIONS) {
+ DbgPrint("inserting section %lx control %lx\n",Section,
+ ((PSECTION)Section)->Segment->ControlArea);
+ }
+#endif
+
+ {
+ PCONTROL_AREA ControlArea;
+ ControlArea = ((PSECTION)Section)->Segment->ControlArea;
+ if ((ControlArea != NULL) && (ControlArea->FilePointer != NULL)) {
+ CcZeroEndOfLastPage (ControlArea->FilePointer);
+ }
+ }
+
+ Status = ObInsertObject (Section,
+ NULL,
+ DesiredAccess,
+ 0,
+ (PVOID *)NULL,
+ &Handle);
+
+ try {
+ *SectionHandle = Handle;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return Status;
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( !MmWatchProcess )
+ DbgPrint("return crea sect handle %lx status %lx\n",Handle, Status);
+ }
+#endif
+
+ return Status;
+}
+
+NTSTATUS
+MmCreateSection (
+ OUT PVOID *SectionObject,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL,
+ IN PLARGE_INTEGER MaximumSize,
+ IN ULONG SectionPageProtection,
+ IN ULONG AllocationAttributes,
+ IN HANDLE FileHandle OPTIONAL,
+ IN PFILE_OBJECT FileObject OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates a section object and opens a handle to the object
+ with the specified desired access.
+
+Arguments:
+
+    SectionObject - A pointer to a variable that will
+ receive the section object address.
+
+ DesiredAccess - The desired types of access for the
+ section.
+
+ DesiredAccess Flags
+
+
+ EXECUTE - Execute access to the section is
+ desired.
+
+ READ - Read access to the section is desired.
+
+ WRITE - Write access to the section is desired.
+
+
+ ObjectAttributes - Supplies a pointer to an object attributes structure.
+
+ MaximumSize - Supplies the maximum size of the section in bytes.
+ This value is rounded up to the host page size and
+ specifies the size of the section (page file
+ backed section) or the maximum size to which a
+ file can be extended or mapped (file backed
+ section).
+
+ SectionPageProtection - Supplies the protection to place on each page
+         in the section. One of PAGE_READONLY, PAGE_READWRITE, PAGE_EXECUTE,
+ or PAGE_WRITECOPY and, optionally, PAGE_NOCACHE may be specified.
+
+ AllocationAttributes - Supplies a set of flags that describe the
+ allocation attributes of the section.
+
+ AllocationAttributes Flags
+
+ SEC_BASED - The section is a based section and will be
+ allocated at the same virtual address in each process
+ address space that receives the section. This does not
+ imply that addresses are reserved for based sections.
+ Rather if the section cannot be mapped at the based address
+ an error is returned.
+
+ SEC_RESERVE - All pages of the section are set to the
+ reserved state.
+
+ SEC_COMMIT - All pages of the section are set to the commit
+ state.
+
+ SEC_IMAGE - The file specified by the file handle is an
+ executable image file.
+
+ SEC_FILE - The file specified by the file handle is a mapped
+ file. If a file handle is supplied and neither
+                     SEC_IMAGE nor SEC_FILE is supplied, SEC_FILE is
+ assumed.
+
+ FileHandle - Supplies an optional handle of an open file object.
+ If the value of this handle is null, then the
+ section will be backed by a paging file. Otherwise
+ the section is backed by the specified data file.
+
+ FileObject - Supplies an optional pointer to the file object. If this
+ value is NULL and the FileHandle is NULL, then there is
+ no file to map (image or mapped file). If this value
+ is specified, then the File is to be mapped as a MAPPED FILE
+ and NO file size checking will be performed.
+
+           ONLY THE SYSTEM CACHE SHOULD PROVIDE A FILE OBJECT WITH THIS CALL!!
+           This path is optimized to skip the size check, perform only data
+           mapping, and skip the protection check.
+
+           Note - Only one of FileHandle or FileObject should be specified!
+
+Return Value:
+
+    Returns the status of the operation.
+
+
+--*/
+
+{
+ SECTION Section;
+ PSECTION NewSection;
+ PSEGMENT Segment;
+ PSEGMENT NewSegment;
+ KPROCESSOR_MODE PreviousMode;
+ KIRQL OldIrql;
+ NTSTATUS Status;
+ PCONTROL_AREA ControlArea;
+ PCONTROL_AREA NewControlArea;
+ PCONTROL_AREA SegmentControlArea;
+ ACCESS_MASK FileDesiredAccess;
+ PFILE_OBJECT File;
+ PEVENT_COUNTER Event;
+ ULONG IgnoreFileSizing = FALSE;
+ ULONG ProtectionMask;
+ ULONG ProtectMaskForAccess;
+ ULONG FileAcquired = FALSE;
+ PEVENT_COUNTER SegmentEvent;
+ BOOLEAN FileSizeChecked = FALSE;
+ LARGE_INTEGER TempSectionSize;
+ LARGE_INTEGER EndOfFile;
+ ULONG IncrementedRefCount = FALSE;
+ PFILE_OBJECT ChangeFileReference = NULL;
+#if DBG
+ PVOID PreviousSectionPointer;
+#endif //DBG
+
+    DesiredAccess;      // reference the parameter to quiet the compiler
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( !MmWatchProcess ) {
+ DbgPrint("crea sect access mask %lx maxsize %lx page prot %lx\n",
+ DesiredAccess, MaximumSize->LowPart, SectionPageProtection);
+ DbgPrint(" allocation attributes %lx file handle %lx\n",
+ AllocationAttributes, FileHandle);
+ }
+ }
+#endif
+
+ //
+ // Check allocation attributes flags.
+ //
+
+ File = (PFILE_OBJECT)NULL;
+
+ ASSERT ((AllocationAttributes & ~(SEC_COMMIT | SEC_RESERVE | SEC_BASED |
+ SEC_IMAGE | SEC_NOCACHE | SEC_NO_CHANGE)) == 0);
+
+ ASSERT ((AllocationAttributes & (SEC_COMMIT | SEC_RESERVE | SEC_IMAGE)) != 0);
+
+ ASSERT (!((AllocationAttributes & SEC_IMAGE) &&
+ (AllocationAttributes & (SEC_COMMIT | SEC_RESERVE | SEC_NOCACHE | SEC_NO_CHANGE))));
+
+ ASSERT (!((AllocationAttributes & SEC_COMMIT) &&
+ (AllocationAttributes & SEC_RESERVE)));
+
+ ASSERT (!((SectionPageProtection & PAGE_NOCACHE) ||
+ (SectionPageProtection & PAGE_GUARD) ||
+ (SectionPageProtection & PAGE_NOACCESS)));
+
+ if (AllocationAttributes & SEC_NOCACHE) {
+ SectionPageProtection |= PAGE_NOCACHE;
+ }
+
+ //
+ // Check the protection field. This could raise an exception.
+ //
+
+ try {
+ ProtectionMask = MiMakeProtectionMask (SectionPageProtection);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
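+    //
+    // The low three bits of the protection mask encode the basic
+    // read/write/execute access; the guard and no-cache modifiers are
+    // in higher bits, so masking with 0x7 yields the index used for
+    // the file-access table below.
+    //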
+ ProtectMaskForAccess = ProtectionMask & 0x7;
+
+ FileDesiredAccess = MmMakeFileAccess[ProtectMaskForAccess];
+
+ //
+ // Get previous processor mode and probe output arguments if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+
+ Section.InitialPageProtection = SectionPageProtection;
+ Section.Segment = (PSEGMENT)NULL;
+
+ if (ARGUMENT_PRESENT(FileHandle) || ARGUMENT_PRESENT(FileObject)) {
+
+ if (ARGUMENT_PRESENT(FileObject)) {
+ IgnoreFileSizing = TRUE;
+ File = FileObject;
+
+ //
+ // Quick check to see if a control area already exists.
+ //
+
+ if (File->SectionObjectPointer->DataSectionObject) {
+
+ LOCK_PFN (OldIrql);
+ ControlArea =
+ (PCONTROL_AREA)(File->SectionObjectPointer->DataSectionObject);
+
+ if ((ControlArea != NULL) &&
+ (!ControlArea->u.Flags.BeingDeleted) &&
+ (!ControlArea->u.Flags.BeingCreated)) {
+
+ //
+ // Control area exists and is not being deleted,
+ // reference it.
+ //
+
+ NewSegment = ControlArea->Segment;
+ if ((ControlArea->NumberOfSectionReferences == 0) &&
+ (ControlArea->NumberOfMappedViews == 0) &&
+ (ControlArea->ModifiedWriteCount == 0)) {
+
+ //
+ // Dereference the current file object and
+ // reference this one.
+ //
+
+ ChangeFileReference = ControlArea->FilePointer;
+ ControlArea->FilePointer = FileObject;
+
+ //
+ // This dereference is purposely at DPC_LEVEL
+ // so the object manager queues it to another
+ // thread thereby eliminating deadlocks with
+ // the redirector.
+ //
+
+ ObDereferenceObject (ChangeFileReference);
+ }
+ ControlArea->NumberOfSectionReferences += 1;
+ IncrementedRefCount = TRUE;
+ UNLOCK_PFN (OldIrql);
+ Section.SizeOfSection = *MaximumSize;
+
+ goto ReferenceObject;
+ }
+ UNLOCK_PFN (OldIrql);
+ }
+
+ ObReferenceObject (FileObject);
+
+ } else {
+
+ //
+ // Only one of FileHandle or FileObject should be supplied
+ // if a FileObject is supplied, this must be from the
+ // file system and therefore the file's size should not
+ // be checked.
+ //
+
+ Status = ObReferenceObjectByHandle ( FileHandle,
+ FileDesiredAccess,
+ IoFileObjectType,
+ PreviousMode,
+ (PVOID *)&File,
+ NULL );
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ //
+ // If this file doesn't have a section object pointer,
+ // return an error.
+ //
+
+ if (File->SectionObjectPointer == NULL) {
+ ObDereferenceObject (File);
+ return STATUS_INVALID_FILE_FOR_SECTION;
+ }
+ }
+
+ //
+ // Check to see if the specified file already has a section.
+ // If not, indicate in the file object's pointer to an FCB that
+ // a section is being built. This synchronizes segment creation
+ // for the file.
+ //
+
+ NewControlArea = ExAllocatePoolWithTag (NonPagedPool,
+ (ULONG)sizeof(CONTROL_AREA) +
+ (ULONG)sizeof(SUBSECTION),
+ MMCONTROL);
+
+ if (NewControlArea == NULL) {
+ ObDereferenceObject (File);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ RtlZeroMemory (NewControlArea,
+ sizeof(CONTROL_AREA) + sizeof(SUBSECTION));
+ NewSegment = (PSEGMENT)NULL;
+
+ //
+    // We only need the file resource if this was a user request, i.e. not
+ // a call from the cache manager or file system.
+ //
+
+ if (ARGUMENT_PRESENT(FileHandle)) {
+
+ FsRtlAcquireFileExclusive (File);
+ IoSetTopLevelIrp((PIRP)FSRTL_FSP_TOP_LEVEL_IRP);
+ FileAcquired = TRUE;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Allocate an event to wait on in case the segment is in the
+ // process of being deleted. This event cannot be allocated
+ // with the PFN database locked as pool expansion would deadlock.
+ //
+
+ SegmentEvent = MiGetEventCounter();
+
+RecheckSegment:
+
+ if (AllocationAttributes & SEC_IMAGE) {
+ ControlArea =
+ (PCONTROL_AREA)(File->SectionObjectPointer->ImageSectionObject);
+
+ } else {
+ ControlArea =
+ (PCONTROL_AREA)(File->SectionObjectPointer->DataSectionObject);
+ }
+
+ if (ControlArea != NULL) {
+
+ //
+ // A segment already exists for this file. Make sure that it
+ // is not in the process of being deleted, or being created.
+ //
+
+
+ if ((ControlArea->u.Flags.BeingDeleted) ||
+ (ControlArea->u.Flags.BeingCreated)) {
+
+ //
+ // The segment object is in the process of being deleted or
+ // created.
+ // Check to see if another thread is waiting for the deletion,
+            // otherwise create an event object to wait upon.
+ //
+
+ if (ControlArea->WaitingForDeletion == NULL) {
+
+ //
+                // Initialize an event and put its address in the control area.
+ //
+
+ ControlArea->WaitingForDeletion = SegmentEvent;
+ Event = SegmentEvent;
+ SegmentEvent = NULL;
+ } else {
+ Event = ControlArea->WaitingForDeletion;
+ Event->RefCount += 1;
+ }
+
+ //
+ // Release the pfn lock, the file lock, and wait for the event.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ if (FileAcquired) {
+ IoSetTopLevelIrp((PIRP)NULL);
+ FsRtlReleaseFile (File);
+ }
+
+ KeWaitForSingleObject(&Event->Event,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ if (FileAcquired) {
+ FsRtlAcquireFileExclusive (File);
+ IoSetTopLevelIrp((PIRP)FSRTL_FSP_TOP_LEVEL_IRP);
+ }
+
+ LOCK_PFN (OldIrql);
+ MiFreeEventCounter (Event, TRUE);
+
+ if (SegmentEvent == NULL) {
+
+ //
+ // The event was freed from pool, allocate another
+ // event in case we have to synchronize one more time.
+ //
+
+ SegmentEvent = MiGetEventCounter();
+ }
+ goto RecheckSegment;
+
+ } else {
+
+ //
+ // There is already a segment for this file, have
+ // this section refer to that segment.
+ // No need to reference the file object any more.
+ //
+
+ NewSegment = ControlArea->Segment;
+ ControlArea->NumberOfSectionReferences += 1;
+ IncrementedRefCount = TRUE;
+
+ //
+            // If this reference was not from the cache manager,
+            // up the count of user references.
+ //
+
+ if (IgnoreFileSizing == FALSE) {
+ ControlArea->NumberOfUserReferences += 1;
+ }
+ }
+ } else {
+
+ //
+ // There is no segment associated with this file object.
+ // Set the file object to refer to the new control area.
+ //
+
+ ControlArea = NewControlArea;
+ ControlArea->u.Flags.BeingCreated = 1;
+
+ if (AllocationAttributes & SEC_IMAGE) {
+ ((PCONTROL_AREA)((File->SectionObjectPointer->ImageSectionObject))) =
+ ControlArea;
+ } else {
+#if DBG
+ PreviousSectionPointer = File->SectionObjectPointer;
+#endif //DBG
+ ((PCONTROL_AREA)((File->SectionObjectPointer->DataSectionObject))) =
+ ControlArea;
+ }
+ }
+
+ if (SegmentEvent != NULL) {
+ MiFreeEventCounter (SegmentEvent, TRUE);
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ if (NewSegment != (PSEGMENT)NULL) {
+
+ //
+        // Whether we created a segment or not, let's flush the data section
+ // if there is one.
+ //
+
+ if ((AllocationAttributes & SEC_IMAGE) &&
+ (File->SectionObjectPointer->DataSectionObject)) {
+
+ IO_STATUS_BLOCK IoStatus;
+
+ if (((PCONTROL_AREA)((File->SectionObjectPointer->DataSectionObject)))->NumberOfSystemCacheViews) {
+ CcFlushCache (File->SectionObjectPointer,
+ NULL,
+ 0,
+ &IoStatus);
+
+ } else {
+ MmFlushSection (File->SectionObjectPointer,
+ NULL,
+ 0,
+ &IoStatus,
+ TRUE);
+ }
+ }
+
+ //
+ // A segment already exists for this file object.
+ // Deallocate the new control area as it is no longer required
+ // and dereference the file object.
+ //
+
+ ExFreePool (NewControlArea);
+ ObDereferenceObject (File);
+
+ //
+ // The section is in paged pool, this can't be set until
+ // the PFN mutex has been released.
+ //
+
+ if ((!IgnoreFileSizing) && (ControlArea->u.Flags.Image == 0)) {
+
+ //
+ // The file size in the segment may not match the current
+ // file size, query the file system and get the file
+ // size.
+ //
+
+ Status = FsRtlGetFileSize (File, &EndOfFile );
+
+ if (!NT_SUCCESS (Status)) {
+ goto UnrefAndReturn;
+ }
+
+ if ((EndOfFile.QuadPart== 0) &&
+ (MaximumSize->QuadPart == 0)) {
+
+ //
+ // Can't map a zero length without specifying the maximum
+ // size as non-zero.
+ //
+
+ Status = STATUS_MAPPED_FILE_SIZE_ZERO;
+ goto UnrefAndReturn;
+ }
+ } else {
+
+ //
+ // The size is okay in the segment.
+ //
+
+ EndOfFile = NewSegment->SizeOfSegment;
+ }
+
+ if (MaximumSize->QuadPart == 0) {
+
+ Section.SizeOfSection = EndOfFile;
+ FileSizeChecked = TRUE;
+
+ } else if (EndOfFile.QuadPart >= MaximumSize->QuadPart) {
+
+ //
+ // EndOfFile is greater than the MaximumSize,
+ // use the specified maximum size.
+ //
+
+ Section.SizeOfSection = *MaximumSize;
+ FileSizeChecked = TRUE;
+
+ } else {
+
+ //
+ // Need to extend the section, make sure the file was
+ // opened for write access.
+ //
+
+ if (((SectionPageProtection & PAGE_READWRITE) |
+ (SectionPageProtection & PAGE_EXECUTE_READWRITE)) == 0) {
+
+ Status = STATUS_SECTION_TOO_BIG;
+ goto UnrefAndReturn;
+ }
+ Section.SizeOfSection = *MaximumSize;
+ }
+
+ } else {
+
+ //
+ // The file does not have an associated segment, create a segment
+ // object.
+ //
+
+ if (AllocationAttributes & SEC_IMAGE) {
+
+ Status = MiCreateImageFileMap (File,
+ &Segment);
+
+ } else {
+
+ Status = MiCreateDataFileMap (File,
+ &Segment,
+ MaximumSize,
+ SectionPageProtection,
+ AllocationAttributes,
+ IgnoreFileSizing );
+ ASSERT (PreviousSectionPointer == File->SectionObjectPointer);
+ }
+
+ if (!NT_SUCCESS(Status)) {
+
+ //
+ // Lock the PFN database and check to see if another thread has
+ // tried to create a segment to the file object at the same
+ // time.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ Event = ControlArea->WaitingForDeletion;
+ ControlArea->WaitingForDeletion = NULL;
+ ASSERT (ControlArea->u.Flags.FilePointerNull == 0);
+ ControlArea->u.Flags.FilePointerNull = 1;
+
+ if (AllocationAttributes & SEC_IMAGE) {
+ (PCONTROL_AREA)((File->SectionObjectPointer->ImageSectionObject)) =
+ NULL;
+ } else {
+ (PCONTROL_AREA)((File->SectionObjectPointer->DataSectionObject)) =
+ NULL;
+ }
+ ControlArea->u.Flags.BeingCreated = 0;
+
+ UNLOCK_PFN (OldIrql);
+
+ if (FileAcquired) {
+ IoSetTopLevelIrp((PIRP)NULL);
+ FsRtlReleaseFile (File);
+ }
+
+ ExFreePool (NewControlArea);
+
+ ObDereferenceObject (File);
+
+ if (Event != NULL) {
+
+ //
+ // Signal any waiters that the segment structure exists.
+ //
+
+ KeSetEvent (&Event->Event, 0, FALSE);
+ }
+
+ return Status;
+ }
+
+ //
+ // If the size was specified as zero, set the section size
+ // from the created segment size. This solves problems with
+ // race conditions when multiple sections
+ // are created for the same mapped file with varying sizes.
+ //
+
+ if (MaximumSize->QuadPart == 0) {
+ Section.SizeOfSection = Segment->SizeOfSegment;
+ } else {
+ Section.SizeOfSection = *MaximumSize;
+ }
+ }
+
+ } else {
+
+ //
+ // No file handle exists, this is a page file backed section.
+ //
+
+ if (AllocationAttributes & SEC_IMAGE) {
+ return STATUS_INVALID_FILE_FOR_SECTION;
+ }
+
+ Status = MiCreatePagingFileMap ( &NewSegment,
+ MaximumSize,
+ ProtectionMask,
+ AllocationAttributes);
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ //
+ // Set the section size from the created segment size. This
+ // solves problems with race conditions when multiple sections
+ // are created for the same mapped file with varying sizes.
+ //
+
+ Section.SizeOfSection = NewSegment->SizeOfSegment;
+ ControlArea = NewSegment->ControlArea;
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SECTIONS) {
+ if (NewSegment == (PSEGMENT)NULL) {
+ DbgPrint("inserting segment %lx control %lx\n",Segment,
+ Segment->ControlArea);
+ } else {
+ DbgPrint("inserting segment %lx control %lx\n",NewSegment,
+ NewSegment->ControlArea);
+ }
+ }
+#endif
+
+
+ if (NewSegment == (PSEGMENT)NULL) {
+ NewSegment = Segment;
+
+ //
+ // Lock the PFN database and check to see if another thread has
+ // tried to create a segment to the file object at the same time.
+ //
+
+ SegmentControlArea = Segment->ControlArea;
+
+ ASSERT (File != NULL);
+
+ LOCK_PFN (OldIrql);
+
+ Event = ControlArea->WaitingForDeletion;
+ ControlArea->WaitingForDeletion = NULL;
+
+ if (AllocationAttributes & SEC_IMAGE) {
+
+ //
+ // Change the control area in the file object pointer.
+ //
+
+ ((PCONTROL_AREA)(File->SectionObjectPointer->ImageSectionObject)) =
+ SegmentControlArea;
+
+ ControlArea = SegmentControlArea;
+ }
+
+ ControlArea->u.Flags.BeingCreated = 0;
+
+ UNLOCK_PFN (OldIrql);
+
+ if (AllocationAttributes & SEC_IMAGE) {
+
+ //
+ // Deallocate the pool used for the original control area.
+ //
+
+ ExFreePool (NewControlArea);
+ }
+
+ if (Event != NULL) {
+
+ //
+ // Signal any waiters that the segment structure exists.
+ //
+
+ KeSetEvent (&Event->Event, 0, FALSE);
+ }
+ }
+
+ //
+    // The being-created flag has now been cleared, allowing other threads
+    // to reference the segment. Release the resource on the file.
+ //
+
+ if (FileAcquired) {
+ IoSetTopLevelIrp((PIRP)NULL);
+ FsRtlReleaseFile (File);
+ FileAcquired = FALSE;
+ }
+
+ReferenceObject:
+
+ if (ChangeFileReference) {
+ ObReferenceObject (FileObject);
+ }
+
+ //
+ // Now that the segment object is created, make the section object
+ // refer to the segment object.
+ //
+
+ Section.Segment = NewSegment;
+ Section.u.LongFlags = ControlArea->u.LongFlags;
+
+ //
+    // Create the section object now, so that the error handling is
+    // simplified if the object cannot be created.
+ //
+
+ Status = ObCreateObject (PreviousMode,
+ MmSectionObjectType,
+ ObjectAttributes,
+ PreviousMode,
+ NULL,
+ sizeof(SECTION),
+ sizeof(SECTION) +
+ NewSegment->TotalNumberOfPtes * sizeof(MMPTE),
+ sizeof(CONTROL_AREA) +
+ NewSegment->ControlArea->NumberOfSubsections *
+ sizeof(SUBSECTION),
+ (PVOID *)&NewSection);
+
+ if (!NT_SUCCESS(Status)) {
+ goto UnrefAndReturn;
+ }
+
+ RtlMoveMemory (NewSection, &Section, sizeof(SECTION));
+ NewSection->Address.StartingVa = NULL;
+
+ if (!IgnoreFileSizing) {
+
+ //
+ // Indicate that the cache manager is not the owner of this
+ // section.
+ //
+
+ NewSection->u.Flags.UserReference = 1;
+
+ if (AllocationAttributes & SEC_NO_CHANGE) {
+
+ //
+ // Indicate that once the section is mapped, no protection
+ // changes or freeing the mapping is allowed.
+ //
+
+ NewSection->u.Flags.NoChange = 1;
+ }
+
+ if (((SectionPageProtection & PAGE_READWRITE) |
+ (SectionPageProtection & PAGE_EXECUTE_READWRITE)) == 0) {
+
+ //
+ // This section does not support WRITE access, indicate
+ // that changing the protection to WRITE results in COPY_ON_WRITE.
+ //
+
+ NewSection->u.Flags.CopyOnWrite = 1;
+ }
+
+ if (AllocationAttributes & SEC_BASED) {
+
+ NewSection->u.Flags.Based = 1;
+
+ //
+ // Get the allocation base mutex.
+ //
+
+ ExAcquireFastMutex (&MmSectionBasedMutex);
+
+ //
+ // This section is based at a unique address system wide.
+ //
+
+ try {
+ NewSection->Address.StartingVa = (PVOID)MiFindEmptySectionBaseDown (
+ NewSection->SizeOfSection.LowPart,
+ MmHighSectionBase);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ ExReleaseFastMutex (&MmSectionBasedMutex);
+ ObDereferenceObject (NewSection);
+                return GetExceptionCode();
+ }
+
+ NewSection->Address.EndingVa =
+ (PVOID)((ULONG)NewSection->Address.StartingVa +
+ NewSection->SizeOfSection.LowPart - 1);
+
+ MiInsertBasedSection (NewSection);
+ ExReleaseFastMutex (&MmSectionBasedMutex);
+ }
+ }
+
+ //
+ // If the cache manager is creating the section, set the was
+ // purged flag as the file size can change.
+ //
+
+ ControlArea->u.Flags.WasPurged |= IgnoreFileSizing;
+
+ //
+ // Check to see if the section is for a data file and the size
+ // of the section is greater than the current size of the
+ // segment.
+ //
+
+ if (((ControlArea->u.Flags.WasPurged == 1) && (!IgnoreFileSizing)) &&
+ (!FileSizeChecked)
+ ||
+ (NewSection->SizeOfSection.QuadPart >
+ NewSection->Segment->SizeOfSegment.QuadPart)) {
+
+ TempSectionSize = NewSection->SizeOfSection;
+
+ NewSection->SizeOfSection = NewSection->Segment->SizeOfSegment;
+
+ Status = MmExtendSection (NewSection,
+ &TempSectionSize,
+ IgnoreFileSizing);
+
+ if (!NT_SUCCESS(Status)) {
+ ObDereferenceObject (NewSection);
+ return Status;
+ }
+ }
+
+ *SectionObject = (PVOID)NewSection;
+
+ return Status;
+
+UnrefAndReturn:
+
+ //
+ // Unreference the control area, if it was referenced and return
+ // the error status.
+ //
+
+ if (FileAcquired) {
+ IoSetTopLevelIrp((PIRP)NULL);
+ FsRtlReleaseFile (File);
+ }
+
+ if (IncrementedRefCount) {
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfSectionReferences -= 1;
+ if (!IgnoreFileSizing) {
+ ASSERT ((LONG)ControlArea->NumberOfUserReferences > 0);
+ ControlArea->NumberOfUserReferences -= 1;
+ }
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+ }
+ return Status;
+}
+
+NTSTATUS
+MiCreateImageFileMap (
+ IN PFILE_OBJECT File,
+ OUT PSEGMENT *Segment
+ )
+
+/*++
+
+Routine Description:
+
+    This function creates the necessary structures to allow the mapping
+ of an image file.
+
+    The image file is opened and verified for correctness; a segment
+ object is created and initialized based on data in the image
+ header.
+
+Arguments:
+
+ File - Supplies the file object for the image file.
+
+ Segment - Returns the segment object.
+
+Return Value:
+
+    Returns the status value.
+
+
+--*/
+
+{
+ NTSTATUS Status;
+ ULONG NumberOfPtes;
+ ULONG SizeOfSegment;
+ ULONG SectionVirtualSize;
+ ULONG i;
+ PCONTROL_AREA ControlArea;
+ PSUBSECTION Subsection;
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ MMPTE TempPteDemandZero;
+ PVOID Base;
+ PIMAGE_DOS_HEADER DosHeader;
+ PIMAGE_NT_HEADERS NtHeader;
+ PIMAGE_SECTION_HEADER SectionTableEntry;
+ PSEGMENT NewSegment;
+ ULONG SectorOffset;
+ ULONG NumberOfSubsections;
+ ULONG PageFrameNumber;
+ LARGE_INTEGER StartingOffset;
+ PCHAR ExtendedHeader = NULL;
+ PULONG Page;
+ ULONG PreferredImageBase;
+ ULONG NextVa;
+ PKEVENT InPageEvent;
+ PMDL Mdl;
+ ULONG ImageFileSize;
+ ULONG OffsetToSectionTable;
+ ULONG ImageAlignment;
+ ULONG FileAlignment;
+ BOOLEAN ImageCommit;
+ BOOLEAN SectionCommit;
+ IO_STATUS_BLOCK IoStatus;
+ LARGE_INTEGER EndOfFile;
+ ULONG NtHeaderSize;
+
+#if defined (_ALPHA_)
+ BOOLEAN InvalidAlignmentAllowed = FALSE;
+#endif
+
+
+ // *************************************************************
+    // Create image file section.
+ // *************************************************************
+
+ PAGED_CODE();
+
+ Status = FsRtlGetFileSize (File, &EndOfFile );
+
+ if (Status == STATUS_FILE_IS_A_DIRECTORY) {
+
+ //
+ // Can't map a directory as a section. Return error.
+ //
+
+ return STATUS_INVALID_FILE_FOR_SECTION;
+ }
+
+ if (!NT_SUCCESS (Status)) {
+ return Status;
+ }
+
+ if (EndOfFile.HighPart != 0) {
+
+ //
+ // File too big. Return error.
+ //
+
+ return STATUS_INVALID_FILE_FOR_SECTION;
+ }
+
+ //
+ // Create a segment which maps an image file.
+ // For now map a COFF image file with the subsections
+ // containing the based address of the file.
+ //
+
+ //
+ // Read in the file header.
+ //
+
+ InPageEvent = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(KEVENT) + MmSizeOfMdl (
+ NULL,
+ MM_MAXIMUM_IMAGE_HEADER),
+ MMTEMPORARY);
+ if (InPageEvent == NULL) {
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ Mdl = (PMDL)(InPageEvent + 1);
+
+ //
+ // Create an event for the read operation.
+ //
+
+ KeInitializeEvent (InPageEvent, NotificationEvent, FALSE);
+
+ //
+ // Build an MDL for the operation.
+ //
+
+ (VOID) MmCreateMdl( Mdl, NULL, PAGE_SIZE);
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+
+ PageFrameNumber = MiGetPageForHeader();
+
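+    //
+    // The MDL's page frame array immediately follows the MDL header;
+    // record the newly allocated frame as the single page of this
+    // transfer.
+    //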
+ Page = (PULONG)(Mdl + 1);
+ *Page = PageFrameNumber;
+
+ ZERO_LARGE (StartingOffset);
+
+ CcZeroEndOfLastPage (File);
+
+ //
+    // Let's flush the data section if there is one.
+ //
+
+ if (File->SectionObjectPointer->DataSectionObject) {
+ IO_STATUS_BLOCK IoStatus;
+ if (((PCONTROL_AREA)((File->SectionObjectPointer->DataSectionObject)))->NumberOfSystemCacheViews) {
+ CcFlushCache (File->SectionObjectPointer,
+ NULL,
+ 0,
+ &IoStatus);
+
+ } else {
+ MmFlushSection (File->SectionObjectPointer,
+ NULL,
+ 0,
+ &IoStatus,
+ TRUE);
+ }
+ }
+
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+ Status = IoPageRead (File,
+ Mdl,
+ &StartingOffset,
+ InPageEvent,
+ &IoStatus
+ );
+
+ if (Status == STATUS_PENDING) {
+ KeWaitForSingleObject( InPageEvent,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ if ((!NT_SUCCESS(Status)) || (!NT_SUCCESS(IoStatus.Status))) {
+ if (Status != STATUS_FILE_LOCK_CONFLICT) {
+ Status = STATUS_INVALID_FILE_FOR_SECTION;
+ }
+ goto BadSection;
+ }
+
+ Base = MiMapImageHeaderInHyperSpace (PageFrameNumber);
+ DosHeader = (PIMAGE_DOS_HEADER)Base;
+
+ if (IoStatus.Information != PAGE_SIZE) {
+
+ //
+ // A full page was not read from the file, zero any remaining
+ // bytes.
+ //
+
+ RtlZeroMemory ((PVOID)((ULONG)Base + IoStatus.Information),
+ PAGE_SIZE - IoStatus.Information);
+ }
+
+ //
+ // Check to determine if this is an NT image (PE format) or
+ // a DOS image, Win-16 image, or OS/2 image. If the image is
+ // not NT format, return an error indicating which image it
+ // appears to be.
+ //
+
+ if (DosHeader->e_magic != IMAGE_DOS_SIGNATURE) {
+
+ Status = STATUS_INVALID_IMAGE_NOT_MZ;
+ goto NeImage;
+ }
+
+#ifndef i386
+ if (((ULONG)DosHeader->e_lfanew & 3) != 0) {
+
+ //
+ // The image header is not aligned on a longword boundary.
+ // Report this as an invalid protect mode image.
+ //
+
+ Status = STATUS_INVALID_IMAGE_PROTECT;
+ goto NeImage;
+ }
+#endif
+
+ if ((ULONG)DosHeader->e_lfanew > EndOfFile.LowPart) {
+ Status = STATUS_INVALID_IMAGE_PROTECT;
+ goto NeImage;
+ }
+
+ if (((ULONG)DosHeader->e_lfanew +
+ sizeof(IMAGE_NT_HEADERS) +
+ (16 * sizeof(IMAGE_SECTION_HEADER))) > PAGE_SIZE) {
+
+ //
+ // The PE header is not within the page already read or the
+ // objects are in another page.
+ // Build another MDL and read an additional 8k.
+ //
+
+ ExtendedHeader = ExAllocatePoolWithTag (NonPagedPool,
+ MM_MAXIMUM_IMAGE_HEADER,
+ MMTEMPORARY);
+ if (ExtendedHeader == NULL) {
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto NeImage;
+ }
+
+ //
+ // Build an MDL for the operation.
+ //
+
+ (VOID) MmCreateMdl( Mdl, ExtendedHeader, MM_MAXIMUM_IMAGE_HEADER);
+
+ MmBuildMdlForNonPagedPool (Mdl);
+
+ StartingOffset.LowPart = (ULONG)PAGE_ALIGN ((ULONG)DosHeader->e_lfanew);
+
+ KeClearEvent (InPageEvent);
+ Status = IoPageRead (File,
+ Mdl,
+ &StartingOffset,
+ InPageEvent,
+ &IoStatus
+ );
+
+ if (Status == STATUS_PENDING) {
+ KeWaitForSingleObject( InPageEvent,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ if ((!NT_SUCCESS(Status)) || (!NT_SUCCESS(IoStatus.Status))) {
+ if (Status != STATUS_FILE_LOCK_CONFLICT) {
+ Status = STATUS_INVALID_FILE_FOR_SECTION;
+ }
+ goto NeImage;
+ }
+ NtHeader = (PIMAGE_NT_HEADERS)((ULONG)ExtendedHeader +
+ BYTE_OFFSET((ULONG)DosHeader->e_lfanew));
+ NtHeaderSize = MM_MAXIMUM_IMAGE_HEADER -
+ (ULONG)(BYTE_OFFSET((ULONG)DosHeader->e_lfanew));
+
+ } else {
+ NtHeader = (PIMAGE_NT_HEADERS)((ULONG)DosHeader +
+ (ULONG)DosHeader->e_lfanew);
+ NtHeaderSize = PAGE_SIZE - (ULONG)DosHeader->e_lfanew;
+ }
+
+ //
+ // Check to see if this is an NT image or a DOS or OS/2 image.
+ //
+
+ Status = MiVerifyImageHeader (NtHeader, DosHeader, NtHeaderSize);
+ if (Status != STATUS_SUCCESS) {
+ goto NeImage;
+ }
+
+ ImageAlignment = NtHeader->OptionalHeader.SectionAlignment;
+ FileAlignment = NtHeader->OptionalHeader.FileAlignment - 1;
+ NumberOfSubsections = NtHeader->FileHeader.NumberOfSections;
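+
+    //
+    // Note that FileAlignment is kept as a mask (alignment - 1) so a raw
+    // file offset can later be rounded up with (offset + mask) & ~mask;
+    // for example, with a 0x200-byte file alignment, offset 0x301 rounds
+    // up to 0x400.
+    //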
+
+ if (ImageAlignment < PAGE_SIZE) {
+
+ //
+ // The image alignment is less than the page size,
+ // map the image with a single subsection.
+ //
+
+ ControlArea = ExAllocatePoolWithTag (NonPagedPool,
+ (ULONG)(sizeof(CONTROL_AREA) + (sizeof(SUBSECTION))),
+ MMCONTROL);
+ } else {
+
+ //
+ // Allocate a control area and a subsection for each section
+ // header plus one for the image header which has no section.
+ //
+
+ ControlArea = ExAllocatePoolWithTag(NonPagedPool,
+ (ULONG)(sizeof(CONTROL_AREA) +
+ (sizeof(SUBSECTION) *
+ (NumberOfSubsections + 1))),
+ 'iCmM');
+ }
+
+ if (ControlArea == NULL) {
+
+ //
+ // The requested pool could not be allocated.
+ //
+
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto NeImage;
+ }
+
+ //
+ // Zero the control area and the FIRST subsection.
+ //
+
+ RtlZeroMemory (ControlArea,
+ sizeof(CONTROL_AREA) + sizeof(SUBSECTION));
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ NumberOfPtes = BYTES_TO_PAGES (NtHeader->OptionalHeader.SizeOfImage);
+
+ SizeOfSegment = sizeof(SEGMENT) + sizeof(MMPTE) * (NumberOfPtes - 1);
+
+ NewSegment = ExAllocatePoolWithTag (PagedPool,
+ SizeOfSegment,
+ MMSECT);
+
+ if (NewSegment == NULL) {
+
+ //
+ // The requested pool could not be allocated.
+ //
+
+ ExFreePool (ControlArea);
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto NeImage;
+ }
+ *Segment = NewSegment;
+ RtlZeroMemory (NewSegment, sizeof(SEGMENT));
+
+ //
+ // Align the prototype PTEs on the proper boundary.
+ //
+
+ PointerPte = &NewSegment->ThePtes[0];
+ i = ((ULONG)PointerPte >> PTE_SHIFT) &
+ ((MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE) - 1);
+
+ if (i != 0) {
+ i = (MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE) - i;
+ }
+
+ NewSegment->PrototypePte = &NewSegment->ThePtes[i];
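+
+    //
+    // The computation above advances the array start so that its address,
+    // taken as a PTE index (address >> PTE_SHIFT), is a multiple of
+    // MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE; this is the alignment which
+    // lets a prototype PTE address be encoded into, and recovered from,
+    // a PTE.
+    //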
+
+ NewSegment->ControlArea = ControlArea;
+ NewSegment->TotalNumberOfPtes = NumberOfPtes;
+ NewSegment->NonExtendedPtes = NumberOfPtes;
+ NewSegment->SizeOfSegment.LowPart = NumberOfPtes * PAGE_SIZE;
+
+ NewSegment->ImageInformation.TransferAddress =
+ (PVOID)(NtHeader->OptionalHeader.ImageBase +
+ NtHeader->OptionalHeader.AddressOfEntryPoint);
+ NewSegment->ImageInformation.MaximumStackSize =
+ NtHeader->OptionalHeader.SizeOfStackReserve;
+ NewSegment->ImageInformation.CommittedStackSize =
+ NtHeader->OptionalHeader.SizeOfStackCommit;
+ NewSegment->ImageInformation.SubSystemType =
+ NtHeader->OptionalHeader.Subsystem;
+ NewSegment->ImageInformation.SubSystemMajorVersion = (USHORT)(NtHeader->OptionalHeader.MajorSubsystemVersion);
+ NewSegment->ImageInformation.SubSystemMinorVersion = (USHORT)(NtHeader->OptionalHeader.MinorSubsystemVersion);
+ NewSegment->ImageInformation.ImageCharacteristics =
+ NtHeader->FileHeader.Characteristics;
+ NewSegment->ImageInformation.DllCharacteristics =
+ NtHeader->OptionalHeader.DllCharacteristics;
+ NewSegment->ImageInformation.Machine =
+ NtHeader->FileHeader.Machine;
+ NewSegment->ImageInformation.ImageContainsCode =
+ (BOOLEAN)(NtHeader->OptionalHeader.SizeOfCode != 0);
+ NewSegment->ImageInformation.Spare1 = FALSE;
+ NewSegment->ImageInformation.LoaderFlags = 0;
+ NewSegment->ImageInformation.Reserved[0] = 0;
+ NewSegment->ImageInformation.Reserved[1] = 0;
+
+ ControlArea->Segment = NewSegment;
+ ControlArea->NumberOfSectionReferences = 1;
+ ControlArea->NumberOfUserReferences = 1;
+ ControlArea->u.Flags.BeingCreated = 1;
+
+ if (ImageAlignment < PAGE_SIZE) {
+
+ //
+ // Image alignment is less than a page, the number
+ // of subsections is 1.
+ //
+
+ ControlArea->NumberOfSubsections = 1;
+ } else {
+ ControlArea->NumberOfSubsections = (USHORT)NumberOfSubsections;
+ }
+
+ ControlArea->u.Flags.Image = 1;
+ ControlArea->u.Flags.File = 1;
+
+ if ((FILE_FLOPPY_DISKETTE & File->DeviceObject->Characteristics) ||
+ ((NtHeader->FileHeader.Characteristics &
+ IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP) &&
+ (FILE_REMOVABLE_MEDIA & File->DeviceObject->Characteristics)) ||
+ ((NtHeader->FileHeader.Characteristics &
+ IMAGE_FILE_NET_RUN_FROM_SWAP) &&
+ (FILE_REMOTE_DEVICE & File->DeviceObject->Characteristics))) {
+
+ //
+ // This file resides on a floppy disk or a removable media or
+ // network with with flags set indicating it should be copied
+ // to the paging file.
+ //
+
+ ControlArea->u.Flags.FloppyMedia = 1;
+ }
+
+ if (FILE_REMOTE_DEVICE & File->DeviceObject->Characteristics) {
+
+ //
+ // This file resides on a redirected drive.
+ //
+
+ ControlArea->u.Flags.Networked = 1;
+ }
+
+ ControlArea->FilePointer = File;
+
+ //
+ // Build the subsection and prototype PTEs for the image header.
+ //
+
+ Subsection->ControlArea = ControlArea;
+ NextVa = NtHeader->OptionalHeader.ImageBase;
+
+ if ((NextVa & (X64K - 1)) != 0) {
+
+ //
+ // Image header is not aligned on a 64k boundary.
+ //
+
+ goto BadPeImageSegment;
+ }
+
+ NewSegment->BasedAddress = (PVOID)NextVa;
+ Subsection->PtesInSubsection = MI_ROUND_TO_SIZE (
+ NtHeader->OptionalHeader.SizeOfHeaders,
+ ImageAlignment
+ ) >> PAGE_SHIFT;
+
+ PointerPte = NewSegment->PrototypePte;
+ Subsection->SubsectionBase = PointerPte;
+
+ TempPte.u.Long = (ULONG)MiGetSubsectionAddressForPte(Subsection);
+ TempPte.u.Soft.Prototype = 1;
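+
+    //
+    // This is a PTE in subsection format: the encoded subsection address
+    // plus the Prototype bit. The page fault handler later decodes the
+    // subsection address to locate the backing file range.
+    //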
+
+ NewSegment->SegmentPteTemplate = TempPte;
+ SectorOffset = 0;
+
+ if (ImageAlignment < PAGE_SIZE) {
+
+ //
+ // Aligned on less than a page size boundary.
+ // Build a single subsection to refer to the image.
+ //
+
+ PointerPte = NewSegment->PrototypePte;
+
+ Subsection->PtesInSubsection = NumberOfPtes;
+ Subsection->EndingSector =
+ (ULONG)(EndOfFile.QuadPart >> MMSECTOR_SHIFT);
+ Subsection->u.SubsectionFlags.SectorEndOffset =
+ EndOfFile.LowPart & MMSECTOR_MASK;
+ Subsection->u.SubsectionFlags.Protection = MM_EXECUTE_WRITECOPY;
+
+ //
+        // Set all the PTEs to the execute-writecopy protection.
+ // The section will control access to these and the segment
+ // must provide a method to allow other users to map the file
+ // for various protections.
+ //
+
+ TempPte.u.Soft.Protection = MM_EXECUTE_WRITECOPY;
+
+ NewSegment->SegmentPteTemplate = TempPte;
+
+
+#if defined (_ALPHA_)
+ //
+ // Invalid image alignments are supported for cross platform
+    // emulation. Only Alpha requires extra handling because its page
+    // size (8k) is larger than that of other platforms (4k).
+ //
+
+
+ if (KeGetPreviousMode() != KernelMode &&
+ (NtHeader->FileHeader.Machine < USER_SHARED_DATA->ImageNumberLow ||
+ NtHeader->FileHeader.Machine > USER_SHARED_DATA->ImageNumberHigh))
+ {
+
+ InvalidAlignmentAllowed = TRUE;
+
+ TempPteDemandZero.u.Long = 0;
+ TempPteDemandZero.u.Soft.Protection = MM_EXECUTE_WRITECOPY;
+ SectorOffset = 0;
+
+ for (i = 0; i < NumberOfPtes; i++) {
+
+ //
+ // Set prototype PTEs.
+ //
+
+ if (SectorOffset < EndOfFile.LowPart) {
+
+ //
+ // Data resides on the disk, refer to the control section.
+ //
+
+ *PointerPte = TempPte;
+
+ } else {
+
+ //
+ // Data does not reside on the disk, use Demand zero pages.
+ //
+
+ *PointerPte = TempPteDemandZero;
+ }
+
+ SectorOffset += PAGE_SIZE;
+ PointerPte += 1;
+ }
+
+ } else
+#endif
+ {
+
+ for (i = 0; i < NumberOfPtes; i++) {
+
+ //
+ // Set all the prototype PTEs to refer to the control section.
+ //
+
+ *PointerPte = TempPte;
+ PointerPte += 1;
+ }
+ }
+
+ NewSegment->ImageCommitment = NumberOfPtes;
+
+
+ //
+ // Indicate alignment is less than a page.
+ //
+
+ TempPte.u.Long = 0;
+
+ } else {
+
+ //
+        // Alignment is PAGE_SIZE or greater.
+ //
+
+ if (Subsection->PtesInSubsection > NumberOfPtes) {
+
+ //
+ // Inconsistent image, size does not agree with header.
+ //
+
+ goto BadPeImageSegment;
+ }
+ NumberOfPtes -= Subsection->PtesInSubsection;
+
+ Subsection->EndingSector =
+ NtHeader->OptionalHeader.SizeOfHeaders >> MMSECTOR_SHIFT;
+ Subsection->u.SubsectionFlags.SectorEndOffset =
+ NtHeader->OptionalHeader.SizeOfHeaders & MMSECTOR_MASK;
+ Subsection->u.SubsectionFlags.ReadOnly = 1;
+ Subsection->u.SubsectionFlags.CopyOnWrite = 1;
+ Subsection->u.SubsectionFlags.Protection = MM_READONLY;
+
+ TempPte.u.Soft.Protection = MM_READONLY;
+ NewSegment->SegmentPteTemplate = TempPte;
+
+ for (i = 0; i < Subsection->PtesInSubsection; i++) {
+
+ //
+ // Set all the prototype PTEs to refer to the control section.
+ //
+
+ if (SectorOffset < NtHeader->OptionalHeader.SizeOfHeaders) {
+ *PointerPte = TempPte;
+ } else {
+ *PointerPte = ZeroPte;
+ }
+ SectorOffset += PAGE_SIZE;
+ PointerPte += 1;
+ NextVa += PAGE_SIZE;
+ }
+ }
+
+ //
+ // Build the next subsections.
+ //
+
+ PreferredImageBase = NtHeader->OptionalHeader.ImageBase;
+
+ //
+ // At this point the object table is read in (if it was not
+ // already read in) and may displace the image header.
+ //
+
+ SectionTableEntry = NULL;
+ OffsetToSectionTable = sizeof(ULONG) +
+ sizeof(IMAGE_FILE_HEADER) +
+ NtHeader->FileHeader.SizeOfOptionalHeader;
+
+ if ((BYTE_OFFSET(NtHeader) + OffsetToSectionTable +
+ ((NumberOfSubsections + 1) *
+ sizeof (IMAGE_SECTION_HEADER))) <= PAGE_SIZE) {
+
+ //
+ // Section tables are within the header which was read.
+ //
+
+ SectionTableEntry = (PIMAGE_SECTION_HEADER)((ULONG)NtHeader +
+ OffsetToSectionTable);
+
+ } else {
+
+ //
+ // Has an extended header been read in and are the object
+ // tables resident?
+ //
+
+ if (ExtendedHeader != NULL) {
+
+ SectionTableEntry = (PIMAGE_SECTION_HEADER)
+ ((PUCHAR)NtHeader + OffsetToSectionTable);
+
+ //
+ // Is the whole range of object tables mapped by the
+ // extended header?
+ //
+
+ if ((((ULONG)SectionTableEntry +
+ ((NumberOfSubsections + 1) *
+ sizeof (IMAGE_SECTION_HEADER))) -
+ (ULONG)ExtendedHeader) >
+ MM_MAXIMUM_IMAGE_HEADER) {
+ SectionTableEntry = NULL;
+
+ }
+ }
+ }
+
+ if (SectionTableEntry == NULL) {
+
+ //
+ // The section table entries are not in the same
+ // pages as the other data already read in. Read in
+ // the object table entries.
+ //
+
+ if (ExtendedHeader == NULL) {
+ ExtendedHeader = ExAllocatePoolWithTag (NonPagedPool,
+ MM_MAXIMUM_IMAGE_HEADER,
+ MMTEMPORARY);
+ if (ExtendedHeader == NULL) {
+ ExFreePool (NewSegment);
+ ExFreePool (ControlArea);
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto NeImage;
+ }
+
+ //
+ // Build an MDL for the operation.
+ //
+
+ (VOID) MmCreateMdl( Mdl, ExtendedHeader, MM_MAXIMUM_IMAGE_HEADER);
+
+ MmBuildMdlForNonPagedPool (Mdl);
+ }
+
+ StartingOffset.LowPart = (ULONG)PAGE_ALIGN (
+ (ULONG)DosHeader->e_lfanew +
+ OffsetToSectionTable);
+
+ SectionTableEntry = (PIMAGE_SECTION_HEADER)((ULONG)ExtendedHeader +
+ BYTE_OFFSET((ULONG)DosHeader->e_lfanew +
+ OffsetToSectionTable));
+
+ KeClearEvent (InPageEvent);
+ Status = IoPageRead (File,
+ Mdl,
+ &StartingOffset,
+ InPageEvent,
+ &IoStatus
+ );
+
+ if (Status == STATUS_PENDING) {
+ KeWaitForSingleObject( InPageEvent,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ if ((!NT_SUCCESS(Status)) || (!NT_SUCCESS(IoStatus.Status))) {
+ if (Status != STATUS_FILE_LOCK_CONFLICT) {
+ Status = STATUS_INVALID_FILE_FOR_SECTION;
+ }
+ ExFreePool (NewSegment);
+ ExFreePool (ControlArea);
+ goto NeImage;
+ }
+
+ //
+ // From this point on NtHeader is only valid if it
+ // was in the first 4k of the image, otherwise reading in
+ // the object tables wiped it out.
+ //
+
+ }
+
+
+ if (TempPte.u.Long == 0) {
+
+        //
+        // The image header is no longer valid; TempPte == 0 indicates
+        // that this image alignment is less than a PAGE_SIZE.
+        //
+        // Loop through all sections and make sure there is no
+        // uninitialized data.
+        //
+
+ Status = STATUS_SUCCESS;
+
+ while (NumberOfSubsections > 0) {
+ if (SectionTableEntry->Misc.VirtualSize == 0) {
+ SectionVirtualSize = SectionTableEntry->SizeOfRawData;
+ } else {
+ SectionVirtualSize = SectionTableEntry->Misc.VirtualSize;
+ }
+
+            //
+            // If the section goes past the end of the file, return an error.
+            //
+ if ((SectionTableEntry->SizeOfRawData +
+ SectionTableEntry->PointerToRawData) >
+ EndOfFile.LowPart) {
+
+ KdPrint(("MMCREASECT: invalid section/file size %Z\n",
+ &File->FileName));
+
+ Status = STATUS_INVALID_IMAGE_FORMAT;
+ break;
+ }
+
+            //
+            // If the virtual size and address do not match the raw data
+            // and invalid alignments are not allowed, return an error.
+            //
+ if (((SectionTableEntry->PointerToRawData !=
+ SectionTableEntry->VirtualAddress))
+ ||
+ (SectionVirtualSize > SectionTableEntry->SizeOfRawData)) {
+
+#if defined (_ALPHA_)
+ if (!InvalidAlignmentAllowed)
+#endif
+ {
+ KdPrint(("MMCREASECT: invalid BSS/Trailingzero %Z\n",
+ &File->FileName));
+
+ Status = STATUS_INVALID_IMAGE_FORMAT;
+ break;
+ }
+ }
+
+
+ SectionTableEntry += 1;
+ NumberOfSubsections -= 1;
+ }
+
+
+ if (!NT_SUCCESS(Status)) {
+ ExFreePool (NewSegment);
+ ExFreePool (ControlArea);
+ goto NeImage;
+ }
+
+
+ goto PeReturnSuccess;
+ }
+
+ while (NumberOfSubsections > 0) {
+
+ //
+ // Handle case where virtual size is 0.
+ //
+
+ if (SectionTableEntry->Misc.VirtualSize == 0) {
+ SectionVirtualSize = SectionTableEntry->SizeOfRawData;
+ } else {
+ SectionVirtualSize = SectionTableEntry->Misc.VirtualSize;
+ }
+
+ //
+        // Fix for a Borland linker problem: SizeOfRawData can be
+        // zero while PointerToRawData is not. In that case, set
+        // PointerToRawData to zero.
+ //
+
+ if (SectionTableEntry->SizeOfRawData == 0) {
+ SectionTableEntry->PointerToRawData = 0;
+ }
+
+ Subsection += 1;
+ Subsection->ControlArea = ControlArea;
+ Subsection->NextSubsection = (PSUBSECTION)NULL;
+ Subsection->UnusedPtes = 0;
+
+ if ((NextVa !=
+ (PreferredImageBase + SectionTableEntry->VirtualAddress)) ||
+ (SectionVirtualSize == 0)) {
+
+ //
+ // The specified virtual address does not align
+ // with the next prototype PTE.
+ //
+
+ goto BadPeImageSegment;
+ }
+
+ Subsection->PtesInSubsection =
+ MI_ROUND_TO_SIZE (SectionVirtualSize, ImageAlignment) >> PAGE_SHIFT;
+
+ if (Subsection->PtesInSubsection > NumberOfPtes) {
+
+ //
+ // Inconsistent image, size does not agree with object tables.
+ //
+
+ goto BadPeImageSegment;
+ }
+ NumberOfPtes -= Subsection->PtesInSubsection;
+
+ Subsection->u.LongFlags = 0;
+ Subsection->StartingSector =
+ SectionTableEntry->PointerToRawData >> MMSECTOR_SHIFT;
+
+ //
+        // Align the ending sector on a file-alignment boundary.
+ //
+
+ Subsection->EndingSector =
+ (SectionTableEntry->PointerToRawData +
+ SectionTableEntry->SizeOfRawData +
+ FileAlignment) & ~FileAlignment;
+
+ Subsection->u.SubsectionFlags.SectorEndOffset =
+ Subsection->EndingSector & MMSECTOR_MASK;
+ Subsection->EndingSector = Subsection->EndingSector >> MMSECTOR_SHIFT;
+
+ Subsection->SubsectionBase = PointerPte;
+
+ //
+ // Build both a demand zero PTE and a PTE pointing to the
+ // subsection.
+ //
+ TempPte.u.Long = 0;
+ TempPteDemandZero.u.Long = 0;
+
+ TempPte.u.Long = (ULONG)MiGetSubsectionAddressForPte(Subsection);
+ TempPte.u.Soft.Prototype = 1;
+ ImageFileSize = SectionTableEntry->PointerToRawData +
+ SectionTableEntry->SizeOfRawData;
+
+ TempPte.u.Soft.Protection =
+ MiGetImageProtection (SectionTableEntry->Characteristics);
+ TempPteDemandZero.u.Soft.Protection = TempPte.u.Soft.Protection;
+
+ if (SectionTableEntry->PointerToRawData == 0) {
+ TempPte = TempPteDemandZero;
+ }
+
+ Subsection->u.SubsectionFlags.ReadOnly = 1;
+ Subsection->u.SubsectionFlags.CopyOnWrite = 1;
+ Subsection->u.SubsectionFlags.Protection = TempPte.u.Soft.Protection;
+
+ if (TempPte.u.Soft.Protection & MM_PROTECTION_WRITE_MASK) {
+ if ((TempPte.u.Soft.Protection & MM_COPY_ON_WRITE_MASK)
+ == MM_COPY_ON_WRITE_MASK) {
+
+ //
+ // This page is copy on write, charge ImageCommitment
+ // for all pages in this subsection.
+ //
+
+ ImageCommit = TRUE;
+ } else {
+
+ //
+ // This page is write shared, charge commitment when
+ // the mapping completes.
+ //
+
+ SectionCommit = TRUE;
+ Subsection->u.SubsectionFlags.GlobalMemory = 1;
+ ControlArea->u.Flags.GlobalMemory = 1;
+ }
+ } else {
+
+ //
+ // Not writable, don't charge commitment at all.
+ //
+
+ ImageCommit = FALSE;
+ SectionCommit = FALSE;
+ }
+
+ NewSegment->SegmentPteTemplate = TempPte;
+ SectorOffset = 0;
+
+ for (i = 0; i < Subsection->PtesInSubsection; i++) {
+
+ //
+ // Set all the prototype PTEs to refer to the control section.
+ //
+
+ if (SectorOffset < SectionVirtualSize) {
+
+ //
+            // Make the PTE accessible.
+ //
+
+ if (SectionCommit) {
+ NewSegment->NumberOfCommittedPages += 1;
+ }
+ if (ImageCommit) {
+ NewSegment->ImageCommitment += 1;
+ }
+
+ if (SectorOffset < SectionTableEntry->SizeOfRawData) {
+
+ //
+ // Data resides on the disk, use the subsection format
+ // pte.
+ //
+
+ *PointerPte = TempPte;
+ } else {
+
+ //
+ // Demand zero pages.
+ //
+
+ *PointerPte = TempPteDemandZero;
+ }
+ } else {
+
+ //
+ // No access pages.
+ //
+
+ *PointerPte = ZeroPte;
+ }
+ SectorOffset += PAGE_SIZE;
+ PointerPte += 1;
+ NextVa += PAGE_SIZE;
+ }
+
+ SectionTableEntry += 1;
+ NumberOfSubsections -= 1;
+ }
+
+ //
+ // If the file size is not as big as the image claimed to be,
+ // return an error.
+ //
+
+ if (ImageFileSize > EndOfFile.LowPart) {
+
+ //
+ // Invalid image size.
+ //
+
+ KdPrint(("MMCREASECT: invalid image size - file size %lx - image size %lx\n %Z\n",
+ EndOfFile.LowPart, ImageFileSize, &File->FileName));
+ goto BadPeImageSegment;
+ }
+
+ //
+ // The total number of PTEs was decremented as sections were built,
+    // make sure that less than one image alignment's worth remains.
+ //
+
+ if (NumberOfPtes >= (ImageAlignment >> PAGE_SHIFT)) {
+
+ //
+ // Inconsistent image, size does not agree with object tables.
+ //
+
+ KdPrint(("MMCREASECT: invalid image - PTE left %lx\n image name %Z\n",
+ NumberOfPtes, &File->FileName));
+
+ goto BadPeImageSegment;
+ }
+
+ //
+ // Set any remaining PTEs to no access.
+ //
+
+ while (NumberOfPtes != 0) {
+ *PointerPte = ZeroPte;
+ PointerPte += 1;
+ NumberOfPtes -= 1;
+ }
+
+ //
+ // Turn the image header page into a transition page within the
+ // prototype PTEs.
+ //
+
+ if ((ExtendedHeader == NULL) &&
+ (NtHeader->OptionalHeader.SizeOfHeaders < PAGE_SIZE)) {
+
+ //
+ // Zero remaining portion of header.
+ //
+
+ RtlZeroMemory ((PVOID)((ULONG)Base +
+ NtHeader->OptionalHeader.SizeOfHeaders),
+ PAGE_SIZE - NtHeader->OptionalHeader.SizeOfHeaders);
+ }
+
+
+ if (NewSegment->NumberOfCommittedPages != 0) {
+ Status = STATUS_SUCCESS;
+
+ //
+ // Commit the pages for the image section.
+ //
+
+ try {
+
+ MiChargeCommitment (NewSegment->NumberOfCommittedPages, NULL);
+ MmSharedCommit += NewSegment->NumberOfCommittedPages;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Status = GetExceptionCode();
+ }
+
+ if (Status != STATUS_SUCCESS) {
+ ExFreePool (NewSegment);
+ ExFreePool (ControlArea);
+ goto NeImage;
+ }
+ }
+
+PeReturnSuccess:
+
+ MiUnmapImageHeaderInHyperSpace ();
+
+ //
+ // Set the PFN database entry for this page to look like a transition
+ // page.
+ //
+
+ PointerPte = NewSegment->PrototypePte;
+
+ MiUpdateImageHeaderPage (PointerPte, PageFrameNumber, ControlArea);
+ if (ExtendedHeader != NULL) {
+ ExFreePool (ExtendedHeader);
+ }
+ ExFreePool (InPageEvent);
+
+ return STATUS_SUCCESS;
+
+
+ //
+ // Error returns from image verification.
+ //
+
+BadPeImageSegment:
+
+ ExFreePool (NewSegment);
+ ExFreePool (ControlArea);
+//BadPeImage:
+ Status = STATUS_INVALID_IMAGE_FORMAT;
+NeImage:
+ MiUnmapImageHeaderInHyperSpace ();
+BadSection:
+ MiRemoveImageHeaderPage(PageFrameNumber);
+ if (ExtendedHeader != NULL) {
+ ExFreePool (ExtendedHeader);
+ }
+ ExFreePool (InPageEvent);
+ return Status;
+}
+
+
+BOOLEAN
+MiCheckDosCalls (
+ IN PIMAGE_OS2_HEADER Os2Header,
+ IN ULONG HeaderSize
+ )
+
+/*++
+
+Routine Description:
+
+    This function scans the module reference table of an OS/2 (NE
+    format) image for an import of "DOSCALLS", which indicates that the
+    image is an OS/2 application rather than a Win-16 application.
+
+Arguments:
+
+    Os2Header - Supplies a pointer to the OS/2 (NE) image header.
+
+    HeaderSize - Supplies the number of header bytes which are valid.
+
+Return Value:
+
+    TRUE if the image imports DOSCALLS, FALSE otherwise.
+
+--*/
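+
+//
+// The NE structures walked below have this layout (standard NE format;
+// offsets are relative to the start of the OS/2 header):
+//
+//     ne_modtab -> module reference table: one USHORT per module, each
+//                  an offset into the imported-names table.
+//     ne_imptab -> imported-names table: counted strings of the form
+//                  [length byte][characters].
+//
+// An entry of length 8 matching "DOSCALLS" identifies an OS/2 import.
+//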
+{
+ PUCHAR ImportTable;
+ UCHAR EntrySize;
+ USHORT ModuleCount,ModuleSize,i;
+ PUSHORT ModuleTable;
+
+ PAGED_CODE();
+
+    // If there are no modules to check, return immediately.
+ if ((ModuleCount = Os2Header->ne_cmod) == 0)
+ return FALSE;
+
+    // Exe headers are notorious and sometimes have junk values for offsets
+    // in the import table and module table. We need to guard against any
+    // such bad offset by wrapping the scan in an exception handler.
+
+ try {
+        // Find out where the module reference table is. The module table
+        // has two bytes for each entry; these two bytes give that entry's
+        // offset in the import table.
+
+ ModuleTable = (PUSHORT)((ULONG)Os2Header + (ULONG)Os2Header->ne_modtab);
+
+        // Make sure that the complete module table is within our pages.
+        // Note that each module table entry is 2 bytes long.
+ if (((ULONG)Os2Header->ne_modtab + (ModuleCount*2)) > HeaderSize)
+ return FALSE;
+
+        // Now search the individual entries for DOSCALLS.
+ for (i=0 ; i<ModuleCount ; i++) {
+
+ ModuleSize = *((UNALIGNED USHORT *)ModuleTable);
+
+            // The import table has a count byte followed by the string,
+            // where the count is the string length.
+ ImportTable = (PUCHAR)((ULONG)Os2Header +
+ (ULONG)Os2Header->ne_imptab + (ULONG)ModuleSize);
+
+            // Make sure the offset is within our valid range.
+ if (((ULONG)Os2Header->ne_imptab + (ULONG)ModuleSize)
+ > HeaderSize)
+ return FALSE;
+
+ EntrySize = *ImportTable++;
+
+ // 0 is a bad size, bail out.
+ if (EntrySize == 0)
+ return FALSE;
+
+ // make sure the offset is within our valid range.
+ if (((ULONG)Os2Header->ne_imptab + (ULONG)ModuleSize +
+ (ULONG)EntrySize) > HeaderSize)
+ return FALSE;
+
+            // If the size matches, compare against DOSCALLS.
+ if (EntrySize == 8) {
+ if (RtlEqualMemory (ImportTable,
+ "DOSCALLS",
+ 8) ) {
+ return TRUE;
+ }
+ }
+ // move on to next module table entry. Each entry is 2 bytes.
+ ModuleTable = (PUSHORT)((ULONG)ModuleTable + 2);
+ }
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ ASSERT (FALSE);
+ return FALSE;
+ }
+
+ return FALSE;
+}
+
+
+
+NTSTATUS
+MiVerifyImageHeader (
+ IN PIMAGE_NT_HEADERS NtHeader,
+ IN PIMAGE_DOS_HEADER DosHeader,
+ IN ULONG NtHeaderSize
+ )
+
+/*++
+
+Routine Description:
+
+    This function checks the image header for validity, distinguishing
+    valid PE images from Win16, OS/2 (NE and LE), DOS and DOS-extender
+    images so that the appropriate status can be returned for the
+    proper subsystem.
+
+Arguments:
+
+    NtHeader - Supplies the mapped image header to verify.
+
+    DosHeader - Supplies the mapped DOS header of the image.
+
+    NtHeaderSize - Supplies the number of bytes of the header which are
+                   valid to examine.
+
+Return Value:
+
+    STATUS_SUCCESS if the image is a valid PE image, otherwise a status
+    (e.g., STATUS_INVALID_IMAGE_WIN_16, STATUS_INVALID_IMAGE_PROTECT,
+    STATUS_INVALID_IMAGE_NE_FORMAT or STATUS_INVALID_IMAGE_FORMAT)
+    indicating which subsystem, if any, should handle the image.
+
+--*/
+
+{
+ PCONFIGPHARLAP PharLapConfigured;
+ PUCHAR pb;
+ LONG pResTableAddress;
+
+ PAGED_CODE();
+
+ if (NtHeader->Signature != IMAGE_NT_SIGNATURE) {
+ if ((USHORT)NtHeader->Signature == (USHORT)IMAGE_OS2_SIGNATURE) {
+
+ //
+ // Check to see if this is a win-16 image.
+ //
+
+ if ((!MiCheckDosCalls ((PIMAGE_OS2_HEADER)NtHeader, NtHeaderSize)) &&
+ ((((PIMAGE_OS2_HEADER)NtHeader)->ne_exetyp == 2)
+ ||
+ ((((PIMAGE_OS2_HEADER)NtHeader)->ne_exetyp == 0) &&
+ (((((PIMAGE_OS2_HEADER)NtHeader)->ne_expver & 0xff00) ==
+ 0x200) ||
+ ((((PIMAGE_OS2_HEADER)NtHeader)->ne_expver & 0xff00) ==
+ 0x300))))) {
+
+ //
+ // This is a win-16 image.
+ //
+
+ return STATUS_INVALID_IMAGE_WIN_16;
+ }
+
+            // The following OS/2 header types go to NTDOS:
+            //
+            //  - exetype == 5 means the binary is for DOS 4.0,
+            //    e.g. Borland DOS extender types
+            //
+            //  - OS/2 apps which have no import table entries
+            //    cannot be meant for os2ss,
+            //    e.g. QuickC for DOS binaries
+            //
+            //  - "old" Borland Dosx (BC++ 3.x, Paradox 4.x):
+            //    exe type == 1,
+            //    DosHeader->e_cs * 16 + DosHeader->e_ip + 0x200 - 10
+            //    contains the string " mode EXE$",
+            //    but the import table is empty, so we make no special check
+            //
+
+ if (((PIMAGE_OS2_HEADER)NtHeader)->ne_exetyp == 5 ||
+ ((PIMAGE_OS2_HEADER)NtHeader)->ne_enttab ==
+ ((PIMAGE_OS2_HEADER)NtHeader)->ne_imptab )
+ {
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+
+
+ //
+ // Borland Dosx types: exe type 1
+ //
+ // - "new" Borland Dosx BP7.0
+ // exe type == 1
+ // DosHeader + 0x200 contains the string "16STUB"
+ // 0x200 happens to be e_parhdr*16
+ //
+
+ if (((PIMAGE_OS2_HEADER)NtHeader)->ne_exetyp == 1 &&
+ RtlEqualMemory((PUCHAR)DosHeader + 0x200, "16STUB", 6) )
+ {
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+
+ //
+ // Check for PharLap extended header which we run as a dos app.
+ // The PharLap config block is pointed to by the SizeofHeader
+ // field in the DosHdr.
+ // The following algorithm for detecting a pharlap exe
+ // was recommended by PharLap Software Inc.
+ //
+
+ PharLapConfigured =(PCONFIGPHARLAP) ((ULONG)DosHeader +
+ ((ULONG)DosHeader->e_cparhdr << 4));
+
+ if ((ULONG)PharLapConfigured <
+ (ULONG)DosHeader + PAGE_SIZE - sizeof(CONFIGPHARLAP)) {
+ if (RtlEqualMemory(&PharLapConfigured->uchCopyRight[0x18],
+ "Phar Lap Software, Inc.", 24) &&
+ (PharLapConfigured->usSign == 0x4b50 || // stub loader type 2
+ PharLapConfigured->usSign == 0x4f50 || // bindable 286|DosExtender
+ PharLapConfigured->usSign == 0x5650 )) // bindable 286|DosExtender (Adv)
+ {
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+ }
+
+
+
+ //
+ // Check for Rational extended header which we run as a dos app.
+ // We look for the rational copyright at:
+ // wCopyRight = *(DosHeader->e_cparhdr*16 + 30h)
+ // pCopyRight = wCopyRight + DosHeader->e_cparhdr*16
+ // "Copyright (C) Rational Systems, Inc."
+ //
+
+ pb = (PUCHAR)((ULONG)DosHeader + ((ULONG)DosHeader->e_cparhdr << 4));
+
+ if ((ULONG)pb < (ULONG)DosHeader + PAGE_SIZE - 0x30 - sizeof(USHORT)) {
+ pb += *(PUSHORT)(pb + 0x30);
+ if ( (ULONG)pb < (ULONG)DosHeader + PAGE_SIZE - 36 &&
+ RtlEqualMemory(pb,
+ "Copyright (C) Rational Systems, Inc.",
+ 36) )
+ {
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+ }
+
+ //
+            // Check for the Lotus 123 family of applications. Starting
+            // with 123 3.0 (through the recently shipped 123 3.4), every
+            // exe is bound but is meant for DOS. This can be checked via
+            // a string signature in the extended header:
+            // <len byte>"1-2-3 Preloader" is the string at the
+            // ne_nrestab offset.
+            //
+
+ pResTableAddress = ((PIMAGE_OS2_HEADER)NtHeader)->ne_nrestab;
+ if (pResTableAddress > DosHeader->e_lfanew &&
+ ((ULONG)((pResTableAddress+16) - DosHeader->e_lfanew) <
+ NtHeaderSize) &&
+ RtlEqualMemory(
+ (PUCHAR)((ULONG)NtHeader + 1 +
+ (ULONG)(pResTableAddress - DosHeader->e_lfanew)),
+ "1-2-3 Preloader",
+ 15) ) {
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+
+ return STATUS_INVALID_IMAGE_NE_FORMAT;
+ }
+
+ if ((USHORT)NtHeader->Signature == (USHORT)IMAGE_OS2_SIGNATURE_LE) {
+
+ //
+            // This is an LE (OS/2) image. We don't support it, so give it
+            // to the DOS subsystem. There are cases (Rbase.exe) which have
+            // an LE header but are actually supposed to run under DOS.
+            // When we do support the LE format, some work needs to be done
+            // here to decide whether to give it to the VDM or to OS/2.
+ //
+
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+
+ if ((NtHeader->FileHeader.Machine == 0) &&
+ (NtHeader->FileHeader.SizeOfOptionalHeader == 0)) {
+
+ //
+        // This is a bogus DOS app which has a 32-bit portion
+        // masquerading as a PE image.
+ //
+
+ return STATUS_INVALID_IMAGE_PROTECT;
+ }
+
+ if (!(NtHeader->FileHeader.Characteristics & IMAGE_FILE_EXECUTABLE_IMAGE)) {
+ return STATUS_INVALID_IMAGE_FORMAT;
+ }
+
+#ifdef i386
+
+ //
+ // Make sure the image header is aligned on a Long word boundary.
+ //
+
+ if (((ULONG)NtHeader & 3) != 0) {
+ return STATUS_INVALID_IMAGE_FORMAT;
+ }
+#endif
+
+ //
+    // The file alignment must be a multiple of 512 (or equal to the
+    // section alignment) and a power of 2.
+ //
+
+ if (((NtHeader->OptionalHeader.FileAlignment & 511) != 0) &&
+ (NtHeader->OptionalHeader.FileAlignment !=
+ NtHeader->OptionalHeader.SectionAlignment)) {
+ return STATUS_INVALID_IMAGE_FORMAT;
+ }
+
+ if (((NtHeader->OptionalHeader.FileAlignment - 1) &
+ NtHeader->OptionalHeader.FileAlignment) != 0) {
+ return STATUS_INVALID_IMAGE_FORMAT;
+ }
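+
+    //
+    // The test above uses the usual power-of-two identity
+    // (x & (x - 1)) == 0: for example 0x200 & 0x1FF == 0, while for
+    // 0x300 the AND leaves 0x200 behind and the image is rejected.
+    //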
+
+ if (NtHeader->OptionalHeader.SectionAlignment < NtHeader->OptionalHeader.FileAlignment) {
+ return STATUS_INVALID_IMAGE_FORMAT;
+ }
+
+ if (NtHeader->OptionalHeader.SizeOfImage > MM_SIZE_OF_LARGEST_IMAGE) {
+ return STATUS_INVALID_IMAGE_FORMAT;
+ }
+
+ if (NtHeader->FileHeader.NumberOfSections > MM_MAXIMUM_IMAGE_SECTIONS) {
+ return STATUS_INVALID_IMAGE_FORMAT;
+ }
+
+    // Commented out to allow mapping drivers at based addresses.
+
+ //if ((PVOID)NtHeader->OptionalHeader.ImageBase >= MM_HIGHEST_USER_ADDRESS) {
+ // return STATUS_INVALID_IMAGE_FORMAT;
+ //}
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+MiCreateDataFileMap (
+ IN PFILE_OBJECT File,
+ OUT PSEGMENT *Segment,
+ IN PLARGE_INTEGER MaximumSize,
+ IN ULONG SectionPageProtection,
+ IN ULONG AllocationAttributes,
+ IN ULONG IgnoreFileSizing
+ )
+
+/*++
+
+Routine Description:
+
+    This function creates the necessary structures to allow the mapping
+    of a data file.
+
+    The data file is accessed to verify the desired access, and a segment
+    object is created and initialized.
+
+Arguments:
+
+    File - Supplies the file object for the data file.
+
+    Segment - Returns the segment object.
+
+    MaximumSize - Supplies the maximum size for the mapping.
+
+    SectionPageProtection - Supplies the initial page protection.
+
+    AllocationAttributes - Supplies the allocation attributes for the
+                  mapping.
+
+    IgnoreFileSizing - Supplies TRUE if the call is on behalf of the
+                  cache manager and the file's size should not be
+                  examined or changed.
+
+Return Value:
+
+    Returns the status value.
+
+--*/
+
+{
+
+ NTSTATUS Status;
+ ULONG NumberOfPtes;
+ ULONG SizeOfSegment;
+ ULONG j;
+ ULONG Size;
+ ULONG PartialSize;
+ ULONG First;
+ PCONTROL_AREA ControlArea;
+ PSEGMENT NewSegment;
+ PSUBSECTION Subsection;
+ PSUBSECTION ExtendedSubsection;
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ ULONG NumberOfPtesWithAlignment;
+ LARGE_INTEGER EndOfFile;
+ ULONG ExtendedSubsections = 0;
+ PSUBSECTION FirstSubsection = NULL;
+ PSUBSECTION Last;
+ ULONG NumberOfNewSubsections = 0;
+
+ PAGED_CODE();
+
+ // *************************************************************
+ // Create mapped file section.
+ // *************************************************************
+
+
+ if (!IgnoreFileSizing) {
+
+ Status = FsRtlGetFileSize (File, &EndOfFile);
+
+ if (Status == STATUS_FILE_IS_A_DIRECTORY) {
+
+ //
+ // Can't map a directory as a section. Return error.
+ //
+
+ return STATUS_INVALID_FILE_FOR_SECTION;
+ }
+
+ if (!NT_SUCCESS (Status)) {
+ return Status;
+ }
+
+ if ((EndOfFile.QuadPart == 0) && (MaximumSize->QuadPart == 0)) {
+
+ //
+            // Can't map a zero length file without specifying a non-zero
+            // maximum size.
+ //
+
+ return STATUS_MAPPED_FILE_SIZE_ZERO;
+ }
+
+ //
+ // Make sure this file is big enough for the section.
+ //
+
+ if (MaximumSize->QuadPart > EndOfFile.QuadPart) {
+
+ //
+ // If the maximum size is greater than the end-of-file,
+            // and the user did not request PAGE_READWRITE or
+            // PAGE_EXECUTE_READWRITE to the section, reject the request.
+ //
+
+ if (((SectionPageProtection & PAGE_READWRITE) |
+ (SectionPageProtection & PAGE_EXECUTE_READWRITE)) == 0) {
+
+ return STATUS_SECTION_TOO_BIG;
+ }
+
+ //
+            // The file is not large enough to contain all the data;
+            // set a new end-of-file.
+ //
+
+ EndOfFile = *MaximumSize;
+
+ Status = FsRtlSetFileSize (File, &EndOfFile);
+
+ if (!NT_SUCCESS (Status)) {
+ return Status;
+ }
+ }
+ } else {
+
+ //
+ // Ignore the file size, this call is from the cache manager.
+ //
+
+ EndOfFile = *MaximumSize;
+ }
+
+ //
+ // Calculate the number of prototype PTEs to build for this section.
+ //
+
+    NumberOfPtes = (ULONG)((EndOfFile.QuadPart +
+                                    (PAGE_SIZE - 1)) >> PAGE_SHIFT);
+
+ //
+    // Calculate the number of PTEs to allocate to maintain the
+    // desired alignment. On x86 and R3000 no additional PTEs are
+    // needed; on MIPS the desired alignment is 64k to avoid cache
+    // problems.
+ //
+
+ NumberOfPtesWithAlignment = (NumberOfPtes +
+ ((MM_PROTO_PTE_ALIGNMENT >> PAGE_SHIFT) - 1)) &
+ (~((MM_PROTO_PTE_ALIGNMENT >> PAGE_SHIFT) - 1));
+
+ ASSERT (NumberOfPtes <= NumberOfPtesWithAlignment);
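+
+    //
+    // Illustrative example (assuming 4k pages and a 64k prototype PTE
+    // alignment): a 0x1800 byte file needs (0x1800 + 0xFFF) >> 12 == 2
+    // prototype PTEs, and the alignment rounds this up to 16 PTEs
+    // (64k / 4k).
+    //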
+
+ SizeOfSegment = sizeof(SEGMENT) + sizeof(MMPTE) *
+ (NumberOfPtesWithAlignment - 1);
+
+ NewSegment = ExAllocatePoolWithTag (PagedPool,
+ SizeOfSegment,
+ MMSECT);
+ if (NewSegment == NULL) {
+
+ //
+ // The requested pool could not be allocated.
+ // Try to allocate the memory in smaller sizes.
+ //
+
+ if (SizeOfSegment < MM_ALLOCATION_FRAGMENT) {
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ Size = MM_ALLOCATION_FRAGMENT;
+ PartialSize = SizeOfSegment;
+
+ do {
+
+ if (PartialSize < MM_ALLOCATION_FRAGMENT) {
+ PartialSize = ROUND_TO_PAGES (PartialSize);
+ Size = PartialSize;
+ }
+
+ NewSegment = ExAllocatePoolWithTag (PagedPool,
+ Size,
+ MMSECT);
+ ExtendedSubsection = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(SUBSECTION),
+ 'bSmM');
+
+ if ((NewSegment == NULL) || (ExtendedSubsection == NULL)) {
+ if (NewSegment) {
+ ExFreePool (NewSegment);
+ }
+ if (ExtendedSubsection) {
+ ExFreePool (ExtendedSubsection);
+ }
+
+ //
+ // Free all the previous allocations and return an error.
+ //
+
+ while (FirstSubsection != NULL) {
+ ExFreePool (FirstSubsection->SubsectionBase);
+ Last = FirstSubsection->NextSubsection;
+ ExFreePool (FirstSubsection);
+ FirstSubsection = Last;
+ }
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ NumberOfNewSubsections += 1;
+ RtlZeroMemory (ExtendedSubsection, sizeof(SUBSECTION));
+
+ if (FirstSubsection == NULL) {
+ FirstSubsection = ExtendedSubsection;
+ Last = ExtendedSubsection;
+ NumberOfNewSubsections = 0;
+ } else {
+ Last->NextSubsection = ExtendedSubsection;
+ }
+
+ ExtendedSubsection->PtesInSubsection = Size / sizeof(MMPTE);
+ ExtendedSubsection->SubsectionBase = (PMMPTE)NewSegment;
+ Last = ExtendedSubsection;
+ PartialSize -= Size;
+ } while (PartialSize != 0);
+
+ //
+ // Reset new segment and free the first subsection, as
+ // the subsection after the control area will become the
+ // first subsection.
+ //
+
+ NewSegment = (PSEGMENT)FirstSubsection->SubsectionBase;
+ }
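+
+    //
+    // At this point NewSegment either refers to a single allocation
+    // large enough for the entire segment, or to the first piece of a
+    // chain of subsections (headed by FirstSubsection) whose pool
+    // allocations together hold all the prototype PTEs.
+    //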
+
+ *Segment = NewSegment;
+ RtlZeroMemory (NewSegment, sizeof(SEGMENT));
+
+ ControlArea =
+ (PCONTROL_AREA)File->SectionObjectPointer->DataSectionObject;
+
+ //
+ // Control area and first subsection have been zeroed when allocated.
+ //
+
+ ControlArea->Segment = NewSegment;
+ ControlArea->NumberOfSectionReferences = 1;
+
+ if (IgnoreFileSizing == FALSE) {
+
+ //
+ // This reference is not from the cache manager.
+ //
+
+ ControlArea->NumberOfUserReferences = 1;
+ }
+
+ ControlArea->u.Flags.BeingCreated = 1;
+ ControlArea->u.Flags.File = 1;
+
+ if (FILE_REMOTE_DEVICE & File->DeviceObject->Characteristics) {
+
+ //
+ // This file resides on a redirected drive.
+ //
+
+ ControlArea->u.Flags.Networked = 1;
+ }
+
+ if (AllocationAttributes & SEC_NOCACHE) {
+ ControlArea->u.Flags.NoCache = 1;
+ }
+
+    if (IgnoreFileSizing) {
+
+        //
+        // Set the was purged flag to indicate that the
+        // file size was not explicitly set.
+        //
+
+ ControlArea->u.Flags.WasPurged = 1;
+ }
+
+ ControlArea->NumberOfSubsections = 1 + (USHORT)NumberOfNewSubsections;
+ ControlArea->FilePointer = File;
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ if (FirstSubsection) {
+
+ Subsection->NextSubsection = FirstSubsection->NextSubsection;
+ Subsection->PtesInSubsection = FirstSubsection->PtesInSubsection;
+ ExFreePool (FirstSubsection);
+#if DBG
+ FirstSubsection = NULL;
+#endif //DBG
+ } else {
+ ASSERT (Subsection->NextSubsection == NULL);
+ }
+
+ First = TRUE;
+ PartialSize = 0;
+
+ do {
+
+ //
+ // Loop through all the subsections and fill in the PTEs.
+ //
+
+
+ TempPte.u.Long = (ULONG)MiGetSubsectionAddressForPte(Subsection);
+ TempPte.u.Soft.Prototype = 1;
+
+ //
+ // Set all the PTEs to the execute-read-write protection.
+ // The section will control access to these and the segment
+ // must provide a method to allow other users to map the file
+ // for various protections.
+ //
+
+ TempPte.u.Soft.Protection = MM_EXECUTE_READWRITE;
+
+ //
+ // Align the prototype PTEs on the proper boundary.
+ //
+
+ if (First) {
+
+ PointerPte = &NewSegment->ThePtes[0];
+ j = ((ULONG)PointerPte >> PTE_SHIFT) &
+ ((MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE) - 1);
+
+ if (j != 0) {
+ j = (MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE) - j;
+ }
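+
+            //
+            // Illustrative example (assuming 4 byte PTEs, i.e.
+            // PTE_SHIFT == 2, 4k pages and 64k alignment): a 64k-aligned
+            // run is 16 PTEs, so if the PTE index of ThePtes[0] mod 16
+            // is 9, then j becomes 16 - 9 == 7 pad PTEs before the first
+            // prototype PTE.
+            //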
+
+ NewSegment->PrototypePte = &NewSegment->ThePtes[j];
+ NewSegment->ControlArea = ControlArea;
+ NewSegment->SizeOfSegment = EndOfFile;
+ NewSegment->TotalNumberOfPtes = NumberOfPtes;
+ NewSegment->SegmentPteTemplate = TempPte;
+ PointerPte = NewSegment->PrototypePte;
+ Subsection->SubsectionBase = PointerPte;
+
+ if (Subsection->NextSubsection != NULL) {
+
+                //
+                // Multiple subsections. Align the first subsection so
+                // its size is a multiple of 64k.
+                //
+
+ NewSegment->NonExtendedPtes =
+ (((Subsection->PtesInSubsection * sizeof(MMPTE)) -
+ ((PCHAR)NewSegment->PrototypePte - (PCHAR)NewSegment))
+ / sizeof(MMPTE)) & ~((X64K >> PAGE_SHIFT) - 1);
+ } else {
+ NewSegment->NonExtendedPtes = NumberOfPtesWithAlignment;
+ }
+ Subsection->PtesInSubsection = NewSegment->NonExtendedPtes;
+
+ First = FALSE;
+ } else {
+ PointerPte = (PMMPTE)Subsection->SubsectionBase;
+ }
+
+ Subsection->ControlArea = ControlArea;
+ Subsection->StartingSector = PartialSize;
+ Subsection->u.SubsectionFlags.Protection = MM_EXECUTE_READWRITE;
+
+ if (Subsection->NextSubsection == NULL) {
+ Subsection->EndingSector = (ULONG)(EndOfFile.QuadPart >> MMSECTOR_SHIFT);
+ Subsection->u.SubsectionFlags.SectorEndOffset =
+ EndOfFile.LowPart & MMSECTOR_MASK;
+ j = Subsection->PtesInSubsection;
+ Subsection->PtesInSubsection = NumberOfPtes -
+ (PartialSize >> (PAGE_SHIFT - MMSECTOR_SHIFT));
+ Subsection->UnusedPtes = j - Subsection->PtesInSubsection;
+ } else {
+ Subsection->EndingSector = PartialSize +
+ (Subsection->PtesInSubsection << (PAGE_SHIFT - MMSECTOR_SHIFT));
+ }
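+
+        //
+        // Illustrative sector math (assuming MMSECTOR_SHIFT is 9, i.e.
+        // 512 byte sectors, and 4k pages): each PTE covers
+        // 1 << (PAGE_SHIFT - MMSECTOR_SHIFT) == 8 sectors, so a
+        // subsection of 16 PTEs spans 128 sectors of the file.
+        //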
+
+ RtlFillMemoryUlong (PointerPte,
+ (Subsection->PtesInSubsection +
+ Subsection->UnusedPtes) * sizeof(MMPTE),
+ TempPte.u.Long);
+
+ PartialSize += Subsection->PtesInSubsection <<
+ (PAGE_SHIFT - MMSECTOR_SHIFT);
+ Subsection = Subsection->NextSubsection;
+ } while (Subsection != NULL);
+
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+MiCreatePagingFileMap (
+ OUT PSEGMENT *Segment,
+ IN PLARGE_INTEGER MaximumSize,
+ IN ULONG ProtectionMask,
+ IN ULONG AllocationAttributes
+ )
+
+/*++
+
+Routine Description:
+
+    This function creates the necessary structures to allow the mapping
+    of a paging file backed section.
+
+Arguments:
+
+ Segment - Returns the segment object.
+
+ MaximumSize - Supplies the maximum size for the mapping.
+
+ ProtectionMask - Supplies the initial page protection.
+
+ AllocationAttributes - Supplies the allocation attributes for the
+ mapping.
+
+Return Value:
+
+ Returns the status value.
+
+--*/
+
+
+{
+
+ ULONG NumberOfPtes;
+ ULONG SizeOfSegment;
+ ULONG i;
+ PCONTROL_AREA ControlArea;
+ PSEGMENT NewSegment;
+ PMMPTE PointerPte;
+ PSUBSECTION Subsection;
+ MMPTE TempPte;
+
+ PAGED_CODE();
+
+ //*******************************************************************
+ // Create a section backed by paging file.
+ //*******************************************************************
+
+ if (MaximumSize->QuadPart == 0) {
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ //
+ // Limit page file backed sections to 2 gigabytes.
+ //
+
+ if ((MaximumSize->HighPart != 0) ||
+ (MaximumSize->LowPart > (ULONG)0x7FFFFFFF)) {
+
+ return STATUS_SECTION_TOO_BIG;
+ }
+
+ //
+ // Create the segment object.
+ //
+
+ //
+ // Calculate the number of prototype PTEs to build for this segment.
+ //
+
+ NumberOfPtes = BYTES_TO_PAGES (MaximumSize->LowPart);
+
+ if (AllocationAttributes & SEC_COMMIT) {
+
+ //
+ // Commit the pages for the section.
+ //
+
+ try {
+
+ MiChargeCommitment (NumberOfPtes, NULL);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+ }
+
+ SizeOfSegment = sizeof(SEGMENT) + sizeof(MMPTE) * (NumberOfPtes - 1);
+
+ NewSegment = ExAllocatePoolWithTag (PagedPool, SizeOfSegment,
+ MMSECT);
+
+ if (NewSegment == NULL) {
+
+ //
+ // The requested pool could not be allocated.
+ //
+
+ if (AllocationAttributes & SEC_COMMIT) {
+ MiReturnCommitment (NumberOfPtes);
+ }
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ *Segment = NewSegment;
+
+ ControlArea = ExAllocatePoolWithTag (NonPagedPool,
+ (ULONG)sizeof(CONTROL_AREA) +
+ (ULONG)sizeof(SUBSECTION),
+ MMCONTROL);
+
+ if (ControlArea == NULL) {
+
+ //
+ // The requested pool could not be allocated.
+ //
+
+ ExFreePool (NewSegment);
+
+ if (AllocationAttributes & SEC_COMMIT) {
+ MiReturnCommitment (NumberOfPtes);
+ }
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ //
+ // Zero control area and first subsection.
+ //
+
+ RtlZeroMemory (ControlArea,
+ sizeof(CONTROL_AREA) + sizeof(SUBSECTION));
+
+ ControlArea->Segment = NewSegment;
+ ControlArea->NumberOfSectionReferences = 1;
+ ControlArea->NumberOfUserReferences = 1;
+ ControlArea->NumberOfSubsections = 1;
+
+ if (AllocationAttributes & SEC_BASED) {
+ ControlArea->u.Flags.Based = 1;
+ }
+
+ if (AllocationAttributes & SEC_RESERVE) {
+ ControlArea->u.Flags.Reserve = 1;
+ }
+
+ if (AllocationAttributes & SEC_COMMIT) {
+ ControlArea->u.Flags.Commit = 1;
+ }
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ Subsection->ControlArea = ControlArea;
+ Subsection->PtesInSubsection = NumberOfPtes;
+ Subsection->u.SubsectionFlags.Protection = ProtectionMask;
+
+ //
+ // Align the prototype PTEs on the proper boundary.
+ //
+
+ PointerPte = &NewSegment->ThePtes[0];
+ i = ((ULONG)PointerPte >> PTE_SHIFT) &
+ ((MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE) - 1);
+
+ if (i != 0) {
+ i = (MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE) - i;
+ }
+
+ //
+ // Zero the segment header.
+ //
+
+ RtlZeroMemory (NewSegment, sizeof(SEGMENT));
+
+ NewSegment->PrototypePte = &NewSegment->ThePtes[i];
+
+ NewSegment->ControlArea = ControlArea;
+
+ //
+ // As size is limited to 2gb, ignore the high part.
+ //
+
+ NewSegment->SizeOfSegment.LowPart = NumberOfPtes * PAGE_SIZE;
+ NewSegment->TotalNumberOfPtes = NumberOfPtes;
+ NewSegment->NonExtendedPtes = NumberOfPtes;
+
+ PointerPte = NewSegment->PrototypePte;
+ Subsection->SubsectionBase = PointerPte;
+ TempPte = ZeroPte;
+
+ if (AllocationAttributes & SEC_COMMIT) {
+ TempPte.u.Soft.Protection = ProtectionMask;
+ NewSegment->NumberOfCommittedPages = NumberOfPtes;
+ MmSharedCommit += NewSegment->NumberOfCommittedPages;
+ }
+
+ NewSegment->SegmentPteTemplate.u.Soft.Protection = ProtectionMask;
+
+ for (i = 0; i < NumberOfPtes; i++) {
+
+ //
+ // Set all the prototype PTEs to either no access or demand zero
+ // depending on the commit flag.
+ //
+
+ *PointerPte = TempPte;
+ PointerPte += 1;
+ }
+
+ return STATUS_SUCCESS;
+}
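+
+//
+// For reference, a minimal caller's-eye sketch (hypothetical values):
+// a pagefile backed section reaches this path when NtCreateSection is
+// called with a NULL file handle.
+//
+//      LARGE_INTEGER Size;
+//      HANDLE Section;
+//
+//      Size.QuadPart = 0x10000;
+//      Status = NtCreateSection (&Section, SECTION_ALL_ACCESS, NULL,
+//                                &Size, PAGE_READWRITE, SEC_COMMIT,
+//                                NULL);
+//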
+
+
+NTSTATUS
+NtOpenSection (
+ OUT PHANDLE SectionHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes
+ )
+
+/*++
+
+Routine Description:
+
+ This function opens a handle to a section object with the specified
+ desired access.
+
+Arguments:
+
+
+    SectionHandle - Supplies a pointer to a variable that will
+ receive the section object handle value.
+
+ DesiredAccess - Supplies the desired types of access for the
+ section.
+
+ DesiredAccess Flags
+
+
+ EXECUTE - Execute access to the section is
+ desired.
+
+ READ - Read access to the section is desired.
+
+ WRITE - Write access to the section is desired.
+
+ ObjectAttributes - Supplies a pointer to an object attributes structure.
+
+Return Value:
+
+    Returns the status value.
+
+--*/
+
+{
+ HANDLE Handle;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+
+ PAGED_CODE();
+ //
+ // Get previous processor mode and probe output arguments if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ try {
+ ProbeForWriteHandle(SectionHandle);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+ }
+
+ //
+ // Open handle to the section object with the specified desired
+ // access.
+ //
+
+ Status = ObOpenObjectByName (ObjectAttributes,
+ MmSectionObjectType,
+ PreviousMode,
+ NULL,
+ DesiredAccess,
+ NULL,
+ &Handle
+ );
+
+ try {
+ *SectionHandle = Handle;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return Status;
+ }
+
+ return Status;
+}
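+
+//
+// For reference, a minimal caller's-eye sketch (the section name and
+// access mask are hypothetical):
+//
+//      OBJECT_ATTRIBUTES Attributes;
+//      UNICODE_STRING Name;
+//      HANDLE Handle;
+//
+//      RtlInitUnicodeString (&Name, L"\\BaseNamedObjects\\MySection");
+//      InitializeObjectAttributes (&Attributes, &Name,
+//                                  OBJ_CASE_INSENSITIVE, NULL, NULL);
+//      Status = NtOpenSection (&Handle, SECTION_MAP_READ, &Attributes);
+//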
+
+CCHAR
+MiGetImageProtection (
+ IN ULONG SectionCharacteristics
+ )
+
+/*++
+
+Routine Description:
+
+ This function takes a section characteristic mask from the
+ image and converts it to an PTE protection mask.
+
+Arguments:
+
+ SectionCharacteristics - Supplies the characteristics mask from the
+ image.
+
+Return Value:
+
+ Returns the protection mask for the PTE.
+
+--*/
+
+{
+ ULONG Index;
+ PAGED_CODE();
+
+ Index = 0;
+ if (SectionCharacteristics & IMAGE_SCN_MEM_EXECUTE) {
+ Index |= 1;
+ }
+ if (SectionCharacteristics & IMAGE_SCN_MEM_READ) {
+ Index |= 2;
+ }
+ if (SectionCharacteristics & IMAGE_SCN_MEM_WRITE) {
+ Index |= 4;
+ }
+ if (SectionCharacteristics & IMAGE_SCN_MEM_SHARED) {
+ Index |= 8;
+ }
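+
+    //
+    // For example, a typical code section (execute | read) yields
+    // Index == 3, while a shared writable data section
+    // (read | write | shared) yields Index == 14.
+    //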
+
+ return MmImageProtectionArray[Index];
+}
+
+ULONG
+MiGetPageForHeader (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    This non-pagable function acquires the PFN lock, removes
+    a page and updates the PFN database so that the page is
+    ready to be deleted when its reference count is decremented.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+    Returns the physical page frame number.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ ULONG PageFrameNumber;
+ PMMPFN Pfn1;
+ PEPROCESS Process;
+ ULONG PageColor;
+
+ Process = PsGetCurrentProcess();
+ PageColor = MI_PAGE_COLOR_VA_PROCESS ((PVOID)X64K,
+ &Process->NextPageColor);
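+
+    //
+    // The page color biases the choice of physical page toward one that
+    // maps without cache conflicts at a 64k aligned virtual address;
+    // this matters on machines whose caches are sensitive to page color.
+    //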
+
+ //
+ // Lock the PFN database and get a page.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+
+ //
+ // Remove page for 64k alignment.
+ //
+
+ PageFrameNumber = MiRemoveAnyPage (PageColor);
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Increment the reference count for the page so the
+ // paging I/O will work.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameNumber);
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->OriginalPte = ZeroPte;
+ Pfn1->PteAddress = (PVOID) X64K;
+ MI_SET_PFN_DELETED (Pfn1);
+
+ return(PageFrameNumber);
+}
+
+VOID
+MiUpdateImageHeaderPage (
+ IN PMMPTE PointerPte,
+ IN ULONG PageFrameNumber,
+ IN PCONTROL_AREA ControlArea
+ )
+
+/*++
+
+Routine Description:
+
+    This non-pagable function acquires the PFN lock, and
+    makes the specified prototype PTE a transition PTE
+    referring to the specified physical page. It then
+    decrements the reference count causing the page to
+    be placed on the standby or modified list.
+
+Arguments:
+
+ PointerPte - Supplies the PTE to set into the transition state.
+
+ PageFrameNumber - Supplies the physical page.
+
+ ControlArea - Supplies the control area for the prototype PTEs.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+
+ MiMakeSystemAddressValidPfn (PointerPte);
+
+ MiInitializeTransitionPfn (PageFrameNumber, PointerPte, 0xFFFFFFFF);
+ ControlArea->NumberOfPfnReferences += 1;
+
+ //
+ // Add the page to the standby list.
+ //
+
+ MiDecrementReferenceCount (PageFrameNumber);
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+VOID
+MiRemoveImageHeaderPage (
+ IN ULONG PageFrameNumber
+ )
+
+/*++
+
+Routine Description:
+
+    This non-pagable function acquires the PFN lock, and decrements
+    the reference count thereby causing the physical page to
+    be deleted.
+
+Arguments:
+
+ PageFrameNumber - Supplies the PFN to decrement.
+
+Return Value:
+
+ None.
+
+--*/
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+ MiDecrementReferenceCount (PageFrameNumber);
+ UNLOCK_PFN (OldIrql);
+ return;
+}
diff --git a/private/ntos/mm/deleteva.c b/private/ntos/mm/deleteva.c
new file mode 100644
index 000000000..ac9f4a938
--- /dev/null
+++ b/private/ntos/mm/deleteva.c
@@ -0,0 +1,985 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ deleteva.c
+
+Abstract:
+
+ This module contains the routines for deleting virtual address space.
+
+Author:
+
+ Lou Perazzoli (loup) 11-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if DBG
+extern ULONG MmPagingFileDebug[8192];
+#endif
+
+
+
+VOID
+MiDeleteVirtualAddresses (
+ IN PUCHAR StartingAddress,
+ IN PUCHAR EndingAddress,
+ IN ULONG AddressSpaceDeletion,
+ IN PMMVAD Vad
+ )
+
+/*++
+
+Routine Description:
+
+ This routine deletes the specified virtual address range within
+ the current process.
+
+Arguments:
+
+ StartingAddress - Supplies the first virtual address to delete.
+
+ EndingAddress - Supplies the last address to delete.
+
+ AddressSpaceDeletion - Supplies TRUE if the address space is being
+ deleted, FALSE otherwise. If TRUE is specified
+ the TB is not flushed and valid addresses are
+ not removed from the working set.
+
+ Vad - Supplies the virtual address descriptor which maps this range
+ or NULL if we are not concerned about views. From the Vad the
+ range of prototype PTEs is determined and this information is
+ used to uncover if the PTE refers to a prototype PTE or a
+ fork PTE.
+
+Return Value:
+
+ None.
+
+
+Environment:
+
+    Kernel mode, called with APCs disabled and with the working set mutex
+    and PFN lock held. These locks may be released and reacquired to fault
+    pages in.
+
+--*/
+
+{
+ PUCHAR Va;
+ PVOID TempVa;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE OriginalPointerPte;
+ PMMPTE ProtoPte;
+ PMMPTE LastProtoPte;
+ PEPROCESS CurrentProcess;
+ ULONG FlushTb = FALSE;
+ PSUBSECTION Subsection;
+ PUSHORT UsedPageTableCount;
+ KIRQL OldIrql = APC_LEVEL;
+ MMPTE_FLUSH_LIST FlushList;
+
+ FlushList.Count = 0;
+
+ MM_PFN_LOCK_ASSERT();
+ CurrentProcess = PsGetCurrentProcess();
+
+ Va = StartingAddress;
+ PointerPde = MiGetPdeAddress (Va);
+ PointerPte = MiGetPteAddress (Va);
+ OriginalPointerPte = PointerPte;
+ UsedPageTableCount =
+ &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Va)];
+
+ while (MiDoesPdeExistAndMakeValid (PointerPde,
+ CurrentProcess,
+ TRUE) == FALSE) {
+
+ //
+ // This page directory entry is empty, go to the next one.
+ //
+
+ PointerPde += 1;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ if (Va > EndingAddress) {
+
+ //
+ // All done, return.
+ //
+
+ return;
+
+ }
+ UsedPageTableCount += 1;
+ }
+
+ //
+ // A valid PDE has been located, examine each PTE and delete them.
+ //
+
+ if ((Vad == (PMMVAD)NULL) ||
+ (Vad->u.VadFlags.PrivateMemory) ||
+ (Vad->FirstPrototypePte == (PMMPTE)NULL)) {
+ ProtoPte = (PMMPTE)NULL;
+ LastProtoPte = (PMMPTE)NULL;
+ } else {
+ ProtoPte = Vad->FirstPrototypePte;
+ LastProtoPte = (PMMPTE)4;
+ }
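+
+    //
+    // Note that (PMMPTE)4 is just a small non-NULL sentinel: the first
+    // time a prototype PTE is examined below, ProtoPte >= LastProtoPte
+    // forces the real subsection limits to be computed.
+    //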
+
+ //
+ // Examine each PTE within the address range and delete it.
+ //
+
+ while (Va <= EndingAddress) {
+
+ if (((ULONG)Va & PAGE_DIRECTORY_MASK) == 0) {
+
+ //
+ // Note, the initial address could be aligned on a 4mb boundary.
+ //
+
+ //
+ // The virtual address is on a page directory (4mb) boundary,
+ // check the next PDE for validity and flush PTEs for previous
+ // page table page.
+ //
+
+ MiFlushPteList (&FlushList, FALSE, ZeroPte);
+
+ //
+ // If all the entries have been eliminated from the previous
+ // page table page, delete the page table page itself.
+ //
+
+ if ((*UsedPageTableCount == 0) && (PointerPde->u.Long != 0)) {
+
+ TempVa = MiGetVirtualAddressMappedByPte(PointerPde);
+ MiDeletePte (PointerPde,
+ TempVa,
+ AddressSpaceDeletion,
+ CurrentProcess,
+ NULL,
+ NULL);
+ }
+
+ //
+            // Release the PFN lock. This prevents a single thread
+            // from blocking other high priority threads while a
+            // large address range is deleted. There is nothing
+            // magic about the instructions between the unlock and
+            // the relock.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ PointerPde = MiGetPdeAddress (Va);
+ LOCK_PFN (OldIrql);
+
+ UsedPageTableCount =
+ &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Va)];
+
+ while (MiDoesPdeExistAndMakeValid (
+ PointerPde, CurrentProcess, TRUE) == FALSE) {
+
+ //
+ // This page directory entry is empty, go to the next one.
+ //
+
+ PointerPde += 1;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ if (Va > EndingAddress) {
+
+ //
+ // All done, return.
+ //
+
+ return;
+ }
+
+ UsedPageTableCount += 1;
+ if (LastProtoPte != NULL) {
+ ProtoPte = MiGetProtoPteAddress(Vad,Va);
+ Subsection = MiLocateSubsection (Vad,Va);
+ LastProtoPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
+#if DBG
+ if (Vad->u.VadFlags.ImageMap != 1) {
+ if ((ProtoPte < Subsection->SubsectionBase) ||
+ (ProtoPte >= LastProtoPte)) {
+ DbgPrint ("bad proto pte %lx va %lx Vad %lx sub %lx\n",
+ ProtoPte,Va,Vad,Subsection);
+ DbgBreakPoint();
+ }
+ }
+#endif //DBG
+ }
+ }
+ }
+
+ //
+ // The PDE is now valid, delete the ptes
+ //
+
+ if (PointerPte->u.Long != 0) {
+#ifdef R4000
+ ASSERT (PointerPte->u.Hard.Global == 0);
+#endif
+
+ //
+ // One less used page table entry in this page table page.
+ //
+
+ *UsedPageTableCount -= 1;
+ ASSERT (*UsedPageTableCount < PTE_PER_PAGE);
+
+ if (IS_PTE_NOT_DEMAND_ZERO (*PointerPte)) {
+
+ if (LastProtoPte != NULL) {
+ if (ProtoPte >= LastProtoPte) {
+ ProtoPte = MiGetProtoPteAddress(Vad,Va);
+ Subsection = MiLocateSubsection (Vad,Va);
+ LastProtoPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
+ }
+#if DBG
+ if (Vad->u.VadFlags.ImageMap != 1) {
+ if ((ProtoPte < Subsection->SubsectionBase) ||
+ (ProtoPte >= LastProtoPte)) {
+ DbgPrint ("bad proto pte %lx va %lx Vad %lx sub %lx\n",
+ ProtoPte,Va,Vad,Subsection);
+ DbgBreakPoint();
+ }
+ }
+#endif //DBG
+ }
+
+ MiDeletePte (PointerPte,
+ (PVOID)Va,
+ AddressSpaceDeletion,
+ CurrentProcess,
+ ProtoPte,
+ &FlushList);
+ } else {
+ *PointerPte = ZeroPte;
+ }
+ }
+ Va = Va + PAGE_SIZE;
+ PointerPte++;
+ ProtoPte++;
+
+ }
+
+ //
+ // Flush out entries for the last page table page.
+ //
+
+ MiFlushPteList (&FlushList, FALSE, ZeroPte);
+
+ //
+ // If all the entries have been eliminated from the previous
+ // page table page, delete the page table page itself.
+ //
+
+ if ((*UsedPageTableCount == 0) && (PointerPde->u.Long != 0)) {
+
+ TempVa = MiGetVirtualAddressMappedByPte(PointerPde);
+ MiDeletePte (PointerPde,
+ TempVa,
+ AddressSpaceDeletion,
+ CurrentProcess,
+ NULL,
+ NULL);
+ }
+
+ //
+ // All done, return.
+ //
+
+ return;
+}
+
+VOID
+MiDeletePte (
+ IN PMMPTE PointerPte,
+ IN PVOID VirtualAddress,
+ IN ULONG AddressSpaceDeletion,
+ IN PEPROCESS CurrentProcess,
+ IN PMMPTE PrototypePte,
+ IN PMMPTE_FLUSH_LIST PteFlushList OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This routine deletes the contents of the specified PTE. The PTE
+ can be in one of the following states:
+
+ - active and valid
+ - transition
+ - in paging file
+ - in prototype PTE format
+
+Arguments:
+
+ PointerPte - Supplies a pointer to the PTE to delete.
+
+ VirtualAddress - Supplies the virtual address which corresponds to
+ the PTE. This is used to locate the working set entry
+ to eliminate it.
+
+ AddressSpaceDeletion - Supplies TRUE if the address space is being
+ deleted, FALSE otherwise. If TRUE is specified
+ the TB is not flushed and valid addresses are
+ not removed from the working set.
+
+
+ CurrentProcess - Supplies a pointer to the current process.
+
+    PrototypePte - Supplies a pointer to the prototype PTE which currently
+                   or originally mapped this page. This is used to determine
+                   if the PTE is a fork PTE and should have its reference
+                   block decremented.
+
+    PteFlushList - Supplies an optional list in which to accumulate TB
+                   flushes; if not supplied, the TB entry for a valid PTE
+                   is flushed immediately.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APCs disabled, PFN lock and working set mutex held.
+
+--*/
+
+{
+ PMMPTE PointerPde;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ MMPTE PteContents;
+ ULONG WorkingSetIndex;
+ ULONG Entry;
+ PVOID SwapVa;
+ MMWSLENTRY Locked;
+ ULONG WsPfnIndex;
+ PMMCLONE_BLOCK CloneBlock;
+ PMMCLONE_DESCRIPTOR CloneDescriptor;
+
+ MM_PFN_LOCK_ASSERT();
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ DbgPrint("deleting PTE\n");
+ MiFormatPte(PointerPte);
+ }
+#endif //DBG
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+#ifdef R4000
+ ASSERT (PteContents.u.Hard.Global == 0);
+#endif
+#ifdef _X86_
+#if DBG
+#if !defined(NT_UP)
+
+ if (PteContents.u.Hard.Writable == 1) {
+ ASSERT (PteContents.u.Hard.Dirty == 1);
+ }
+ ASSERT (PteContents.u.Hard.Accessed == 1);
+#endif //NTUP
+#endif //DBG
+#endif //X86
+
+ //
+ // Pte is valid. Check PFN database to see if this is a prototype PTE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+ WsPfnIndex = Pfn1->u1.WsIndex;
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ MiFormatPfn(Pfn1);
+ }
+#endif //DBG
+
+ CloneDescriptor = NULL;
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+
+ CloneBlock = (PMMCLONE_BLOCK)Pfn1->PteAddress;
+
+ //
+ // Capture the state of the modified bit for this
+ // pte.
+ //
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
+
+ //
+ // Decrement the share and valid counts of the page table
+ // page which maps this PTE.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiDecrementShareAndValidCount (PointerPde->u.Hard.PageFrameNumber);
+
+ //
+ // Decrement the share count for the physical page.
+ //
+
+ MiDecrementShareCount (PteContents.u.Hard.PageFrameNumber);
+
+ //
+ // Check to see if this is a fork prototype PTE and if so
+ // update the clone descriptor address.
+ //
+
+ if (PointerPte <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) {
+
+ if (PrototypePte != Pfn1->PteAddress) {
+
+ //
+ // Locate the clone descriptor within the clone tree.
+ //
+
+ CloneDescriptor = MiLocateCloneAddress ((PVOID)CloneBlock);
+
+#if DBG
+ if (CloneDescriptor == NULL) {
+ DbgPrint("1PrototypePte %lx Clone desc %lx pfn pte addr %lx\n",
+ PrototypePte, CloneDescriptor, Pfn1->PteAddress);
+ MiFormatPte(PointerPte);
+ ASSERT (FALSE);
+ }
+#endif // DBG
+
+ }
+ }
+ } else {
+
+ //
+ // This pte is a NOT a prototype PTE, delete the physical page.
+ //
+
+ //
+ // Decrement the share and valid counts of the page table
+ // page which maps this PTE.
+ //
+
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ //
+ // Decrement the share count for the physical page. As the page
+ // is private it will be put on the free list.
+ //
+
+ MiDecrementShareCountOnly (PteContents.u.Hard.PageFrameNumber);
+
+ //
+ // Decrement the count for the number of private pages.
+ //
+
+ CurrentProcess->NumberOfPrivatePages -= 1;
+ }
+
+ //
+ // Find the WSLE for this page and eliminate it.
+ //
+
+ //
+ // If we are deleting the system portion of the address space, do
+ // not remove WSLEs or flush translation buffers as there can be
+ // no other usage of this address space.
+ //
+
+ if (AddressSpaceDeletion == FALSE) {
+ WorkingSetIndex = MiLocateWsle (VirtualAddress,
+ MmWorkingSetList,
+ WsPfnIndex );
+
+ ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
+
+ //
+ // Check to see if this entry is locked in the working set
+ // or locked in memory.
+ //
+
+ Locked = MmWsle[WorkingSetIndex].u1.e1;
+
+ MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);
+
+ //
+ // Add this entry to the list of free working set entries
+ // and adjust the working set count.
+ //
+
+ MiReleaseWsle (WorkingSetIndex, &CurrentProcess->Vm);
+
+ if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {
+
+ //
+ // This entry is locked.
+ //
+
+ ASSERT (WorkingSetIndex < MmWorkingSetList->FirstDynamic);
+ MmWorkingSetList->FirstDynamic -= 1;
+
+ if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {
+
+ Entry = MmWorkingSetList->FirstDynamic;
+ ASSERT (MmWsle[Entry].u1.e1.Valid);
+ SwapVa = MmWsle[Entry].u1.VirtualAddress;
+ SwapVa = PAGE_ALIGN (SwapVa);
+ Pfn2 = MI_PFN_ELEMENT (
+ MiGetPteAddress (SwapVa)->u.Hard.PageFrameNumber);
+#if 0
+ Entry = MiLocateWsleAndParent (SwapVa,
+ &Parent,
+ MmWorkingSetList,
+ Pfn2->u1.WsIndex);
+
+ //
+ // Swap the removed entry with the last locked entry
+ // which is located at first dynamic.
+ //
+
+ MiSwapWslEntries (Entry,
+ Parent,
+ WorkingSetIndex,
+ MmWorkingSetList);
+#endif //0
+
+ MiSwapWslEntries (Entry,
+ WorkingSetIndex,
+ &CurrentProcess->Vm);
+ }
+ } else {
+ ASSERT (WorkingSetIndex >= MmWorkingSetList->FirstDynamic);
+ }
+
+ //
+ // Flush the entry out of the TB.
+ //
+
+ if (!ARGUMENT_PRESENT (PteFlushList)) {
+ KeFlushSingleTb (VirtualAddress,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ ZeroPte.u.Flush);
+ } else {
+ if (PteFlushList->Count != MM_MAXIMUM_FLUSH_COUNT) {
+ PteFlushList->FlushPte[PteFlushList->Count] = PointerPte;
+ PteFlushList->FlushVa[PteFlushList->Count] = VirtualAddress;
+ PteFlushList->Count += 1;
+ }
+ *PointerPte = ZeroPte;
+ }
+
+ if (CloneDescriptor != NULL) {
+
+ //
+ // Flush PTEs as this could release the PFN_LOCK.
+ //
+
+ MiFlushPteList (PteFlushList, FALSE, ZeroPte);
+
+ //
+ // Decrement the reference count for the clone block,
+ // note that this could release and reacquire
+ // the mutexes hence cannot be done until after the
+ // working set index has been removed.
+ //
+
+ if (MiDecrementCloneBlockReference ( CloneDescriptor,
+ CloneBlock,
+ CurrentProcess )) {
+
+ //
+ // The working set mutex was released. This may
+ // have removed the current page table page.
+ //
+
+ MiDoesPdeExistAndMakeValid (PointerPde,
+ CurrentProcess,
+ TRUE);
+ }
+ }
+ }
+
+ } else if (PteContents.u.Soft.Prototype == 1) {
+
+ //
+ // This is a prototype PTE, if it is a fork PTE clean up the
+ // fork structures.
+ //
+
+ if (PteContents.u.Soft.PageFileHigh != 0xFFFFF) {
+
+ //
+ // Check to see if the prototype PTE is a fork prototype PTE.
+ //
+
+ if (PointerPte <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) {
+
+ if (PrototypePte != MiPteToProto (PointerPte)) {
+
+ CloneBlock = (PMMCLONE_BLOCK)MiPteToProto (PointerPte);
+ CloneDescriptor = MiLocateCloneAddress ((PVOID)CloneBlock);
+
+
+#if DBG
+ if (CloneDescriptor == NULL) {
+ DbgPrint("1PrototypePte %lx Clone desc %lx \n",
+ PrototypePte, CloneDescriptor);
+ MiFormatPte(PointerPte);
+ ASSERT (FALSE);
+ }
+#endif //DBG
+
+ //
+ // Decrement the reference count for the clone block,
+ // note that this could release and reacquire
+ // the mutexes.
+ //
+
+ *PointerPte = ZeroPte;
+
+ MiFlushPteList (PteFlushList, FALSE, ZeroPte);
+
+ if (MiDecrementCloneBlockReference ( CloneDescriptor,
+ CloneBlock,
+ CurrentProcess )) {
+
+ //
+ // The working set mutex was released. This may
+ // have removed the current page table page.
+ //
+
+ MiDoesPdeExistAndMakeValid (MiGetPteAddress (PointerPte),
+ CurrentProcess,
+ TRUE);
+ }
+ }
+ }
+ }
+
+ } else if (PteContents.u.Soft.Transition == 1) {
+
+ //
+ // This is a transition PTE. (Page is private)
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+        // Check the reference count for the page. If the reference
+        // count is zero, move the page to the free list; if it is
+        // not zero, ignore this page. When the reference count
+        // goes to zero, the page will be placed on the free list.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ MiUnlinkPageFromList (Pfn1);
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PteContents.u.Trans.PageFrameNumber);
+ }
+
+ //
+ // Decrement the count for the number of private pages.
+ //
+
+ CurrentProcess->NumberOfPrivatePages -= 1;
+
+ } else {
+
+ //
+ // Must be page file space.
+ //
+
+ if (PteContents.u.Soft.PageFileHigh != 0) {
+
+ if (MiReleasePageFileSpace (*PointerPte)) {
+
+ //
+ // Decrement the count for the number of private pages.
+ //
+
+ CurrentProcess->NumberOfPrivatePages -= 1;
+ }
+ }
+ }
+
+ //
+ // Zero the PTE contents.
+ //
+
+ *PointerPte = ZeroPte;
+
+ return;
+}
+
+
+ULONG
+FASTCALL
+MiReleasePageFileSpace (
+ IN MMPTE PteContents
+ )
+
+/*++
+
+Routine Description:
+
+ This routine frees the paging file allocated to the specified PTE
+ and adjusts the necessary quotas.
+
+Arguments:
+
+ PteContents - Supplies the PTE which is in page file format.
+
+Return Value:
+
+ Returns TRUE if any paging file space was deallocated.
+
+Environment:
+
+ Kernel mode, APCs disabled, PFN lock held.
+
+--*/
+
+{
+ ULONG FreeBit;
+ ULONG PageFileNumber;
+
+ MM_PFN_LOCK_ASSERT();
+
+ if (PteContents.u.Soft.Prototype == 1) {
+
+ //
+ // Not in page file format.
+ //
+
+ return FALSE;
+ }
+
+ FreeBit = GET_PAGING_FILE_OFFSET (PteContents);
+
+ if ((FreeBit == 0) || (FreeBit == 0xFFFFF)) {
+
+ //
+ // Page is not in a paging file, just return.
+ //
+
+ return FALSE;
+ }
+
+ PageFileNumber = GET_PAGING_FILE_NUMBER (PteContents);
+
+ ASSERT (RtlCheckBit( MmPagingFile[PageFileNumber]->Bitmap, FreeBit) == 1);
+
+#if DBG
+ if ((FreeBit < 8192) && (PageFileNumber == 0)) {
+ ASSERT ((MmPagingFileDebug[FreeBit] & 1) != 0);
+ MmPagingFileDebug[FreeBit] &= 0xfffffffe;
+ }
+#endif //DBG
+
+ RtlClearBits ( MmPagingFile[PageFileNumber]->Bitmap, FreeBit, 1);
+
+ MmPagingFile[PageFileNumber]->FreeSpace += 1;
+ MmPagingFile[PageFileNumber]->CurrentUsage -= 1;
+
+ //
+ // Check to see if we should move some MDL entries for the
+ // modified page writer now that more free space is available.
+ //
+
+ if ((MmNumberOfActiveMdlEntries == 0) ||
+ (MmPagingFile[PageFileNumber]->FreeSpace == MM_USABLE_PAGES_FREE)) {
+
+ MiUpdateModifiedWriterMdls (PageFileNumber);
+ }
+
+ return TRUE;
+}
+
+
+VOID
+FASTCALL
+MiUpdateModifiedWriterMdls (
+ IN ULONG PageFileNumber
+ )
+
+/*++
+
+Routine Description:
+
+ This routine ensures the MDLs for the specified paging file
+ are in the proper state such that paging i/o can continue.
+
+Arguments:
+
+ PageFileNumber - Supplies the page file number to check the
+ MDLs for.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ PMMMOD_WRITER_MDL_ENTRY WriterEntry;
+ ULONG i;
+
+ if (MmNumberOfActiveMdlEntries == 0) {
+
+ //
+        // There are no active MDL entries; remove the entry for this
+        // file from its list and add it to the active list.
+ //
+
+ WriterEntry = MmPagingFile[PageFileNumber]->Entry[0];
+ RemoveEntryList (&WriterEntry->Links);
+ WriterEntry->CurrentList = &MmPagingFileHeader.ListHead;
+ KeSetEvent (&WriterEntry->PagingListHead->Event, 0, FALSE);
+
+ InsertTailList (&WriterEntry->PagingListHead->ListHead,
+ &WriterEntry->Links);
+ MmNumberOfActiveMdlEntries += 1;
+
+ } else {
+
+ if (MmPagingFile[PageFileNumber]->FreeSpace == MM_USABLE_PAGES_FREE) {
+
+ //
+ // Put the MDL entries into the active list.
+ //
+
+ i = 0;
+
+ do {
+
+ if ((MmPagingFile[PageFileNumber]->Entry[i]->Links.Flink !=
+ MM_IO_IN_PROGRESS)
+ &&
+ (MmPagingFile[PageFileNumber]->Entry[i]->CurrentList ==
+ &MmFreePagingSpaceLow)) {
+
+ //
+ // Remove this entry and put it on the active list.
+ //
+
+ WriterEntry = MmPagingFile[PageFileNumber]->Entry[i];
+ RemoveEntryList (&WriterEntry->Links);
+ WriterEntry->CurrentList = &MmPagingFileHeader.ListHead;
+
+ KeSetEvent (&WriterEntry->PagingListHead->Event, 0, FALSE);
+
+ InsertTailList (&WriterEntry->PagingListHead->ListHead,
+ &WriterEntry->Links);
+ MmNumberOfActiveMdlEntries += 1;
+ }
+ i += 1;
+ } while (i < MM_PAGING_FILE_MDLS);
+ }
+ }
+ return;
+}
+
+VOID
+MiFlushPteList (
+ IN PMMPTE_FLUSH_LIST PteFlushList,
+ IN ULONG AllProcessors,
+ IN MMPTE FillPte
+ )
+
+/*++
+
+Routine Description:
+
+    This routine flushes all the PTEs in the PTE flush list.
+    If the list has overflowed, the entire TB is flushed.
+
+Arguments:
+
+ PteFlushList - Supplies an optional pointer to the list to be flushed.
+
+ AllProcessors - Supplies TRUE if the flush occurs on all processors.
+
+ FillPte - Supplies the PTE to fill with.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ ULONG count;
+ ULONG i = 0;
+
+ ASSERT (ARGUMENT_PRESENT (PteFlushList));
+ MM_PFN_LOCK_ASSERT ();
+
+ count = PteFlushList->Count;
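+
+    //
+    // Flush strategy: a single entry uses KeFlushSingleTb, a short list
+    // uses KeFlushMultipleTb, and an overflowed list (Count is capped
+    // at MM_MAXIMUM_FLUSH_COUNT by the callers) falls back to flushing
+    // the entire TB.
+    //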
+
+ if (count != 0) {
+ if (count != 1) {
+ if (count < MM_MAXIMUM_FLUSH_COUNT) {
+ KeFlushMultipleTb (count,
+ &PteFlushList->FlushVa[0],
+ TRUE,
+ (BOOLEAN)AllProcessors,
+ &((PHARDWARE_PTE)PteFlushList->FlushPte[0]),
+ FillPte.u.Flush);
+ } else {
+
+ //
+ // Array has overflowed, flush the entire TB.
+ //
+
+ ExAcquireSpinLockAtDpcLevel ( &MmSystemSpaceLock );
+ KeFlushEntireTb (TRUE, (BOOLEAN)AllProcessors);
+ if (AllProcessors == TRUE) {
+ MmFlushCounter.u.List.NextEntry += 1;
+ }
+ ExReleaseSpinLockFromDpcLevel ( &MmSystemSpaceLock );
+ }
+ } else {
+ KeFlushSingleTb (PteFlushList->FlushVa[0],
+ TRUE,
+ (BOOLEAN)AllProcessors,
+ (PHARDWARE_PTE)PteFlushList->FlushPte[0],
+ FillPte.u.Flush);
+ }
+ PteFlushList->Count = 0;
+ }
+ return;
+}
diff --git a/private/ntos/mm/dirs b/private/ntos/mm/dirs
new file mode 100644
index 000000000..a2a38f0fd
--- /dev/null
+++ b/private/ntos/mm/dirs
@@ -0,0 +1,24 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ dirs.
+
+Abstract:
+
+ This file specifies the subdirectories of the current directory that
+ contain component makefiles.
+
+
+Author:
+
+
+NOTE: Commented description of this file is in \nt\bak\bin\dirs.tpl
+
+!ENDIF
+
+DIRS=up
+
+OPTIONAL_DIRS=mp
diff --git a/private/ntos/mm/dmpaddr.c b/private/ntos/mm/dmpaddr.c
new file mode 100644
index 000000000..6695b9e15
--- /dev/null
+++ b/private/ntos/mm/dmpaddr.c
@@ -0,0 +1,879 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ dmpaddr.c
+
+Abstract:
+
+ Temporary routine to print valid addresses within an
+ address space.
+
+Author:
+
+ Lou Perazzoli (loup) 20-Mar-1989
+
+Environment:
+
+ Kernel Mode.
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if DBG
+
+BOOLEAN
+MiFlushUnusedSectionInternal (
+ IN PCONTROL_AREA ControlArea
+ );
+
+#endif //DBG
+
+#if DBG
+VOID
+MiDumpValidAddresses (
+ )
+
+{
+ ULONG va = 0;
+ ULONG i,j;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+
+ PointerPde = MiGetPdeAddress (va);
+
+
+ for (i = 0; i < PDE_PER_PAGE; i++) {
+ if (PointerPde->u.Hard.Valid) {
+ DbgPrint(" **valid PDE, element %ld %lx %lx\n",i,i,
+ PointerPde->u.Long);
+ PointerPte = MiGetPteAddress (va);
+ for (j = 0 ; j < PTE_PER_PAGE; j++) {
+ if (PointerPte->u.Hard.Valid) {
+ DbgPrint("Valid address at %lx pte %lx\n", (ULONG)va,
+ PointerPte->u.Long);
+ }
+ va += PAGE_SIZE;
+ PointerPte++;
+ }
+ } else {
+ va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
+ }
+
+ PointerPde++;
+ }
+
+ return;
+
+}
+
+#endif //DBG
+
+#if DBG
+VOID
+MiFormatPte (
+ IN PMMPTE PointerPte
+ )
+
+{
+// int j;
+// unsigned long pte;
+ PMMPTE proto_pte;
+ PSUBSECTION subsect;
+
+// struct a_bit {
+// unsigned long biggies : 31;
+// unsigned long bitties : 1;
+// };
+//
+// struct a_bit print_pte;
+
+
+ proto_pte = MiPteToProto(PointerPte);
+ subsect = MiGetSubsectionAddress(PointerPte);
+
+ DbgPrint("***DumpPTE at %lx contains %lx protoaddr %lx subsect %lx\n\n",
+ (ULONG)PointerPte, PointerPte->u.Long, (ULONG)proto_pte,
+ (ULONG)subsect);
+
+ return;
+
+// DbgPrint("page frame number 0x%lx proto PTE address 0x%lx\n",
+//
+// DbgPrint("PTE is 0x%lx\n", PTETOULONG(the_pte));
+//
+// proto_pte = MiPteToProto(PointerPte);
+//
+// DbgPrint("page frame number 0x%lx proto PTE address 0x%lx\n",
+// PointerPte->u.Hard.PageFrameNumber,*(PULONG)&proto_pte);
+//
+// DbgPrint(" 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 \n");
+// DbgPrint(" +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ \n");
+// DbgPrint(" | pfn |c|p|t|r|r|d|a|c|p|o|w|v| \n");
+// DbgPrint(" | |o|r|r|s|s|t|c|a|b|w|r|l| \n");
+// DbgPrint(" | |w|o|n|v|v|y|c|c|o|n|t|d| \n");
+// DbgPrint(" +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ \n ");
+// pte = PTETOULONG(the_pte);
+//
+// for (j = 0; j < 32; j++) {
+// *(PULONG)& print_pte = pte;
+// DbgPrint(" %lx",print_pte.bitties);
+// pte = pte << 1;
+// }
+// DbgPrint("\n");
+//
+
+}
+#endif //DBG
+
+#if DBG
+
+VOID
+MiDumpWsl ( )
+
+{
+ ULONG i;
+ PMMWSLE wsle;
+
+ DbgPrint("***WSLE cursize %lx frstfree %lx Min %lx Max %lx\n",
+ PsGetCurrentProcess()->Vm.WorkingSetSize,
+ MmWorkingSetList->FirstFree,
+ PsGetCurrentProcess()->Vm.MinimumWorkingSetSize,
+ PsGetCurrentProcess()->Vm.MaximumWorkingSetSize);
+
+ DbgPrint(" quota %lx firstdyn %lx last ent %lx next slot %lx\n",
+ MmWorkingSetList->Quota,
+ MmWorkingSetList->FirstDynamic,
+ MmWorkingSetList->LastEntry,
+ MmWorkingSetList->NextSlot);
+
+ wsle = MmWsle;
+
+ for (i = 0; i < MmWorkingSetList->LastEntry; i++) {
+ DbgPrint(" index %lx %lx\n",i,wsle->u1.Long);
+ wsle++;
+ }
+ return;
+
+}
+
+#endif //DBG
+
+#if 0 //COMMENTED OUT!!!
+VOID
+MiFlushUnusedSections (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    This routine rummages through the PFN database and attempts
+ to close any unused sections.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMPFN LastPfn;
+ PMMPFN Pfn1;
+ PSUBSECTION Subsection;
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+ Pfn1 = MI_PFN_ELEMENT (MmLowestPhysicalPage + 1);
+ LastPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
+
+ while (Pfn1 < LastPfn) {
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 1) {
+ if ((Pfn1->u3.e1.PageLocation == ModifiedPageList) ||
+ (Pfn1->u3.e1.PageLocation == StandbyPageList)) {
+
+ //
+ // Make sure the PTE is not waiting for I/O to complete.
+ //
+
+ if (MI_IS_PFN_DELETED (Pfn1)) {
+
+ Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ MiFlushUnusedSectionInternal (Subsection->ControlArea);
+ }
+ }
+ }
+ Pfn1++;
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+BOOLEAN
+MiFlushUnusedSectionInternal (
+ IN PCONTROL_AREA ControlArea
+ )
+
+{
+ BOOLEAN result;
+ KIRQL OldIrql = APC_LEVEL;
+
+ if ((ControlArea->NumberOfMappedViews != 0) ||
+ (ControlArea->NumberOfSectionReferences != 0)) {
+
+ //
+ // The segment is currently in use.
+ //
+
+ return FALSE;
+ }
+
+ //
+ // The segment has no references, delete it. If the segment
+ // is already being deleted, set the event field in the control
+ // area and wait on the event.
+ //
+
+ if ((ControlArea->u.Flags.BeingDeleted) ||
+ (ControlArea->u.Flags.BeingCreated)) {
+
+ return TRUE;
+ }
+
+ //
+ // Set the being deleted flag and up the number of mapped views
+ // for the segment. Upping the number of mapped views prevents
+ // the segment from being deleted and passed to the deletion thread
+ // while we are forcing a delete.
+ //
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+ ControlArea->NumberOfMappedViews = 1;
+
+ //
+ // This is a page file backed or image Segment. The Segment is being
+ // deleted, remove all references to the paging file and physical memory.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ MiCleanSection (ControlArea);
+
+ LOCK_PFN (OldIrql);
+ return TRUE;
+}
+#endif //0
+
+
+#if DBG
+
+#define ALLOC_SIZE ((ULONG)8*1024)
+#define MM_SAVED_CONTROL 64
+#define MM_KERN_MAP_SIZE 64
+
+#define MM_NONPAGED_POOL_MARK ((PUCHAR)0xfffff123)
+#define MM_PAGED_POOL_MARK ((PUCHAR)0xfffff124)
+#define MM_KERNEL_STACK_MARK ((PUCHAR)0xfffff125)
+
+extern ULONG MmSystemPtesStart[MaximumPtePoolTypes];
+extern ULONG MmSystemPtesEnd[MaximumPtePoolTypes];
+
+typedef struct _KERN_MAP {
+ ULONG StartVa;
+ ULONG EndVa;
+ PLDR_DATA_TABLE_ENTRY Entry;
+} KERN_MAP, *PKERN_MAP;
+
+ULONG
+MiBuildKernelMap (
+ IN ULONG NumberOfElements,
+ IN OUT PKERN_MAP KernelMap
+ );
+
+NTSTATUS
+MmMemoryUsage (
+ IN PVOID Buffer,
+ IN ULONG Size,
+ IN ULONG Type,
+ OUT PULONG OutLength
+ )
+
+/*++
+
+Routine Description:
+
+ This routine (debugging only) dumps the current memory usage by
+ walking the PFN database.
+
+Arguments:
+
+ Buffer - Supplies a buffer in which to copy the data.
+
+ Size - Supplies the size of the buffer.
+
+ Type - Supplies a value of 0 to dump everything,
+ a value of 1 to dump only valid pages.
+
+ OutLength - Returns how much data was written into the buffer.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMPFN LastPfn;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PSUBSECTION Subsection;
+ KIRQL OldIrql;
+ PSYSTEM_MEMORY_INFORMATION MemInfo;
+ PSYSTEM_MEMORY_INFO Info;
+ PSYSTEM_MEMORY_INFO InfoStart;
+ PSYSTEM_MEMORY_INFO InfoEnd;
+ PUCHAR String;
+ PUCHAR Master;
+ PCONTROL_AREA ControlArea;
+ BOOLEAN Found;
+ BOOLEAN FoundMap;
+ PMDL Mdl;
+ NTSTATUS status = STATUS_SUCCESS;
+ ULONG Length;
+ PEPROCESS Process;
+ PUCHAR End;
+ PCONTROL_AREA SavedControl[MM_SAVED_CONTROL];
+ PSYSTEM_MEMORY_INFO SavedInfo[MM_SAVED_CONTROL];
+ ULONG j;
+ ULONG ControlCount = 0;
+ PUCHAR PagedSection = NULL;
+ ULONG Failed;
+ UCHAR PageFileMapped[] = "PageFile Mapped";
+ UCHAR MetaFile[] = "Fs Meta File";
+ UCHAR NoName[] = "No File Name";
+ UCHAR NonPagedPool[] = "NonPagedPool";
+ UCHAR PagedPool[] = "PagedPool";
+ UCHAR KernelStack[] = "Kernel Stack";
+ PUCHAR NameString;
+ KERN_MAP KernMap[MM_KERN_MAP_SIZE];
+ ULONG KernSize;
+ ULONG VirtualAddress;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+
+ Mdl = MmCreateMdl (NULL, Buffer, Size);
+ try {
+
+ MmProbeAndLockPages (Mdl, KeGetPreviousMode(), IoWriteAccess);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ ExFreePool (Mdl);
+ return GetExceptionCode();
+ }
+
+ MemInfo = MmGetSystemAddressForMdl (Mdl);
+ InfoStart = &MemInfo->Memory[0];
+ InfoEnd = InfoStart;
+ End = (PUCHAR)MemInfo + Size;
+
+ Pfn1 = MI_PFN_ELEMENT (MmLowestPhysicalPage + 1);
+ LastPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
+
+ KernSize = MiBuildKernelMap (MM_KERN_MAP_SIZE, &KernMap[0]);
+
+ LOCK_PFN (OldIrql);
+
+ while (Pfn1 < LastPfn) {
+
+ Info = InfoStart;
+ FoundMap = FALSE;
+
+ if ((Pfn1->u3.e1.PageLocation != FreePageList) &&
+ (Pfn1->u3.e1.PageLocation != ZeroedPageList) &&
+ (Pfn1->u3.e1.PageLocation != BadPageList)) {
+
+ if (Type == 1) {
+ if (Pfn1->u3.e1.PageLocation != ActiveAndValid) {
+ Pfn1++;
+ continue;
+ }
+ }
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 1) {
+ Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ Master = (PUCHAR)Subsection->ControlArea;
+ ControlArea = Subsection->ControlArea;
+ if (!MmIsAddressValid(ControlArea)) {
+ DbgPrint ("Pfnp %lx not found %lx\n",Pfn1 - MmPfnDatabase,
+ (ULONG)Pfn1->PteAddress);
+ Pfn1++;
+ continue;
+ }
+ if (ControlArea->FilePointer != NULL) {
+ if (!MmIsAddressValid(ControlArea->FilePointer)) {
+ Pfn1++;
+ continue;
+ }
+ }
+
+ } else {
+
+ FoundMap = TRUE;
+ VirtualAddress = (ULONG)MiGetVirtualAddressMappedByPte (Pfn1->PteAddress);
+
+ if ((VirtualAddress >= (ULONG)MmPagedPoolStart) &&
+ (VirtualAddress <= (ULONG)MmPagedPoolEnd)) {
+
+ //
+ // This is paged pool, put it in the paged pool cell.
+ //
+
+ Master = MM_PAGED_POOL_MARK;
+
+ } else if ((VirtualAddress >= (ULONG)MmNonPagedPoolStart) &&
+ (VirtualAddress <= (ULONG)MmNonPagedPoolEnd)) {
+
+ //
+ // This is nonpaged pool, put it in the nonpaged pool cell.
+ //
+
+ Master = MM_NONPAGED_POOL_MARK;
+
+ } else {
+ FoundMap = FALSE;
+ for (j=0; j < KernSize; j++) {
+ if ((VirtualAddress >= KernMap[j].StartVa) &&
+ (VirtualAddress < KernMap[j].EndVa)) {
+ Master = (PUCHAR)&KernMap[j];
+ FoundMap = TRUE;
+ break;
+ }
+ }
+ }
+
+ if (!FoundMap) {
+ if (((ULONG)Pfn1->PteAddress >= MmSystemPtesStart[SystemPteSpace]) &&
+ ((ULONG)Pfn1->PteAddress <= MmSystemPtesEnd[SystemPteSpace])) {
+
+ //
+ // This is kernel stack.
+ //
+
+ Master = MM_KERNEL_STACK_MARK;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT (Pfn1->PteFrame);
+ Master = (PUCHAR)Pfn2->PteFrame;
+ if (((ULONG)Master == 0) || ((ULONG)Master > MmHighestPhysicalPage)) {
+ DbgPrint ("Pfn %lx not found %lx\n",Pfn1 - MmPfnDatabase,
+ (ULONG)Pfn1->PteAddress);
+ Pfn1++;
+ continue;
+ }
+ }
+ }
+ }
+
+ //
+ // See if there is already a master info block.
+ //
+
+ Found = FALSE;
+ while (Info < InfoEnd) {
+ if (Info->StringOffset == Master) {
+ Found = TRUE;
+ break;
+ }
+ Info += 1;
+ }
+
+ if (!Found) {
+
+ Info = InfoEnd;
+ InfoEnd += 1;
+ if ((PUCHAR)Info >= ((PUCHAR)InfoStart + Size) - sizeof(SYSTEM_MEMORY_INFO)) {
+ status = STATUS_DATA_OVERRUN;
+ goto Done;
+ }
+
+ RtlZeroMemory (Info, sizeof(*Info));
+ Info->StringOffset = Master;
+ }
+
+ if ((Pfn1->u3.e1.PageLocation == StandbyPageList) ||
+ (Pfn1->u3.e1.PageLocation == TransitionPage)) {
+
+ Info->TransitionCount += 1;
+
+ } else if ((Pfn1->u3.e1.PageLocation == ModifiedPageList) ||
+ (Pfn1->u3.e1.PageLocation == ModifiedNoWritePageList)) {
+ Info->ModifiedCount += 1;
+
+ } else {
+ Info->ValidCount += 1;
+ if (Type == 1) {
+ if ((Pfn1->PteAddress >= MiGetPdeAddress (0x0)) &&
+ (Pfn1->PteAddress <= MiGetPdeAddress (0xFFFFFFFF))) {
+ Info->PageTableCount += 1;
+ }
+ }
+ }
+ if (Type != 1) {
+ if ((Pfn1->PteAddress >= MiGetPdeAddress (0x0)) &&
+ (Pfn1->PteAddress <= MiGetPdeAddress (0xFFFFFFFF))) {
+ Info->PageTableCount += 1;
+ }
+ }
+ }
+ Pfn1++;
+ }
+
+ MemInfo->StringStart = (ULONG)Buffer + (PUCHAR)InfoEnd - (PUCHAR)MemInfo;
+ String = (PUCHAR)InfoEnd;
+
+ //
+ // Process strings...
+ //
+
+ Info = InfoStart;
+ while (Info < InfoEnd) {
+ if (Info->StringOffset > (PUCHAR)0x80000000) {
+
+ //
+ // Make sure this is not stacks or other areas.
+ //
+
+ Length = 0;
+ ControlArea = NULL;
+
+ if (Info->StringOffset == MM_NONPAGED_POOL_MARK) {
+                Length = 12;
+ NameString = NonPagedPool;
+ } else if (Info->StringOffset == MM_PAGED_POOL_MARK) {
+                Length = 9;
+ NameString = PagedPool;
+ } else if (Info->StringOffset == MM_KERNEL_STACK_MARK) {
+                Length = 12;
+ NameString = KernelStack;
+ } else if (((PUCHAR)Info->StringOffset >= (PUCHAR)&KernMap[0]) &&
+ ((PUCHAR)Info->StringOffset <= (PUCHAR)&KernMap[MM_KERN_MAP_SIZE])) {
+
+ DataTableEntry = ((PKERN_MAP)Info->StringOffset)->Entry;
+ NameString = (PUCHAR)DataTableEntry->BaseDllName.Buffer;
+ Length = DataTableEntry->BaseDllName.Length;
+ } else {
+ //
+ // This points to a control area.
+ // Get the file name.
+ //
+
+ ControlArea = (PCONTROL_AREA)(Info->StringOffset);
+ NameString = (PUCHAR)&ControlArea->FilePointer->FileName.Buffer[0];
+ }
+
+ Info->StringOffset = NULL;
+ Failed = TRUE;
+ if (Length == 0) {
+ if (MmIsAddressValid (&ControlArea->FilePointer->FileName.Length)) {
+ Length = ControlArea->FilePointer->FileName.Length;
+ if (Length == 0) {
+ if (ControlArea->u.Flags.NoModifiedWriting) {
+                            Length = 12;
+ NameString = MetaFile;
+ } else if (ControlArea->u.Flags.File == 0) {
+ NameString = PageFileMapped;
+ Length = 16;
+
+ } else {
+ NameString = NoName;
+                            Length = 12;
+ }
+ }
+ }
+ }
+
+ if ((String+Length+2) >= End) {
+ status = STATUS_DATA_OVERRUN;
+ goto Done;
+ }
+ if (MmIsAddressValid (&NameString[0]) &&
+ MmIsAddressValid (&NameString[Length - 1])) {
+ RtlMoveMemory (String,
+ NameString,
+ Length );
+ Info->StringOffset = (PUCHAR)Buffer + ((PUCHAR)String - (PUCHAR)MemInfo);
+ String[Length] = 0;
+ String[Length + 1] = 0;
+ String += Length + 2;
+ Failed = FALSE;
+ }
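+
+            //
+            // The name could not be copied safely (it may be pagable
+            // and cannot be faulted in while the PFN lock is held).
+            // Reference the control area and save it so the name can
+            // be copied after the PFN lock is released (see the
+            // processing at Done below).
+            //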
+ if (Failed && ControlArea) {
+ if (!(ControlArea->u.Flags.BeingCreated ||
+ ControlArea->u.Flags.BeingDeleted) &&
+ (ControlCount < MM_SAVED_CONTROL)) {
+ SavedControl[ControlCount] = ControlArea;
+ SavedInfo[ControlCount] = Info;
+ ControlArea->NumberOfSectionReferences += 1;
+ ControlCount += 1;
+ }
+ }
+
+ } else {
+
+            //
+            // The marker is a page frame number - the page belongs to
+            // a process (or to a page-file backed section).
+            //
+
+ Pfn1 = MI_PFN_ELEMENT (Info->StringOffset);
+ Info->StringOffset = NULL;
+ if ((String+16) >= End) {
+ status = STATUS_DATA_OVERRUN;
+ goto Done;
+ }
+
+ Process = (PEPROCESS)Pfn1->u1.Event;
+ if (Pfn1->PteAddress == MiGetPteAddress (PDE_BASE)) {
+ Info->StringOffset = (PUCHAR)Buffer + ((PUCHAR)String - (PUCHAR)MemInfo);
+ RtlMoveMemory (String,
+ &Process->ImageFileName[0],
+ 16);
+ String += 16;
+ } else {
+
+ Info->StringOffset = PagedSection;
+ if (PagedSection == NULL) {
+ Info->StringOffset = (PUCHAR)Buffer + ((PUCHAR)String - (PUCHAR)MemInfo);
+ RtlMoveMemory (String,
+ &PageFileMapped,
+ 16);
+ PagedSection = Info->StringOffset;
+ String += 16;
+ }
+ }
+ }
+
+ Info += 1;
+ }
+
+Done:
+ UNLOCK_PFN (OldIrql);
+ while (ControlCount != 0) {
+
+ //
+ // Process all the pagable name strings.
+ //
+
+ ControlCount -= 1;
+ ControlArea = SavedControl[ControlCount];
+ Info = SavedInfo[ControlCount];
+ NameString = (PUCHAR)&ControlArea->FilePointer->FileName.Buffer[0];
+ Length = ControlArea->FilePointer->FileName.Length;
+ if (Length == 0) {
+ if (ControlArea->u.Flags.NoModifiedWriting) {
+ Length = 12;
+ NameString = MetaFile;
+ } else if (ControlArea->u.Flags.File == 0) {
+ NameString = PageFileMapped;
+ Length = 16;
+
+ } else {
+ NameString = NoName;
+ Length = 12;
+ }
+ }
+ if ((String+Length+2) >= End) {
+ status = STATUS_DATA_OVERRUN;
+ }
+ if (status != STATUS_DATA_OVERRUN) {
+ RtlMoveMemory (String,
+ NameString,
+ Length );
+ Info->StringOffset = (PUCHAR)Buffer + ((PUCHAR)String - (PUCHAR)MemInfo);
+ String[Length] = 0;
+ String[Length + 1] = 0;
+ String += Length + 2;
+ }
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfSectionReferences -= 1;
+ MiCheckForControlAreaDeletion (ControlArea);
+ UNLOCK_PFN (OldIrql);
+ }
+ *OutLength = ((PUCHAR)String - (PUCHAR)MemInfo);
+ MmUnlockPages (Mdl);
+    ExFreePool (Mdl);
+ return status;
+}
+#else //DBG
+
+NTSTATUS
+MmMemoryUsage (
+ IN PVOID Buffer,
+ IN ULONG Size,
+ IN ULONG Type,
+ OUT PULONG OutLength
+ )
+{
+ return STATUS_NOT_IMPLEMENTED;
+}
+
+#endif //DBG
+
+
+#if DBG
+ULONG
+MiBuildKernelMap (
+ IN ULONG NumberOfElements,
+ IN OUT PKERN_MAP KernelMap
+ )
+
+{
+ PLIST_ENTRY NextEntry;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ ULONG i = 0;
+
+ KeEnterCriticalRegion();
+ ExAcquireResourceShared (&PsLoadedModuleResource, TRUE);
+
+ NextEntry = PsLoadedModuleList.Flink;
+ do {
+
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ KernelMap[i].Entry = DataTableEntry;
+ KernelMap[i].StartVa = (ULONG)DataTableEntry->DllBase;
+ KernelMap[i].EndVa = KernelMap[i].StartVa +
+ (ULONG)DataTableEntry->SizeOfImage;
+ i += 1;
+ if (i == NumberOfElements) {
+ break;
+ }
+ NextEntry = NextEntry->Flink;
+ } while (NextEntry != &PsLoadedModuleList);
+
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+
+ return i;
+}
+#endif //DBG
+
+
+
+#if DBG
+VOID
+MiFlushCache (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine (debugging only) flushes the "cache" by moving
+ all pages from the standby list to the free list. Modified
+    pages are not affected.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ ULONG Page;
+
+ LOCK_PFN (OldIrql);
+
+ while (MmPageLocationList[StandbyPageList]->Total != 0) {
+
+ Page = MiRemovePageFromList (MmPageLocationList[StandbyPageList]);
+
+ //
+ // A page has been removed from the standby list. The
+        // PTE which refers to this page is currently in the transition
+        // state and must have its original contents restored to free
+        // the last reference to this physical page.
+ //
+
+ MiRestoreTransitionPte (Page);
+
+ //
+ // Put the page into the free list.
+ //
+
+ MiInsertPageInList (MmPageLocationList[FreePageList], Page);
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+VOID
+MiDumpReferencedPages (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine (debugging only) dumps all PFN entries which appear
+ to be locked in memory for i/o.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+ PMMPFN PfnLast;
+
+ LOCK_PFN (OldIrql);
+
+ Pfn1 = MI_PFN_ELEMENT (MmLowestPhysicalPage);
+ PfnLast = MI_PFN_ELEMENT (MmHighestPhysicalPage);
+
+ while (Pfn1 <= PfnLast) {
+
+ if ((Pfn1->u2.ShareCount == 0) && (Pfn1->u3.e2.ReferenceCount != 0)) {
+ MiFormatPfn (Pfn1);
+ }
+
+ if (Pfn1->u3.e2.ReferenceCount > 1) {
+ MiFormatPfn (Pfn1);
+ }
+
+ Pfn1 += 1;
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+#endif //DBG
diff --git a/private/ntos/mm/extsect.c b/private/ntos/mm/extsect.c
new file mode 100644
index 000000000..0230d27b8
--- /dev/null
+++ b/private/ntos/mm/extsect.c
@@ -0,0 +1,667 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ extsect.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtExtendSection service.
+
+Author:
+
+ Lou Perazzoli (loup) 8-May-1990
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtExtendSection)
+#pragma alloc_text(PAGE,MmExtendSection)
+#endif
+
+#define MM_NUMBER_OF_PTES_IN_4GB (4*((1024*1024*1024) >> PAGE_SHIFT))
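+
+//
+// For example, with 4K pages (PAGE_SHIFT == 12, as on x86) this is
+// 4 * (0x40000000 >> 12) == 0x100000 PTEs per 4GB, so a section size
+// with HighPart == 1 and LowPart == 0x80000000 requires
+// 0x100000 + 0x80000 == 0x180000 prototype PTEs.
+//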
+
+
+NTSTATUS
+NtExtendSection(
+ IN HANDLE SectionHandle,
+ IN OUT PLARGE_INTEGER NewSectionSize
+ )
+
+/*++
+
+Routine Description:
+
+ This function extends the size of the specified section. If
+ the current size of the section is greater than or equal to the
+ specified section size, the size is not updated.
+
+Arguments:
+
+ SectionHandle - Supplies an open handle to a section object.
+
+ NewSectionSize - Supplies the new size for the section object.
+
+Return Value:
+
+    Returns the status of the operation.
+
+
+--*/
+
+{
+ KPROCESSOR_MODE PreviousMode;
+ PVOID Section;
+ NTSTATUS Status;
+ LARGE_INTEGER CapturedNewSectionSize;
+
+ PAGED_CODE();
+
+ //
+    // Check to make sure the new section size is accessible.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+
+ if (PreviousMode != KernelMode) {
+
+ try {
+
+ ProbeForWrite (NewSectionSize,
+ sizeof(LARGE_INTEGER),
+ sizeof(ULONG ));
+
+ CapturedNewSectionSize = *NewSectionSize;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+ } else {
+
+ CapturedNewSectionSize = *NewSectionSize;
+ }
+
+ //
+ // Reference the section object.
+ //
+
+ Status = ObReferenceObjectByHandle ( SectionHandle,
+ SECTION_EXTEND_SIZE,
+ MmSectionObjectType,
+ PreviousMode,
+ (PVOID *)&Section,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ //
+ // Make sure this section is backed by a file.
+ //
+
+ if (((PSECTION)Section)->Segment->ControlArea->FilePointer == NULL) {
+ ObDereferenceObject (Section);
+ return STATUS_SECTION_NOT_EXTENDED;
+ }
+
+ Status = MmExtendSection (Section, &CapturedNewSectionSize, FALSE);
+
+ ObDereferenceObject (Section);
+
+ //
+ // Update the NewSectionSize field.
+ //
+
+ try {
+
+ //
+ // Return the captured section size.
+ //
+
+ *NewSectionSize = CapturedNewSectionSize;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ NOTHING;
+ }
+
+ return Status;
+}
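+
+//
+// A minimal caller sketch (illustrative only; assumes SectionHandle
+// was opened with SECTION_EXTEND_SIZE access):
+//
+//     LARGE_INTEGER NewSize;
+//     NewSize.QuadPart = 0x200000;    // request a 2MB section
+//     Status = NtExtendSection (SectionHandle, &NewSize);
+//     // on success NewSize receives the resulting section size
+//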
+
+NTSTATUS
+MmExtendSection (
+ IN PVOID SectionToExtend,
+ IN OUT PLARGE_INTEGER NewSectionSize,
+ IN ULONG IgnoreFileSizeChecking
+ )
+
+/*++
+
+Routine Description:
+
+ This function extends the size of the specified section. If
+ the current size of the section is greater than or equal to the
+ specified section size, the size is not updated.
+
+Arguments:
+
+ Section - Supplies a pointer to a referenced section object.
+
+ NewSectionSize - Supplies the new size for the section object.
+
+    IgnoreFileSizeChecking - Supplies the value TRUE if file size
+                             checking should be ignored (i.e., it
+                             is being called from a file system which
+                             has already done the checks). FALSE
+                             if the checks still need to be made.
+
+Return Value:
+
+    Returns the status of the operation.
+
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE ExtendedPtes;
+ MMPTE TempPte;
+ PCONTROL_AREA ControlArea;
+ PSECTION Section;
+ PSUBSECTION LastSubsection;
+ PSUBSECTION ExtendedSubsection;
+ ULONG RequiredPtes;
+ ULONG NumberOfPtes;
+ ULONG PtesUsed;
+ ULONG AllocationSize;
+ LARGE_INTEGER EndOfFile;
+ NTSTATUS Status;
+
+ PAGED_CODE();
+
+ Section = (PSECTION)SectionToExtend;
+
+ //
+ // Make sure the section is really extendable - physical and
+    // image sections are not.
+ //
+
+ ControlArea = Section->Segment->ControlArea;
+
+ if ((ControlArea->u.Flags.PhysicalMemory || ControlArea->u.Flags.Image) ||
+ (ControlArea->FilePointer == NULL)) {
+ return STATUS_SECTION_NOT_EXTENDED;
+ }
+
+ //
+    // Acquire the section extend resource; this blocks other threads from
+ // updating the size at the same time.
+ //
+
+ KeEnterCriticalRegion ();
+ ExAcquireResourceExclusive (&MmSectionExtendResource, TRUE);
+
+ //
+ // If the specified size is less than the current size, return
+ // the current size.
+ //
+
+ NumberOfPtes = BYTES_TO_PAGES(NewSectionSize->LowPart) +
+ (NewSectionSize->HighPart * MM_NUMBER_OF_PTES_IN_4GB);
+
+ if (Section->Segment->ControlArea->u.Flags.WasPurged == 0) {
+
+ if (NewSectionSize->QuadPart <= Section->SizeOfSection.QuadPart) {
+ *NewSectionSize = Section->SizeOfSection;
+ goto ReleaseAndReturnSuccess;
+ }
+ }
+
+ //
+ // If a file handle was specified, set the allocation size of
+ // the file.
+ //
+
+ if (IgnoreFileSizeChecking == FALSE) {
+
+ //
+ // Release the resource so we don't deadlock with the file
+ // system trying to extend this section at the same time.
+ //
+
+ ExReleaseResource (&MmSectionExtendResource);
+
+ //
+ // Get a different resource to single thread query/set operations.
+ //
+
+ ExAcquireResourceExclusive (&MmSectionExtendSetResource, TRUE);
+
+
+ //
+        // Query the file size to see if this file really needs to be extended.
+ //
+
+ Status = FsRtlGetFileSize (Section->Segment->ControlArea->FilePointer,
+ &EndOfFile);
+
+ if (!NT_SUCCESS (Status)) {
+ ExReleaseResource (&MmSectionExtendSetResource);
+ KeLeaveCriticalRegion ();
+ return Status;
+ }
+
+ if (NewSectionSize->QuadPart > EndOfFile.QuadPart) {
+
+ //
+ // Current file is smaller, attempt to set a new end of file.
+ //
+
+ EndOfFile = *NewSectionSize;
+
+ Status = FsRtlSetFileSize (Section->Segment->ControlArea->FilePointer,
+ &EndOfFile);
+
+ if (!NT_SUCCESS (Status)) {
+ ExReleaseResource (&MmSectionExtendSetResource);
+ KeLeaveCriticalRegion ();
+ return Status;
+ }
+ }
+
+ //
+ // Release the query/set resource and reacquire the extend section
+ // resource.
+ //
+
+ ExReleaseResource (&MmSectionExtendSetResource);
+ ExAcquireResourceExclusive (&MmSectionExtendResource, TRUE);
+ }
+
+ //
+ // Find the last subsection.
+ //
+
+ LastSubsection = (PSUBSECTION)(ControlArea + 1);
+
+ while (LastSubsection->NextSubsection != NULL ) {
+ ASSERT (LastSubsection->UnusedPtes == 0);
+ LastSubsection = LastSubsection->NextSubsection;
+ }
+
+ //
+    // Does the structure need to be extended?
+ //
+
+ if (NumberOfPtes <= Section->Segment->TotalNumberOfPtes) {
+
+ //
+ // The segment is already large enough, just update
+ // the section size and return.
+ //
+
+ Section->SizeOfSection = *NewSectionSize;
+ if (Section->Segment->SizeOfSegment.QuadPart < NewSectionSize->QuadPart) {
+
+ //
+ // Only update if it is really bigger.
+ //
+
+ Section->Segment->SizeOfSegment = *NewSectionSize;
+ LastSubsection->EndingSector = (ULONG)(NewSectionSize->QuadPart >>
+ MMSECTOR_SHIFT);
+ LastSubsection->u.SubsectionFlags.SectorEndOffset =
+ NewSectionSize->LowPart & MMSECTOR_MASK;
+ }
+ goto ReleaseAndReturnSuccess;
+ }
+
+ //
+ // Add new structures to the section - locate the last subsection
+ // and add there.
+ //
+
+ RequiredPtes = NumberOfPtes - Section->Segment->TotalNumberOfPtes;
+ PtesUsed = 0;
+
+ if (RequiredPtes < LastSubsection->UnusedPtes) {
+
+ //
+ // There are ample PTEs to extend the section
+ // already allocated.
+ //
+
+ PtesUsed = RequiredPtes;
+ RequiredPtes = 0;
+
+ } else {
+ PtesUsed = LastSubsection->UnusedPtes;
+ RequiredPtes -= PtesUsed;
+
+ }
+
+ LastSubsection->PtesInSubsection += PtesUsed;
+ LastSubsection->UnusedPtes -= PtesUsed;
+ ControlArea->Segment->TotalNumberOfPtes += PtesUsed;
+
+ if (RequiredPtes == 0) {
+
+ //
+        // No extension is necessary; just update the high VBN.
+ //
+
+ LastSubsection->EndingSector = (ULONG)(NewSectionSize->QuadPart >>
+ MMSECTOR_SHIFT);
+ LastSubsection->u.SubsectionFlags.SectorEndOffset =
+ NewSectionSize->LowPart & MMSECTOR_MASK;
+ } else {
+
+ //
+ // An extension is required. Allocate paged pool
+ // and populate it with prototype PTEs.
+ //
+
+ AllocationSize = ROUND_TO_PAGES (RequiredPtes * sizeof(MMPTE));
+
+ ExtendedPtes = (PMMPTE)ExAllocatePoolWithTag (PagedPool,
+ AllocationSize,
+ 'ppmM');
+
+ if (ExtendedPtes == NULL) {
+
+ //
+            // The required pool could not be allocated. Reset
+ // the subsection and control area fields to their
+ // original values.
+ //
+
+ LastSubsection->PtesInSubsection -= PtesUsed;
+ LastSubsection->UnusedPtes += PtesUsed;
+ ControlArea->Segment->TotalNumberOfPtes -= PtesUsed;
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ReleaseAndReturn;
+ }
+
+ //
+ // Allocate an extended subsection descriptor.
+ //
+
+ ExtendedSubsection = (PSUBSECTION)ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(SUBSECTION),
+ 'bSmM'
+ );
+ if (ExtendedSubsection == NULL) {
+
+ //
+            // The required pool could not be allocated. Reset
+ // the subsection and control area fields to their
+ // original values.
+ //
+
+ LastSubsection->PtesInSubsection -= PtesUsed;
+ LastSubsection->UnusedPtes += PtesUsed;
+ ControlArea->Segment->TotalNumberOfPtes -= PtesUsed;
+ ExFreePool (ExtendedPtes);
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ReleaseAndReturn;
+ }
+
+ LastSubsection->EndingSector =
+ ControlArea->Segment->TotalNumberOfPtes <<
+ (PAGE_SHIFT - MMSECTOR_SHIFT);
+
+ ExtendedSubsection->u.LongFlags = 0;
+ ExtendedSubsection->NextSubsection = NULL;
+ ExtendedSubsection->UnusedPtes = (AllocationSize / sizeof(MMPTE)) -
+ RequiredPtes;
+
+ ExtendedSubsection->ControlArea = ControlArea;
+ ExtendedSubsection->PtesInSubsection = RequiredPtes;
+
+ ExtendedSubsection->StartingSector = LastSubsection->EndingSector;
+
+ ExtendedSubsection->EndingSector = (ULONG)(
+ NewSectionSize->QuadPart >>
+ MMSECTOR_SHIFT);
+ ExtendedSubsection->u.SubsectionFlags.SectorEndOffset =
+ NewSectionSize->LowPart & MMSECTOR_MASK;
+
+
+ ExtendedSubsection->SubsectionBase = ExtendedPtes;
+
+ PointerPte = ExtendedPtes;
+ LastPte = ExtendedPtes + (AllocationSize / sizeof(MMPTE));
+
+ if (ControlArea->FilePointer != NULL) {
+ TempPte.u.Long = (ULONG)MiGetSubsectionAddressForPte(ExtendedSubsection);
+ }
+
+ TempPte.u.Soft.Protection = ControlArea->Segment->SegmentPteTemplate.u.Soft.Protection;
+ TempPte.u.Soft.Prototype = 1;
+ ExtendedSubsection->u.SubsectionFlags.Protection = TempPte.u.Soft.Protection;
+
+ while (PointerPte < LastPte) {
+ *PointerPte = TempPte;
+ PointerPte += 1;
+ }
+
+ //
+ // Link this into the list.
+ //
+
+ LastSubsection->NextSubsection = ExtendedSubsection;
+
+ ControlArea->Segment->TotalNumberOfPtes += RequiredPtes;
+ }
+
+ ControlArea->Segment->SizeOfSegment = *NewSectionSize;
+ Section->SizeOfSection = *NewSectionSize;
+
+ReleaseAndReturnSuccess:
+
+ Status = STATUS_SUCCESS;
+
+ReleaseAndReturn:
+
+ ExReleaseResource (&MmSectionExtendResource);
+ KeLeaveCriticalRegion ();
+
+ return Status;
+}
+
+PMMPTE
+FASTCALL
+MiGetProtoPteAddressExtended (
+ IN PMMVAD Vad,
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function calculates the address of the prototype PTE
+ for the corresponding virtual address.
+
+Arguments:
+
+
+    Vad - Supplies a pointer to the virtual address descriptor which
+ encompasses the virtual address.
+
+ VirtualAddress - Supplies the virtual address to locate a prototype PTE
+ for.
+
+Return Value:
+
+ The corresponding prototype PTE address.
+
+--*/
+
+{
+ PSUBSECTION Subsection;
+ PCONTROL_AREA ControlArea;
+ ULONG PteOffset;
+
+ ControlArea = Vad->ControlArea;
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ //
+ // Locate the subsection which contains the First Prototype PTE
+ // for this VAD.
+ //
+
+ while ((Vad->FirstPrototypePte < Subsection->SubsectionBase) ||
+ (Vad->FirstPrototypePte >=
+ &Subsection->SubsectionBase[Subsection->PtesInSubsection])) {
+
+ //
+ // Get the next subsection.
+ //
+
+ Subsection = Subsection->NextSubsection;
+ }
+
+ //
+ // How many PTEs beyond this subsection must we go?
+ //
+
+ PteOffset = (((((ULONG)VirtualAddress - (ULONG)Vad->StartingVa) >>
+ PAGE_SHIFT) +
+ (ULONG)(Vad->FirstPrototypePte - Subsection->SubsectionBase)) -
+ Subsection->PtesInSubsection);
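+
+    //
+    // For example, if VirtualAddress is 5 pages beyond StartingVa,
+    // FirstPrototypePte is entry 2 of this subsection, and this
+    // subsection holds 4 PTEs, then PteOffset is (5 + 2) - 4 == 3,
+    // i.e., the desired PTE is 3 entries into the subsection chain
+    // that follows this one.
+    //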
+
+// DbgPrint("map extended subsection offset = %lx\n",PteOffset);
+
+ ASSERT (PteOffset < 0xF0000000);
+
+ Subsection = Subsection->NextSubsection;
+
+ //
+ // Locate the subsection which contains the prototype PTEs.
+ //
+
+ while (PteOffset >= Subsection->PtesInSubsection) {
+ PteOffset -= Subsection->PtesInSubsection;
+ Subsection = Subsection->NextSubsection;
+ }
+
+ //
+ // The PTEs are in this subsection.
+ //
+
+ ASSERT (PteOffset < Subsection->PtesInSubsection);
+
+ return &Subsection->SubsectionBase[PteOffset];
+
+}
+
+PSUBSECTION
+FASTCALL
+MiLocateSubsection (
+ IN PMMVAD Vad,
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function calculates the address of the subsection
+ for the corresponding virtual address.
+
+ This function only works for mapped files NOT mapped images.
+
+Arguments:
+
+
+    Vad - Supplies a pointer to the virtual address descriptor which
+ encompasses the virtual address.
+
+ VirtualAddress - Supplies the virtual address to locate a prototype PTE
+ for.
+
+Return Value:
+
+ The corresponding prototype subsection.
+
+--*/
+
+{
+ PSUBSECTION Subsection;
+ PCONTROL_AREA ControlArea;
+ ULONG PteOffset;
+
+ ControlArea = Vad->ControlArea;
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ if (Subsection->NextSubsection == NULL) {
+
+ //
+ // There is only one subsection, don't look any further.
+ //
+
+ return Subsection;
+ }
+
+ //
+ // Locate the subsection which contains the First Prototype PTE
+ // for this VAD.
+ //
+
+ while ((Vad->FirstPrototypePte < Subsection->SubsectionBase) ||
+ (Vad->FirstPrototypePte >=
+ &Subsection->SubsectionBase[Subsection->PtesInSubsection])) {
+
+ //
+ // Get the next subsection.
+ //
+
+ Subsection = Subsection->NextSubsection;
+ }
+
+ //
+ // How many PTEs beyond this subsection must we go?
+ //
+
+ PteOffset = ((((ULONG)VirtualAddress - (ULONG)Vad->StartingVa) >>
+ PAGE_SHIFT) +
+ (ULONG)(Vad->FirstPrototypePte - Subsection->SubsectionBase));
+
+ ASSERT (PteOffset < 0xF0000000);
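+
+    //
+    // Note that unlike MiGetProtoPteAddressExtended, this offset is
+    // relative to the subsection containing the first prototype PTE,
+    // so the walk below begins at this subsection rather than the
+    // next one.
+    //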
+
+ //
+ // Locate the subsection which contains the prototype PTEs.
+ //
+
+ while (PteOffset >= Subsection->PtesInSubsection) {
+ PteOffset -= Subsection->PtesInSubsection;
+ Subsection = Subsection->NextSubsection;
+ }
+
+ //
+ // The PTEs are in this subsection.
+ //
+
+ return Subsection;
+}
diff --git a/private/ntos/mm/flushbuf.c b/private/ntos/mm/flushbuf.c
new file mode 100644
index 000000000..274e14568
--- /dev/null
+++ b/private/ntos/mm/flushbuf.c
@@ -0,0 +1,288 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ flushbuf.c
+
+Abstract:
+
+ This module contains the code to flush the write buffer or otherwise
+    synchronize writes on the host processor. It also contains code
+    to flush the instruction cache of a specified process.
+
+Author:
+
+ David N. Cutler 24-Apr-1991
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtFlushWriteBuffer)
+#pragma alloc_text(PAGE,NtFlushInstructionCache)
+#endif
+
+
+NTSTATUS
+NtFlushWriteBuffer (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the write buffer on the current processor.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ STATUS_SUCCESS.
+
+--*/
+
+{
+ PAGED_CODE();
+
+ KeFlushWriteBuffer();
+ return STATUS_SUCCESS;
+}
+
+ULONG
+MiFlushRangeFilter (
+ IN PEXCEPTION_POINTERS ExceptionPointers,
+ IN PVOID *BaseAddress,
+ IN PULONG Length,
+ IN PBOOLEAN Retry
+ )
+
+/*++
+
+Routine Description:
+
+ This is the exception handler used by NtFlushInstructionCache to protect
+ against bad virtual addresses passed to KeSweepIcacheRange. If an
+ access violation occurs, this routine causes NtFlushInstructionCache to
+ restart the sweep at the page following the failing page.
+
+Arguments:
+
+ ExceptionPointers - Supplies exception information.
+
+    BaseAddress - Supplies a pointer to the base address of the region
+ being flushed. If the failing address is not in the last page
+ of the region, this routine updates BaseAddress to point to the
+ next page of the region.
+
+    Length - Supplies a pointer to the length of the region being flushed.
+ If the failing address is not in the last page of the region,
+ this routine updates Length to reflect restarting the flush at
+ the next page of the region.
+
+ Retry - Supplies a pointer to a boolean that the caller has initialized
+ to FALSE. This routine sets this boolean to TRUE if an access
+ violation occurs in a page before the last page of the flush region.
+
+Return Value:
+
+ EXCEPTION_EXECUTE_HANDLER.
+
+--*/
+
+{
+ PEXCEPTION_RECORD ExceptionRecord;
+ ULONG BadVa;
+ ULONG NextVa;
+ ULONG EndVa;
+
+ ExceptionRecord = ExceptionPointers->ExceptionRecord;
+
+ //
+ // If the exception was an access violation, skip the current page of the
+ // region and move to the next page.
+ //
+
+ if ( ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION ) {
+
+ //
+ // Get the failing address, calculate the base address of the next page,
+ // and calculate the address at the end of the region.
+ //
+
+ BadVa = ExceptionRecord->ExceptionInformation[1];
+ NextVa = ROUND_TO_PAGES( BadVa + 1 );
+ EndVa = *(PULONG)BaseAddress + *Length;
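+
+        //
+        // For example, with 4K pages a fault at BadVa 0x12345 yields
+        // NextVa 0x13000, and the sweep resumes there provided that
+        // address is still below EndVa.
+        //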
+
+ //
+ // If the next page didn't wrap, and the next page is below the end of
+ // the region, update Length and BaseAddress appropriately and set Retry
+ // to TRUE to indicate to NtFlushInstructionCache that it should call
+ // KeSweepIcacheRange again.
+ //
+
+ if ( (NextVa > BadVa) && (NextVa < EndVa) ) {
+ *Length = EndVa - NextVa;
+ *BaseAddress = (PVOID)NextVa;
+ *Retry = TRUE;
+ }
+ }
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+NTSTATUS
+NtFlushInstructionCache (
+ IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress OPTIONAL,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the instruction cache for the specified process.
+
+Arguments:
+
+ ProcessHandle - Supplies a handle to the process in which the instruction
+ cache is to be flushed. Must have PROCESS_VM_WRITE access to the
+ specified process.
+
+ BaseAddress - Supplies an optional pointer to base of the region that
+ is flushed.
+
+ Length - Supplies the length of the region that is flushed if the base
+ address is specified.
+
+Return Value:
+
+ STATUS_SUCCESS.
+
+--*/
+
+{
+
+ KPROCESSOR_MODE PreviousMode;
+ PEPROCESS Process;
+ NTSTATUS Status;
+ BOOLEAN Retry;
+ PVOID RangeBase;
+ ULONG RangeLength;
+
+ PAGED_CODE();
+
+ PreviousMode = KeGetPreviousMode();
+
+ //
+ // If the base address is not specified, or the base address is specified
+ // and the length is not zero, then flush the specified instruction cache
+ // range.
+ //
+
+ if ((ARGUMENT_PRESENT(BaseAddress) == FALSE) || (Length != 0)) {
+
+ //
+ // If previous mode is user and the range specified falls in kernel
+ // address space, return an error.
+ //
+
+ if ((ARGUMENT_PRESENT(BaseAddress) != FALSE) &&
+ (PreviousMode != KernelMode)) {
+ try {
+ ProbeForRead(BaseAddress, Length, sizeof(UCHAR));
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+ }
+
+ //
+ // If the specified process is not the current process, then
+ // the process must be attached to during the flush.
+ //
+
+ if (ProcessHandle != NtCurrentProcess()) {
+
+ //
+ // Reference the specified process checking for PROCESS_VM_WRITE
+ // access.
+ //
+
+ Status = ObReferenceObjectByHandle(ProcessHandle,
+ PROCESS_VM_WRITE,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL);
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ //
+ // Attach to the process.
+ //
+
+ KeAttachProcess(&Process->Pcb);
+ }
+
+ //
+ // If the base address is not specified, sweep the entire instruction
+ // cache. If the base address is specified, flush the specified range.
+ //
+
+ if (ARGUMENT_PRESENT(BaseAddress) == FALSE) {
+ KeSweepIcache(FALSE);
+
+ } else {
+
+ //
+ // Parts of the specified range may be invalid. An exception
+ // handler is used to skip over those parts. Before calling
+ // KeSweepIcacheRange, we set Retry to FALSE. If an access
+ // violation occurs in KeSweepIcacheRange, the MiFlushRangeFilter
+ // exception filter is called. It updates RangeBase and
+ // RangeLength to skip over the failing page, and sets Retry to
+ // TRUE. As long as Retry is TRUE, we continue to call
+ // KeSweepIcacheRange.
+ //
+
+ RangeBase = BaseAddress;
+ RangeLength = Length;
+
+ do {
+ Retry = FALSE;
+ try {
+ KeSweepIcacheRange(FALSE, RangeBase, RangeLength);
+ } except(MiFlushRangeFilter(GetExceptionInformation(),
+ &RangeBase,
+ &RangeLength,
+ &Retry)) {
+ if (GetExceptionCode() != STATUS_ACCESS_VIOLATION) {
+ Status = GetExceptionCode();
+ }
+ }
+ } while (Retry != FALSE);
+ }
+
+ //
+ // If the specified process is not the current process, then
+ // detach from it and dereference it.
+ //
+
+ if (ProcessHandle != NtCurrentProcess()) {
+ KeDetachProcess();
+ ObDereferenceObject(Process);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
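+
+//
+// A minimal caller sketch (illustrative only; the names are
+// hypothetical - e.g., after writing code bytes into a process):
+//
+//     Status = NtFlushInstructionCache (ProcessHandle,
+//                                       PatchAddress,
+//                                       PatchLength);
+//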
diff --git a/private/ntos/mm/flushsec.c b/private/ntos/mm/flushsec.c
new file mode 100644
index 000000000..84cfd9c89
--- /dev/null
+++ b/private/ntos/mm/flushsec.c
@@ -0,0 +1,1883 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ flushsec.c
+
+Abstract:
+
+ This module contains the routines which implement the
+    NtFlushVirtualMemory, MmFlushSection and MmPurgeSection services.
+
+Author:
+
+ Lou Perazzoli (loup) 8-May-1990
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+PSUBSECTION
+MiGetSystemCacheSubsection (
+ IN PVOID BaseAddress,
+ IN PEPROCESS Process,
+ OUT PMMPTE *ProtoPte
+ );
+
+VOID
+MiFlushDirtyBitsToPfn (
+ IN PMMPTE PointerPte,
+ IN PMMPTE LastPte,
+ IN PEPROCESS Process,
+ IN BOOLEAN SystemCache
+ );
+
+ULONG
+FASTCALL
+MiCheckProtoPtePageState (
+ IN PMMPTE PrototypePte,
+ IN ULONG PfnLockHeld
+ );
+
+NTSTATUS
+MiFlushSectionInternal (
+ IN PMMPTE StartingPte,
+ IN PMMPTE FinalPte,
+ IN PSUBSECTION FirstSubsection,
+ IN PSUBSECTION LastSubsection,
+ IN ULONG Synchronize,
+ OUT PIO_STATUS_BLOCK IoStatus
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtFlushVirtualMemory)
+#pragma alloc_text(PAGE,MmFlushVirtualMemory)
+#endif
+
+extern POBJECT_TYPE IoFileObjectType;
+
+NTSTATUS
+NtFlushVirtualMemory (
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PULONG RegionSize,
+ OUT PIO_STATUS_BLOCK IoStatus
+ )
+
+/*++
+
+Routine Description:
+
+    This function flushes a range of virtual addresses which map
+ a data file back into the data file if they have been modified.
+
+Arguments:
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - Supplies a pointer to a variable that will receive
+        the base address of the flushed region. The initial value
+ of this argument is the base address of the region of the
+ pages to flush.
+
+ RegionSize - Supplies a pointer to a variable that will receive
+ the actual size in bytes of the flushed region of pages.
+ The initial value of this argument is rounded up to the
+ next host-page-size boundary.
+
+        If this value is specified as zero, the mapped range from
+ the base address to the end of the range is flushed.
+
+ IoStatus - Returns the value of the IoStatus for the last attempted
+ I/O operation.
+
+Return Value:
+
+    Returns the status of the operation.
+
+
+--*/
+
+{
+ PEPROCESS Process;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+ PVOID CapturedBase;
+ ULONG CapturedRegionSize;
+ IO_STATUS_BLOCK TemporaryIoStatus;
+
+ PAGED_CODE();
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+
+ //
+ // Establish an exception handler, probe the specified addresses
+ // for write access and capture the initial values.
+ //
+
+ try {
+
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (RegionSize);
+ ProbeForWriteIoStatus (IoStatus);
+
+ //
+ // Capture the base address.
+ //
+
+ CapturedBase = *BaseAddress;
+
+ //
+ // Capture the region size.
+ //
+
+ CapturedRegionSize = *RegionSize;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+ } else {
+
+ //
+ // Capture the base address.
+ //
+
+ CapturedBase = *BaseAddress;
+
+ //
+ // Capture the region size.
+ //
+
+ CapturedRegionSize = *RegionSize;
+
+ }
+
+ //
+ // Make sure the specified starting and ending addresses are
+ // within the user part of the virtual address space.
+ //
+
+ if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // Invalid base address.
+ //
+
+ return STATUS_INVALID_PARAMETER_2;
+ }
+
+ if (((ULONG)MM_HIGHEST_USER_ADDRESS - (ULONG)CapturedBase) <
+ CapturedRegionSize) {
+
+ //
+        // Invalid region size.
+ //
+
+ return STATUS_INVALID_PARAMETER_2;
+
+ }
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL );
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ Status = MmFlushVirtualMemory (Process,
+ &CapturedBase,
+ &CapturedRegionSize,
+ &TemporaryIoStatus);
+
+ ObDereferenceObject (Process);
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *RegionSize = CapturedRegionSize;
+ *BaseAddress = PAGE_ALIGN (CapturedBase);
+ *IoStatus = TemporaryIoStatus;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ }
+
+ return Status;
+
+}
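+
+//
+// A minimal caller sketch (illustrative only; assumes Base is the
+// base of a mapped view of a data file in the target process):
+//
+//     PVOID Base = ViewBase;
+//     ULONG Size = 0;        // zero flushes to the end of the view
+//     IO_STATUS_BLOCK Iosb;
+//     Status = NtFlushVirtualMemory (ProcessHandle, &Base, &Size, &Iosb);
+//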
+
+NTSTATUS
+MmFlushVirtualMemory (
+ IN PEPROCESS Process,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PULONG RegionSize,
+ OUT PIO_STATUS_BLOCK IoStatus
+ )
+
+/*++
+
+Routine Description:
+
+    This function flushes a range of virtual addresses which map
+    a data file back into the data file if they have been modified.
+
+    Note that the modification test applies to this process's view of
+    the pages; on certain implementations (like the Intel 386), the modify
+ bit is captured in the PTE and not forced to the PFN database
+ until the page is removed from the working set. This means
+ that pages which have been modified by another process will
+ not be flushed to the data file.
+
+Arguments:
+
+ Process - Supplies a pointer to a process object.
+
+ BaseAddress - Supplies a pointer to a variable that will receive
+        the base address of the flushed region. The initial value
+ of this argument is the base address of the region of the
+ pages to flush.
+
+ RegionSize - Supplies a pointer to a variable that will receive
+ the actual size in bytes of the flushed region of pages.
+ The initial value of this argument is rounded up to the
+ next host-page-size boundary.
+
+        If this value is specified as zero, the mapped range from
+ the base address to the end of the range is flushed.
+
+ IoStatus - Returns the value of the IoStatus for the last attempted
+ I/O operation.
+
+Return Value:
+
+    Returns the status of the operation.
+
+
+--*/
+
+{
+ PMMVAD Vad;
+ PVOID EndingAddress;
+ PVOID Va;
+ PEPROCESS CurrentProcess;
+ BOOLEAN SystemCache;
+ PCONTROL_AREA ControlArea;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE LastPte;
+ PMMPTE FinalPte;
+ PSUBSECTION Subsection;
+ PSUBSECTION LastSubsection;
+ NTSTATUS Status;
+ ULONG Synchronize;
+
+ PAGED_CODE();
+
+ //
+ // Determine if the specified base address is within the system
+ // cache and if so, don't attach, the working set mutex is still
+ // required to "lock" paged pool pages (proto PTEs) into the
+ // working set.
+ //
+
+ CurrentProcess = PsGetCurrentProcess ();
+ EndingAddress = (PVOID)(((ULONG)*BaseAddress + *RegionSize - 1) |
+ (PAGE_SIZE - 1));
+ *BaseAddress = PAGE_ALIGN (*BaseAddress);
+
+ if ((*BaseAddress < MmSystemCacheStart) ||
+ (*BaseAddress > MmSystemCacheEnd)) {
+
+ SystemCache = FALSE;
+
+ //
+ // Attach to the specified process.
+ //
+
+ KeAttachProcess (&Process->Pcb);
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (Process->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
+
+ Vad = MiLocateAddress (*BaseAddress);
+
+ if (Vad == (PMMVAD)NULL) {
+
+ //
+ // No Virtual Address Descriptor located for Base Address.
+ //
+
+ Status = STATUS_NOT_MAPPED_VIEW;
+ goto ErrorReturn;
+ }
+
+ if (*RegionSize == 0) {
+ EndingAddress = Vad->EndingVa;
+ }
+
+ if ((Vad->u.VadFlags.PrivateMemory == 1) ||
+ (EndingAddress > Vad->EndingVa)) {
+
+ //
+ // This virtual address descriptor does not refer to a Segment
+ // object.
+ //
+
+ Status = STATUS_NOT_MAPPED_VIEW;
+ goto ErrorReturn;
+ }
+
+ //
+ // Make sure this VAD maps a data file (not an image file).
+ //
+
+ ControlArea = Vad->ControlArea;
+
+ if ((ControlArea->FilePointer == NULL) ||
+ (Vad->u.VadFlags.ImageMap == 1)) {
+
+ //
+ // This virtual address descriptor does not refer to a Segment
+ // object.
+ //
+
+ Status = STATUS_NOT_MAPPED_DATA;
+ goto ErrorReturn;
+ }
+
+ } else {
+
+ SystemCache = TRUE;
+ Process = CurrentProcess;
+ LOCK_WS (Process);
+ }
+
+ PointerPde = MiGetPdeAddress (*BaseAddress);
+ PointerPte = MiGetPteAddress (*BaseAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+ *RegionSize = (ULONG)EndingAddress - (ULONG)*BaseAddress;
+
+ while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No page table page exists for this address.
+ //
+
+ PointerPde += 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ if (PointerPte > LastPte) {
+ break;
+ }
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+ }
+
+ MiFlushDirtyBitsToPfn (PointerPte, LastPte, Process, SystemCache);
+
+ if (SystemCache) {
+
+ //
+ // No VADs exist for the system cache.
+ //
+
+ Subsection = MiGetSystemCacheSubsection (*BaseAddress,
+ Process,
+ &PointerPte);
+ LastSubsection = MiGetSystemCacheSubsection (EndingAddress,
+ Process,
+ &FinalPte);
+ }
+
+ if (!SystemCache) {
+
+ PointerPte = MiGetProtoPteAddress (Vad, *BaseAddress);
+ Subsection = MiLocateSubsection (Vad, *BaseAddress);
+ LastSubsection = MiLocateSubsection (Vad, EndingAddress);
+ FinalPte = MiGetProtoPteAddress (Vad, EndingAddress);
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+ Synchronize = TRUE;
+ } else {
+ UNLOCK_WS (Process);
+ Synchronize = FALSE;
+ }
+
+ //
+ // Release working set mutex, lower IRQL and detach.
+ //
+
+ KeDetachProcess();
+
+ //
+ // If we are going to synchronize the flush, then we better
+ // preacquire the file.
+ //
+
+ if (Synchronize) {
+ FsRtlAcquireFileForCcFlush (ControlArea->FilePointer);
+ }
+
+ //
+ // Flush the PTEs from the specified section.
+ //
+
+ Status = MiFlushSectionInternal (PointerPte,
+ FinalPte,
+ Subsection,
+ LastSubsection,
+ Synchronize,
+ IoStatus);
+
+ //
+ // Release the file if we acquired it.
+ //
+
+ if (Synchronize) {
+ FsRtlReleaseFileForCcFlush (ControlArea->FilePointer);
+ }
+
+ return Status;
+
+ErrorReturn:
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+ KeDetachProcess();
+ return Status;
+
+}
+
+NTSTATUS
+MmFlushSection (
+ IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
+ IN PLARGE_INTEGER Offset,
+ IN ULONG RegionSize,
+ OUT PIO_STATUS_BLOCK IoStatus,
+ IN ULONG AcquireFile
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes to the backing file any modified pages within
+ the specified range of the section.
+
+Arguments:
+
+ SectionObjectPointer - Supplies a pointer to the section objects.
+
+ Offset - Supplies the offset into the section in which to begin
+ flushing pages. If this argument is not present, then the
+ whole section is flushed without regard to the region size
+ argument.
+
+ RegionSize - Supplies the size in bytes to flush. This is rounded
+ to a page multiple.
+
+ IoStatus - Returns the value of the IoStatus for the last attempted
+ I/O operation.
+
+    AcquireFile - Nonzero if the callback should be used to acquire the file.
+
+Return Value:
+
+ Returns status of the operation.
+
+--*/
+
+{
+ PCONTROL_AREA ControlArea;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ KIRQL OldIrql;
+ ULONG PteOffset;
+ PSUBSECTION Subsection;
+ PSUBSECTION LastSubsection;
+ BOOLEAN DeleteSegment = FALSE;
+ PETHREAD CurrentThread;
+ NTSTATUS status;
+ BOOLEAN OldClusterState;
+
+ //
+ // Initialize IoStatus for success, in case we take an early exit.
+ //
+
+ IoStatus->Status = STATUS_SUCCESS;
+ IoStatus->Information = RegionSize;
+
+ LOCK_PFN (OldIrql);
+
+ ControlArea = ((PCONTROL_AREA)(SectionObjectPointer->DataSectionObject));
+
+ if ((ControlArea == NULL) ||
+ (ControlArea->u.Flags.BeingDeleted) ||
+ (ControlArea->u.Flags.BeingCreated) ||
+ (ControlArea->NumberOfPfnReferences == 0)) {
+
+ //
+ // This file no longer has an associated segment or is in the
+ // process of coming or going.
+ // If the number of PFN references is zero, then this control
+        // area does not have any valid or transition pages that need
+ // to be flushed.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+ }
+
+ //
+ // Locate the subsection.
+ //
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ if (!ARGUMENT_PRESENT (Offset)) {
+
+ //
+ // If the offset is not specified, flush the complete file ignoring
+ // the region size.
+ //
+
+ PointerPte = &Subsection->SubsectionBase[0];
+ LastSubsection = Subsection;
+ while (LastSubsection->NextSubsection != NULL) {
+ LastSubsection = LastSubsection->NextSubsection;
+ }
+ LastPte = &LastSubsection->SubsectionBase
+ [LastSubsection->PtesInSubsection - 1];
+ } else {
+
+ PteOffset = (ULONG)(Offset->QuadPart >> PAGE_SHIFT);
+
+ //
+ // Make sure the PTEs are not in the extended part of the
+ // segment.
+ //
+
+ while (PteOffset >= Subsection->PtesInSubsection) {
+ PteOffset -= Subsection->PtesInSubsection;
+ if (Subsection->NextSubsection == NULL) {
+
+ //
+ // Past end of mapping, just return success.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+ }
+ Subsection = Subsection->NextSubsection;
+ }
+
+ ASSERT (PteOffset < Subsection->PtesInSubsection);
+ PointerPte = &Subsection->SubsectionBase[PteOffset];
+
+ //
+ // Locate the address of the last prototype PTE to be flushed.
+ //
+
+ PteOffset += ((RegionSize + BYTE_OFFSET(Offset->LowPart)) - 1) >> PAGE_SHIFT;
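+
+        //
+        // For example, with 4K pages, flushing 0x3000 bytes at file
+        // offset 0x5000 (with the range lying in the first subsection)
+        // starts at prototype PTE 5 and ends at PTE 5 + 2 == 7.
+        //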
+
+ LastSubsection = Subsection;
+
+ while (PteOffset >= LastSubsection->PtesInSubsection) {
+ PteOffset -= LastSubsection->PtesInSubsection;
+ if (LastSubsection->NextSubsection == NULL) {
+ PteOffset = LastSubsection->PtesInSubsection - 1;
+ break;
+ }
+ LastSubsection = LastSubsection->NextSubsection;
+ }
+
+ ASSERT (PteOffset < LastSubsection->PtesInSubsection);
+ LastPte = &LastSubsection->SubsectionBase[PteOffset];
+ }
+
+ //
+ // Up the map view count so the control area cannot be deleted
+ // out from under the call.
+ //
+
+ ControlArea->NumberOfMappedViews += 1;
+
+ UNLOCK_PFN (OldIrql);
+
+ CurrentThread = PsGetCurrentThread();
+
+ //
+ // Indicate that disk verify errors should be returned as exceptions.
+ //
+
+ OldClusterState = CurrentThread->ForwardClusterOnly;
+ CurrentThread->ForwardClusterOnly = TRUE;
+
+ if (AcquireFile) {
+ FsRtlAcquireFileForCcFlush (ControlArea->FilePointer);
+ }
+ status = MiFlushSectionInternal (PointerPte,
+ LastPte,
+ Subsection,
+ LastSubsection,
+ TRUE,
+ IoStatus);
+ if (AcquireFile) {
+ FsRtlReleaseFileForCcFlush (ControlArea->FilePointer);
+ }
+
+ CurrentThread->ForwardClusterOnly = OldClusterState;
+
+ LOCK_PFN (OldIrql);
+
+ ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1);
+ ControlArea->NumberOfMappedViews -= 1;
+
+ //
+ // Check to see if the control area should be deleted. This
+ // will release the PFN lock.
+ //
+
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+
+ return status;
+
+}
+
+NTSTATUS
+MiFlushSectionInternal (
+ IN PMMPTE StartingPte,
+ IN PMMPTE FinalPte,
+ IN PSUBSECTION FirstSubsection,
+ IN PSUBSECTION LastSubsection,
+ IN ULONG Synchronize,
+ OUT PIO_STATUS_BLOCK IoStatus
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes to the backing file any modified pages within
+ the specified range of the section. The parameters describe the
+ section's prototype PTEs (start and end) and the subsections
+    which correspond to the starting and ending PTE.
+
+ Each PTE in the subsection between the specified start and end
+ is examined and if the page is either valid or transition AND
+ the page has been modified, the modify bit is cleared in the PFN
+    database and the page is flushed to its backing file.
+
+Arguments:
+
+ StartingPte - Supplies a pointer to the first prototype PTE to
+ be examined for flushing.
+
+ FinalPte - Supplies a pointer to the last prototype PTE to be
+ examined for flushing.
+
+ FirstSubsection - Supplies the subsection that contains the
+ StartingPte.
+
+ LastSubsection - Supplies the subsection that contains the
+ FinalPte.
+
+    Synchronize - Supplies TRUE if synchronization with all threads
+ doing flush operations to this section should occur.
+
+ IoStatus - Returns the value of the IoStatus for the last attempted
+ I/O operation.
+
+Return Value:
+
+ Returns status of the operation.
+
+--*/
+
+{
+ PCONTROL_AREA ControlArea;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE LastWritten;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ KIRQL OldIrql;
+ PMDL Mdl;
+ KEVENT IoEvent;
+ PSUBSECTION Subsection;
+ ULONG Amount;
+ PULONG Page;
+ ULONG PageFrameIndex;
+ PULONG EndingPage;
+ PULONG LastPage;
+ NTSTATUS Status;
+ LARGE_INTEGER StartingOffset;
+ LARGE_INTEGER TempOffset;
+ BOOLEAN WriteNow = FALSE;
+ ULONG MdlHack[(sizeof(MDL)/4) + (MM_MAXIMUM_DISK_IO_SIZE / PAGE_SIZE) + 1];
+
+ IoStatus->Status = STATUS_SUCCESS;
+ IoStatus->Information = 0;
+ Mdl = (PMDL)&MdlHack[0];
+
+ KeInitializeEvent (&IoEvent, NotificationEvent, FALSE);
+
+ FinalPte += 1; // Point to 1 past the last one.
+
+ LastWritten = NULL;
+ EndingPage = (PULONG)(Mdl + 1) + MmModifiedWriteClusterSize;
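+
+    //
+    // The page frame array begins immediately after the MDL header;
+    // a cluster is limited to MmModifiedWriteClusterSize pages per
+    // write.
+    //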
+ LastPage = NULL;
+ Subsection = FirstSubsection;
+ PointerPte = StartingPte;
+ ControlArea = FirstSubsection->ControlArea;
+
+ LOCK_PFN (OldIrql);
+
+ if (ControlArea->NumberOfPfnReferences == 0) {
+
+ //
+        // No transition or valid prototype PTEs present, hence
+ // no need to flush anything.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+ }
+
+ while ((Synchronize) && (ControlArea->FlushInProgressCount != 0)) {
+
+ //
+ // Another thread is currently performing a flush operation on
+ // this file. Wait for that flush to complete.
+ //
+
+ KeEnterCriticalRegion();
+ ControlArea->u.Flags.CollidedFlush = 1;
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject (&MmCollidedFlushEvent,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ &MmOneSecond);
+ KeLeaveCriticalRegion();
+ LOCK_PFN (OldIrql);
+ }
+
+ ControlArea->FlushInProgressCount += 1;
+
+ for (;;) {
+
+ if (LastSubsection != Subsection) {
+
+ //
+ // Flush to the last PTE in this subsection.
+ //
+
+ LastPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
+ } else {
+
+ //
+ // Flush to the end of the range.
+ //
+
+ LastPte = FinalPte;
+ }
+
+ //
+ // If the prototype PTEs are paged out or have a share count
+ // of 1, they cannot contain any transition or valid PTEs.
+ //
+
+ if (!MiCheckProtoPtePageState(PointerPte, TRUE)) {
+ PointerPte = (PMMPTE)(((ULONG)PointerPte | (PAGE_SIZE - 1)) + 1);
+ }
+
+ while (PointerPte < LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ //
+ // We are on a page boundary, make sure this PTE is resident.
+ //
+
+ if (!MiCheckProtoPtePageState(PointerPte, TRUE)) {
+ PointerPte = (PMMPTE)((ULONG)PointerPte + PAGE_SIZE);
+
+ //
+ // If there are dirty pages to be written, write them
+ // now as we are skipping over PTEs.
+ //
+
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ goto CheckForWrite;
+ }
+ continue;
+ }
+ }
+
+ PteContents = *PointerPte;
+
+ if ((PteContents.u.Hard.Valid == 1) ||
+ ((PteContents.u.Soft.Prototype == 0) &&
+ (PteContents.u.Soft.Transition == 1))) {
+
+ //
+ // Prototype PTE in transition, there are 3 possible cases:
+ // 1. The page is part of an image which is shareable and
+ // refers to the paging file - dereference page file
+ // space and free the physical page.
+ // 2. The page refers to the segment but is not modified -
+                //       free the physical page.
+ // 3. The page refers to the segment and is modified -
+ // write the page to the file and free the physical page.
+ //
+
+ if (PteContents.u.Hard.Valid == 1) {
+ PageFrameIndex = PteContents.u.Hard.PageFrameNumber;
+ } else {
+ PageFrameIndex = PteContents.u.Trans.PageFrameNumber;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn1->OriginalPte.u.Soft.Prototype == 1);
+ ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
+
+ //
+                // If the page is modified OR a write is in progress,
+                // flush it. The write-in-progress case catches problems
+                // where the modified page writer continually writes a
+                // page and gets errors writing it; by writing pages
+                // in this state, the error will be propagated back to
+                // the caller.
+ //
+
+ if ((Pfn1->u3.e1.Modified == 1) ||
+ (Pfn1->u3.e1.WriteInProgress)) {
+
+ if (LastWritten == NULL) {
+
+ //
+ // This is the first page of a cluster, initialize
+ // the MDL, etc.
+ //
+
+ LastPage = (PULONG)(Mdl + 1);
+
+ //
+                        // Calculate the offset to write into the file.
+ // offset = base + ((thispte - basepte) << PAGE_SHIFT)
+ //
+
+ StartingOffset.QuadPart = MI_STARTING_OFFSET (
+ Subsection,
+ Pfn1->PteAddress);
+ MI_INITIALIZE_ZERO_MDL (Mdl);
+
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+ Mdl->StartVa =
+ (PVOID)(Pfn1->u3.e1.PageColor << PAGE_SHIFT);
+ Mdl->Size = (CSHORT)(sizeof(MDL) +
+ (sizeof(ULONG) * MmModifiedWriteClusterSize));
+ }
+
+ LastWritten = PointerPte;
+ Mdl->ByteCount += PAGE_SIZE;
+ if (Mdl->ByteCount == (PAGE_SIZE * MmModifiedWriteClusterSize)) {
+ WriteNow = TRUE;
+ }
+
+ if (PteContents.u.Hard.Valid == 0) {
+
+ //
+ // The page is in transition.
+ //
+
+ MiUnlinkPageFromList (Pfn1);
+ }
+
+ //
+ // Clear the modified bit for this page.
+ //
+
+ Pfn1->u3.e1.Modified = 0;
+
+ //
+ // Up the reference count for the physical page as there
+ // is I/O in progress.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ *LastPage = PageFrameIndex;
+ LastPage += 1;
+ } else {
+
+ //
+ // This page was not modified and therefore ends the
+ // current write cluster if any. Set WriteNow to TRUE
+ // if there is a cluster being built.
+ //
+
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ }
+ }
+ } else {
+
+ //
+ // This page was not modified and therefore ends the
+ // current write cluster if any. Set WriteNow to TRUE
+ // if there is a cluster being built.
+ //
+
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ }
+ }
+
+ PointerPte += 1;
+
+CheckForWrite:
+
+ //
+ // Write the current cluster if it is complete,
+ // full, or the loop is now complete.
+ //
+
+ if ((WriteNow) ||
+ ((PointerPte == LastPte) && (LastWritten != NULL))) {
+
+ //
+ // Issue the write request.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ WriteNow = FALSE;
+
+ KeClearEvent (&IoEvent);
+
+ //
+ // Make sure the write does not go past the
+                // end of the file (the segment size).
+ //
+
+ TempOffset.QuadPart =
+ ((LONGLONG)Subsection->EndingSector << MMSECTOR_SHIFT) +
+ Subsection->u.SubsectionFlags.SectorEndOffset;
+
+ if ((StartingOffset.QuadPart + Mdl->ByteCount) >
+ TempOffset.QuadPart) {
+
+ ASSERT ((ULONG)(TempOffset.QuadPart -
+ StartingOffset.QuadPart) >
+ (Mdl->ByteCount - PAGE_SIZE));
+
+ Mdl->ByteCount = (ULONG)(TempOffset.QuadPart -
+ StartingOffset.QuadPart);
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_FLUSH_SECTION) {
+ DbgPrint("flush page write begun %lx\n",
+ Mdl->ByteCount);
+ }
+#endif //DBG
+
+ Status = IoSynchronousPageWrite (ControlArea->FilePointer,
+ Mdl,
+ &StartingOffset,
+ &IoEvent,
+ IoStatus );
+
+ //
+ // If success is returned, wait for the i/o event to be set.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ KeWaitForSingleObject( &IoEvent,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ //
+ // Otherwise, copy the error to the IoStatus, for error
+ // handling below.
+ //
+
+ } else {
+ IoStatus->Status = Status;
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ Page = (PULONG)(Mdl + 1);
+
+ LOCK_PFN (OldIrql);
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) != 0) {
+
+ //
+                    // The next PTE is in the same page of prototype
+                    // PTEs; make sure that page did not leave memory
+                    // while the I/O was in progress.
+ //
+
+ MiMakeSystemAddressValidPfn (PointerPte);
+ }
+
+ if (NT_SUCCESS(IoStatus->Status)) {
+
+ //
+ // I/O complete successfully, unlock pages.
+ //
+
+ while (Page < LastPage) {
+
+ Pfn2 = MI_PFN_ELEMENT (*Page);
+ MiDecrementReferenceCount (*Page);
+ Page += 1;
+ }
+ } else {
+
+ //
+ // I/O complete unsuccessfully, unlock pages
+ // return error status.
+ //
+
+ Amount = PAGE_SIZE;
+ while (Page < LastPage) {
+
+ Pfn2 = MI_PFN_ELEMENT (*Page);
+
+ //
+ // There is a byte count in the information
+ // field.
+
+ if (IoStatus->Information < Amount) {
+ Pfn2->u3.e1.Modified = 1;
+ }
+
+ MiDecrementReferenceCount (*Page);
+ Page += 1;
+ Amount += PAGE_SIZE;
+ }
+
+ //
+ // Calculate how much was written thus far
+ // and add that to the information field
+ // of the IOSB.
+ //
+
+ //
+ // There is a byte count in the information
+ // field.
+
+ IoStatus->Information +=
+ (((LastWritten - StartingPte) << PAGE_SHIFT) -
+ Mdl->ByteCount);
+
+ goto ErrorReturn;
+ }
+
+ //
+ // As the PFN lock has been released and
+ // reacquired, do this loop again as the
+ // PTE may have changed state.
+ //
+
+ LastWritten = NULL;
+ }
+
+ } //end while
+
+ if (LastSubsection != Subsection) {
+ Subsection = Subsection->NextSubsection;
+ PointerPte = Subsection->SubsectionBase;
+
+ } else {
+
+ //
+ // The last range has been flushed, exit the top FOR loop
+ // and return.
+ //
+
+ break;
+ }
+
+ } //end for
+
+ ASSERT (LastWritten == NULL);
+
+ErrorReturn:
+
+ ControlArea->FlushInProgressCount -= 1;
+ if ((ControlArea->u.Flags.CollidedFlush == 1) &&
+ (ControlArea->FlushInProgressCount == 0)) {
+ ControlArea->u.Flags.CollidedFlush = 0;
+ KePulseEvent (&MmCollidedFlushEvent, 0, FALSE);
+ }
+ UNLOCK_PFN (OldIrql);
+ return IoStatus->Status;
+}
+
+BOOLEAN
+MmPurgeSection (
+ IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
+ IN PLARGE_INTEGER Offset,
+ IN ULONG RegionSize,
+ IN ULONG IgnoreCacheViews
+ )
+
+/*++
+
+Routine Description:
+
+ This function determines if any views of the specified section
+ are mapped, and if not, purges a valid pages (even modified ones)
+ from the specified section and returns any used pages to the free
+ list. This is accomplished by examining the prototype PTEs
+ from the specified offset to the end of the section, and if
+ any prototype PTEs are in the transition state, putting the
+ prototype PTE back into its original state and putting the
+ physical page on the free list.
+
+ NOTE:
+
+ If there is an I/O operation ongoing for one of the pages,
+ that page is eliminated from the segment and allowed to "float"
+ until the i/o is complete. Once the share count goes to zero
+ the page will be added to the free page list.
+
+Arguments:
+
+ SectionObjectPointer - Supplies a pointer to the section objects.
+
+ Offset - Supplies the offset into the section in which to begin
+ purging pages. If this argument is not present, then the
+ whole section is purged without regard to the region size
+ argument.
+
+
+ RegionSize - Supplies the size of the region to purge. If this
+ is specified as zero and Offset is specified, the
+ region from Offset to the end of the file is purged.
+
+ Note: The largest value acceptable for RegionSize is
+          0xFFFF0000.
+
+ IgnoreCacheViews - Supplies FALSE if mapped views in the system
+ cache should cause the function to return FALSE.
+ This is the normal case.
+ Supplies TRUE if mapped views should be ignored
+ and the flush should occur. NOTE THAT IF TRUE
+ IS SPECIFIED AND ANY DATA PURGED IS CURRENTLY MAPPED
+ AND VALID A BUGCHECK WILL OCCUR!!
+
+Return Value:
+
+ Returns TRUE if either no section exists for the file object or
+ the section is not mapped and the purge was done, FALSE otherwise.
+
+ Note that FALSE is returned if during the purge operation, a page
+ could not be purged due to a non-zero reference count.
+
+--*/
+
+{
+ PCONTROL_AREA ControlArea;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE FinalPte;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+ ULONG PteOffset;
+ PSUBSECTION Subsection;
+ PSUBSECTION LastSubsection;
+ LARGE_INTEGER LocalOffset;
+ BOOLEAN DeleteSegment = FALSE;
+ BOOLEAN LockHeld;
+ BOOLEAN ReturnValue;
+#if DBG
+ ULONG LastLocked = 0;
+#endif //DBG
+
+ //
+ // Capture caller's file size, since we may modify it.
+ //
+
+ if (ARGUMENT_PRESENT(Offset)) {
+
+ LocalOffset = *Offset;
+ Offset = &LocalOffset;
+ }
+
+ //
+ // See if we can truncate this file to where the caller wants
+ // us to.
+ //
+
+ if (!MmCanFileBeTruncatedInternal(SectionObjectPointer, Offset, &OldIrql)) {
+ return FALSE;
+ }
+
+ //
+ // PFN LOCK IS NOW HELD!
+ //
+
+ ControlArea = (PCONTROL_AREA)(SectionObjectPointer->DataSectionObject);
+ if (ControlArea == NULL) {
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+
+ //
+ // Even though MmCanFileBeTruncatedInternal returned TRUE, there could
+ // still be a system cache mapped view. We cannot truncate while
+ // the Cache Manager has a view mapped.
+ //
+
+ } else if ((IgnoreCacheViews == FALSE) &&
+ (ControlArea->NumberOfSystemCacheViews != 0)) {
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+ }
+
+ //
+ // Purge the section - locate the subsection which
+ // contains the PTEs.
+ //
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ if (!ARGUMENT_PRESENT (Offset)) {
+
+ //
+ // If the offset is not specified, flush the complete file ignoring
+ // the region size.
+ //
+
+ PointerPte = &Subsection->SubsectionBase[0];
+ RegionSize = 0;
+
+ } else {
+
+ PteOffset = (ULONG)(Offset->QuadPart >> PAGE_SHIFT);
+
+ //
+ // Make sure the PTEs are not in the extended part of the
+ // segment.
+ //
+
+ while (PteOffset >= Subsection->PtesInSubsection) {
+ PteOffset -= Subsection->PtesInSubsection;
+ Subsection = Subsection->NextSubsection;
+ if (Subsection == NULL) {
+
+ //
+ // The offset must be equal to the size of
+                // the section; don't purge anything, just return.
+ //
+
+ //ASSERT (PteOffset == 0);
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+ }
+ }
+
+ ASSERT (PteOffset < Subsection->PtesInSubsection);
+ PointerPte = &Subsection->SubsectionBase[PteOffset];
+ }
+
+
+ //
+ // Locate the address of the last prototype PTE to be flushed.
+ //
+
+ if (RegionSize == 0) {
+
+ //
+ // Flush to end of section.
+ //
+
+ LastSubsection = Subsection;
+ while (LastSubsection->NextSubsection != NULL) {
+ LastSubsection = LastSubsection->NextSubsection;
+ }
+
+ //
+ // Set the final pte to 1 beyond the last page.
+ //
+
+ FinalPte = &LastSubsection->SubsectionBase
+ [LastSubsection->PtesInSubsection];
+ } else {
+
+ //
+ // Calculate the end of the region.
+ //
+
+ PteOffset +=
+ ((RegionSize + BYTE_OFFSET(Offset->LowPart)) - 1) >> PAGE_SHIFT;
+
+ LastSubsection = Subsection;
+
+ while (PteOffset >= LastSubsection->PtesInSubsection) {
+ PteOffset -= LastSubsection->PtesInSubsection;
+ if (LastSubsection->NextSubsection == NULL) {
+ PteOffset = LastSubsection->PtesInSubsection - 1;
+ break;
+ }
+ LastSubsection = LastSubsection->NextSubsection;
+ }
+
+ ASSERT (PteOffset < LastSubsection->PtesInSubsection);
+
+ //
+ // Point final PTE to 1 beyond the end.
+ //
+
+ FinalPte = &LastSubsection->SubsectionBase[PteOffset + 1];
+ }
+
+ //
+ // Increment the number of mapped views to
+ // prevent the section from being deleted while the purge is
+ // in progress.
+ //
+
+ ControlArea->NumberOfMappedViews += 1;
+
+ //
+ // Set being purged so no one can map a view
+ // while the purge is going on.
+ //
+
+ ControlArea->u.Flags.BeingPurged = 1;
+ ControlArea->u.Flags.WasPurged = 1;
+
+ UNLOCK_PFN (OldIrql);
+ LockHeld = FALSE;
+ ReturnValue = TRUE;
+
+ for (;;) {
+
+ if (LastSubsection != Subsection) {
+
+ //
+ // Flush to the last PTE in this subsection.
+ //
+
+ LastPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
+ } else {
+
+ //
+ // Flush to the end of the range.
+ //
+
+ LastPte = FinalPte;
+ }
+
+ //
+ // If the page table page containing the PTEs is not
+    // resident, then no PTEs can be in the valid or transition
+ // state! Skip over the PTEs.
+ //
+
+ if (!MiCheckProtoPtePageState(PointerPte, LockHeld)) {
+ PointerPte = (PMMPTE)(((ULONG)PointerPte | (PAGE_SIZE - 1)) + 1);
+ }
+
+ while (PointerPte < LastPte) {
+
+ //
+ // If the page table page containing the PTEs is not
+            // resident, then no PTEs can be in the valid or transition
+ // state! Skip over the PTEs.
+ //
+
+ if (!MiCheckProtoPtePageState(PointerPte, LockHeld)) {
+ PointerPte = (PMMPTE)((ULONG)PointerPte + PAGE_SIZE);
+ continue;
+ }
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // A valid PTE was found, it must be mapped in the
+ // system cache. Just exit the loop and return FALSE
+ // and let the caller fix this.
+ //
+
+ ReturnValue = FALSE;
+ break;
+ }
+
+ if ((PteContents.u.Soft.Prototype == 0) &&
+ (PteContents.u.Soft.Transition == 1)) {
+
+ if (!LockHeld) {
+ LockHeld = TRUE;
+ LOCK_PFN (OldIrql);
+ MiMakeSystemAddressValidPfn (PointerPte);
+ continue;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ ASSERT (Pfn1->OriginalPte.u.Soft.Prototype == 1);
+ ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
+
+#if DBG
+ if ((Pfn1->u3.e2.ReferenceCount != 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+
+ //
+ // There must be an I/O in progress on this
+ // page.
+ //
+
+ if (PteContents.u.Trans.PageFrameNumber != LastLocked) {
+ UNLOCK_PFN (OldIrql);
+
+ DbgPrint("MM:PURGE - page %lx locked, file:%Z\n",
+ PteContents.u.Trans.PageFrameNumber,
+ &ControlArea->FilePointer->FileName
+ );
+ LastLocked = PteContents.u.Trans.PageFrameNumber;
+ //DbgBreakPoint();
+ LOCK_PFN (OldIrql);
+ MiMakeSystemAddressValidPfn (PointerPte);
+ continue;
+ }
+ }
+#endif //DBG
+
+ //
+                // If the modified page writer has the page locked for I/O,
+                // wait for the I/Os to be completed and the pages
+                // to be unlocked. This eliminates a race condition
+ // when the modified page writer locks the pages, then
+ // a purge occurs and completes before the mapped
+ // writer thread runs.
+ //
+
+ if (Pfn1->u3.e1.WriteInProgress == 1) {
+ ASSERT (ControlArea->ModifiedWriteCount != 0);
+ ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
+
+ ControlArea->u.Flags.SetMappedFileIoComplete = 1;
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(&MmMappedFileIoComplete,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+ LOCK_PFN (OldIrql);
+ MiMakeSystemAddressValidPfn (PointerPte);
+ continue;
+ }
+
+ if (Pfn1->u3.e1.ReadInProgress == 1) {
+
+ //
+ // The page currently is being read in from the
+ // disk. Treat this just like a valid PTE and
+ // return false.
+ //
+
+ ReturnValue = FALSE;
+ break;
+ }
+
+ ASSERT (!((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Transition == 1)));
+
+ *PointerPte = Pfn1->OriginalPte;
+
+ ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
+
+ ControlArea->NumberOfPfnReferences -= 1;
+ ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
+
+ MiUnlinkPageFromList (Pfn1);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+ // If the reference count for the page is zero, insert
+            // it into the free page list, otherwise leave it alone
+ // and when the reference count is decremented to zero
+ // the page will go to the free list.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PteContents.u.Trans.PageFrameNumber);
+ }
+ }
+ PointerPte += 1;
+
+ if ((((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) &&
+ (LockHeld)) {
+
+ //
+ // Unlock PFN so large requests will not block other
+ // threads on MP systems.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ LockHeld = FALSE;
+ }
+
+ } //end while
+
+ if (LockHeld) {
+ UNLOCK_PFN (OldIrql);
+ LockHeld = FALSE;
+ }
+
+ if ((LastSubsection != Subsection) && (ReturnValue)) {
+
+ //
+ // Get the next subsection in the list.
+ //
+
+ Subsection = Subsection->NextSubsection;
+ PointerPte = Subsection->SubsectionBase;
+
+ } else {
+
+ //
+ // The last range has been flushed, exit the top FOR loop
+ // and return.
+ //
+
+ break;
+ }
+ } //end for
+
+ LOCK_PFN (OldIrql);
+
+ ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1);
+ ControlArea->NumberOfMappedViews -= 1;
+
+ ControlArea->u.Flags.BeingPurged = 0;
+
+ //
+ // Check to see if the control area should be deleted. This
+ // will release the PFN lock.
+ //
+
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+ return ReturnValue;
+}
+
+BOOLEAN
+MmFlushImageSection (
+ IN PSECTION_OBJECT_POINTERS SectionPointer,
+ IN MMFLUSH_TYPE FlushType
+ )
+
+/*++
+
+Routine Description:
+
+ This function determines if any views of the specified image section
+ are mapped, and if not, flushes valid pages (even modified ones)
+ from the specified section and returns any used pages to the free
+ list. This is accomplished by examining the prototype PTEs
+ from the specified offset to the end of the section, and if
+ any prototype PTEs are in the transition state, putting the
+ prototype PTE back into its original state and putting the
+ physical page on the free list.
+
+Arguments:
+
+    SectionPointer - Supplies a pointer to the section object pointers
+ within the FCB.
+
+ FlushType - Supplies the type of flush to check for. One of
+ MmFlushForDelete or MmFlushForWrite.
+
+Return Value:
+
+ Returns TRUE if either no section exists for the file object or
+ the section is not mapped and the purge was done, FALSE otherwise.
+
+--*/
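+
+//
+// Illustrative caller sketch (names are hypothetical): before letting
+// a write open succeed on an executable file, a file system might check:
+//
+//     if (!MmFlushImageSection (&Fcb->SectionObjectPointers,
+//                               MmFlushForWrite)) {
+//         return STATUS_SHARING_VIOLATION;
+//     }
+//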
+
+{
+ PCONTROL_AREA ControlArea;
+ KIRQL OldIrql;
+ ULONG state;
+
+ if (FlushType == MmFlushForDelete) {
+
+ //
+ // Do a quick check to see if there are any mapped views for
+ // the data section. If there are, just return FALSE.
+ //
+
+ LOCK_PFN (OldIrql);
+ ControlArea = (PCONTROL_AREA)(SectionPointer->DataSectionObject);
+ if (ControlArea != NULL) {
+ if ((ControlArea->NumberOfUserReferences != 0) ||
+ (ControlArea->u.Flags.BeingCreated)) {
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+ }
+ }
+ UNLOCK_PFN (OldIrql);
+ }
+
+ //
+ // Check the status of the control area, if the control area is in use
+ // or the control area is being deleted, this operation cannot continue.
+ //
+
+ state = MiCheckControlAreaStatus (CheckImageSection,
+ SectionPointer,
+ FALSE,
+ &ControlArea,
+ &OldIrql);
+
+ if (ControlArea == NULL) {
+ return (BOOLEAN)state;
+ }
+
+ //
+ // PFN LOCK IS NOW HELD!
+ //
+
+ //
+ // Set the being deleted flag and up the number of mapped views
+ // for the segment. Upping the number of mapped views prevents
+ // the segment from being deleted and passed to the deletion thread
+ // while we are forcing a delete.
+ //
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+ ControlArea->NumberOfMappedViews = 1;
+
+ //
+ // This is a page file backed or image Segment. The Segment is being
+ // deleted, remove all references to the paging file and physical memory.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ MiCleanSection (ControlArea);
+ return TRUE;
+}
+
+VOID
+MiFlushDirtyBitsToPfn (
+ IN PMMPTE PointerPte,
+ IN PMMPTE LastPte,
+ IN PEPROCESS Process,
+ IN BOOLEAN SystemCache
+ )
+
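+/*++
+
+Routine Description:
+
+    Walks the PTEs from PointerPte through LastPte; for each valid PTE
+    whose dirty bit is set, the Modified bit is set in the corresponding
+    PFN database element, the dirty bit is cleared in the PTE, and the
+    TB entry for that virtual address is flushed.  Page table pages
+    which are not resident are skipped.
+
+--*/
+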
+{
+ KIRQL OldIrql;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ PVOID Va;
+ PMMPTE PointerPde;
+
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+ LOCK_PFN (OldIrql);
+
+ while (PointerPte <= LastPte) {
+
+ PteContents = *PointerPte;
+
+ if ((PteContents.u.Hard.Valid == 1) &&
+ (MI_IS_PTE_DIRTY (PteContents))) {
+
+ //
+ // Flush the modify bit to the PFN database.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.Modified = 1;
+
+ MI_SET_PTE_CLEAN (PteContents);
+
+ //
+ // No need to capture the PTE contents as we are going to
+ // write the page anyway and the Modify bit will be cleared
+ // before the write is done.
+ //
+
+ (VOID)KeFlushSingleTb (Va,
+ FALSE,
+ SystemCache,
+ (PHARDWARE_PTE)PointerPte,
+ PteContents.u.Flush);
+ }
+
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ PointerPte += 1;
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ while ((PointerPte <= LastPte) &&
+ (!MiDoesPdeExistAndMakeValid(PointerPde, Process, TRUE))) {
+
+ //
+ // No page table page exists for this address.
+ //
+
+ PointerPde += 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ }
+
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+ }
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+PSUBSECTION
+MiGetSystemCacheSubsection (
+ IN PVOID BaseAddress,
+ IN PEPROCESS Process,
+ OUT PMMPTE *ProtoPte
+ )
+
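+/*++
+
+Routine Description:
+
+    Returns the subsection which maps the specified system cache address,
+    and returns the corresponding prototype PTE via ProtoPte.  The PFN
+    lock is acquired and released within this routine.
+
+--*/
+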
+{
+ KIRQL OldIrql;
+ PMMPTE PointerPte;
+ PSUBSECTION Subsection;
+
+ LOCK_PFN (OldIrql);
+
+ PointerPte = MiGetPteAddress (BaseAddress);
+
+ Subsection = MiGetSubsectionAndProtoFromPte (PointerPte,
+ ProtoPte,
+ Process);
+ UNLOCK_PFN (OldIrql);
+ return Subsection;
+}
+
+
+ULONG
+FASTCALL
+MiCheckProtoPtePageState (
+ IN PMMPTE PrototypePte,
+ IN ULONG PfnLockHeld
+ )
+
+/*++
+
+Routine Description:
+
+ Checks the state of the page containing the specified
+ prototype PTE.
+
+    If the page is valid or transition and has transition or valid prototype
+    PTEs contained within it, TRUE is returned and the page is made valid
+    (if transition). Otherwise return FALSE indicating no prototype
+    PTEs within this page are of interest.
+
+Arguments:
+
+ PrototypePte - Supplies a pointer to a prototype PTE within the page.
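+
+    PfnLockHeld - Supplies TRUE if the caller holds the PFN lock.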
+
+Return Value:
+
+ TRUE if the page containing the proto PTE was made resident.
+    FALSE otherwise.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ MMPTE PteContents;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn;
+
+ PointerPte = MiGetPteAddress(PrototypePte);
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+ PageFrameIndex = PteContents.u.Hard.PageFrameNumber;
+ Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+ if (Pfn->u2.ShareCount != 1) {
+ return TRUE;
+ }
+ } else if ((PteContents.u.Soft.Prototype == 0) &&
+ (PteContents.u.Soft.Transition == 1)) {
+
+ //
+ // Transition, if on standby or modified, return false.
+ //
+
+ PageFrameIndex = PteContents.u.Trans.PageFrameNumber;
+ Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+ if (Pfn->u3.e1.PageLocation >= ActiveAndValid) {
+ if (PfnLockHeld) {
+ MiMakeSystemAddressValidPfn (PrototypePte);
+ }
+ return TRUE;
+ }
+ }
+
+ //
+ // Page is not resident or is on standby / modified list.
+ //
+
+ return FALSE;
+}
diff --git a/private/ntos/mm/forksup.c b/private/ntos/mm/forksup.c
new file mode 100644
index 000000000..1fb726c7c
--- /dev/null
+++ b/private/ntos/mm/forksup.c
@@ -0,0 +1,1853 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ forksup.c
+
+Abstract:
+
+ This module contains the routines which support the POSIX fork operation.
+
+Author:
+
+ Lou Perazzoli (loup) 22-Jul-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+VOID
+MiUpPfnReferenceCount (
+ IN ULONG Page,
+ IN USHORT Count
+ );
+
+VOID
+MiDownPfnReferenceCount (
+ IN ULONG Page
+ );
+
+VOID
+MiUpControlAreaRefs (
+ IN PCONTROL_AREA ControlArea
+ );
+
+ULONG
+MiDoneWithThisPageGetAnother (
+ IN PULONG PageFrameIndex,
+ IN PMMPTE PointerPde,
+ IN PEPROCESS CurrentProcess
+ );
+
+VOID
+MiUpForkPageShareCount(
+ IN PMMPFN PfnForkPtePage
+ );
+
+VOID
+MiUpCloneProtoRefCount (
+ IN PMMCLONE_BLOCK CloneProto,
+ IN PEPROCESS CurrentProcess
+ );
+
+ULONG
+MiHandleForkTransitionPte (
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerNewPte,
+ IN PMMCLONE_BLOCK ForkProtoPte
+ );
+
+VOID
+MiDownShareCountFlushEntireTb (
+ IN ULONG PageFrameIndex
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,MiCloneProcessAddressSpace)
+#endif
+
+
+
+NTSTATUS
+MiCloneProcessAddressSpace (
+ IN PEPROCESS ProcessToClone,
+ IN PEPROCESS ProcessToInitialize,
+ IN ULONG PdePhysicalPage,
+ IN ULONG HyperPhysicalPage
+ )
+
+/*++
+
+Routine Description:
+
+ This routine stands on its head to produce a copy of the specified
+ process's address space in the process to initialize. This
+ is done by examining each virtual address descriptor's inherit
+ attributes. If the pages described by the VAD should be inherited,
+ each PTE is examined and copied into the new address space.
+
+ For private pages, fork prototype PTEs are constructed and the pages
+ become shared, copy-on-write, between the two processes.
+
+
+Arguments:
+
+ ProcessToClone - Supplies the process whose address space should be
+ cloned.
+
+ ProcessToInitialize - Supplies the process whose address space is to
+ be created.
+
+ PdePhysicalPage - Supplies the physical page number of the page directory
+ of the process to initialize.
+
+ HyperPhysicalPage - Supplies the physical page number of the page table
+ page which maps hyperspace for the process to
+ initialize.
+
+Return Value:
+
+    Returns the status of the clone operation (STATUS_SUCCESS on
+    success, an error status otherwise).
+
+Environment:
+
+ Kernel mode, APC's disabled.
+
+--*/
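+
+//
+// Sketch of the private page transformation performed below: the
+// parent's valid PTE is made read-only (copy-on-write), its contents
+// become a fork prototype PTE with a clone reference count of 2, and
+// both the parent's and the child's PTEs are rebuilt to point at that
+// fork prototype PTE.  The first write in either process then faults
+// and materializes a private copy of the page.
+//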
+
+{
+ PEPROCESS CurrentProcess;
+ PMMWSL HyperBase;
+ PMMPTE PdeBase;
+ PMMCLONE_HEADER CloneHeader;
+ PMMCLONE_BLOCK CloneProtos;
+ PMMCLONE_DESCRIPTOR CloneDescriptor;
+ PMMVAD NewVad;
+ PMMVAD Vad;
+ PMMVAD NextVad;
+ PMMVAD *VadList;
+ PMMVAD FirstNewVad;
+ PMMCLONE_DESCRIPTOR *CloneList;
+ PMMCLONE_DESCRIPTOR FirstNewClone;
+ PMMCLONE_DESCRIPTOR Clone;
+ PMMCLONE_DESCRIPTOR NextClone;
+ PMMCLONE_DESCRIPTOR NewClone;
+ ULONG Attached = FALSE;
+ ULONG CloneFailed;
+ ULONG VadInsertFailed;
+ ULONG WorkingSetIndex;
+ PVOID VirtualAddress;
+ NTSTATUS status;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PMMPFN PfnPdPage;
+ MMPTE TempPte;
+ MMPTE PteContents;
+ PMDL Mdl0;
+ PMDL Mdl1;
+ PMDL Mdl2;
+ ULONG MdlHack0[(sizeof(MDL)/4) + 1];
+ ULONG MdlHack1[(sizeof(MDL)/4) + 1];
+ ULONG MdlHack2[(sizeof(MDL)/4) + 1];
+ PULONG MdlPage;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE LastPte;
+ PMMPTE PointerNewPte;
+ PMMPTE PointerNewPde;
+ ULONG PageFrameIndex = 0xFFFFFFFF;
+ PMMCLONE_BLOCK ForkProtoPte;
+ PMMCLONE_BLOCK CloneProto;
+ PMMCLONE_BLOCK LockedForkPte;
+ PMMPTE ContainingPte;
+ ULONG NumberOfForkPtes = 0;
+ ULONG NumberOfPrivatePages;
+ ULONG PageTablePage;
+ ULONG TotalPagedPoolCharge;
+ ULONG TotalNonPagedPoolCharge;
+ PMMPFN PfnForkPtePage;
+ PUSHORT UsedPageTableEntries;
+ ULONG ReleasedWorkingSetMutex;
+ ULONG FirstTime;
+
+#if DBG
+ if (MmDebug & MM_DBG_FORK) {
+ DbgPrint("beginning clone operation process to clone = %lx\n",
+ ProcessToClone);
+ }
+#endif //DBG
+
+ PAGED_CODE();
+
+ if (ProcessToClone != PsGetCurrentProcess()) {
+ Attached = TRUE;
+ KeAttachProcess (&ProcessToClone->Pcb);
+ }
+
+ CurrentProcess = ProcessToClone;
+
+ //
+ // Get the working set mutex and the address creation mutex
+ // of the process to clone. This prevents page faults while we
+ // are examining the address map and prevents virtual address space
+ // from being created or deleted.
+ //
+
+ LOCK_ADDRESS_SPACE (CurrentProcess);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (CurrentProcess->AddressSpaceDeleted != 0) {
+ status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn1;
+ }
+
+ //
+ // Attempt to acquire the needed pool before starting the
+ // clone operation, this allows an easier failure path in
+ // the case of insufficient system resources.
+ //
+
+ NumberOfPrivatePages = CurrentProcess->NumberOfPrivatePages;
+
+ CloneProtos = ExAllocatePoolWithTag (PagedPool, sizeof(MMCLONE_BLOCK) *
+ NumberOfPrivatePages,
+ ' mM');
+ if (CloneProtos == NULL) {
+ status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn1;
+ }
+
+ CloneHeader = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMCLONE_HEADER),
+ ' mM');
+ if (CloneHeader == NULL) {
+ status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn2;
+ }
+
+ CloneDescriptor = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMCLONE_DESCRIPTOR),
+ ' mM');
+ if (CloneDescriptor == NULL) {
+ status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn3;
+ }
+
+ Vad = MiGetFirstVad (CurrentProcess);
+ VadList = &FirstNewVad;
+
+ while (Vad != (PMMVAD)NULL) {
+
+ //
+ // If the VAD does not go to the child, ignore it.
+ //
+
+ if ((Vad->u.VadFlags.PrivateMemory == 1) ||
+ (Vad->u.VadFlags.Inherit == MM_VIEW_SHARE)) {
+
+ NewVad = ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD), ' daV');
+
+ if (NewVad == NULL) {
+
+ //
+ // Unable to allocate pool for all the VADs. Deallocate
+ // all VADs and other pool obtained so far.
+ //
+
+ *VadList = (PMMVAD)NULL;
+ NewVad = FirstNewVad;
+ while (NewVad != NULL) {
+ Vad = NewVad->Parent;
+ ExFreePool (NewVad);
+ NewVad = Vad;
+ }
+ status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn4;
+ }
+ *VadList = NewVad;
+ VadList = &NewVad->Parent;
+ }
+ Vad = MiGetNextVad (Vad);
+ }
+
+ //
+ // Terminate list of VADs for new process.
+ //
+
+ *VadList = (PMMVAD)NULL;
+
+
+ //
+    // Charge the current process the quota for the paged and nonpaged
+ // global structures. This consists of the array of clone blocks
+ // in paged pool and the clone header in non-paged pool.
+ //
+
+ try {
+ PageTablePage = 1;
+ PsChargePoolQuota (CurrentProcess, PagedPool, sizeof(MMCLONE_BLOCK) *
+ NumberOfPrivatePages);
+ PageTablePage = 0;
+ PsChargePoolQuota (CurrentProcess, NonPagedPool, sizeof(MMCLONE_HEADER));
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ if (PageTablePage == 0) {
+ PsReturnPoolQuota (CurrentProcess, PagedPool, sizeof(MMCLONE_BLOCK) *
+ NumberOfPrivatePages);
+ }
+
+ //
+ // Unable to allocate pool for all the VADs. Deallocate
+ // all VADs and other pool obtained so far.
+ //
+
+ NewVad = FirstNewVad;
+ while (NewVad != NULL) {
+ Vad = NewVad->Parent;
+ ExFreePool (NewVad);
+ NewVad = Vad;
+ }
+ status = GetExceptionCode();
+ goto ErrorReturn4;
+ }
+
+ LOCK_WS (CurrentProcess);
+
+ ASSERT (CurrentProcess->ForkInProgress == NULL);
+
+ //
+ // Indicate to the pager that the current process is being
+ // forked. This blocks other threads in that process from
+ // modifying clone blocks counts and contents.
+ //
+
+ CurrentProcess->ForkInProgress = PsGetCurrentThread();
+
+ //
+ // Map the PDE and the hyperspace page into the system address space
+ // This is accomplished by building an MDL to describe the
+ // Page directory and the hyperspace page.
+ //
+
+ Mdl0 = (PMDL)&MdlHack0[0];
+ MdlPage = (PULONG)(Mdl0 + 1);
+
+ MmInitializeMdl(Mdl0, (PVOID)PDE_BASE, PAGE_SIZE);
+ Mdl0->MdlFlags |= MDL_PAGES_LOCKED;
+ *MdlPage = PdePhysicalPage;
+
+ //
+ // Increment the reference count for the pages which are being "locked"
+ // in MDLs. This prevents the page from being reused while it is
+ // being double mapped.
+ //
+
+ MiUpPfnReferenceCount (PdePhysicalPage,1);
+ MiUpPfnReferenceCount (HyperPhysicalPage,2);
+
+ PdeBase = (PMMPTE)MmMapLockedPages (Mdl0, KernelMode);
+
+ Mdl1 = (PMDL)&MdlHack1[0];
+
+ MdlPage = (PULONG)(Mdl1 + 1);
+ MmInitializeMdl(Mdl1, (PVOID)MmWorkingSetList, PAGE_SIZE);
+ Mdl1->MdlFlags |= MDL_PAGES_LOCKED;
+ *MdlPage = HyperPhysicalPage;
+
+ HyperBase = (PMMWSL)MmMapLockedPages (Mdl1, KernelMode);
+
+ PfnPdPage = MI_PFN_ELEMENT (PdePhysicalPage);
+
+ //
+ // Initialize MDL2 to lock and map the hyperspace page so it
+ // can be unlocked in the loop and the end of the loop without
+ // any testing to see if has a valid value the first time through.
+ //
+
+ Mdl2 = (PMDL)&MdlHack2[0];
+ MdlPage = (PULONG)(Mdl2 + 1);
+ MmInitializeMdl(Mdl2, (PVOID)MmWorkingSetList, PAGE_SIZE);
+ Mdl2->MdlFlags |= MDL_PAGES_LOCKED;
+ *MdlPage = HyperPhysicalPage;
+
+ PointerNewPte = (PMMPTE)MmMapLockedPages (Mdl2, KernelMode);
+
+ //
+ // Build new clone prototype PTE block and descriptor, note that
+ // each prototype PTE has a reference count following it.
+ //
+
+ ForkProtoPte = CloneProtos;
+
+ LockedForkPte = ForkProtoPte;
+ MiLockPagedAddress (LockedForkPte, FALSE);
+
+ CloneHeader->NumberOfPtes = NumberOfPrivatePages;
+ CloneHeader->NumberOfProcessReferences = 1;
+ CloneHeader->ClonePtes = CloneProtos;
+
+
+
+ CloneDescriptor->StartingVa = (PVOID)CloneProtos;
+ CloneDescriptor->EndingVa = (PVOID)((ULONG)CloneProtos +
+ NumberOfPrivatePages *
+ sizeof(MMCLONE_BLOCK));
+ CloneDescriptor->NumberOfReferences = 0;
+ CloneDescriptor->NumberOfPtes = NumberOfPrivatePages;
+ CloneDescriptor->CloneHeader = CloneHeader;
+ CloneDescriptor->PagedPoolQuotaCharge = sizeof(MMCLONE_BLOCK) *
+ NumberOfPrivatePages;
+
+ //
+ // Insert the clone descriptor for this fork operation into the
+ // process which was cloned.
+ //
+
+ MiInsertClone (CloneDescriptor);
+
+ //
+ // Examine each virtual address descriptor and create the
+ // proper structures for the new process.
+ //
+
+ Vad = MiGetFirstVad (CurrentProcess);
+ NewVad = FirstNewVad;
+
+ while (Vad != (PMMVAD)NULL) {
+
+ //
+        // Examine the VAD to determine its type and inheritance
+ // attribute.
+ //
+
+ if ((Vad->u.VadFlags.PrivateMemory == 1) ||
+ (Vad->u.VadFlags.Inherit == MM_VIEW_SHARE)) {
+
+ //
+ // The virtual address descriptor should be shared in the
+ // forked process.
+ //
+
+ //
+ // Make a copy of the VAD for the new process, the new vads
+ // are preallocated and linked together through the parent
+ // field.
+ //
+
+ NextVad = NewVad->Parent;
+
+
+ if (Vad->u.VadFlags.PrivateMemory == 1) {
+ *(PMMVAD_SHORT)NewVad = *(PMMVAD_SHORT)Vad;
+ NewVad->u.VadFlags.NoChange = 0;
+ } else {
+ *NewVad = *Vad;
+ }
+
+ if (NewVad->u.VadFlags.NoChange) {
+ if ((NewVad->u2.VadFlags2.OneSecured) ||
+ (NewVad->u2.VadFlags2.MultipleSecured)) {
+
+ //
+ // Eliminate these as the memory was secured
+ // only in this process, not in the new one.
+ //
+
+ NewVad->u2.VadFlags2.OneSecured = 0;
+ NewVad->u2.VadFlags2.MultipleSecured = 0;
+ NewVad->u2.VadFlags2.StoredInVad = 0;
+ NewVad->u3.List.Flink = NULL;
+ NewVad->u3.List.Blink = NULL;
+ }
+ if (NewVad->u2.VadFlags2.SecNoChange == 0) {
+ NewVad->u.VadFlags.NoChange = 0;
+ }
+ }
+ NewVad->Parent = NextVad;
+
+ //
+ // If the VAD refers to a section, up the view count for that
+ // section. This requires the PFN mutex to be held.
+ //
+
+ if ((Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea != (PCONTROL_AREA)NULL)) {
+
+ //
+ // Increment the count of the number of views for the
+ // section object. This requires the PFN mutex to be held.
+ //
+
+ MiUpControlAreaRefs (Vad->ControlArea);
+ }
+
+ //
+ // Examine each PTE and create the appropriate PTE for the
+ // new process.
+ //
+
+ PointerPde = MiGetPdeAddress (Vad->StartingVa);
+ PointerPte = (volatile PMMPTE) MiGetPteAddress (Vad->StartingVa);
+ LastPte = MiGetPteAddress (Vad->EndingVa);
+ FirstTime = TRUE;
+
+ while ((PMMPTE)PointerPte <= LastPte) {
+
+ //
+ // For each PTE contained in the VAD check the page table
+ // page, and if non-zero, make the appropriate modifications
+ // to copy the PTE to the new process.
+ //
+
+ if ((FirstTime) || (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0)) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ while (!MiDoesPdeExistAndMakeValid (PointerPde,
+ CurrentProcess,
+ FALSE)) {
+
+ //
+ // This page directory is empty, go to the next one.
+ //
+
+ PointerPde += 1;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ if ((PMMPTE)PointerPte > LastPte) {
+
+ //
+ // All done with this VAD, exit loop.
+ //
+
+ goto AllDone;
+ }
+ }
+
+ FirstTime = FALSE;
+
+ //
+ // Calculate the address of the pde in the new process's
+ // page table page.
+ //
+
+ PointerNewPde = &PdeBase[MiGetPteOffset(PointerPte)];
+
+ if (PointerNewPde->u.Long == 0) {
+
+ //
+ // No physical page has been allocated yet, get a page
+ // and map it in as a transition page. This will
+ // become a page table page for the new process.
+ //
+
+
+ ReleasedWorkingSetMutex =
+ MiDoneWithThisPageGetAnother (&PageFrameIndex,
+ PointerPde,
+ CurrentProcess);
+ if (ReleasedWorkingSetMutex) {
+ MiDoesPdeExistAndMakeValid (PointerPde,
+ CurrentProcess,
+ FALSE);
+ }
+
+ //
+ // Hand initialize this PFN as normal initialization
+ // would do it for the process whose context we are
+ // attached to.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->OriginalPte = DemandZeroPde;
+ Pfn1->u2.ShareCount = 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->PteAddress = PointerPde;
+ Pfn1->u3.e1.Modified = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->PteFrame = PdePhysicalPage;
+
+ //
+ // Increment the share count for the page containing
+ // this PTE as the PTE is in transition.
+ //
+
+ PfnPdPage->u2.ShareCount += 1;
+
+ //
+ // Put the PDE into the transition state as it is not
+ // really mapped and decrement share count does not
+ // put private pages into transition, only prototypes.
+ //
+
+ *PointerNewPde = TransitionPde;
+
+ //
+ // Make the PTE owned by user mode.
+ //
+
+#ifndef _ALPHA_
+ MI_SET_OWNER_IN_PTE (PointerNewPde, UserMode);
+#endif //_ALPHA_
+ PointerNewPde->u.Trans.PageFrameNumber = PageFrameIndex;
+
+ //
+ // Map the new page table page into the system portion
+ // of the address space. Note that hyperspace
+ // cannot be used as other operations (allocating
+ // nonpaged pool at DPC level) could cause the
+ // hyperspace page being used to be reused.
+ //
+
+ MmUnmapLockedPages (Mdl2->MappedSystemVa, Mdl2);
+
+ MiDownPfnReferenceCount (*MdlPage);
+
+ Mdl2->StartVa = MiGetVirtualAddressMappedByPte(PointerPde);
+
+ *MdlPage = PageFrameIndex;
+
+ MiUpPfnReferenceCount (PageFrameIndex, 1);
+
+ PointerNewPte = (PMMPTE)MmMapLockedPages (Mdl2,
+ KernelMode);
+
+ UsedPageTableEntries = &HyperBase->UsedPageTableEntries
+ [MiGetPteOffset( PointerPte )];
+
+ }
+
+ //
+ // Calculate the address of the new pte to build.
+ // Note that FirstTime could be true, yet the page
+ // table page already built.
+ //
+
+ PointerNewPte = (PMMPTE)((ULONG)PAGE_ALIGN(PointerNewPte) |
+ BYTE_OFFSET (PointerPte));
+ }
+
+ //
+ // Make the forkprototype Pte location resident.
+ //
+
+ if (PAGE_ALIGN (ForkProtoPte) != PAGE_ALIGN (LockedForkPte)) {
+ MiUnlockPagedAddress (LockedForkPte, FALSE);
+ LockedForkPte = ForkProtoPte;
+ MiLockPagedAddress (LockedForkPte, FALSE);
+ }
+
+ MiMakeSystemAddressValid (PointerPte,
+ CurrentProcess);
+
+ PteContents = *PointerPte;
+
+ //
+ // Check each PTE.
+ //
+
+ if (PteContents.u.Long == 0) {
+ NOTHING;
+
+ } else if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // Valid.
+ //
+
+ Pfn2 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+ VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+ WorkingSetIndex = MiLocateWsle (VirtualAddress,
+ MmWorkingSetList,
+ Pfn2->u1.WsIndex);
+
+ ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
+
+ if (Pfn2->u3.e1.PrototypePte == 1) {
+
+ //
+ // This PTE is already in prototype PTE format.
+ //
+
+ //
+ // This is a prototype PTE. The PFN database does
+ // not contain the contents of this PTE it contains
+ // the contents of the prototype PTE. This PTE must
+ // be reconstructed to contain a pointer to the
+ // prototype PTE.
+ //
+ // The working set list entry contains information about
+ // how to reconstruct the PTE.
+ //
+
+ if (MmWsle[WorkingSetIndex].u1.e1.SameProtectAsProto
+ == 0) {
+
+ //
+ // The protection for the prototype PTE is in the
+ // WSLE.
+ //
+
+ TempPte.u.Long = 0;
+ TempPte.u.Soft.Protection =
+ MmWsle[WorkingSetIndex].u1.e1.Protection;
+ TempPte.u.Soft.PageFileHigh = 0xFFFFF;
+
+ } else {
+
+ //
+ // The protection is in the prototype PTE.
+ //
+
+ TempPte.u.Long = MiProtoAddressForPte (
+ Pfn2->PteAddress);
+ // TempPte.u.Proto.ForkType =
+ // MmWsle[WorkingSetIndex].u1.e1.ForkType;
+ }
+
+ TempPte.u.Proto.Prototype = 1;
+ *PointerNewPte = TempPte;
+
+ //
+ // A PTE is now non-zero, increment the used page
+ // table entries counter.
+ //
+
+ *UsedPageTableEntries += 1;
+
+ //
+ // Check to see if this is a fork prototype PTE,
+ // and if it is increment the reference count
+ // which is in the longword following the PTE.
+ //
+
+ if (MiLocateCloneAddress ((PVOID)Pfn2->PteAddress) !=
+ (PMMCLONE_DESCRIPTOR)NULL) {
+
+ //
+ // The reference count field, or the prototype PTE
+ // for that matter may not be in the working set.
+ //
+
+ CloneProto = (PMMCLONE_BLOCK)Pfn2->PteAddress;
+
+ MiUpCloneProtoRefCount (CloneProto,
+ CurrentProcess);
+
+ if (PAGE_ALIGN (ForkProtoPte) !=
+ PAGE_ALIGN (LockedForkPte)) {
+ MiUnlockPagedAddress (LockedForkPte, FALSE);
+ LockedForkPte = ForkProtoPte;
+ MiLockPagedAddress (LockedForkPte, FALSE);
+ }
+
+ MiMakeSystemAddressValid (PointerPte,
+ CurrentProcess);
+ }
+
+ } else {
+
+ //
+ // This is a private page, create a fork prototype PTE
+ // which becomes the "prototype" PTE for this page.
+                    // The protection is the same as that in the prototype
+ // PTE so the WSLE does not need to be updated.
+ //
+
+ MI_MAKE_VALID_PTE_WRITE_COPY (PointerPte);
+
+ ForkProtoPte->ProtoPte = *PointerPte;
+ ForkProtoPte->CloneRefCount = 2;
+
+ //
+ // Transform the PFN element to reference this new fork
+ // prototype PTE.
+ //
+
+ Pfn2->PteAddress = &ForkProtoPte->ProtoPte;
+ Pfn2->u3.e1.PrototypePte = 1;
+
+ ContainingPte = MiGetPteAddress(&ForkProtoPte->ProtoPte);
+ Pfn2->PteFrame = ContainingPte->u.Hard.PageFrameNumber;
+
+
+ //
+ // Increment the share count for the page containing the
+ // fork prototype PTEs as we have just placed a valid
+ // PTE into the page.
+ //
+
+ PfnForkPtePage = MI_PFN_ELEMENT (
+ ContainingPte->u.Hard.PageFrameNumber );
+
+ MiUpForkPageShareCount (PfnForkPtePage);
+
+ //
+ // Change the protection in the PFN database to COPY
+ // on write, if writable.
+ //
+
+ MI_MAKE_PROTECT_WRITE_COPY (Pfn2->OriginalPte);
+
+ //
+ // Put the protection into the WSLE and mark the WSLE
+ // to indicate that the protection field for the PTE
+ // is the same as the prototype PTE.
+ //
+
+ MmWsle[WorkingSetIndex].u1.e1.Protection =
+ Pfn2->OriginalPte.u.Soft.Protection;
+
+ MmWsle[WorkingSetIndex].u1.e1.SameProtectAsProto = 1;
+
+ TempPte.u.Long = MiProtoAddressForPte (Pfn2->PteAddress);
+ TempPte.u.Proto.Prototype = 1;
+ *PointerNewPte = TempPte;
+
+ //
+ // A PTE is now non-zero, increment the used page
+ // table entries counter.
+ //
+
+ *UsedPageTableEntries += 1;
+
+ //
+ // One less private page (it's now shared).
+ //
+
+ CurrentProcess->NumberOfPrivatePages -= 1;
+
+ ForkProtoPte += 1;
+ NumberOfForkPtes += 1;
+
+ }
+
+ } else if (PteContents.u.Soft.Prototype == 1) {
+
+ //
+ // Prototype PTE, check to see if this is a fork
+ // prototype PTE already. Note that if COW is set,
+ // the PTE can just be copied (fork compatible format).
+ //
+
+ *PointerNewPte = PteContents;
+
+ //
+ // A PTE is now non-zero, increment the used page
+ // table entries counter.
+ //
+
+ *UsedPageTableEntries += 1;
+
+ //
+ // Check to see if this is a fork prototype PTE,
+ // and if it is increment the reference count
+ // which is in the longword following the PTE.
+ //
+
+ CloneProto = (PMMCLONE_BLOCK)(MiPteToProto(PointerPte));
+
+ if (MiLocateCloneAddress ((PVOID)CloneProto) !=
+ (PMMCLONE_DESCRIPTOR)NULL) {
+
+ //
+ // The reference count field, or the prototype PTE
+ // for that matter may not be in the working set.
+ //
+
+ MiUpCloneProtoRefCount (CloneProto,
+ CurrentProcess);
+
+ if (PAGE_ALIGN (ForkProtoPte) !=
+ PAGE_ALIGN (LockedForkPte)) {
+ MiUnlockPagedAddress (LockedForkPte, FALSE);
+ LockedForkPte = ForkProtoPte;
+ MiLockPagedAddress (LockedForkPte, FALSE);
+ }
+
+ MiMakeSystemAddressValid (PointerPte,
+ CurrentProcess);
+ }
+
+ } else if (PteContents.u.Soft.Transition == 1) {
+
+ //
+ // Transition.
+ //
+
+ if (MiHandleForkTransitionPte (PointerPte,
+ PointerNewPte,
+ ForkProtoPte)) {
+ //
+ // PTE is no longer transition, try again.
+ //
+
+ continue;
+ }
+
+ //
+ // A PTE is now non-zero, increment the used page
+ // table entries counter.
+ //
+
+ *UsedPageTableEntries += 1;
+
+ //
+ // One less private page (it's now shared).
+ //
+
+ CurrentProcess->NumberOfPrivatePages -= 1;
+
+ ForkProtoPte += 1;
+ NumberOfForkPtes += 1;
+
+ } else {
+
+ //
+ // Page file format (may be demand zero).
+ //
+
+ if (IS_PTE_NOT_DEMAND_ZERO (PteContents)) {
+
+ if (PteContents.u.Soft.Protection == MM_DECOMMIT) {
+
+ //
+ // This is a decommitted PTE, just move it
+ // over to the new process. Don't increment
+ // the count of private pages.
+ //
+
+ *PointerNewPte = PteContents;
+ } else {
+
+ //
+ // The PTE is not demand zero, move the PTE to
+ // a fork prototype PTE and make this PTE and
+ // the new processes PTE refer to the fork
+ // prototype PTE.
+ //
+
+ ForkProtoPte->ProtoPte = PteContents;
+
+ //
+ // Make the protection write-copy if writable.
+ //
+
+ MI_MAKE_PROTECT_WRITE_COPY (ForkProtoPte->ProtoPte);
+
+ ForkProtoPte->CloneRefCount = 2;
+
+ TempPte.u.Long =
+ MiProtoAddressForPte (&ForkProtoPte->ProtoPte);
+
+ TempPte.u.Proto.Prototype = 1;
+
+ *PointerPte = TempPte;
+ *PointerNewPte = TempPte;
+
+ //
+ // One less private page (it's now shared).
+ //
+
+ CurrentProcess->NumberOfPrivatePages -= 1;
+
+ ForkProtoPte += 1;
+ NumberOfForkPtes += 1;
+ }
+ } else {
+
+ //
+ // The page is demand zero, make the new process's
+ // page demand zero.
+ //
+
+ *PointerNewPte = PteContents;
+ }
+
+ //
+ // A PTE is now non-zero, increment the used page
+ // table entries counter.
+ //
+
+ *UsedPageTableEntries += 1;
+ }
+
+ PointerPte += 1;
+ PointerNewPte += 1;
+
+ } // end while for PTEs
+AllDone:
+ NewVad = NewVad->Parent;
+ }
+ Vad = MiGetNextVad (Vad);
+
+ } // end while for VADs
+
+ //
+ // Unlock paged pool page.
+ //
+
+ MiUnlockPagedAddress (LockedForkPte, FALSE);
+
+ //
+ // Unmap the PD Page and hyper space page.
+ //
+
+ MmUnmapLockedPages (PdeBase, Mdl0);
+ MmUnmapLockedPages (HyperBase, Mdl1);
+ MmUnmapLockedPages (Mdl2->MappedSystemVa, Mdl2);
+
+ MiDownPfnReferenceCount (*(PULONG)((Mdl0 + 1)));
+ MiDownPfnReferenceCount (*(PULONG)((Mdl1 + 1)));
+ MiDownPfnReferenceCount (*(PULONG)((Mdl2 + 1)));
+
+ //
+ // Make the count of private pages match between the two processes.
+ //
+
+ ASSERT ((LONG)CurrentProcess->NumberOfPrivatePages >= 0);
+
+ ProcessToInitialize->NumberOfPrivatePages =
+ CurrentProcess->NumberOfPrivatePages;
+
+ ASSERT (NumberOfForkPtes <= CloneDescriptor->NumberOfPtes);
+
+ if (NumberOfForkPtes != 0) {
+
+ //
+ // The number of fork PTEs is non-zero, set the values
+        // into the structures.
+ //
+
+ CloneHeader->NumberOfPtes = NumberOfForkPtes;
+ CloneDescriptor->NumberOfReferences = NumberOfForkPtes;
+ CloneDescriptor->NumberOfPtes = NumberOfForkPtes;
+
+ } else {
+
+ //
+ // There were no fork ptes created. Remove the clone descriptor
+ // from this process and clean up the related structures.
+ // Note - must be holding the working set mutex and not holding
+ // the PFN lock.
+ //
+
+ MiRemoveClone (CloneDescriptor);
+
+ UNLOCK_WS (CurrentProcess);
+
+ ExFreePool (CloneDescriptor->CloneHeader->ClonePtes);
+
+ ExFreePool (CloneDescriptor->CloneHeader);
+
+ //
+ // Return the pool for the global structures referenced by the
+ // clone descriptor.
+ //
+
+ PsReturnPoolQuota (CurrentProcess,
+ PagedPool,
+ CloneDescriptor->PagedPoolQuotaCharge);
+
+ PsReturnPoolQuota (CurrentProcess, NonPagedPool, sizeof(MMCLONE_HEADER));
+
+ ExFreePool (CloneDescriptor);
+
+ LOCK_WS (CurrentProcess);
+ }
+
+ MiDownShareCountFlushEntireTb (PageFrameIndex);
+
+ PageFrameIndex = 0xFFFFFFFF;
+
+ //
+ // Copy the clone descriptors from this process to the new process.
+ //
+
+ Clone = MiGetFirstClone ();
+ CloneList = &FirstNewClone;
+ CloneFailed = FALSE;
+
+ while (Clone != (PMMCLONE_DESCRIPTOR)NULL) {
+
+ //
+ // Increment the count of processes referencing this clone block.
+ //
+
+ Clone->CloneHeader->NumberOfProcessReferences += 1;
+
+ NewClone = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof( MMCLONE_DESCRIPTOR),
+ ' mM');
+
+ if (NewClone == NULL) {
+
+ //
+            // There are insufficient resources to continue this operation,
+ // however, to properly clean up at this point, all the
+ // clone headers must be allocated, so when the cloned process
+ // is deleted, the clone headers will be found. Get MustSucceed
+            // pool, but force the operation to fail so the pool will
+            // soon be released.
+ //
+
+ CloneFailed = TRUE;
+ status = STATUS_INSUFFICIENT_RESOURCES;
+ NewClone = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ sizeof( MMCLONE_DESCRIPTOR),
+ ' mM');
+ }
+
+ *NewClone = *Clone;
+
+ *CloneList = NewClone;
+ CloneList = &NewClone->Parent;
+ Clone = MiGetNextClone (Clone);
+ }
+
+ *CloneList = (PMMCLONE_DESCRIPTOR)NULL;
+
+ //
+ // Release the working set mutex and the address creation mutex from
+    // the current process as all the necessary information is now
+ // captured.
+ //
+
+ UNLOCK_WS (CurrentProcess);
+
+ CurrentProcess->ForkInProgress = NULL;
+
+ UNLOCK_ADDRESS_SPACE (CurrentProcess);
+
+ //
+ // As we have updated many PTEs to clear dirty bits, flush the
+    // TB cache. Note that this was not done every time we changed
+ // a valid PTE so other threads could be modifying the address
+ // space without causing copy on writes. (Too bad).
+ //
+
+
+ //
+    // Attach to the process to initialize and insert the VAD and clone
+    // descriptors into the tree.
+ //
+
+ if (Attached) {
+ KeDetachProcess ();
+ Attached = FALSE;
+ }
+
+ if (PsGetCurrentProcess() != ProcessToInitialize) {
+ Attached = TRUE;
+ KeAttachProcess (&ProcessToInitialize->Pcb);
+ }
+
+ CurrentProcess = ProcessToInitialize;
+
+ //
+ // We are now in the context of the new process, build the
+ // VAD list and the clone list.
+ //
+
+ Vad = FirstNewVad;
+ VadInsertFailed = FALSE;
+
+ LOCK_WS (CurrentProcess);
+
+ while (Vad != (PMMVAD)NULL) {
+
+ NextVad = Vad->Parent;
+
+ try {
+
+
+ if (VadInsertFailed) {
+ Vad->u.VadFlags.CommitCharge = MM_MAX_COMMIT;
+ }
+
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // Charging quota for the VAD failed, set the
+ // remaining quota fields in this VAD and all
+ // subsequent VADs to zero so the VADs can be
+ // inserted and later deleted.
+ //
+
+ VadInsertFailed = TRUE;
+ status = GetExceptionCode();
+
+ //
+ // Do the loop again for this VAD.
+ //
+
+ continue;
+ }
+
+ //
+ // Update the current virtual size.
+ //
+
+ CurrentProcess->VirtualSize += 1 + (ULONG)Vad->EndingVa -
+ (ULONG)Vad->StartingVa;
+
+ Vad = NextVad;
+ }
+
+ UNLOCK_WS (CurrentProcess);
+ //MmUnlockCode (MiCloneProcessAddressSpace, 5000);
+
+ //
+ // Update the peak virtual size.
+ //
+
+ CurrentProcess->PeakVirtualSize = CurrentProcess->VirtualSize;
+
+ Clone = FirstNewClone;
+ TotalPagedPoolCharge = 0;
+ TotalNonPagedPoolCharge = 0;
+
+ while (Clone != (PMMCLONE_DESCRIPTOR)NULL) {
+
+ NextClone = Clone->Parent;
+ MiInsertClone (Clone);
+
+ //
+ // Calculate the page pool and non-paged pool to charge for these
+ // operations.
+ //
+
+ TotalPagedPoolCharge += Clone->PagedPoolQuotaCharge;
+ TotalNonPagedPoolCharge += sizeof(MMCLONE_HEADER);
+
+ Clone = NextClone;
+ }
+
+ if (CloneFailed || VadInsertFailed) {
+
+ if (Attached) {
+ KeDetachProcess ();
+ }
+ KdPrint(("MMFORK: vad insert failed\n"));
+
+ return status;
+ }
+
+ try {
+
+ PageTablePage = 1;
+ PsChargePoolQuota (CurrentProcess, PagedPool, TotalPagedPoolCharge);
+ PageTablePage = 0;
+ PsChargePoolQuota (CurrentProcess, NonPagedPool, TotalNonPagedPoolCharge);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ if (PageTablePage == 0) {
+ PsReturnPoolQuota (CurrentProcess, PagedPool, TotalPagedPoolCharge);
+ }
+ KdPrint(("MMFORK: pool quota failed\n"));
+
+ if (Attached) {
+ KeDetachProcess ();
+ }
+ return GetExceptionCode();
+ }
+
+ CurrentProcess->ForkWasSuccessful = TRUE;
+ if (Attached) {
+ KeDetachProcess ();
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_FORK) {
+ DbgPrint("ending clone operation process to clone = %lx\n",
+ ProcessToClone);
+ }
+#endif //DBG
+
+ return STATUS_SUCCESS;
+
+ //
+ // Error returns.
+ //
+
+ErrorReturn4:
+ ExFreePool (CloneDescriptor);
+ErrorReturn3:
+ ExFreePool (CloneHeader);
+ErrorReturn2:
+ ExFreePool (CloneProtos);
+ErrorReturn1:
+ UNLOCK_ADDRESS_SPACE (CurrentProcess);
+ if (Attached) {
+ KeDetachProcess ();
+ }
+ return status;
+}
+
+ULONG
+MiDecrementCloneBlockReference (
+ IN PMMCLONE_DESCRIPTOR CloneDescriptor,
+ IN PMMCLONE_BLOCK CloneBlock,
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ This routine decrements the reference count field of a "fork prototype
+ PTE" (clone-block). If the reference count becomes zero, the reference
+ count for the clone-descriptor is decremented and if that becomes zero,
+ it is deallocated and the number of process count for the clone header is
+ decremented. If the number of process count becomes zero, the clone
+ header is deallocated.
+
+Arguments:
+
+ CloneDescriptor - Supplies the clone descriptor which describes the
+ clone block.
+
+ CloneBlock - Supplies the clone block to decrement the reference count of.
+
+ CurrentProcess - Supplies the current process.
+
+Return Value:
+
+ TRUE if the working set mutex was released, FALSE if it was not.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set mutex and PFN mutex held.
+
+--*/
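+
+//
+// Reference cascade implemented below:
+//
+//     CloneBlock->CloneRefCount reaches 0
+//         -> release the transition page and/or page file space
+//     CloneDescriptor->NumberOfReferences reaches 0
+//         -> CloneHeader->NumberOfProcessReferences -= 1
+//     CloneHeader->NumberOfProcessReferences reaches 0
+//         -> free the clone PTEs and the clone header
+//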
+
+{
+
+ ULONG MutexReleased = FALSE;
+ MMPTE CloneContents;
+ PMMPFN Pfn3;
+ KIRQL OldIrql;
+ PMMCLONE_BLOCK OldCloneBlock;
+ LONG NewCount;
+
+ OldIrql = APC_LEVEL;
+
+ MutexReleased = MiMakeSystemAddressValidPfnWs (CloneBlock, CurrentProcess);
+
+ while (CurrentProcess->ForkInProgress) {
+ MiWaitForForkToComplete (CurrentProcess);
+ MiMakeSystemAddressValidPfnWs (CloneBlock, CurrentProcess);
+ MutexReleased = TRUE;
+ }
+
+ CloneBlock->CloneRefCount -= 1;
+ NewCount = CloneBlock->CloneRefCount;
+
+ ASSERT (NewCount >= 0);
+
+ if (NewCount == 0) {
+ CloneContents = CloneBlock->ProtoPte;
+ } else {
+ CloneContents = ZeroPte;
+ }
+
+ if ((NewCount == 0) && (CloneContents.u.Long != 0)) {
+
+ //
+ // The last reference to a fork prototype PTE
+ // has been removed. Deallocate any page file
+ // space and the transition page, if any.
+ //
+
+
+ //
+ // Assert that the page is no longer valid.
+ //
+
+ ASSERT (CloneContents.u.Hard.Valid == 0);
+
+ //
+ // Assert that the PTE is not in subsection format (doesn't point
+ // to a file).
+ //
+
+ ASSERT (CloneContents.u.Soft.Prototype == 0);
+
+ if (CloneContents.u.Soft.Transition == 1) {
+
+ //
+ // Prototype PTE in transition, put the page
+ // on the free list.
+ //
+
+ Pfn3 = MI_PFN_ELEMENT (CloneContents.u.Trans.PageFrameNumber);
+ MI_SET_PFN_DELETED (Pfn3);
+
+ MiDecrementShareCount (Pfn3->PteFrame);
+
+ //
+ // Check the reference count for the page, if the reference
+ // count is zero and the page is not on the freelist,
+ // move the page to the free list, if the reference
+ // count is not zero, ignore this page.
+ // When the refernce count goes to zero, it will be placed on the
+ // free list.
+ //
+
+ if ((Pfn3->u3.e2.ReferenceCount == 0) &&
+ (Pfn3->u3.e1.PageLocation != FreePageList)) {
+
+ MiUnlinkPageFromList (Pfn3);
+ MiReleasePageFileSpace (Pfn3->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ CloneContents.u.Trans.PageFrameNumber);
+ }
+ } else {
+
+ if (IS_PTE_NOT_DEMAND_ZERO (CloneContents)) {
+ MiReleasePageFileSpace (CloneContents);
+ }
+ }
+ }
+
+ //
+ // Decrement the number of references to the
+ // clone descriptor.
+ //
+
+ CloneDescriptor->NumberOfReferences -= 1;
+
+ if (CloneDescriptor->NumberOfReferences == 0) {
+
+ //
+ // There are no longer any PTEs in this process which refer
+ // to the fork prototype PTEs for this clone descriptor.
+ // Remove the CloneDescriptor and decrement the CloneHeader
+ // number of process's reference count.
+ //
+
+ CloneDescriptor->CloneHeader->NumberOfProcessReferences -= 1;
+
+ if (CloneDescriptor->CloneHeader->NumberOfProcessReferences == 0) {
+
+ //
+ // There are no more processes pointing to this fork header
+ // blow it away.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_WS (CurrentProcess);
+ MutexReleased = TRUE;
+
+ OldCloneBlock = CloneDescriptor->CloneHeader->ClonePtes;
+
+#if DBG
+ {
+ ULONG i;
+ for (i = 0; i < CloneDescriptor->CloneHeader->NumberOfPtes; i++) {
+ if (OldCloneBlock->CloneRefCount != 0) {
+ DbgPrint("fork block with non zero ref count %lx %lx %lx\n",
+ OldCloneBlock, CloneDescriptor,
+ CloneDescriptor->CloneHeader);
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+ }
+
+ if (MmDebug & MM_DBG_FORK) {
+ DbgPrint("removing clone header at address %lx\n",
+ CloneDescriptor->CloneHeader);
+ }
+ }
+#endif //DBG
+
+ ExFreePool (CloneDescriptor->CloneHeader->ClonePtes);
+
+ ExFreePool (CloneDescriptor->CloneHeader);
+
+ LOCK_WS (CurrentProcess);
+ LOCK_PFN (OldIrql);
+
+ }
+
+ MiRemoveClone (CloneDescriptor);
+
+#if DBG
+ if (MmDebug & MM_DBG_FORK) {
+ DbgPrint("removing clone descriptor at address %lx\n",CloneDescriptor);
+ }
+#endif //DBG
+
+ //
+ // Return the pool for the global structures referenced by the
+ // clone descriptor.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ if (CurrentProcess->ForkWasSuccessful != FALSE) {
+
+ PsReturnPoolQuota (CurrentProcess,
+ PagedPool,
+ CloneDescriptor->PagedPoolQuotaCharge);
+
+ PsReturnPoolQuota (CurrentProcess,
+ NonPagedPool,
+ sizeof(MMCLONE_HEADER));
+ }
+
+ ExFreePool (CloneDescriptor);
+ LOCK_PFN (OldIrql);
+ }
+
+ return MutexReleased;
+}
+
+VOID
+MiWaitForForkToComplete (
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ This routine waits for the current process to complete a fork
+ operation.
+
+Arguments:
+
+ CurrentProcess - Supplies the current process value.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set mutex and PFN mutex held.
+
+--*/
+
+{
+ KIRQL OldIrql = APC_LEVEL;
+
+ //
+ // A fork operation is in progress and the count of clone-blocks
+ // and other structures may not be changed. Release the mutexes
+ // and wait for the address creation mutex which governs the
+ // fork operation.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_WS (CurrentProcess);
+
+ LOCK_WS_AND_ADDRESS_SPACE (CurrentProcess);
+
+ //
+ // Release the address creation mutex, the working set mutex
+ // must be held to set the ForkInProgress field.
+ //
+
+ UNLOCK_ADDRESS_SPACE (CurrentProcess);
+
+ //
+ // Get the PFN mutex again.
+ //
+
+ LOCK_PFN (OldIrql);
+ return;
+}
+#if DBG
+VOID
+CloneTreeWalk (
+ PMMCLONE_DESCRIPTOR Start
+ )
+
+{
+ Start;
+ NodeTreeWalk ( (PMMADDRESS_NODE)(PsGetCurrentProcess()->CloneRoot));
+ return;
+}
+#endif //DBG
+
+VOID
+MiUpPfnReferenceCount (
+ IN ULONG Page,
+ IN USHORT Count
+ )
+
+        // Nonpaged helper routine: increments the PFN reference count
+        // of the specified physical page by Count under the PFN lock.
+
+{
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+
+ Pfn1 = MI_PFN_ELEMENT (Page);
+ LOCK_PFN (OldIrql);
+ Pfn1->u3.e2.ReferenceCount += Count;
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+VOID
+MiDownPfnReferenceCount (
+ IN ULONG Page
+ )
+
+        // Nonpaged helper routine: decrements the PFN reference count
+        // of the specified physical page under the PFN lock.
+
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+ MiDecrementReferenceCount (Page);
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+VOID
+MiUpControlAreaRefs (
+ IN PCONTROL_AREA ControlArea
+ )
+
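+        // Nonpaged helper routine: increments the mapped view and user
+        // reference counts of the specified control area under the PFN lock.
+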
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+
+ ControlArea->NumberOfMappedViews += 1;
+ ControlArea->NumberOfUserReferences += 1;
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+
+ULONG
+MiDoneWithThisPageGetAnother (
+ IN PULONG PageFrameIndex,
+ IN PMMPTE PointerPde,
+ IN PEPROCESS CurrentProcess
+ )
+
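+        // Nonpaged helper routine: drops the share count taken on the
+        // previous page table page (if any), waits if necessary for an
+        // available page, and removes a zeroed page to serve as the next
+        // page table page for the new process.  Returns TRUE if the
+        // working set mutex was released while waiting, FALSE otherwise.
+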
+{
+ KIRQL OldIrql;
+ ULONG ReleasedMutex;
+
+ LOCK_PFN (OldIrql);
+
+ if (*PageFrameIndex != 0xFFFFFFFF) {
+
+ //
+ // Decrement the share count of the last page which
+ // we operated on.
+ //
+
+ MiDecrementShareCountOnly (*PageFrameIndex);
+ }
+
+ ReleasedMutex =
+ MiEnsureAvailablePageOrWait (
+ CurrentProcess,
+ NULL);
+
+ *PageFrameIndex = MiRemoveZeroPage (
+ MI_PAGE_COLOR_PTE_PROCESS (PointerPde,
+ &CurrentProcess->NextPageColor));
+ UNLOCK_PFN (OldIrql);
+ return ReleasedMutex;
+}
+
+VOID
+MiUpCloneProtoRefCount (
+ IN PMMCLONE_BLOCK CloneProto,
+ IN PEPROCESS CurrentProcess
+ )
+
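+        // Nonpaged helper routine: makes the clone prototype PTE resident
+        // and increments its reference count under the PFN lock.
+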
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+
+ MiMakeSystemAddressValidPfnWs (CloneProto,
+ CurrentProcess );
+
+ CloneProto->CloneRefCount += 1;
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+ULONG
+MiHandleForkTransitionPte (
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerNewPte,
+ IN PMMCLONE_BLOCK ForkProtoPte
+ )
+
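+        // Handles a transition PTE encountered during a fork.  With the
+        // PFN lock held the PTE is reexamined; if it is still in
+        // transition, the private page is converted into a fork prototype
+        // PTE (protection made write-copy) and both the parent's and the
+        // child's PTEs are rebuilt to point at it.  Returns TRUE if the
+        // PTE was no longer in transition and must be reexamined by the
+        // caller, FALSE if it was handled here.
+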
+{
+ KIRQL OldIrql;
+ PMMPFN Pfn2;
+ MMPTE PteContents;
+ PMMPTE ContainingPte;
+ ULONG PageTablePage;
+ MMPTE TempPte;
+ PMMPFN PfnForkPtePage;
+
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Now that we have the PFN mutex which prevents pages from
+ // leaving the transition state, examine the PTE again to
+    // ensure that it is still in transition.
+ //
+
+ PteContents = *(volatile PMMPTE)PointerPte;
+
+ if ((PteContents.u.Soft.Transition == 0) ||
+ (PteContents.u.Soft.Prototype == 1)) {
+
+ //
+ // The PTE is no longer in transition... do this
+ // loop again.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+
+ } else {
+
+ //
+ // The PTE is still in transition, handle like a
+ // valid PTE.
+ //
+
+ Pfn2 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ //
+        // Assert that the PTE is not in prototype PTE format.
+ //
+
+ ASSERT (Pfn2->u3.e1.PrototypePte != 1);
+
+ //
+ // This is a private page in transition state,
+ // create a fork prototype PTE
+ // which becomes the "prototype" PTE for this page.
+ //
+
+ ForkProtoPte->ProtoPte = PteContents;
+
+ //
+ // Make the protection write-copy if writable.
+ //
+
+ MI_MAKE_PROTECT_WRITE_COPY (ForkProtoPte->ProtoPte);
+
+ ForkProtoPte->CloneRefCount = 2;
+
+ //
+ // Transform the PFN element to reference this new fork
+ // prototype PTE.
+ //
+
+ Pfn2->PteAddress = &ForkProtoPte->ProtoPte;
+ Pfn2->u3.e1.PrototypePte = 1;
+
+ //
+ // Make original PTE copy on write.
+ //
+
+ MI_MAKE_PROTECT_WRITE_COPY (Pfn2->OriginalPte);
+
+ ContainingPte = MiGetPteAddress(&ForkProtoPte->ProtoPte);
+
+ PageTablePage = Pfn2->PteFrame;
+
+ Pfn2->PteFrame =
+ ContainingPte->u.Hard.PageFrameNumber;
+
+ //
+ // Increment the share count for the page containing
+ // the fork prototype PTEs as we have just placed
+ // a transition PTE into the page.
+ //
+
+ PfnForkPtePage = MI_PFN_ELEMENT (
+ ContainingPte->u.Hard.PageFrameNumber );
+
+ PfnForkPtePage->u2.ShareCount += 1;
+
+ TempPte.u.Long =
+ MiProtoAddressForPte (Pfn2->PteAddress);
+ TempPte.u.Proto.Prototype = 1;
+ *PointerPte = TempPte;
+ *PointerNewPte = TempPte;
+
+ //
+ // Decrement the share count for the page table
+ // page which contains the PTE as it is no longer
+ // valid or in transition.
+ //
+
+ MiDecrementShareCount (PageTablePage);
+ }
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+}
+
+VOID
+MiDownShareCountFlushEntireTb (
+ IN ULONG PageFrameIndex
+ )
+
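+        // Nonpaged helper routine: decrements the share count of the
+        // specified page (if one is specified) and flushes the entire
+        // TB under the PFN lock.
+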
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+
+ if (PageFrameIndex != 0xFFFFFFFF) {
+
+ //
+ // Decrement the share count of the last page which
+ // we operated on.
+ //
+
+ MiDecrementShareCountOnly (PageFrameIndex);
+ }
+
+ KeFlushEntireTb (FALSE, FALSE);
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+VOID
+MiUpForkPageShareCount(
+ IN PMMPFN PfnForkPtePage
+ )
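+
+        // Nonpaged helper routine: increments the share count of the page
+        // containing fork prototype PTEs under the PFN lock.
+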
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+ PfnForkPtePage->u2.ShareCount += 1;
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
diff --git a/private/ntos/mm/freevm.c b/private/ntos/mm/freevm.c
new file mode 100644
index 000000000..d935a79ae
--- /dev/null
+++ b/private/ntos/mm/freevm.c
@@ -0,0 +1,1363 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ freevm.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtFreeVirtualMemory service.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#define MEM_CHECK_COMMIT_STATE 0x400000
+
+#define MM_VALID_PTE_SIZE (256)
+
+
+MMPTE MmDecommittedPte = {MM_DECOMMIT << MM_PROTECT_FIELD_SHIFT};
+
+#if DBG
+extern PEPROCESS MmWatchProcess;
+VOID MmFooBar(VOID);
+#endif // DBG
+// #include "ntos.h"
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtFreeVirtualMemory)
+#pragma alloc_text(PAGE,MiIsEntireRangeCommitted)
+#endif
+
+VOID
+MiProcessValidPteList (
+ IN PMMPTE *PteList,
+ IN ULONG Count
+ );
+
+ULONG
+MiDecommitPages (
+ IN PVOID StartingAddress,
+ IN PMMPTE EndingPte,
+ IN PEPROCESS Process,
+ IN PMMVAD_SHORT Vad
+ );
+
+VOID
+MiDeleteFreeVm (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress
+ );
+
+
+NTSTATUS
+NtFreeVirtualMemory(
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PULONG RegionSize,
+ IN ULONG FreeType
+ )
+
+/*++
+
+Routine Description:
+
+ This function deletes a region of pages within the virtual address
+ space of a subject process.
+
+Arguments:
+
+ ProcessHandle - An open handle to a process object.
+
+ BaseAddress - The base address of the region of pages
+ to be freed. This value is rounded down to the
+ next host page address boundary.
+
+ RegionSize - A pointer to a variable that will receive
+ the actual size in bytes of the freed region of
+ pages. The initial value of this argument is
+ rounded up to the next host page size boundary.
+
+ FreeType - A set of flags that describe the type of
+ free that is to be performed for the specified
+ region of pages.
+
+
+ FreeType Flags
+
+
+ MEM_DECOMMIT - The specified region of pages is to
+ be decommitted.
+
+ MEM_RELEASE - The specified region of pages is to
+ be released.
+
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
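+
+//
+// Illustrative usage sketch (values are hypothetical): to release an
+// entire allocation, supply the base of the region and a zero size:
+//
+//     RegionSize = 0;
+//     Status = NtFreeVirtualMemory (NtCurrentProcess(),
+//                                   &BaseAddress,
+//                                   &RegionSize,
+//                                   MEM_RELEASE);
+//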
+
+{
+ PMMVAD_SHORT Vad;
+ PMMVAD_SHORT NewVad;
+ PMMVAD PreviousVad;
+ PMMVAD NextVad;
+ PEPROCESS Process;
+ KPROCESSOR_MODE PreviousMode;
+ PVOID StartingAddress;
+ PVOID EndingAddress;
+ NTSTATUS Status;
+ ULONG Attached = FALSE;
+ ULONG CapturedRegionSize;
+ PVOID CapturedBase;
+ PMMPTE StartingPte;
+ PMMPTE EndingPte;
+ ULONG OldQuota;
+ ULONG QuotaCharge;
+ ULONG CommitReduction;
+ PVOID OldEnd;
+
+ PAGED_CODE();
+
+ //
+ // Check to make sure FreeType is good.
+ //
+
+ if ((FreeType & ~(MEM_DECOMMIT | MEM_RELEASE)) != 0) {
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ //
+ // One of MEM_DECOMMIT or MEM_RELEASE must be specified, but not both.
+ //
+
+ if (((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) == 0) ||
+ ((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) ==
+ (MEM_DECOMMIT | MEM_RELEASE))) {
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ PreviousMode = KeGetPreviousMode();
+
+ //
+ // Establish an exception handler, probe the specified addresses
+ // for write access and capture the initial values.
+ //
+
+ try {
+
+ if (PreviousMode != KernelMode) {
+
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (RegionSize);
+ }
+
+ //
+ // Capture the base address.
+ //
+
+ CapturedBase = *BaseAddress;
+
+ //
+ // Capture the region size.
+ //
+
+ CapturedRegionSize = *RegionSize;
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( !MmWatchProcess ) {
+ DbgPrint("freevm processhandle %lx base %lx size %lx type %lx\n",
+ ProcessHandle, CapturedBase, CapturedRegionSize, FreeType);
+ }
+ }
+#endif
+
+ //
+ // Make sure the specified starting and ending addresses are
+ // within the user part of the virtual address space.
+ //
+
+ if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // Invalid base address.
+ //
+
+ return STATUS_INVALID_PARAMETER_2;
+ }
+
+ if ((ULONG)MM_HIGHEST_USER_ADDRESS - (ULONG)CapturedBase <
+ CapturedRegionSize) {
+
+ //
+ // Invalid region size.
+ //
+
+ return STATUS_INVALID_PARAMETER_3;
+
+ }
+
+ EndingAddress = (PVOID)(((ULONG)CapturedBase + CapturedRegionSize - 1) |
+ (PAGE_SIZE - 1));
+
+ StartingAddress = (PVOID)PAGE_ALIGN(CapturedBase);
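+
+ //
+ // For example: with PAGE_SIZE equal to 0x1000, a captured base of
+ // 0x10005 and a captured region size of 0x1000 yield a StartingAddress
+ // of 0x10000 and an EndingAddress of 0x11FFF, i.e. the range is
+ // widened to whole pages before the VAD is examined.
+ //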
+
+ if ( ProcessHandle == NtCurrentProcess() ) {
+ Process = PsGetCurrentProcess();
+ } else {
+ //
+ // Reference the specified process handle for VM_OPERATION access.
+ //
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+ }
+
+ //
+ // If the specified process is not the current process, attach
+ // to the specified process.
+ //
+
+ if (PsGetCurrentProcess() != Process) {
+ KeAttachProcess (&Process->Pcb);
+ Attached = TRUE;
+ }
+
+ //
+ // Get the address creation mutex to block multiple threads from
+ // creating or deleting address space at the same time and
+ // get the working set mutex so virtual address descriptors can
+ // be inserted and walked. Block APCs to prevent page faults while
+ // we own the working set mutex.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Make sure the address space was not deleted.
+ //
+
+ if (Process->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
+
+ Vad = (PMMVAD_SHORT)MiLocateAddress (StartingAddress);
+
+ if (Vad == NULL) {
+
+ //
+ // No Virtual Address Descriptor located for Base Address.
+ //
+
+ Status = STATUS_MEMORY_NOT_ALLOCATED;
+ goto ErrorReturn;
+ }
+
+ //
+ // Found the associated Virtual Address Descriptor.
+ //
+
+ if (Vad->EndingVa < EndingAddress) {
+
+ //
+ // The entire range to delete is not contained within a single
+ // virtual address descriptor. Return an error.
+ //
+
+ Status = STATUS_UNABLE_TO_FREE_VM;
+ goto ErrorReturn;
+ }
+
+ //
+ // Check to ensure this Vad is deletable. Delete is required
+ // for both decommit and release.
+ //
+
+ if ((Vad->u.VadFlags.PrivateMemory == 0) ||
+ (Vad->u.VadFlags.PhysicalMapping == 1)) {
+ Status = STATUS_UNABLE_TO_DELETE_SECTION;
+ goto ErrorReturn;
+ }
+
+ if (Vad->u.VadFlags.NoChange == 1) {
+
+ //
+ // An attempt is being made to delete a secured VAD, check
+ // to see if this deletion is allowed.
+ //
+
+ if (FreeType & MEM_RELEASE) {
+
+ //
+ // Specify the whole range; this solves the problem with
+ // splitting the VAD and trying to decide where the various
+ // secure ranges need to go.
+ //
+
+ Status = MiCheckSecuredVad ((PMMVAD)Vad,
+ Vad->StartingVa,
+ (PCHAR)Vad->EndingVa - (PCHAR)Vad->StartingVa,
+ MM_SECURE_DELETE_CHECK);
+
+ } else {
+ Status = MiCheckSecuredVad ((PMMVAD)Vad,
+ CapturedBase,
+ CapturedRegionSize,
+ MM_SECURE_DELETE_CHECK);
+ }
+ if (!NT_SUCCESS (Status)) {
+ goto ErrorReturn;
+ }
+ }
+
+ PreviousVad = MiGetPreviousVad (Vad);
+ NextVad = MiGetNextVad (Vad);
+ if (FreeType & MEM_RELEASE) {
+
+ //
+ // *****************************************************************
+ // MEM_RELEASE was specified.
+ // *****************************************************************
+ //
+
+ //
+ // The descriptor for the address range is deletable. Remove or split
+ // the descriptor.
+ //
+
+ //
+ // If the region size is zero, remove the whole VAD.
+ //
+
+ if (CapturedRegionSize == 0) {
+
+ //
+ // If the region size is specified as 0, the base address
+ // must be the starting address for the region.
+ //
+
+ if (CapturedBase != Vad->StartingVa) {
+ Status = STATUS_FREE_VM_NOT_AT_BASE;
+ goto ErrorReturn;
+ }
+
+ //
+ // This Virtual Address Descriptor has been deleted.
+ //
+
+ StartingAddress = Vad->StartingVa;
+ EndingAddress = Vad->EndingVa;
+ MiRemoveVad ((PMMVAD)Vad);
+ ExFreePool (Vad);
+
+ } else {
+
+ //
+ // Region size was not specified as zero; delete the
+ // whole VAD or split the VAD.
+ //
+
+ if (StartingAddress == Vad->StartingVa) {
+ if (EndingAddress == Vad->EndingVa) {
+
+ //
+ // This Virtual Address Descriptor has been deleted.
+ //
+
+ MiRemoveVad ((PMMVAD)Vad);
+ ExFreePool (Vad);
+
+ } else {
+
+ //
+ // This Virtual Address Descriptor has a new starting
+ // address.
+ //
+
+ CommitReduction = MiCalculatePageCommitment (
+ StartingAddress,
+ EndingAddress,
+ (PMMVAD)Vad,
+ Process );
+
+ Vad->StartingVa = (PVOID)((ULONG)EndingAddress + 1L);
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ ASSERT ((LONG)Vad->u.VadFlags.CommitCharge >= 0);
+ MiReturnPageFileQuota (CommitReduction, Process);
+ MiReturnCommitment (CommitReduction);
+ Process->CommitCharge -= CommitReduction;
+ PreviousVad = (PMMVAD)Vad;
+ }
+
+ } else {
+
+ //
+ // Starting address is greater than start of VAD.
+ //
+
+ if (EndingAddress == Vad->EndingVa) {
+
+ //
+ // Change the ending address of the VAD.
+ //
+
+ CommitReduction = MiCalculatePageCommitment (
+ StartingAddress,
+ EndingAddress,
+ (PMMVAD)Vad,
+ Process );
+
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ MiReturnPageFileQuota (CommitReduction, Process);
+ MiReturnCommitment (CommitReduction);
+ Process->CommitCharge -= CommitReduction;
+
+ Vad->EndingVa = (PVOID)((ULONG)StartingAddress - 1L);
+ PreviousVad = (PMMVAD)Vad;
+
+ } else {
+
+ //
+ // Split this VAD as the address range is within the VAD.
+ //
+
+ //
+ // Allocate a new VAD under an exception handler
+ // as there may not be enough quota.
+ //
+
+ NewVad = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMVAD_SHORT),
+ 'SdaV');
+ if ( NewVad == NULL ) {
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn;
+ }
+
+ CommitReduction = MiCalculatePageCommitment (
+ StartingAddress,
+ EndingAddress,
+ (PMMVAD)Vad,
+ Process );
+
+ OldQuota = Vad->u.VadFlags.CommitCharge - CommitReduction;
+ OldEnd = Vad->EndingVa;
+
+ *NewVad = *Vad;
+
+ Vad->EndingVa = (PVOID)((ULONG)StartingAddress - 1L);
+ NewVad->StartingVa = (PVOID)((ULONG)EndingAddress + 1L);
+
+ //
+ // Set the commit charge to zero so MiInsertVad will
+ // not charge commitment for splitting the VAD.
+ //
+
+ NewVad->u.VadFlags.CommitCharge = 0;
+
+ try {
+
+ //
+ // Insert the VAD, this could get an exception
+ // on charging quota.
+ //
+
+ MiInsertVad ((PMMVAD)NewVad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // Inserting the VAD failed; reset the original
+ // VAD, free the new VAD and return an error.
+ //
+
+ Vad->EndingVa = OldEnd;
+
+ ExFreePool (NewVad);
+ Status = GetExceptionCode();
+ goto ErrorReturn;
+ }
+
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ MiReturnPageFileQuota (CommitReduction, Process);
+ MiReturnCommitment (CommitReduction);
+ Process->CommitCharge -= CommitReduction;
+
+ //
+ // As we have split the original VAD into 2 separate VADs
+ // there is no way of knowing what the commit charge
+ // is for each VAD. Calculate the charge and reset
+ // each VAD. Note that we also use the previous value
+ // to make sure the books stay balanced.
+ //
+
+ QuotaCharge = MiCalculatePageCommitment (Vad->StartingVa,
+ Vad->EndingVa,
+ (PMMVAD)Vad,
+ Process );
+
+ Vad->u.VadFlags.CommitCharge = QuotaCharge;
+
+ //
+ // Give the remaining charge to the new VAD.
+ //
+
+ NewVad->u.VadFlags.CommitCharge = OldQuota - QuotaCharge;
+ PreviousVad = (PMMVAD)Vad;
+ NextVad = (PMMVAD)NewVad;
+ }
+ }
+ }
+
+ //
+ // Return commitment for page table pages if possible.
+ //
+
+ MiReturnPageTablePageCommitment (StartingAddress,
+ EndingAddress,
+ Process,
+ PreviousVad,
+ NextVad);
+
+ //
+ // Get the PFN mutex so MiDeleteVirtualAddresses can be called.
+ //
+
+ MiDeleteFreeVm (StartingAddress, EndingAddress);
+ UNLOCK_WS (Process);
+
+ CapturedRegionSize = 1 + (ULONG)EndingAddress - (ULONG)StartingAddress;
+
+ //
+ // Update the virtual size in the process header.
+ //
+
+ Process->VirtualSize -= CapturedRegionSize;
+
+ UNLOCK_ADDRESS_SPACE (Process);
+
+ if (Attached) {
+ KeDetachProcess();
+ }
+
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (Process);
+ }
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *RegionSize = CapturedRegionSize;
+ *BaseAddress = StartingAddress;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // An exception occurred; don't take any action (just handle
+ // the exception and return success).
+
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( MmWatchProcess ) {
+ if ( MmWatchProcess == PsGetCurrentProcess() ) {
+ DbgPrint("\n--- FREE Type 0x%lx Base %lx Size %lx\n",
+ FreeType, StartingAddress, CapturedRegionSize);
+ MmFooBar();
+ }
+ }
+ }
+#endif
+
+#if DBG
+ if (RtlAreLogging( RTL_EVENT_CLASS_VM )) {
+ RtlLogEvent( MiFreeVmEventId,
+ RTL_EVENT_CLASS_VM,
+ StartingAddress,
+ CapturedRegionSize,
+ FreeType
+ );
+
+ }
+#endif // DBG
+
+ return STATUS_SUCCESS;
+ }
+
+ //
+ // **************************************************************
+ //
+ // MEM_DECOMMIT was specified.
+ //
+ // **************************************************************
+ //
+
+ //
+ // Check to ensure the complete range of pages is already committed.
+ //
+
+ if (CapturedRegionSize == 0) {
+
+ if (CapturedBase != Vad->StartingVa) {
+ Status = STATUS_FREE_VM_NOT_AT_BASE;
+ goto ErrorReturn;
+ }
+ EndingAddress = Vad->EndingVa;
+ }
+
+#if 0
+ if (FreeType & MEM_CHECK_COMMIT_STATE) {
+ if ( !MiIsEntireRangeCommitted(StartingAddress,
+ EndingAddress,
+ Vad,
+ Process)) {
+
+ //
+ // The entire range to be decommitted is not committed,
+ // return an error.
+ //
+
+ Status = STATUS_UNABLE_TO_DECOMMIT_VM;
+ goto ErrorReturn;
+ }
+ }
+#endif //0
+
+ //
+ // The address range is entirely committed, decommit it now.
+ //
+
+ //
+ // Calculate the initial quotas and commit charges for this VAD.
+ //
+
+ StartingPte = MiGetPteAddress (StartingAddress);
+ EndingPte = MiGetPteAddress (EndingAddress);
+
+ CommitReduction = 1 + EndingPte - StartingPte;
+
+ //
+ // Check to see if the entire range can be decommitted by
+ // just updating the virtual address descriptor.
+ //
+
+ CommitReduction -= MiDecommitPages (StartingAddress,
+ EndingPte,
+ Process,
+ Vad);
+
+ //
+ // Adjust the quota charges.
+ //
+
+ ASSERT ((LONG)CommitReduction >= 0);
+ MiReturnPageFileQuota (CommitReduction, Process);
+ MiReturnCommitment (CommitReduction);
+ Vad->u.VadFlags.CommitCharge -= CommitReduction;
+ Process->CommitCharge -= CommitReduction;
+ ASSERT ((LONG)Vad->u.VadFlags.CommitCharge >= 0);
+
+ UNLOCK_WS (Process);
+
+ UNLOCK_ADDRESS_SPACE (Process);
+
+ if (Attached) {
+ KeDetachProcess();
+ }
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (Process);
+ }
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *RegionSize = 1 + (ULONG)EndingAddress - (ULONG)StartingAddress;
+ *BaseAddress = StartingAddress;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ NOTHING;
+ }
+
+#if DBG
+ if (RtlAreLogging( RTL_EVENT_CLASS_VM )) {
+ RtlLogEvent( MiFreeVmEventId,
+ RTL_EVENT_CLASS_VM,
+ StartingAddress,
+ 1 + (ULONG)EndingAddress - (ULONG)StartingAddress,
+ FreeType
+ );
+
+ }
+#endif // DBG
+
+ return STATUS_SUCCESS;
+
+ErrorReturn:
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+
+ if (Attached) {
+ KeDetachProcess();
+ }
+
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (Process);
+ }
+ return Status;
+}
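+
+//
+// A minimal usage sketch (hypothetical, not compiled in - the routine
+// name and base value are illustrative only, and error handling is
+// elided): a caller typically decommits a sub-range first and later
+// releases the whole allocation by passing a zero region size.
+//
+
+#if 0
+VOID
+MiExampleFreeSequence (
+ IN PVOID AllocationBase
+ )
+{
+ NTSTATUS Status;
+ PVOID Base;
+ ULONG Size;
+
+ //
+ // Decommit the first two pages; on success Base and Size are
+ // rewritten with the page-rounded values actually decommitted.
+ //
+
+ Base = AllocationBase;
+ Size = 0x2000;
+ Status = NtFreeVirtualMemory (NtCurrentProcess(),
+ &Base,
+ &Size,
+ MEM_DECOMMIT);
+
+ //
+ // Release the entire VAD; a zero region size requires that Base
+ // be the starting address of the original allocation.
+ //
+
+ Base = AllocationBase;
+ Size = 0;
+ Status = NtFreeVirtualMemory (NtCurrentProcess(),
+ &Base,
+ &Size,
+ MEM_RELEASE);
+}
+#endif //0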
+
+ULONG
+MiIsEntireRangeCommitted (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the range of pages from the starting address
+ up to and including the ending address and returns TRUE if every
+ page in the range is committed, FALSE otherwise.
+
+Arguments:
+
+ StartingAddress - Supplies the starting address of the range.
+
+ EndingAddress - Supplies the ending address of the range.
+
+ Vad - Supplies the virtual address descriptor which describes the range.
+
+ Process - Supplies the current process.
+
+Return Value:
+
+ TRUE if the entire range is committed.
+ FALSE if any page within the range is not committed.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE PointerPde;
+ ULONG FirstTime = TRUE;
+ PVOID Va;
+
+ PAGED_CODE();
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ //
+ // Set the Va to the starting address + 8, this solves problems
+ // associated with address 0 (NULL) being used as a valid virtual
+ // address and NULL in the VAD commitment field indicating no pages
+ // are committed.
+ //
+
+ Va = (PVOID)((PCHAR)StartingAddress + 8);
+
+ while (PointerPte <= LastPte) {
+
+ if ((((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) ||
+ (FirstTime)) {
+
+ //
+ // This is a PDE boundary, check to see if the entire
+ // PDE page exists.
+ //
+
+ FirstTime = FALSE;
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No PDE exists for the starting address, check the VAD
+ // to see if the pages are committed.
+ //
+
+ PointerPde += 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ if (PointerPte > LastPte) {
+
+ //
+ // Make sure the entire range is committed.
+ //
+
+ if (Vad->u.VadFlags.MemCommit == 0) {
+
+ //
+ // The entire range to be decommitted is not committed,
+ // return an error.
+ //
+
+ return FALSE;
+ } else {
+ return TRUE;
+ }
+ }
+
+ //
+ // Make sure the range thus far is committed.
+ //
+
+ if (Vad->u.VadFlags.MemCommit == 0) {
+
+ //
+ // The entire range to be decommitted is not committed,
+ // return an error.
+ //
+
+ return FALSE;
+ }
+ }
+ }
+
+ //
+ // The page table page exists, check each PTE for commitment.
+ //
+
+ if (PointerPte->u.Long == 0) {
+
+ //
+ // This page has not been committed, check the VAD.
+ //
+
+ if (Vad->u.VadFlags.MemCommit == 0) {
+
+ //
+ // The entire range to be decommitted is not committed,
+ // return an error.
+ //
+
+ return FALSE;
+ }
+ } else {
+
+ //
+ // Has this page been explicitly decommited?
+ //
+
+ if (MiIsPteDecommittedPage (PointerPte)) {
+
+ //
+ // This page has been explicitly decommitted, return an error.
+ //
+
+ return FALSE;
+ }
+ }
+ PointerPte += 1;
+ Va = (PVOID)((PCHAR)(Va) + PAGE_SIZE);
+ }
+ return TRUE;
+}
+
+ULONG
+MiDecommitPages (
+ IN PVOID StartingAddress,
+ IN PMMPTE EndingPte,
+ IN PEPROCESS Process,
+ IN PMMVAD_SHORT Vad
+ )
+
+/*++
+
+Routine Description:
+
+ This routine decommits the specified range of pages.
+
+Arguments:
+
+ StartingAddress - Supplies the starting address of the range.
+
+ EndingPte - Supplies the ending PTE of the range.
+
+ Process - Supplies the current process.
+
+ Vad - Supplies the virtual address descriptor which describes the range.
+
+Return Value:
+
+ Value to reduce commitment by for the VAD.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PVOID Va;
+ ULONG PdeOffset;
+ ULONG CommitReduction = 0;
+ PMMPTE CommitLimitPte;
+ KIRQL OldIrql;
+ PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
+ ULONG count = 0;
+ ULONG WorkingSetIndex;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PVOID SwapVa;
+ ULONG Entry;
+ MMWSLENTRY Locked;
+ MMPTE PteContents;
+
+ if (Vad->u.VadFlags.MemCommit) {
+ CommitLimitPte = MiGetPteAddress (Vad->EndingVa);
+ } else {
+ CommitLimitPte = NULL;
+ }
+
+ //
+ // Decommit each page by setting the PTE to be explicitly
+ // decommitted. The PTEs cannot be deleted all at once as
+ // this would set the PTEs to zero which would auto-evaluate
+ // as committed if referenced by another thread when a page
+ // table page is being in-paged.
+ //
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ Va = StartingAddress;
+ PdeOffset = MiGetPdeOffset (Va);
+
+ //
+ // Loop through all the PDEs which map this region and ensure that
+ // they exist. If they don't exist create them by touching a
+ // PTE mapped by the PDE.
+ //
+
+ //
+ // Get the PFN mutex so MiDeletePte can be called.
+ //
+
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ while (PointerPte <= EndingPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PdeOffset = MiGetPdeOffset (Va);
+ PointerPde = MiGetPdeAddress (Va);
+ if (count != 0) {
+ MiProcessValidPteList (&ValidPteList[0], count);
+ count = 0;
+ }
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+ }
+
+ //
+ // The working set lock is held. No PTEs can go from
+ // invalid to valid or valid to invalid. Transition
+ // PTEs can go from transition to pagefile.
+ //
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Long != 0) {
+
+ if (PointerPte->u.Long == MmDecommittedPte.u.Long) {
+
+ //
+ // This PTE is already decommitted.
+ //
+
+ CommitReduction += 1;
+
+ } else {
+
+ Process->NumberOfPrivatePages -= 1;
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // Make sure this is not a forked PTE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ if (Pfn1->u3.e1.PrototypePte) {
+
+ LOCK_PFN (OldIrql);
+ MiDeletePte (PointerPte,
+ Va,
+ FALSE,
+ Process,
+ NULL,
+ NULL);
+ UNLOCK_PFN (OldIrql);
+ Process->NumberOfPrivatePages += 1;
+ *PointerPte = MmDecommittedPte;
+ } else {
+
+ //
+ // Pte is valid, process later when PFN lock is held.
+ //
+
+ if (count == MM_VALID_PTE_SIZE) {
+ MiProcessValidPteList (&ValidPteList[0], count);
+ count = 0;
+ }
+ ValidPteList[count] = PointerPte;
+ count += 1;
+
+ //
+ // Remove address from working set list.
+ //
+
+
+ WorkingSetIndex = Pfn1->u1.WsIndex;
+
+ ASSERT (PAGE_ALIGN(MmWsle[WorkingSetIndex].u1.Long) ==
+ Va);
+ //
+ // Check to see if this entry is locked in the working set
+ // or locked in memory.
+ //
+
+ Locked = MmWsle[WorkingSetIndex].u1.e1;
+
+ MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);
+
+ //
+ // Add this entry to the list of free working set entries
+ // and adjust the working set count.
+ //
+
+ MiReleaseWsle (WorkingSetIndex, &Process->Vm);
+
+ if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {
+
+ //
+ // This entry is locked.
+ //
+
+ MmWorkingSetList->FirstDynamic -= 1;
+
+ if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {
+
+ SwapVa = MmWsle[MmWorkingSetList->FirstDynamic].u1.VirtualAddress;
+ SwapVa = PAGE_ALIGN (SwapVa);
+ Pfn2 = MI_PFN_ELEMENT (
+ MiGetPteAddress (SwapVa)->u.Hard.PageFrameNumber);
+
+ Entry = MiLocateWsle (SwapVa,
+ MmWorkingSetList,
+ Pfn2->u1.WsIndex);
+
+ MiSwapWslEntries (Entry,
+ WorkingSetIndex,
+ &Process->Vm);
+ }
+ }
+ }
+ } else if (PteContents.u.Soft.Prototype) {
+
+ //
+ // This is a forked PTE, just delete it.
+ //
+
+ LOCK_PFN (OldIrql);
+ MiDeletePte (PointerPte,
+ Va,
+ FALSE,
+ Process,
+ NULL,
+ NULL);
+ UNLOCK_PFN (OldIrql);
+ Process->NumberOfPrivatePages += 1;
+ *PointerPte = MmDecommittedPte;
+
+ } else if (PteContents.u.Soft.Transition == 1) {
+
+ //
+ // Transition PTE, get the PFN database lock
+ // and reprocess this one.
+ //
+
+ LOCK_PFN (OldIrql);
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Soft.Transition == 1) {
+
+ //
+ // PTE is still in transition, delete it.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+ // Check the reference count for the page, if the
+ // reference count is zero, move the page to the
+ // free list, if the reference count is not zero,
+ // ignore this page. When the reference count
+ // goes to zero, it will be placed on the free list.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ MiUnlinkPageFromList (Pfn1);
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PteContents.u.Trans.PageFrameNumber);
+ }
+
+ *PointerPte = MmDecommittedPte;
+
+ } else {
+
+ //
+ // Page MUST be in page file format!
+ //
+
+ ASSERT (PteContents.u.Soft.Valid == 0);
+ ASSERT (PteContents.u.Soft.Prototype == 0);
+ ASSERT (PteContents.u.Soft.PageFileHigh != 0);
+ MiReleasePageFileSpace (PteContents);
+ *PointerPte = MmDecommittedPte;
+ }
+ UNLOCK_PFN (OldIrql);
+ } else {
+
+ //
+ // Must be demand zero or paging file format.
+ //
+
+ if (PteContents.u.Soft.PageFileHigh != 0) {
+ LOCK_PFN (OldIrql);
+ MiReleasePageFileSpace (PteContents);
+ UNLOCK_PFN (OldIrql);
+ } else {
+
+ //
+ // Don't subtract out the private page count for
+ // a demand zero page.
+ //
+
+ Process->NumberOfPrivatePages += 1;
+ }
+
+ *PointerPte = MmDecommittedPte;
+ }
+ }
+
+ } else {
+
+ //
+ // The PTE is already zero.
+ //
+
+ //
+ // Increment the count of non-zero page table entries for this
+ // page table and the number of private pages for the process.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries[PdeOffset] += 1;
+
+ if (PointerPte > CommitLimitPte) {
+
+ //
+ // Pte is not committed.
+ //
+
+ CommitReduction += 1;
+ }
+ *PointerPte = MmDecommittedPte;
+ }
+
+ PointerPte += 1;
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ }
+ if (count != 0) {
+ MiProcessValidPteList (&ValidPteList[0], count);
+ }
+
+ return CommitReduction;
+}
+
+
+VOID
+MiProcessValidPteList (
+ IN PMMPTE *ValidPteList,
+ IN ULONG Count
+ )
+
+/*++
+
+Routine Description:
+
+ This routine flushes the specified range of valid PTEs.
+
+Arguments:
+
+ ValidPteList - Supplies a pointer to an array of PTEs to flush.
+
+ Count - Supplies the count of the number of elements in the array.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ ULONG i = 0;
+ MMPTE_FLUSH_LIST PteFlushList;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ PteFlushList.Count = Count;
+
+ LOCK_PFN (OldIrql);
+
+ do {
+ PteContents = *ValidPteList[i];
+ ASSERT (PteContents.u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ //
+ // Decrement the share and valid counts of the page table
+ // page which maps this PTE.
+ //
+
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ //
+ // Decrement the share count for the physical page. As the page
+ // is private it will be put on the free list.
+ //
+
+ MiDecrementShareCountOnly (PteContents.u.Hard.PageFrameNumber);
+
+ if (Count < MM_MAXIMUM_FLUSH_COUNT) {
+ PteFlushList.FlushPte[i] = ValidPteList[i];
+ PteFlushList.FlushVa[i] =
+ MiGetVirtualAddressMappedByPte (ValidPteList[i]);
+ }
+ *ValidPteList[i] = MmDecommittedPte;
+ i += 1;
+ } while (i != Count);
+
+ MiFlushPteList (&PteFlushList, FALSE, MmDecommittedPte);
+ UNLOCK_PFN (OldIrql);
+ return;
+}
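+
+//
+// A simplified sketch of the batching pattern implemented by
+// MiDecommitPages and MiProcessValidPteList above (hypothetical routine,
+// not compiled in - names are illustrative only, the PDE-boundary
+// handling of the real loop is omitted, and the working set mutex is
+// assumed to be held as the Environment sections require): valid PTEs
+// are gathered without the PFN lock, then processed and TB-flushed in
+// groups of at most MM_VALID_PTE_SIZE per lock acquisition.
+//
+
+#if 0
+VOID
+MiExampleBatchedPteProcessing (
+ IN PMMPTE PointerPte,
+ IN PMMPTE EndingPte
+ )
+{
+ PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
+ ULONG count = 0;
+
+ while (PointerPte <= EndingPte) {
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+ // Process a full batch under one PFN lock acquisition.
+ //
+
+ if (count == MM_VALID_PTE_SIZE) {
+ MiProcessValidPteList (&ValidPteList[0], count);
+ count = 0;
+ }
+ ValidPteList[count] = PointerPte;
+ count += 1;
+ }
+ PointerPte += 1;
+ }
+ if (count != 0) {
+ MiProcessValidPteList (&ValidPteList[0], count);
+ }
+}
+#endif //0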
+
+
+VOID
+MiDeleteFreeVm (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress
+ )
+
+/*++
+
+Routine Description:
+
+ Nonpageable routine to acquire the PFN lock and call
+ MiDeleteVirtualAddresses.
+
+Arguments:
+
+ StartingAddress - Supplies the starting address of the range to delete.
+
+ EndingAddress - Supplies the ending address of the range to delete.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Delete the address range.
+ //
+
+ MiDeleteVirtualAddresses (StartingAddress,
+ EndingAddress,
+ FALSE,
+ (PMMVAD)NULL);
+
+ UNLOCK_PFN (OldIrql);
+
+}
diff --git a/private/ntos/mm/i386/data386.c b/private/ntos/mm/i386/data386.c
new file mode 100644
index 000000000..8f7c57175
--- /dev/null
+++ b/private/ntos/mm/i386/data386.c
@@ -0,0 +1,147 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ data386.c
+
+Abstract:
+
+ This module contains the private hardware specific global storage for
+ the memory management subsystem.
+
+Author:
+
+ Lou Perazzoli (loup) 22-Jan-1990
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+//
+// A zero Pte.
+//
+
+MMPTE ZeroPte = { 0 };
+
+
+//
+// A kernel zero PTE.
+//
+
+MMPTE ZeroKernelPte = {0x0};
+
+ULONG MmPteGlobal = 0; // Set to one later if processor supports Global Page
+
+MMPTE ValidKernelPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_ACCESS_MASK };
+// NOTE - MM_PTE_GLOBAL_MASK or'ed in later if processor supports Global Page
+
+
+MMPTE ValidUserPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_OWNER_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_ACCESS_MASK };
+
+
+MMPTE ValidPtePte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_ACCESS_MASK };
+
+
+MMPTE ValidPdePde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_ACCESS_MASK };
+
+
+MMPTE ValidKernelPde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_ACCESS_MASK };
+
+// NOTE - MM_PTE_GLOBAL_MASK or'ed in later if processor supports Global Page
+
+
+MMPTE DemandZeroPde = { MM_READWRITE << 5 };
+
+
+MMPTE DemandZeroPte = { MM_READWRITE << 5 };
+
+
+MMPTE TransitionPde = { MM_PTE_WRITE_MASK |
+ MM_PTE_OWNER_MASK |
+ MM_PTE_TRANSITION_MASK |
+ MM_READWRITE << 5 };
+
+
+MMPTE PrototypePte = { 0xFFFFF000 |
+ MM_PTE_PROTOTYPE_MASK |
+ MM_READWRITE << 5 };
+
+
+//
+// PTE which generates an access violation when referenced.
+//
+
+MMPTE NoAccessPte = {MM_NOACCESS << 5};
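+
+//
+// A minimal sketch of how these templates are used (hypothetical routine,
+// not compiled in - names are illustrative only): a template supplies the
+// control bits and the caller stamps in a page frame number before
+// publishing the PTE. The same pattern appears throughout this tree,
+// e.g. in hypermap.c.
+//
+
+#if 0
+VOID
+MiExampleStampPte (
+ IN PMMPTE PointerPte,
+ IN ULONG PageFrameIndex
+ )
+{
+ MMPTE TempPte;
+
+ TempPte = ValidKernelPte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+}
+#endif //0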
+
+//
+// Pool start and end.
+//
+
+PVOID MmNonPagedPoolStart;
+
+PVOID MmNonPagedPoolEnd = (PVOID)MM_NONPAGED_POOL_END;
+
+PVOID MmPagedPoolStart = (PVOID)MM_PAGED_POOL_START;
+
+PVOID MmPagedPoolEnd;
+
+ULONG MmKseg2Frame;
+
+//
+// Color tables for free and zeroed pages.
+//
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+#endif
+
+PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+//
+// Color tables for modified pages destined for the paging file.
+//
+
+MMPFNLIST MmModifiedPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST};
+
+
+ULONG MmSecondaryColorMask;
+
+//
+// Count of the number of modified pages destined for the paging file.
+//
+
+ULONG MmTotalPagesForPagingFile = 0;
+
+//
+// Pte reserved for mapping pages for the debugger.
+//
+
+PMMPTE MmDebugPte = (MiGetPteAddress(MM_DEBUG_VA));
+
+//
+// 16 PTEs reserved for mapping MDLs (64k max).
+//
+
+PMMPTE MmCrashDumpPte = (MiGetPteAddress(MM_CRASH_DUMP_VA));
diff --git a/private/ntos/mm/i386/debugsup.c b/private/ntos/mm/i386/debugsup.c
new file mode 100644
index 000000000..22d2abce2
--- /dev/null
+++ b/private/ntos/mm/i386/debugsup.c
@@ -0,0 +1,163 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ debugsup.c
+
+Abstract:
+
+ This module contains routines which provide support for the
+ kernel debugger.
+
+Author:
+
+ Lou Perazzoli (loup) 02-Aug-90
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+PVOID
+MmDbgReadCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ i386/486 implementation specific:
+
+ This routine checks the specified virtual address and if it is
+ valid and readable, it returns that virtual address, otherwise
+ it returns NULL.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ Returns NULL if the address is not valid or readable, otherwise
+ returns the virtual address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+
+}
+
+PVOID
+MmDbgWriteCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ i386/486 implementation specific:
+
+ This routine checks the specified virtual address and if it is
+ valid and writeable, it returns that virtual address, otherwise
+ it returns NULL.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ Returns NULL if the address is not valid or writable, otherwise
+ returns the virtual address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ PointerPte = MiGetPdeAddress (VirtualAddress);
+ if (PointerPte->u.Hard.LargePage == 0) {
+ PointerPte = MiGetPteAddress (VirtualAddress);
+ }
+
+ if ((PointerPte->u.Hard.Write == 0) &&
+ ((PointerPte->u.Long & HARDWARE_PTE_DIRTY_MASK) == 0)) {
+
+ //
+ // PTE is not writable, return NULL.
+ //
+
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
+
+PVOID
+MmDbgTranslatePhysicalAddress (
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ i386/486 implementation specific:
+
+ This routine maps the specified physical address and returns
+ the virtual address which maps the physical address.
+
+ The next call to MmDbgTranslatePhysicalAddress removes the
+ previous physical address translation; hence only a single
+ physical address can be examined at a time (accesses cannot
+ cross page boundaries).
+
+Arguments:
+
+ PhysicalAddress - Supplies the physical address to map and translate.
+
+Return Value:
+
+ The virtual address which corresponds to the physical address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PVOID BaseAddress;
+
+ BaseAddress = MiGetVirtualAddressMappedByPte (MmDebugPte);
+
+ KiFlushSingleTb (TRUE, BaseAddress);
+
+ *MmDebugPte = ValidKernelPte;
+ MmDebugPte->u.Hard.PageFrameNumber = PhysicalAddress.LowPart >> PAGE_SHIFT;
+
+ return (PVOID)((ULONG)BaseAddress + BYTE_OFFSET(PhysicalAddress.LowPart));
+}
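+
+//
+// A minimal usage sketch (hypothetical, not compiled in - the routine
+// name is illustrative only): because each call replaces the previous
+// mapping, the caller must consume the returned address before the next
+// translation, and an access may not cross a page boundary.
+//
+
+#if 0
+ULONG
+MiExampleReadPhysicalUlong (
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+{
+ PULONG Va;
+
+ Va = (PULONG)MmDbgTranslatePhysicalAddress (PhysicalAddress);
+ return *Va;
+}
+#endif //0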
diff --git a/private/ntos/mm/i386/hypermap.c b/private/ntos/mm/i386/hypermap.c
new file mode 100644
index 000000000..75704a53e
--- /dev/null
+++ b/private/ntos/mm/i386/hypermap.c
@@ -0,0 +1,370 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ hypermap.c
+
+Abstract:
+
+ This module contains the routines which map physical pages into
+ reserved PTEs within hyper space.
+
+Author:
+
+ Lou Perazzoli (loup) 5-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+PVOID
+MiMapPageInHyperSpace (
+ IN ULONG PageFrameIndex,
+ IN PKIRQL OldIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure maps the specified physical page into hyper space
+ and returns the virtual address which maps the page.
+
+ ************************************
+ * *
+ * Returns with a spin lock held!!! *
+ * *
+ ************************************
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+
+Return Value:
+
+ Returns the address where the requested page was mapped.
+
+ RETURNS WITH THE HYPERSPACE SPIN LOCK HELD!!!!
+
+ The routine MiUnmapHyperSpaceMap MUST be called to release the lock!!!!
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ ULONG offset;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ LOCK_HYPERSPACE(OldIrql);
+
+ if (PageFrameIndex < MmKseg2Frame) {
+ return (PVOID)(MM_KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+
+ PointerPte = MmFirstReservedMappingPte;
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+ // All the reserved PTEs have been used, make
+ // them all invalid.
+ //
+
+ MI_MAKING_MULTIPLE_PTES_INVALID (FALSE);
+
+ RtlZeroMemory (MmFirstReservedMappingPte,
+ (NUMBER_OF_MAPPING_PTES + 1) * sizeof(MMPTE));
+
+ //
+ // Use the page frame number field of the first PTE as an
+ // offset into the available mapping PTEs.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
+
+ //
+ // Flush entire TB only on this processor.
+ //
+
+ KeFlushEntireTb (TRUE, FALSE);
+ }
+
+ //
+ // Get offset to first free PTE.
+ //
+
+ offset = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Change offset for next time through.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = offset - 1;
+
+ //
+ // Point to free entry and make it valid.
+ //
+
+ PointerPte += offset;
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+
+ //
+ // Return the VA that maps the page.
+ //
+
+ return MiGetVirtualAddressMappedByPte (PointerPte);
+}
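+
+//
+// A minimal usage sketch (hypothetical, not compiled in - the routine
+// name is illustrative only): the page may be touched only while the
+// hyperspace lock returned by MiMapPageInHyperSpace is held, and the
+// unmap routine named in the comments above must then be called to
+// release that lock (its definition is not in this file).
+//
+
+#if 0
+VOID
+MiExampleTouchPhysicalPage (
+ IN ULONG PageFrameIndex
+ )
+{
+ KIRQL OldIrql;
+ PVOID Va;
+
+ Va = MiMapPageInHyperSpace (PageFrameIndex, &OldIrql);
+
+ RtlZeroMemory (Va, PAGE_SIZE);
+
+ //
+ // ... unmap here to release the hyperspace lock.
+ //
+}
+#endif //0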
+
+PVOID
+MiMapImageHeaderInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure maps the specified physical page into the
+ PTE within hyper space reserved explicitly for image page
+ header mapping. By reserving an explicit PTE for this
+ mapping, page faults can occur while the header is mapped within
+ hyperspace and no other hyperspace maps will affect this PTE.
+
+ Note that if another thread attempts to map an image at the
+ same time, it will be forced into a wait state until the
+ header is "unmapped".
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the virtual address where the specified physical page was
+ mapped.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ LOCK_PFN (OldIrql);
+
+ while (PointerPte->u.Long != 0) {
+
+ //
+ // If there is no event specified, set one up.
+ //
+
+ if (MmWorkingSetList->WaitingForImageMapping == (PKEVENT)NULL) {
+
+ //
+ // Set the global event into the field and wait for it.
+ //
+
+ MmWorkingSetList->WaitingForImageMapping = &MmImageMappingPteEvent;
+ }
+
+ //
+ // Release the PFN lock and wait on the event in an
+ // atomic operation.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(MmWorkingSetList->WaitingForImageMapping,
+ Executive,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ LOCK_PFN (OldIrql);
+ }
+
+ ASSERT (PointerPte->u.Long == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ UNLOCK_PFN (OldIrql);
+
+ return (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+}
+
+VOID
+MiUnmapImageHeaderInHyperSpace (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure unmaps the PTE reserved for mapping the image
+ header, flushes the TB, and, if the WaitingForImageMapping field
+ is not NULL, sets the specified event.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+ PKEVENT Event;
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ TempPte.u.Long = 0;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Capture the current state of the event field and clear it out.
+ //
+
+ Event = MmWorkingSetList->WaitingForImageMapping;
+
+ MmWorkingSetList->WaitingForImageMapping = (PKEVENT)NULL;
+
+ ASSERT (PointerPte->u.Long != 0);
+
+ KeFlushSingleTb (IMAGE_MAPPING_PTE,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Flush);
+
+ UNLOCK_PFN (OldIrql);
+
+ if (Event != (PKEVENT)NULL) {
+
+ //
+ // If there was an event specified, set the event.
+ //
+
+ KePulseEvent (Event, 0, FALSE);
+ }
+
+ return;
+}
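+
+//
+// A minimal sketch of the mapping protocol above (hypothetical routine,
+// not compiled in - the name is illustrative only): map and unmap must
+// always be paired, since a second mapper blocks on
+// MmImageMappingPteEvent until the unmap pulses it.
+//
+
+#if 0
+VOID
+MiExampleExamineImageHeader (
+ IN ULONG PageFrameIndex
+ )
+{
+ PVOID Header;
+
+ Header = MiMapImageHeaderInHyperSpace (PageFrameIndex);
+
+ //
+ // Examine the header; page faults are tolerated while it is mapped.
+ //
+
+ MiUnmapImageHeaderInHyperSpace ();
+}
+#endif //0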
+
+PVOID
+MiMapPageToZeroInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure maps the specified physical page into hyper space
+ and returns the virtual address which maps the page.
+
+ NOTE: the page is mapped into the location reserved for zeroing operations.
+ This is only to be used by the zeroing page thread.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the virtual address where the specified physical page was
+ mapped.
+
+Environment:
+
+ Must be holding the PFN lock.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ PVOID MappedAddress;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif
+
+ MM_PFN_LOCK_ASSERT();
+
+ if (PageFrameIndex < MmKseg2Frame) {
+ return (PVOID)(MM_KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+
+ PointerPte = MiGetPteAddress (ZEROING_PAGE_PTE);
+
+ MappedAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ TempPte.u.Long = 0;
+
+ KeFlushSingleTb (MappedAddress,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Flush);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ return MappedAddress;
+}
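+
+//
+// A minimal usage sketch (hypothetical, not compiled in - the routine
+// name is illustrative only): the zeroing page thread maps each page
+// with the PFN lock held and zeroes it; no explicit unmap is needed as
+// the next call flushes and replaces the single reserved mapping.
+//
+
+#if 0
+VOID
+MiExampleZeroFreePage (
+ IN ULONG PageFrameIndex
+ )
+{
+ PVOID Va;
+
+ MM_PFN_LOCK_ASSERT();
+
+ Va = MiMapPageToZeroInHyperSpace (PageFrameIndex);
+ RtlZeroMemory (Va, PAGE_SIZE);
+}
+#endif //0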
diff --git a/private/ntos/mm/i386/init386.c b/private/ntos/mm/i386/init386.c
new file mode 100644
index 000000000..2de988056
--- /dev/null
+++ b/private/ntos/mm/i386/init386.c
@@ -0,0 +1,1326 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ init386.c
+
+Abstract:
+
+ This module contains the machine dependent initialization for the
+ memory management component. It is specifically tailored to the
+ INTEL 486 machine.
+
+Author:
+
+ Lou Perazzoli (loup) 6-Jan-1990
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MiInitMachineDependent)
+#endif
+
+extern ULONG MmAllocatedNonPagedPool;
+
+#define MM_BIOS_START (0xA0000 >> PAGE_SHIFT)
+#define MM_BIOS_END (0xFFFFF >> PAGE_SHIFT)
+
+
+VOID
+MiInitMachineDependent (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This routine performs the necessary operations to enable virtual
+ memory. This includes building the page directory page, building
+ page table pages to map the code section, the data section, the
+ stack section and the trap handler.
+
+ It also initializes the PFN database and populates the free list.
+
+
+Arguments:
+
+ LoaderBlock - Supplies a pointer to the firmware setup loader block.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPFN BasePfn;
+ PMMPFN BottomPfn;
+ PMMPFN TopPfn;
+ BOOLEAN PfnInKseg0 = FALSE;
+ ULONG HighPage;
+ ULONG PagesLeft;
+ ULONG Range;
+ ULONG i, j;
+ ULONG PdePageNumber;
+ ULONG PdePage;
+ ULONG PageFrameIndex;
+ ULONG NextPhysicalPage;
+ ULONG OldFreeDescriptorLowMemCount;
+ ULONG OldFreeDescriptorLowMemBase;
+ ULONG OldFreeDescriptorCount;
+ ULONG OldFreeDescriptorBase;
+ ULONG PfnAllocation;
+ ULONG NumberOfPages;
+ ULONG MaxPool;
+ PEPROCESS CurrentProcess;
+ ULONG DirBase;
+ ULONG MostFreePage = 0;
+ ULONG MostFreeLowMem = 0;
+ PLIST_ENTRY NextMd;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem;
+ PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
+ MMPTE TempPte;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE Pde;
+ PMMPTE StartPde;
+ PMMPTE EndPde;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ ULONG va;
+ ULONG SavedSize;
+ KIRQL OldIrql;
+ ULONG MapLargePages = 0;
+ PVOID NonPagedPoolStartVirtual;
+ ULONG LargestFreePfnCount = 0;
+ ULONG LargestFreePfnStart;
+
+ if ( InitializationPhase == 1) {
+
+ if ((KeFeatureBits & KF_LARGE_PAGE) &&
+ (MmNumberOfPhysicalPages > ((31*1024*1024) >> PAGE_SHIFT))) {
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Map lower 512MB of physical memory as large pages starting
+ // at address 0x80000000
+ //
+
+ PointerPde = MiGetPdeAddress (MM_KSEG0_BASE);
+ LastPte = MiGetPdeAddress (MM_KSEG2_BASE);
+ TempPte = ValidKernelPde;
+ TempPte.u.Hard.PageFrameNumber = 0;
+ TempPte.u.Hard.LargePage = 1;
+
+ do {
+ if (PointerPde->u.Hard.Valid == 1) {
+ PageFrameIndex = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+ MI_SET_PFN_DELETED (Pfn1);
+ MiDecrementReferenceCount (PageFrameIndex);
+ KeFlushSingleTb (MiGetVirtualAddressMappedByPte (PointerPde),
+ TRUE,
+ TRUE,
+ (PHARDWARE_PTE)PointerPde,
+ TempPte.u.Flush);
+ KeFlushEntireTb (TRUE, TRUE); //p6 errata...
+ } else {
+ *PointerPde = TempPte;
+ }
+ TempPte.u.Hard.PageFrameNumber += MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT;
+ PointerPde += 1;
+ } while (PointerPde < LastPte);
+
+ UNLOCK_PFN (OldIrql);
+ MmKseg2Frame = (512*1024*1024) >> PAGE_SHIFT;
+ }
+
+ return;
+ }
+
+ ASSERT (InitializationPhase == 0);
+
+ if (KeFeatureBits & KF_GLOBAL_PAGE) {
+ ValidKernelPte.u.Long |= MM_PTE_GLOBAL_MASK;
+ ValidKernelPde.u.Long |= MM_PTE_GLOBAL_MASK;
+ MmPteGlobal = 1;
+ }
+
+ TempPte = ValidKernelPte;
+
+ PointerPte = MiGetPdeAddress (PDE_BASE);
+
+ PdePageNumber = PointerPte->u.Hard.PageFrameNumber;
+
+ DirBase = PointerPte->u.Hard.PageFrameNumber << PAGE_SHIFT;
+
+ PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = *( (PULONG) &DirBase);
+
+ KeSweepDcache (FALSE);
+
+ //
+ // Unmap low 2Gb of memory.
+ //
+
+ PointerPde = MiGetPdeAddress(0);
+ LastPte = MiGetPdeAddress (MM_HIGHEST_USER_ADDRESS);
+
+ while (PointerPde <= LastPte) {
+ PointerPde->u.Long = 0;
+ PointerPde += 1;
+ }
+
+ //
+ // Get the lower bound of the free physical memory and the
+ // number of physical pages by walking the memory descriptor lists.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ if ((MemoryDescriptor->MemoryType != LoaderFirmwarePermanent) &&
+ (MemoryDescriptor->MemoryType != LoaderSpecialMemory)) {
+
+ MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
+ if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
+ MmLowestPhysicalPage = MemoryDescriptor->BasePage;
+ }
+ if ((MemoryDescriptor->BasePage + MemoryDescriptor->PageCount) >
+ MmHighestPhysicalPage) {
+ MmHighestPhysicalPage =
+ MemoryDescriptor->BasePage + MemoryDescriptor->PageCount -1;
+ }
+
+ //
+ // Locate the largest free block and the largest free block
+ // below 16mb.
+ //
+
+ if ((MemoryDescriptor->MemoryType == LoaderFree) ||
+ (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
+ (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
+ (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
+
+ if (MemoryDescriptor->PageCount > MostFreePage) {
+ MostFreePage = MemoryDescriptor->PageCount;
+ FreeDescriptor = MemoryDescriptor;
+ }
+ if (MemoryDescriptor->BasePage < 0x1000) {
+
+ //
+ // This memory descriptor is below 16mb.
+ //
+
+ if ((MostFreeLowMem < MemoryDescriptor->PageCount) &&
+ (MostFreeLowMem < ((ULONG)0x1000 - MemoryDescriptor->BasePage))) {
+
+ MostFreeLowMem = (ULONG)0x1000 - MemoryDescriptor->BasePage;
+ if (MemoryDescriptor->PageCount < MostFreeLowMem) {
+ MostFreeLowMem = MemoryDescriptor->PageCount;
+ }
+ FreeDescriptorLowMem = MemoryDescriptor;
+ }
+ }
+ }
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+ NextPhysicalPage = FreeDescriptorLowMem->BasePage;
+
+ OldFreeDescriptorLowMemCount = FreeDescriptorLowMem->PageCount;
+ OldFreeDescriptorLowMemBase = FreeDescriptorLowMem->BasePage;
+
+ OldFreeDescriptorCount = FreeDescriptor->PageCount;
+ OldFreeDescriptorBase = FreeDescriptor->BasePage;
+
+ NumberOfPages = FreeDescriptorLowMem->PageCount;
+
+ if (MmNumberOfPhysicalPages < 1100) {
+ KeBugCheckEx (INSTALL_MORE_MEMORY,
+ MmNumberOfPhysicalPages,
+ MmLowestPhysicalPage,
+ MmHighestPhysicalPage,
+ 0);
+ }
+
+ //
+ // Build non-paged pool using the physical pages following the
+ // data page. Non-paged pool grows
+ // from the high range of the virtual address space and expands
+ // downward.
+ //
+ // At this time non-paged pool is constructed so virtual addresses
+ // are also physically contiguous.
+ //
+
+ if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
+ (7 * (MmNumberOfPhysicalPages >> 3))) {
+
+ //
+ // More than 7/8 of memory allocated to nonpagedpool, reset to 0.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = 0;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
+
+ //
+ // Calculate the size of nonpaged pool.
+ // Use the minimum size, then for every MB above 4mb add extra
+ // pages.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
+
+ MmSizeOfNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMinAdditionNonPagedPoolPerMb;
+ }
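+
+ //
+ // Worked example: on a 32mb machine with 4k pages
+ // MmNumberOfPhysicalPages is 8192, so assuming the default values
+ // of 256k for MmMinimumNonPagedPoolSize and 32k for
+ // MmMinAdditionNonPagedPoolPerMb (both defined elsewhere in this
+ // tree), the block above yields
+ // 256k + ((8192 - 1024)/256) * 32k = 256k + 28 * 32k = 1152k.
+ //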
+
+ if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
+ MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
+ }
+
+ //
+ // Align to page size boundary.
+ //
+
+ MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
+
+ //
+ // Calculate the maximum size of pool.
+ //
+
+ if (MmMaximumNonPagedPoolInBytes == 0) {
+
+ //
+ // Calculate the size of nonpaged pool. If 4mb or less use
+ // the minimum size, then for every MB above 4mb add extra
+ // pages.
+ //
+
+ MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
+
+ //
+ // Make sure enough expansion for pfn database exists.
+ //
+
+ MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ MmMaximumNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMaxAdditionNonPagedPoolPerMb;
+ }
+
+ MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
+ (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ if (MmMaximumNonPagedPoolInBytes < MaxPool) {
+ MmMaximumNonPagedPoolInBytes = MaxPool;
+ }
+
+ if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
+ MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
+ }
+
+ //
+ // Add in the PFN database size.
+ //
+
+ PfnAllocation = 1 + ((((MmHighestPhysicalPage + 1) * sizeof(MMPFN)) +
+ (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
+ >> PAGE_SHIFT);
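+
+ //
+ // Worked example: with MmHighestPhysicalPage equal to 8191, and
+ // assuming sizeof(MMPFN) is 24 bytes and 64 secondary colors with
+ // 8-byte MMCOLOR_TABLES entries, this is
+ // 1 + ((8192 * 24 + 64 * 8 * 2) >> 12) = 1 + 48 = 49 pages.
+ //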
+
+ MmMaximumNonPagedPoolInBytes += PfnAllocation << PAGE_SHIFT;
+
+ MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
+ - MmMaximumNonPagedPoolInBytes);
+
+ MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
+
+ MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
+
+ //
+ // Calculate the starting PDE for the system PTE pool which is
+ // right below the nonpaged pool.
+ //
+
+ MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
+ (~PAGE_DIRECTORY_MASK));
+
+ if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
+ MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
+ MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart -
+ (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
+ ASSERT (MmNumberOfSystemPtes > 1000);
+ }
+
+ StartPde = MiGetPdeAddress (MmNonPagedSystemStart);
+
+ EndPde = MiGetPdeAddress ((PVOID)((PCHAR)MmNonPagedPoolEnd - 1));
+
+ //
+ // Start building nonpaged pool with the largest free chunk of
+ // memory below 16mb.
+ //
+
+ while (StartPde <= EndPde) {
+ ASSERT(StartPde->u.Hard.Valid == 0);
+
+ //
+ // Map in a page directory page.
+ //
+
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NumberOfPages -= 1;
+ NextPhysicalPage += 1;
+ *StartPde = TempPte;
+ PointerPte = MiGetVirtualAddressMappedByPte (StartPde);
+ RtlZeroMemory (PointerPte, PAGE_SIZE);
+ StartPde += 1;
+ }
+
+ ASSERT (NumberOfPages > 0);
+
+//fixfix - remove later
+ if ((KeFeatureBits & KF_LARGE_PAGE) &&
+ (MmNumberOfPhysicalPages > ((31*1024*1024) >> PAGE_SHIFT))) {
+
+ //
+ // Map lower 512MB of physical memory as large pages starting
+ // at address 0x80000000
+ //
+
+ PointerPde = MiGetPdeAddress (MM_KSEG0_BASE);
+ LastPte = MiGetPdeAddress (MM_KSEG2_BASE) - 1;
+ if (MmHighestPhysicalPage < MM_PAGES_IN_KSEG0) {
+ LastPte = MiGetPdeAddress (MM_KSEG0_BASE +
+ (MmHighestPhysicalPage << PAGE_SHIFT));
+ }
+ PointerPte = MiGetPteAddress (MM_KSEG0_BASE);
+
+ TempPte = ValidKernelPde;
+ j = 0;
+
+ do {
+ PMMPTE PPte;
+
+ Range = 0;
+ if (PointerPde->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPde = TempPte;
+ Range = 1;
+ }
+ PPte = PointerPte;
+ for (i = 0; i < PTE_PER_PAGE; i++) {
+ if (Range || (PPte->u.Hard.Valid == 0)) {
+ *PPte = ValidKernelPte;
+ PPte->u.Hard.PageFrameNumber = i + j;
+ }
+ PPte += 1;
+ }
+ PointerPde += 1;
+ PointerPte += PTE_PER_PAGE;
+ j += PTE_PER_PAGE;
+ } while (PointerPde <= LastPte);
+ MapLargePages = 1; //fixfix save this line!
+ }
+//end of remove
+
+ PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
+ NonPagedPoolStartVirtual = MmNonPagedPoolStart;
+
+ //
+ // Fill in the PTEs for non-paged pool.
+ //
+
+ SavedSize = MmSizeOfNonPagedPoolInBytes;
+
+ if (MapLargePages) {
+ if (MmSizeOfNonPagedPoolInBytes > (NumberOfPages << (PAGE_SHIFT))) {
+ MmSizeOfNonPagedPoolInBytes = NumberOfPages << PAGE_SHIFT;
+ }
+
+ NonPagedPoolStartVirtual = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
+ MmSizeOfNonPagedPoolInBytes);
+
+ //
+ // No need to get page table pages for these as we can reference
+ // them via large pages.
+ //
+
+ MmNonPagedPoolStart =
+ (PVOID)(MM_KSEG0_BASE | (NextPhysicalPage << PAGE_SHIFT));
+ NextPhysicalPage += MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
+ NumberOfPages -= MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+
+ MmSubsectionBase = (ULONG)MmNonPagedPoolStart;
+ if (NextPhysicalPage < (MM_SUBSECTION_MAP >> PAGE_SHIFT)) {
+ MmSubsectionBase = MM_KSEG0_BASE;
+ MmSubsectionTopPage = MM_SUBSECTION_MAP >> PAGE_SHIFT;
+ }
+ MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
+ MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
+ (SavedSize - MmSizeOfNonPagedPoolInBytes));
+ } else {
+
+ LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes - 1);
+ while (PointerPte <= LastPte) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ PointerPte++;
+ }
+ MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
+ MmSizeOfNonPagedPoolInBytes);
+ }
+
+ //
+ // Non-paged pages now exist, build the pool structures.
+ //
+
+ MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
+
+ MmMaximumNonPagedPoolInBytes -= (SavedSize - MmSizeOfNonPagedPoolInBytes);
+ MiInitializeNonPagedPool (MmNonPagedPoolStart);
+ MmMaximumNonPagedPoolInBytes += (SavedSize - MmSizeOfNonPagedPoolInBytes);
+
+ //
+ // Before Non-paged pool can be used, the PFN database must
+ // be built. This is due to the fact that the start and end of
+ // allocation bits for nonpaged pool are maintained in the
+ // PFN elements for the corresponding pages.
+ //
+
+ //
+ // Calculate the number of pages required from page zero to
+ // the highest page.
+ //
+ // Get secondary color value from registry.
+ //
+
+ MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
+
+ if (MmSecondaryColors == 0) {
+ MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
+ } else {
+
+ //
+ // Make sure value is power of two and within limits.
+ //
+
+ if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) ||
+ (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
+ (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
+ MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
+ }
+ }
+
+ MmSecondaryColorMask = MmSecondaryColors - 1;
+
+ //
+ // Get the number of secondary colors and add the array for tracking
+ // secondary colors to the end of the PFN database.
+ //
+
+ HighPage = FreeDescriptor->BasePage + FreeDescriptor->PageCount;
+ PagesLeft = HighPage - NextPhysicalPage;
+
+ if (MapLargePages &&
+ (PagesLeft >= PfnAllocation) &&
+ (HighPage < MM_PAGES_IN_KSEG0)) {
+
+ //
+ // Allocate the PFN database in kseg0.
+ //
+ // Compute the address of the PFN by allocating the appropriate
+ // number of pages from the end of the free descriptor.
+ //
+
+ PfnInKseg0 = TRUE;
+ MmPfnDatabase = (PMMPFN)(MM_KSEG0_BASE |
+ ((HighPage - PfnAllocation) << PAGE_SHIFT));
+
+ RtlZeroMemory(MmPfnDatabase, PfnAllocation * PAGE_SIZE);
+ FreeDescriptor->PageCount -= PfnAllocation;
+
+ //
+ // The PFN database was NOT allocated in virtual memory, make sure
+ // the extended nonpaged pool size is not too large.
+ //
+
+ if (MmTotalFreeSystemPtes[NonPagedPoolExpansion] >
+ (MM_MAX_ADDITIONAL_NONPAGED_POOL >> PAGE_SHIFT)) {
+ //
+ // Reserve the expanded pool PTEs so they cannot be used.
+ //
+
+ MiReserveSystemPtes (
+ MmTotalFreeSystemPtes[NonPagedPoolExpansion] -
+ (MM_MAX_ADDITIONAL_NONPAGED_POOL >> PAGE_SHIFT),
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ TRUE);
+ }
+ } else {
+
+ //
+ // Calculate the start of the PFN database (it starts at physical
+ // page zero, even if the lowest physical page is not zero).
+ //
+
+ PointerPte = MiReserveSystemPtes (PfnAllocation,
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ TRUE);
+
+ MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
+
+ //
+ // Go through the memory descriptors and for each physical page
+ // make sure the PFN database has a valid PTE to map it. This allows
+ // machines with sparse physical memory to have a minimal PFN
+ // database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ if ((MemoryDescriptor->MemoryType != LoaderFirmwarePermanent) &&
+ (MemoryDescriptor->MemoryType != LoaderSpecialMemory)) {
+
+ PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage));
+
+ LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage +
+ MemoryDescriptor->PageCount))) - 1);
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+ }
+
+ //
+ // Initialize support for colored pages.
+ //
+
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
+ &MmPfnDatabase[MmHighestPhysicalPage + 1];
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ //
+ // Make sure the PTEs are mapped.
+ //
+
+ if (MmFreePagesByColor[0] > (PMMCOLOR_TABLES)MM_KSEG2_BASE) {
+ PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
+
+ LastPte = MiGetPteAddress (
+ (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+
+ }
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ }
+
+ for (i = 0; i < MmSecondaryColors; i++) {
+ MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ }
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList;
+ MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST;
+ }
+#endif
+
+ //
+ // Add nonpaged pool to PFN database if mapped via KSEG0.
+ //
+
+ PointerPde = MiGetPdeAddress (PTE_BASE);
+
+ if (MmNonPagedPoolStart < (PVOID)MM_KSEG2_BASE) {
+ j = MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedPoolStart);
+ Pfn1 = MI_PFN_ELEMENT (j);
+ i = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
+ do {
+ PointerPde = MiGetPdeAddress (MM_KSEG0_BASE + (j << PAGE_SHIFT));
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1->PteAddress = (PMMPTE)(j << PAGE_SHIFT);
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = 0;
+ j += 1;
+ Pfn1 += 1;
+ i -= 1;
+ } while ( i );
+ }
+
+ //
+ // Go through the page table entries and for any page which is
+ // valid, update the corresponding PFN database element.
+ //
+
+ Pde = MiGetPdeAddress (NULL);
+ va = 0;
+
+ for (i = 0; i < PDE_PER_PAGE; i++) {
+
+ if ((Pde->u.Hard.Valid == 1) && (Pde->u.Hard.LargePage == 0)) {
+
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PdePage);
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = 0;
+
+ PointerPte = MiGetPteAddress (va);
+
+ //
+ // Set global bit.
+ //
+
+ Pde->u.Long |= MiDetermineUserGlobalPteMask (PointerPte) &
+ ~MM_PTE_ACCESS_MASK;
+ for (j = 0 ; j < PTE_PER_PAGE; j++) {
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ PointerPte->u.Long |= MiDetermineUserGlobalPteMask (PointerPte) &
+ ~MM_PTE_ACCESS_MASK;
+ Pfn1->u2.ShareCount += 1;
+
+ if ((PointerPte->u.Hard.PageFrameNumber <=
+ MmHighestPhysicalPage) &&
+ (MiGetVirtualAddressMappedByPte(PointerPte) >
+ (PVOID)MM_KSEG2_BASE)) {
+ Pfn2 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
+
+ if (MmIsAddressValid(Pfn2) &&
+ MmIsAddressValid((PUCHAR)(Pfn2+1)-1)) {
+
+ Pfn2->PteFrame = PdePage;
+ Pfn2->PteAddress = PointerPte;
+ Pfn2->u2.ShareCount += 1;
+ Pfn2->u3.e2.ReferenceCount = 1;
+ Pfn2->u3.e1.PageLocation = ActiveAndValid;
+ Pfn2->u3.e1.PageColor = 0;
+ }
+ }
+ }
+ va += PAGE_SIZE;
+ PointerPte++;
+ }
+ } else {
+ va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
+ }
+ Pde++;
+ }
+
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ KeFlushCurrentTb();
+ KeLowerIrql (OldIrql);
+
+ //
+ // If page zero is still unused, mark it as in use. This is
+ // temporary as we want to find bugs where a physical page
+ // is specified as zero.
+ //
+
+ Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Make the reference count non-zero and point it into a
+ // page directory.
+ //
+
+ Pde = MiGetPdeAddress (0xb0000000);
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 0xfff0;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = 0;
+ }
+
+ // end of temporary set to physical page zero.
+
+ //
+ // Walk through the memory descriptors and add pages to the
+ // free list in the PFN database.
+ //
+
+ if (NextPhysicalPage <= (FreeDescriptorLowMem->PageCount +
+ FreeDescriptorLowMem->BasePage)) {
+
+ //
+ // We haven't used the other descriptor.
+ //
+
+ FreeDescriptorLowMem->PageCount -= NextPhysicalPage -
+ OldFreeDescriptorLowMemBase;
+ FreeDescriptorLowMem->BasePage = NextPhysicalPage;
+
+ } else {
+ FreeDescriptorLowMem->PageCount = 0;
+ FreeDescriptor->PageCount -= NextPhysicalPage - OldFreeDescriptorBase;
+ FreeDescriptor->BasePage = NextPhysicalPage;
+
+ }
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ i = MemoryDescriptor->PageCount;
+ NextPhysicalPage = MemoryDescriptor->BasePage;
+
+ switch (MemoryDescriptor->MemoryType) {
+ case LoaderBad:
+ while (i != 0) {
+ MiInsertPageInList (MmPageLocationList[BadPageList],
+ NextPhysicalPage);
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ case LoaderFree:
+ case LoaderLoadedProgram:
+ case LoaderFirmwareTemporary:
+ case LoaderOsloaderStack:
+
+ if (i > LargestFreePfnCount) {
+ LargestFreePfnCount = i;
+ LargestFreePfnStart = NextPhysicalPage;
+ }
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Set the PTE address to the physical page for
+ // virtual address alignment checking.
+ //
+
+ Pfn1->PteAddress =
+ (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ NextPhysicalPage);
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ case LoaderFirmwarePermanent:
+ case LoaderSpecialMemory:
+ break;
+
+ default:
+
+ PointerPte = MiGetPteAddress (0x80000000 +
+ (NextPhysicalPage << PAGE_SHIFT));
+
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+
+ //
+ // Set page as in use.
+ //
+
+ PointerPde = MiGetPdeAddress (0x80000000 +
+ (NextPhysicalPage << PAGE_SHIFT));
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ Pfn1->PteFrame = PdePageNumber;
+ if (!MapLargePages) {
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+ }
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = 0;
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ PointerPte += 1;
+ }
+ break;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+
+ if (PfnInKseg0 == FALSE) {
+
+ //
+ // Indicate that the PFN database is allocated in NonPaged pool.
+ //
+
+ PointerPte = MiGetPteAddress (&MmPfnDatabase[MmLowestPhysicalPage]);
+ Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+
+ //
+ // Set the end of the allocation.
+ //
+
+ PointerPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPhysicalPage]);
+ Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ } else {
+
+ //
+ // The PFN database is allocated in KSEG0.
+ //
+ // Mark all pfn entries for the pfn pages in use.
+ //
+
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (MmPfnDatabase);
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ do {
+ Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
+ Pfn1->u3.e1.PageColor = 0;
+ Pfn1->u3.e2.ReferenceCount += 1;
+ PageFrameIndex += 1;
+ Pfn1 += 1;
+ PfnAllocation -= 1;
+ } while (PfnAllocation != 0);
+
+ //
+ // Scan the PFN database backward for pages that are completely zero.
+ // These pages are unused and can be added to the free list.
+ //
+
+ BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
+ do {
+
+ //
+ // Compute the address of the start of the page that is next
+ // lower in memory and scan backwards until that page address
+ // is reached or just crossed.
+ //
+
+ if (((ULONG)BottomPfn & (PAGE_SIZE - 1)) != 0) {
+ BasePfn = (PMMPFN)((ULONG)BottomPfn & ~(PAGE_SIZE - 1));
+ TopPfn = BottomPfn + 1;
+
+ } else {
+ BasePfn = (PMMPFN)((ULONG)BottomPfn - PAGE_SIZE);
+ TopPfn = BottomPfn;
+ }
+
+ while (BottomPfn > BasePfn) {
+ BottomPfn -= 1;
+ }
+
+ //
+ // If the entire range over which the PFN entries span is
+ // completely zero and the PFN entry that maps the page is
+ // not in the range, then add the page to the appropriate
+ // free list.
+ //
+
+ Range = (ULONG)TopPfn - (ULONG)BottomPfn;
+ if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
+
+ //
+ // Set the PTE address to the physical page for virtual
+ // address alignment checking.
+ //
+
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BasePfn);
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
+ ASSERT (Pfn1->PteAddress == (PMMPTE)(PageFrameIndex << PTE_SHIFT));
+ Pfn1->u3.e2.ReferenceCount = 0;
+ PfnAllocation += 1;
+ Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
+ Pfn1->u3.e1.PageColor = 0;
+ MiInsertPageInList(MmPageLocationList[FreePageList],
+ PageFrameIndex);
+ }
+
+ } while (BottomPfn > MmPfnDatabase);
+ }
+
+ //
+ // Indicate that the nonpaged must succeed pool is allocated in
+ // nonpaged pool.
+ //
+
+ PointerPte = MiGetPteAddress(MmNonPagedMustSucceed);
+ i = MmSizeOfNonPagedMustSucceed;
+ while ((LONG)i > 0) {
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ i -= PAGE_SIZE;
+ PointerPte += 1;
+ }
+
+ //
+ // Adjust the memory descriptors to indicate that free pool has
+ // been used for nonpaged pool creation.
+ //
+
+ FreeDescriptorLowMem->PageCount = OldFreeDescriptorLowMemCount;
+ FreeDescriptorLowMem->BasePage = OldFreeDescriptorLowMemBase;
+
+ FreeDescriptor->PageCount = OldFreeDescriptorCount;
+ FreeDescriptor->BasePage = OldFreeDescriptorBase;
+
+// moved from above for pool hack routines...
+ KeInitializeSpinLock (&MmSystemSpaceLock);
+
+ KeInitializeSpinLock (&MmPfnLock);
+
+ //
+ // Initialize the nonpaged available PTEs for mapping I/O space
+ // and kernel stacks.
+ //
+
+ PointerPte = MiGetPteAddress (MmNonPagedSystemStart);
+ ASSERT (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0);
+
+ MmNumberOfSystemPtes = MiGetPteAddress(NonPagedPoolStartVirtual) - PointerPte - 1;
+
+ MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
+
+ //
+ // Add pages to nonpaged pool if we could not allocate enough
+ // physically contiguous memory.
+ //
+
+ j = (SavedSize - MmSizeOfNonPagedPoolInBytes) >> PAGE_SHIFT;
+
+ if (j) {
+ ULONG CountContiguous;
+
+ CountContiguous = LargestFreePfnCount;
+ PageFrameIndex = LargestFreePfnStart - 1;
+
+ PointerPte = MiGetPteAddress (NonPagedPoolStartVirtual);
+ TempPte = ValidKernelPte;
+
+ while (j) {
+
+ if (CountContiguous) {
+ PageFrameIndex += 1;
+ MiUnlinkFreeOrZeroedPage (PageFrameIndex);
+ CountContiguous -= 1;
+ } else {
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+ }
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u2.ShareCount = 1;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+ Pfn1->PteFrame = MiGetPteAddress(PointerPte)->u.Hard.PageFrameNumber;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+ PointerPte += 1;
+
+ j -= 1;
+ }
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ Pfn1 = MI_PFN_ELEMENT (MiGetPteAddress(NonPagedPoolStartVirtual)->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+
+ Range = MmAllocatedNonPagedPool;
+ MiFreePoolPages (NonPagedPoolStartVirtual);
+ MmAllocatedNonPagedPool = Range;
+ }
+
+ //
+ // Initialize the nonpaged pool.
+ //
+
+ InitializePool (NonPagedPool, 0);
+
+
+ //
+ // Initialize memory management structures for this process.
+ //
+
+ //
+ // Build working set list. This requires the creation of a PDE
+ // to map HYPER space and the page table page pointed to
+ // by the PDE must be initialized.
+ //
+ // Note, we can't remove a zeroed page as hyper space does not
+ // exist yet and we map non-zeroed pages into hyper space to zero them.
+ //
+
+ TempPte = ValidPdePde;
+
+ PointerPte = MiGetPdeAddress(HYPER_SPACE);
+ PageFrameIndex = MiRemoveAnyPage (0);
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ KeFlushCurrentTb();
+ KeLowerIrql (OldIrql);
+
+// MiInitializePfn (PageFrameIndex, PointerPte, 1L);
+
+ //
+ // Point to the page table page we just created and zero it.
+ //
+
+ PointerPte = MiGetPteAddress(HYPER_SPACE);
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ //
+ // Hyper space now exists, set the necessary variables.
+ //
+
+ MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
+ MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
+
+ MmWorkingSetList = WORKING_SET_LIST;
+ MmWsle = (PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL));
+
+ //
+ // Initialize this process's memory management structures including
+ // the working set list.
+ //
+
+ //
+ // The pfn element for the page directory has already been initialized,
+ // zero the reference count and the share count so they won't be
+ // wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+ //
+ // Get a page for the working set list and map it into the Page
+ // directory at the page after hyperspace.
+ //
+
+ PointerPte = MiGetPteAddress (HYPER_SPACE);
+ PageFrameIndex = MiRemoveAnyPage (0);
+
+ CurrentProcess->WorkingSetPage = PageFrameIndex;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ PointerPde = MiGetPdeAddress (HYPER_SPACE) + 1;
+
+ *PointerPde = TempPte;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ KeFlushCurrentTb();
+ KeLowerIrql (OldIrql);
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
+ CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
+
+ MmInitializeProcessAddressSpace (CurrentProcess,
+ (PEPROCESS)NULL,
+ (PVOID)NULL);
+ *PointerPde = ZeroPte;
+
+ //
+ // Check to see if moving the secondary page structures to the end
+ // of the PFN database is a waste of memory. And if so, copy them
+ // to nonpaged pool.
+ //
+ // If the PFN database ends on a page aligned boundary and the
+ // size of the two arrays is less than a page, free the page
+ // and allocate nonpaged pool for this.
+ //
+
+ if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
+ ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
+
+ PMMCOLOR_TABLES c;
+
+ c = MmFreePagesByColor[0];
+
+ MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
+ ' mM');
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ RtlMoveMemory (MmFreePagesByColor[0],
+ c,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
+
+ //
+ // Free the page.
+ //
+
+ if (c > (PMMCOLOR_TABLES)MM_KSEG2_BASE) {
+ PointerPte = MiGetPteAddress(c);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ *PointerPte = ZeroKernelPte;
+ } else {
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c);
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT ((Pfn1->u2.ShareCount <= 1) && (Pfn1->u3.e2.ReferenceCount <= 1));
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ MI_SET_PFN_DELETED (Pfn1);
+#if DBG
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+#endif //DBG
+ MiDecrementReferenceCount (PageFrameIndex);
+ }
+
+ //
+ // Handle physical pages in BIOS memory range (640k to 1mb) by
+ // explicitly initializing them in the PFN database so that they
+ // can be handled properly when I/O is done to these pages (or virtual
+ // reads across processes).
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (MM_BIOS_START);
+ Pfn2 = MI_PFN_ELEMENT (MM_BIOS_END);
+
+ do {
+ if ((Pfn1->u2.ShareCount == 0) &&
+ (Pfn1->u3.e2.ReferenceCount == 0) &&
+ (Pfn1->PteAddress == 0)) {
+
+ //
+ // Set this as in use.
+ //
+
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->PteAddress = (PMMPTE)0x7FFFFFFF;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = 0;
+ }
+ Pfn1 += 1;
+ } while (Pfn1 <= Pfn2);
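+
+ //
+ // Editorial note: with 4k pages the 640k-1mb window spans page
+ // frames 640k >> PAGE_SHIFT = 0xA0 through (1mb >> PAGE_SHIFT) - 1,
+ // i.e. 0xFF, which is presumably the range MM_BIOS_START through
+ // MM_BIOS_END used above.
+ //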
+ return;
+}
+
diff --git a/private/ntos/mm/i386/mi386.h b/private/ntos/mm/i386/mi386.h
new file mode 100644
index 000000000..8a79dcc59
--- /dev/null
+++ b/private/ntos/mm/i386/mi386.h
@@ -0,0 +1,2105 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ mi386.h
+
+Abstract:
+
+ This module contains the private data structures and procedure
+ prototypes for the hardware dependent portion of the
+ memory management system.
+
+ This module is specifically tailored for the Intel 386,
+
+Author:
+
+ Lou Perazzoli (loup) 6-Jan-1990
+
+Revision History:
+
+--*/
+
+
+/*++
+
+ Virtual Memory Layout on the i386 is:
+
+ +------------------------------------+
+ 00000000 | |
+ | |
+ | |
+ | User Mode Addresses |
+ | |
+ | All pages within this range |
+ | are potentially accessible while |
+ | the CPU is in USER mode. |
+ | |
+ | |
+ +------------------------------------+
+ 7ffff000 | 64k No Access Area |
+ +------------------------------------+
+ 80000000 | |
+ | HAL loads kernel and initial |
+ | boot drivers in first 16mb |
+ | of this region. |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ 81000000 | |
+ | Unused NO ACCESS |
+ | |
+ +------------------------------------+
+ A0000000 | System mapped views |
+ | |
+ | |
+ +------------------------------------+
+ A3000000 | |
+ | Unused NO ACCESS |
+ | |
+ +------------------------------------+
+ C0000000 | Page Table Pages mapped through |
+ | this 4mb region |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ C0400000 | HyperSpace - working set lists |
+ | and per process memory management |
+ | structures mapped in this 4mb |
+ | region. |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C0800000 | NO ACCESS AREA (4MB) |
+ | |
+ +------------------------------------+
+ C0C00000 | System Cache Structures |
+ | reside in this 4mb region |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C1000000 | System cache resides here. |
+ | Kernel mode access only. |
+ | |
+ | |
+ +------------------------------------+
+ E1000000 | Start of paged system area |
+ | Kernel mode access only. |
+ | |
+ | |
+ | |
+ +------------------------------------+
+ | |
+ | Kernel mode access only. |
+ | |
+ | |
+ FFBFFFFF | NonPaged System area |
+ +------------------------------------+
+ FFC00000 | Last 4mb reserved for HAL usage |
+ +------------------------------------+
+
+--*/
+
+#define MM_KSEG0_BASE ((ULONG)0x80000000)
+
+#define MM_KSEG2_BASE ((ULONG)0xA0000000)
+
+#define MM_PAGES_IN_KSEG0 ((MM_KSEG2_BASE - MM_KSEG0_BASE) >> PAGE_SHIFT)
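+
+//
+// Editorial note: with PAGE_SHIFT == 12 this works out to
+// (0xA0000000 - 0x80000000) >> 12 = 0x20000 pages, i.e. the 512mb
+// that can be mapped directly through KSEG0.
+//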
+
+extern ULONG MmKseg2Frame;
+
+//
+// PAGE_SIZE for the Intel i386 is 4k, so a virtual page number is 20 bits
+// with a PAGE_SHIFT-bit byte offset.
+//
+
+#define MM_VIRTUAL_PAGE_SHIFT 20
+
+//
+// Address space layout definitions.
+//
+
+#define CODE_START MM_KSEG0_BASE
+
+#define CODE_END MM_KSEG2_BASE
+
+#define MM_SYSTEM_RANGE_START (0x80000000)
+
+#define PDE_BASE ((ULONG)0xC0300000)
+
+#define MM_SYSTEM_SPACE_START (0xC0800000)
+
+#define MM_SYSTEM_SPACE_END (0xFFFFFFFF)
+
+#define PDE_TOP 0xC03FFFFF
+
+#define PTE_BASE ((ULONG)0xC0000000)
+
+#define HYPER_SPACE ((PVOID)0xC0400000)
+
+#define HYPER_SPACE_END (0xC07fffff)
+
+#define MM_SYSTEM_VIEW_START (0xA0000000)
+
+#define MM_SYSTEM_VIEW_SIZE (48*1024*1024)
+
+//
+// Define the start and maximum size for the system cache.
+// Maximum size 512MB.
+//
+
+#define MM_SYSTEM_CACHE_WORKING_SET (0xC0C00000)
+
+#define MM_SYSTEM_CACHE_START (0xC1000000)
+
+#define MM_SYSTEM_CACHE_END (0xE1000000)
+
+#define MM_MAXIMUM_SYSTEM_CACHE_SIZE \
+ (((ULONG)MM_SYSTEM_CACHE_END - (ULONG)MM_SYSTEM_CACHE_START) >> PAGE_SHIFT)
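+
+//
+// Editorial note: (0xE1000000 - 0xC1000000) >> 12 = 0x20000 pages,
+// which matches the 512mb maximum quoted above.
+//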
+
+#define MM_PAGED_POOL_START ((PVOID)(0xE1000000))
+
+#define MM_LOWEST_NONPAGED_SYSTEM_START ((PVOID)(0xEB000000))
+
+#define MmProtopte_Base ((ULONG)0xE1000000)
+
+#define MM_NONPAGED_POOL_END ((PVOID)(0xFFBE0000))
+
+#define MM_CRASH_DUMP_VA ((PVOID)(0xFFBE0000))
+
+#define MM_DEBUG_VA ((PVOID)0xFFBFF000)
+
+#define NON_PAGED_SYSTEM_END ((ULONG)0xFFFFFFF0) //quadword aligned.
+
+//
+// Define absolute minimum and maximum count for system ptes.
+//
+
+#define MM_MINIMUM_SYSTEM_PTES 7000
+
+#define MM_MAXIMUM_SYSTEM_PTES 50000
+
+#define MM_DEFAULT_SYSTEM_PTES 11000
+
+//
+// Pool limits
+//
+
+//
+// The maximum amount of nonpaged pool that can be initially created.
+//
+
+#define MM_MAX_INITIAL_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The total amount of nonpaged pool (initial pool + expansion).
+//
+
+#define MM_MAX_ADDITIONAL_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The maximum amount of paged pool that can be created.
+//
+
+#define MM_MAX_PAGED_POOL ((ULONG)(192*1024*1024))
+
+#define MM_MAX_TOTAL_POOL (((ULONG)MM_NONPAGED_POOL_END) - ((ULONG)(MM_PAGED_POOL_START)))
+
+
+//
+// Structure layout definitions.
+//
+
+#define MM_PROTO_PTE_ALIGNMENT ((ULONG)PAGE_SIZE)
+
+#define PAGE_DIRECTORY_MASK ((ULONG)0x003FFFFF)
+
+#define MM_VA_MAPPED_BY_PDE (0x400000)
+
+#define LOWEST_IO_ADDRESS 0xa0000
+
+#define PTE_SHIFT 2
+
+//
+// The number of bits in a physical address.
+//
+
+#define PHYSICAL_ADDRESS_BITS 32
+
+#define MM_MAXIMUM_NUMBER_OF_COLORS (1)
+
+//
+// i386 does not require support for colored pages.
+//
+
+#define MM_NUMBER_OF_COLORS (1)
+
+//
+// Mask for obtaining color from a physical page number.
+//
+
+#define MM_COLOR_MASK (0)
+
+//
+// Boundary upon which pages of like color are aligned.
+//
+
+#define MM_COLOR_ALIGNMENT (0)
+
+//
+// Mask for isolating color from virtual address.
+//
+
+#define MM_COLOR_MASK_VIRTUAL (0)
+
+//
+// Define 256k worth of secondary colors.
+//
+
+#define MM_SECONDARY_COLORS_DEFAULT (64)
+
+#define MM_SECONDARY_COLORS_MIN (2)
+
+#define MM_SECONDARY_COLORS_MAX (1024)
+
+//
+// Mask for isolating secondary color from physical page number.
+//
+
+extern ULONG MmSecondaryColorMask;
+
+//
+// Maximum number of paging files.
+//
+
+#define MAX_PAGE_FILES 16
+
+
+//
+// Hyper space definitions.
+//
+
+#define FIRST_MAPPING_PTE ((ULONG)0xC0400000)
+
+#define NUMBER_OF_MAPPING_PTES 255
+#define LAST_MAPPING_PTE \
+ ((ULONG)((ULONG)FIRST_MAPPING_PTE + (NUMBER_OF_MAPPING_PTES * PAGE_SIZE)))
+
+#define IMAGE_MAPPING_PTE ((PMMPTE)((ULONG)LAST_MAPPING_PTE + PAGE_SIZE))
+
+#define ZEROING_PAGE_PTE ((PMMPTE)((ULONG)IMAGE_MAPPING_PTE + PAGE_SIZE))
+
+#define WORKING_SET_LIST ((PVOID)((ULONG)ZEROING_PAGE_PTE + PAGE_SIZE))
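+
+//
+// Editorial note: with 4k pages the chain above resolves to
+// LAST_MAPPING_PTE = 0xC0400000 + 255 * 0x1000 = 0xC04FF000,
+// IMAGE_MAPPING_PTE = 0xC0500000, ZEROING_PAGE_PTE = 0xC0501000 and
+// WORKING_SET_LIST = 0xC0502000, all inside the hyper space region.
+//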
+
+#define MM_MAXIMUM_WORKING_SET \
+ ((ULONG)((ULONG)2*1024*1024*1024 - 64*1024*1024) >> PAGE_SHIFT) //2Gb-64Mb
+
+#define MM_WORKING_SET_END ((ULONG)0xC07FF000)
+
+
+//
+// Define masks for fields within the PTE.
+//
+
+#define MM_PTE_VALID_MASK 0x1
+#if defined(NT_UP)
+#define MM_PTE_WRITE_MASK 0x2
+#else
+#define MM_PTE_WRITE_MASK 0x800
+#endif
+#define MM_PTE_OWNER_MASK 0x4
+#define MM_PTE_WRITE_THROUGH_MASK 0x8
+#define MM_PTE_CACHE_DISABLE_MASK 0x10
+#define MM_PTE_ACCESS_MASK 0x20
+#if defined(NT_UP)
+#define MM_PTE_DIRTY_MASK 0x40
+#else
+#define MM_PTE_DIRTY_MASK 0x42
+#endif
+#define MM_PTE_LARGE_PAGE_MASK 0x80
+#define MM_PTE_GLOBAL_MASK 0x100
+#define MM_PTE_COPY_ON_WRITE_MASK 0x200
+#define MM_PTE_PROTOTYPE_MASK 0x400
+#define MM_PTE_TRANSITION_MASK 0x800
+
+//
+// Bit fields to or into PTE to make a PTE valid based on the
+// protection field of the invalid PTE.
+//
+
+#define MM_PTE_NOACCESS 0x0 // not expressible on i386
+#define MM_PTE_READONLY 0x0
+#define MM_PTE_READWRITE MM_PTE_WRITE_MASK
+#define MM_PTE_WRITECOPY 0x200 // read-only copy on write bit set.
+#define MM_PTE_EXECUTE 0x0 // read-only on i386
+#define MM_PTE_EXECUTE_READ 0x0
+#define MM_PTE_EXECUTE_READWRITE MM_PTE_WRITE_MASK
+#define MM_PTE_EXECUTE_WRITECOPY 0x200 // read-only copy on write bit set.
+#define MM_PTE_NOCACHE 0x010
+#define MM_PTE_GUARD 0x0 // not expressible on i386
+#define MM_PTE_CACHE 0x0
+
+#define MM_PROTECT_FIELD_SHIFT 5
+
+//
+// Zero PTE
+//
+
+#define MM_ZERO_PTE 0
+
+//
+// Zero Kernel PTE
+//
+
+#define MM_ZERO_KERNEL_PTE 0
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE.
+//
+
+#define MM_DEMAND_ZERO_WRITE_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE for system space.
+//
+
+#define MM_KERNEL_DEMAND_ZERO_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+//
+// A no access PTE for system space.
+//
+
+#define MM_KERNEL_NOACCESS_PTE (MM_NOACCESS << MM_PROTECT_FIELD_SHIFT)
+
+extern ULONG MmPteGlobal; // One if processor supports Global Page, else zero.
+
+//
+// Kernel stack alignment requirements.
+//
+
+#define MM_STACK_ALIGNMENT 0x0
+
+#define MM_STACK_OFFSET 0x0
+
+//
+// System process definitions
+//
+
+#define PDE_PER_PAGE ((ULONG)1024)
+
+#define PTE_PER_PAGE ((ULONG)1024)
+
+//
+// Number of page table pages for user addresses.
+//
+
+#define MM_USER_PAGE_TABLE_PAGES (512)
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE (
+// OUT OUTPTE,
+// IN FRAME,
+// IN PMASK,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro makes a valid PTE from a page frame number, protection mask,
+// and owner.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// FRAME - Supplies the page frame number for the PTE.
+//
+// PMASK - Supplies the protection to set in the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE which is being made valid.
+// For prototype PTEs NULL should be specified.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE(OUTPTE,FRAME,PMASK,PPTE) \
+ (OUTPTE).u.Long = ((FRAME << 12) | \
+ (MmProtectToPteMask[PMASK]) | \
+ MiDetermineUserGlobalPteMask ((PMMPTE)PPTE));
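+
+//
+// Editorial sketch (not part of the original source): a typical call
+// builds a valid PTE and stores it; PageFrameIndex, PointerPte and the
+// MM_READWRITE protection (from mi.h) are assumed to be in scope:
+//
+#if 0
+MMPTE TempPte;
+
+MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ MM_READWRITE,
+ PointerPte);
+*PointerPte = TempPte;
+#endif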
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_TRANSITION (
+// IN OUT OUTPTE
+// IN PROTECT
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid pte and turns it into a transition PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the current valid PTE. This PTE is then
+// modified to become a transition PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
+ (OUTPTE).u.Soft.Transition = 1; \
+ (OUTPTE).u.Soft.Valid = 0; \
+ (OUTPTE).u.Soft.Prototype = 0; \
+ (OUTPTE).u.Soft.Protection = PROTECT;
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE (
+// OUT OUTPTE,
+// IN PAGE,
+// IN PROTECT,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro builds a transition PTE from a page frame number,
+// protection, and owner.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// PAGE - Supplies the page frame number for the PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// PPTE - Supplies a pointer to the PTE, this is used to determine
+// the owner of the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE(OUTPTE,PAGE,PROTECT,PPTE) \
+ (OUTPTE).u.Long = 0; \
+ (OUTPTE).u.Trans.PageFrameNumber = PAGE; \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Protection = PROTECT; \
+ (OUTPTE).u.Trans.Owner = MI_DETERMINE_OWNER(PPTE);
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE_VALID (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a transition pte and makes it a valid PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE) \
+ ASSERT (((PPTE)->u.Hard.Valid == 0) && \
+ ((PPTE)->u.Trans.Prototype == 0) && \
+ ((PPTE)->u.Trans.Transition == 1)); \
+ (OUTPTE).u.Long = (((PPTE)->u.Long & 0xFFFFF000) | \
+ (MmProtectToPteMask[(PPTE)->u.Trans.Protection]) | \
+ MiDetermineUserGlobalPteMask ((PMMPTE)PPTE));
+
+
+//++
+//VOID
+//MI_SET_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set dirty.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_DIRTY(PTE) (PTE).u.Long |= HARDWARE_PTE_DIRTY_MASK
+
+
+//++
+//VOID
+//MI_SET_PTE_CLEAN (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro clears the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set clean.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_CLEAN(PTE) (PTE).u.Long &= ~HARDWARE_PTE_DIRTY_MASK
+
+
+
+//++
+//VOID
+//MI_IS_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to check.
+//
+// Return Value:
+//
+// TRUE if the page is dirty (modified), FALSE otherwise.
+//
+//--
+
+#define MI_IS_PTE_DIRTY(PTE) ((PTE).u.Hard.Dirty != 0)
+
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_BIT_IF_SYSTEM (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global bit if the pointer PTE is within
+// system space.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE becoming valid.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_BIT_IF_SYSTEM(OUTPTE,PPTE) \
+ if ((((PMMPTE)PPTE) > MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) && \
+ ((((PMMPTE)PPTE) <= MiGetPteAddress (PTE_BASE)) || \
+ (((PMMPTE)PPTE) >= MiGetPteAddress (MM_SYSTEM_CACHE_WORKING_SET)))) { \
+ (OUTPTE).u.Hard.Global = MmPteGlobal; \
+ } \
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_STATE (
+// IN MMPTE PTE,
+// IN ULONG STATE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global bit in the PTE to the specified state.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set global state into.
+//
+// STATE - Supplies 1 if global, 0 if not.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_STATE(PTE,STATE) \
+ (PTE).u.Hard.Global = (STATE & MmPteGlobal);
+
+
+
+
+
+//++
+//VOID
+//MI_ENABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// enabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_ENABLE_CACHING(PTE) ((PTE).u.Hard.CacheDisable = 0)
+
+
+
+//++
+//VOID
+//MI_DISABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// disabled.
+//
+// Arguments
+//
+// PTE - Supplies a pointer to the valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_DISABLE_CACHING(PTE) ((PTE).u.Hard.CacheDisable = 1)
+
+
+
+
+//++
+//BOOLEAN
+//MI_IS_CACHING_DISABLED (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and returns TRUE if caching is
+// disabled.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the valid PTE.
+//
+// Return Value:
+//
+// TRUE if caching is disabled, FALSE if it is enabled.
+//
+//--
+
+#define MI_IS_CACHING_DISABLED(PPTE) \
+ ((PPTE)->u.Hard.CacheDisable == 1)
+
+
+
+//++
+//VOID
+//MI_SET_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and indicates that
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPFN - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_SET_PFN_DELETED(PPFN) (((PPFN)->PteAddress = (PMMPTE)0xFFFFFFFF))
+
+
+
+
+//++
+//BOOLEAN
+//MI_IS_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and determines if
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPFN - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// TRUE if PFN is no longer used, FALSE if it is still being used.
+//
+//--
+
+#define MI_IS_PFN_DELETED(PPFN) \
+ ((PPFN)->PteAddress == (PMMPTE)0xFFFFFFFF)
+
+
+//++
+//VOID
+//MI_CHECK_PAGE_ALIGNMENT (
+// IN ULONG PAGE,
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a PFN element number (Page) and checks to see
+// if the virtual alignment for the previous address of the page
+// is compatible with the new address of the page. If they are
+// not compatible, the D cache is flushed.
+//
+// Arguments
+//
+// PAGE - Supplies the PFN element.
+// PPTE - Supplies a pointer to the new PTE which will contain the page.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+// does nothing on i386.
+
+#define MI_CHECK_PAGE_ALIGNMENT(PAGE,PPTE)
+
+
+
+
+//++
+//VOID
+//MI_INITIALIZE_HYPERSPACE_MAP (
+// VOID
+// );
+//
+// Routine Description:
+//
+// This macro initializes the PTEs reserved for double mapping within
+// hyperspace.
+//
+// Arguments
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+// does nothing on i386.
+
+#define MI_INITIALIZE_HYPERSPACE_MAP(INDEX)
+
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_PTE (
+// IN PMMPTE PTEADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+// PTEADDRESS - Supplies the PTE address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_PTE(PTEADDRESS) \
+ ((ULONG)((MmSystemPageColor++) & MmSecondaryColorMask))
+
+
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_VA (
+// IN PVOID ADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the virtual address
+// that the page is (or was) mapped at.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+
+#define MI_GET_PAGE_COLOR_FROM_VA(ADDRESS) \
+ ((ULONG)((MmSystemPageColor++) & MmSecondaryColorMask))
+
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_PTE_PROCESS (
+// IN PCHAR COLOR,
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+
+#define MI_PAGE_COLOR_PTE_PROCESS(PTE,COLOR) \
+ ((ULONG)((*(COLOR))++) & MmSecondaryColorMask)
+
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_VA_PROCESS (
+// IN PVOID ADDRESS,
+// IN PEPROCESS COLOR
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the virtual address
+// that the page is (or was) mapped at.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_PAGE_COLOR_VA_PROCESS(ADDRESS,COLOR) \
+ ((ULONG)((*(COLOR))++) & MmSecondaryColorMask)
+
+
+
+//++
+//ULONG
+//MI_GET_NEXT_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the next color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the next of.
+//
+// Return Value:
+//
+// Next color in sequence.
+//
+//--
+
+#define MI_GET_NEXT_COLOR(COLOR) ((COLOR + 1) & MM_COLOR_MASK)
+
+
+//++
+//ULONG
+//MI_GET_PREVIOUS_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the previous color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the previous of.
+//
+// Return Value:
+//
+// Previous color in sequence.
+//
+//--
+
+#define MI_GET_PREVIOUS_COLOR(COLOR) (0)
+
+
+#define MI_GET_SECONDARY_COLOR(PAGE,PFN) (PAGE & MmSecondaryColorMask)
+
+
+#define MI_GET_COLOR_FROM_SECONDARY(SECONDARY_COLOR) (0)
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_BY_COLOR (
+// OUT ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. It does NOT remove the page
+// from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_BY_COLOR(PAGE,COLOR) \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_ANY_COLOR (
+// OUT ULONG PAGE,
+// IN OUT ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. If no page of the desired
+// color exists, all colored lists are searched for a page.
+// It does NOT remove the page from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate and returns the
+// color of the page located.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_ANY_COLOR(PAGE,COLOR) \
+ { \
+ if (MmTotalPagesForPagingFile == 0) { \
+ PAGE = MM_EMPTY_LIST; \
+ } else { \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink; \
+ } \
+ }
+
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_WRITE_COPY (
+// IN OUT PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if the PTE indicates that the
+// page is writable and if so it clears the write bit and
+// sets the copy-on-write bit.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_WRITE_COPY(PPTE) \
+ if ((PPTE)->u.Hard.Write == 1) { \
+ (PPTE)->u.Hard.CopyOnWrite = 1; \
+ (PPTE)->u.Hard.Write = 0; \
+ }
+
+
+
+//++
+//ULONG
+//MI_DETERMINE_OWNER (
+// IN MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro examines the virtual address of the PTE and determines
+// if the PTE resides in system space or user space.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// 1 if the owner is USER_MODE, 0 if the owner is KERNEL_MODE.
+//
+//--
+
+#define MI_DETERMINE_OWNER(PPTE) \
+ ((((PPTE) <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) || \
+ ((PPTE) >= MiGetPdeAddress(NULL) && \
+ ((PPTE) <= MiGetPdeAddress(MM_HIGHEST_USER_ADDRESS)))) ? 1 : 0)
+
+
+
+//++
+//VOID
+//MI_SET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#if defined(NT_UP)
+#define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED) \
+ ((PPTE)->u.Hard.Accessed = ACCESSED)
+#else
+
+//
+// Don't do anything on MP systems.
+//
+
+#define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED)
+#endif
+
+
+//++
+//ULONG
+//MI_GET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro returns the state of the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the ACCESSED field.
+//
+//--
+
+#if defined(NT_UP)
+#define MI_GET_ACCESSED_IN_PTE(PPTE) ((PPTE)->u.Hard.Accessed)
+#else
+#define MI_GET_ACCESSED_IN_PTE(PPTE) 0
+#endif
+
+
+//++
+//VOID
+//MI_SET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// IN ULONG OWNER
+// );
+//
+// Routine Description:
+//
+// This macro sets the owner field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_OWNER_IN_PTE(PPTE,OWNER) ((PPTE)->u.Hard.Owner = OWNER)
+
+
+
+
+//++
+//ULONG
+//MI_GET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro gets the owner field from the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the OWNER field.
+//
+//--
+
+#define MI_GET_OWNER_IN_PTE(PPTE) ((PPTE)->u.Hard.Owner)
+
+
+
+//
+// bit mask to clear out fields in a PTE to or in prototype pte offset.
+//
+
+#define CLEAR_FOR_PROTO_PTE_ADDRESS ((ULONG)0x701)
+
+//
+// bit mask to clear out fields in a PTE to or in paging file location.
+//
+
+#define CLEAR_FOR_PAGE_FILE 0x000003E0
+
+
+//++
+//VOID
+//SET_PAGING_FILE_INFO (
+// IN OUT MMPTE PPTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// FILEINFO - Supplies the number of the paging file.
+//
+// OFFSET - Supplies the offset into the paging file.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define SET_PAGING_FILE_INFO(PTE,FILEINFO,OFFSET) ((((PTE).u.Long & \
+ CLEAR_FOR_PAGE_FILE) | ((FILEINFO << 1) | \
+ (OFFSET << 12))))
+
+
+//++
+//PMMPTE
+//MiPteToProto (
+// IN OUT MMPTE PPTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro returns the address of the corresponding prototype which
+// was encoded earlier into the supplied PTE.
+//
+// NOTE THAT A PROTOPTE CAN ONLY RESIDE IN PAGED POOL!
+//
+// MAX SIZE = 2^(2+7+21) = 2^30 = 1GB.
+//
+// NOTE that the valid bit must be zero!
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Pointer to the prototype PTE that backs this PTE.
+//
+//--
+
+
+#define MiPteToProto(lpte) ((PMMPTE)(((((lpte)->u.Long) >> 11) << 9) + \
+ (((((lpte)->u.Long)) << 24) >> 23) \
+ + MmProtopte_Base))
+
+
+//++
+//ULONG
+//MiProtoAddressForPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+// MiProtoAddressForPte returns the bit field to OR into the PTE to
+// reference a prototype PTE. And set the protoPTE bit,
+// MM_PTE_PROTOTYPE_MASK.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForPte(proto_va) \
+ ((((((ULONG)proto_va - MmProtopte_Base) >> 1) & (ULONG)0x000000FE) | \
+ (((((ULONG)proto_va - MmProtopte_Base) << 2) & (ULONG)0xfffff800))) | \
+ MM_PTE_PROTOTYPE_MASK)
+
+
+
+
+//++
+//ULONG
+//MiProtoAddressForKernelPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+// MiProtoAddressForPte returns the bit field to OR into the PTE to
+// reference a prototype PTE. And set the protoPTE bit,
+// MM_PTE_PROTOTYPE_MASK.
+//
+// This macro also sets any other information (such as global bits)
+// required for kernel mode PTEs.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+// not different on x86.
+
+#define MiProtoAddressForKernelPte(proto_va) MiProtoAddressForPte(proto_va)
+
+
+#define MM_SUBSECTION_MAP (128*1024*1024)
+
+//++
+//PSUBSECTION
+//MiGetSubsectionAddress (
+// IN PMMPTE lpte
+// );
+//
+// Routine Description:
+//
+// This macro takes a PTE and returns the address of the subsection that
+// the PTE refers to. Subsections are quadword structures allocated
+// from nonpaged pool.
+//
+// NOTE THIS MACRO LIMITS THE SIZE OF NONPAGED POOL!
+// MAXIMUM NONPAGED POOL = 2^(3+4+21) = 2^28 = 256mb.
+//
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// A pointer to the subsection referred to by the supplied PTE.
+//
+//--
+
+//#define MiGetSubsectionAddress(lpte) \
+// ((PSUBSECTION)((ULONG)MM_NONPAGED_POOL_END - \
+// (((((lpte)->u.Long)>>11)<<7) | \
+// (((lpte)->u.Long<<2) & 0x78))))
+
+#define MiGetSubsectionAddress(lpte) \
+ (((lpte)->u.Long & 0x80000000) ? \
+ ((PSUBSECTION)((ULONG)MmSubsectionBase + \
+ ((((lpte)->u.Long & 0x7ffff800) >> 4) | \
+ (((lpte)->u.Long<<2) & 0x78)))) \
+ : \
+ ((PSUBSECTION)((ULONG)MM_NONPAGED_POOL_END - \
+ (((((lpte)->u.Long)>>11)<<7) | \
+ (((lpte)->u.Long<<2) & 0x78)))))
+
+
+
+//++
+//ULONG
+//MiGetSubsectionAddressForPte (
+// IN PSUBSECTION VA
+// );
+//
+// Routine Description:
+//
+// This macro takes the address of a subsection and encodes it for use
+// in a PTE.
+//
+// NOTE - THE SUBSECTION ADDRESS MUST BE QUADWORD ALIGNED!
+//
+// Arguments
+//
+// VA - Supplies a pointer to the subsection to encode.
+//
+// Return Value:
+//
+// The mask to set into the PTE to make it reference the supplied
+// subsection.
+//
+//--
+
+//#define MiGetSubsectionAddressForPte(VA) \
+// (((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA)>>2) & (ULONG)0x0000001E) | \
+// ((((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA)<<4) & (ULONG)0xfffff800))))
+
+#define MiGetSubsectionAddressForPte(VA) \
+ (((ULONG)(VA) < (ULONG)MM_KSEG2_BASE) ? \
+ (((((ULONG)VA - (ULONG)MmSubsectionBase)>>2) & (ULONG)0x0000001E) | \
+ ((((((ULONG)VA - (ULONG)MmSubsectionBase)<<4) & (ULONG)0x7ffff800)))| \
+ 0x80000000) \
+ : \
+ (((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA)>>2) & (ULONG)0x0000001E) | \
+ ((((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA)<<4) & (ULONG)0x7ffff800)))))
+
+
+
+
+//++
+//PMMPTE
+//MiGetPdeAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeAddress returns the address of the PDE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PDE for.
+//
+// Return Value:
+//
+// The address of the PDE.
+//
+//--
+
+#define MiGetPdeAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 22) << 2) + PDE_BASE))
+
+
+
+//++
+//PMMPTE
+//MiGetPteAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteAddress returns the address of the PTE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PTE for.
+//
+// Return Value:
+//
+// The address of the PTE.
+//
+//--
+
+#define MiGetPteAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 12) << 2) + PTE_BASE))
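+
+//
+// Editorial note: for va 0x80001234 this yields
+// 0xC0000000 + ((0x80001234 >> 12) << 2) = 0xC0200004, while
+// MiGetPdeAddress above yields
+// 0xC0300000 + ((0x80001234 >> 22) << 2) = 0xC0300800.
+//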
+
+
+
+//++
+//ULONG
+//MiGetPdeOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeOffset returns the offset into a page directory
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page directory table the corresponding PDE is at.
+//
+//--
+
+#define MiGetPdeOffset(va) (((ULONG)(va)) >> 22)
+
+
+
+//++
+//ULONG
+//MiGetPteOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteOffset returns the offset into a page table page
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page table page the corresponding PTE is at.
+//
+//--
+
+#define MiGetPteOffset(va) ((((ULONG)(va)) << 10) >> 22)
+
+
+
+//++
+//PMMPTE
+//MiGetProtoPteAddress (
+// IN PMMPTE VAD,
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// MiGetProtoPteAddress returns a pointer to the prototype PTE which
+// is mapped by the given virtual address descriptor and address within
+// the virtual address descriptor.
+//
+// Arguments
+//
+// VAD - Supplies a pointer to the virtual address descriptor that contains
+// the VA.
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// A pointer to the proto PTE which corresponds to the VA.
+//
+//--
+
+#define MiGetProtoPteAddress(VAD,VA) \
+ (((((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte) <= (ULONG)(VAD)->LastContiguousPte) ? \
+ ((PMMPTE)(((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte)) : \
+ MiGetProtoPteAddressExtended ((VAD),(VA)))
+
+
+//++
+//PVOID
+//MiGetVirtualAddressMappedByPte (
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// MiGetVirtualAddressMappedByPte returns the virtual address
+// which is mapped by a given PTE address.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to get the virtual address for.
+//
+// Return Value:
+//
+// Virtual address mapped by the PTE.
+//
+//--
+
+#define MiGetVirtualAddressMappedByPte(PTE) ((PVOID)((ULONG)(PTE) << 10))
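+
+//
+// Editorial note: this is the inverse of MiGetPteAddress; e.g. the
+// PTE address 0xC0200004 << 10 = 0x80001000, the base of the page
+// containing the va 0x80001234 (cf. the MiGetPteAddress note above).
+//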
+
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_NUMBER (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the paging file number from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file number.
+//
+//--
+
+#define GET_PAGING_FILE_NUMBER(PTE) ((((PTE).u.Long) >> 1) & 0x0000000F)
+
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_OFFSET (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the offset into the paging file from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file offset.
+//
+//--
+
+#define GET_PAGING_FILE_OFFSET(PTE) ((((PTE).u.Long) >> 12) & 0x000FFFFF)
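+
+//
+// Editorial sketch (not part of the original source): decoding a
+// paging-file PTE; bits <4:1> select one of MAX_PAGE_FILES (16)
+// paging files and bits <31:12> give the page offset within it.
+// PteContents is assumed to hold an invalid, non-prototype PTE:
+//
+#if 0
+ULONG PageFileNumber = GET_PAGING_FILE_NUMBER (PteContents);
+ULONG PageFileOffset = GET_PAGING_FILE_OFFSET (PteContents);
+#endif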
+
+
+
+
+//++
+//ULONG
+//IS_PTE_NOT_DEMAND_ZERO (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if a given PTE is NOT a demand zero PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Returns 0 if the PTE is demand zero, non-zero otherwise.
+//
+//--
+
+#define IS_PTE_NOT_DEMAND_ZERO(PTE) ((PTE).u.Long & (ULONG)0xFFFFFC01)
+
+
+
+
+//++
+//VOID
+//MI_MAKING_VALID_PTE_INVALID(
+// IN ULONG SYSTEM_WIDE
+// );
+//
+// Routine Description:
+//
+// Prepare to make a single valid PTE invalid.
+// No action is required on x86.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKING_VALID_PTE_INVALID(SYSTEM_WIDE)
+
+
+//++
+//VOID
+//MI_MAKING_MULTIPLE_PTES_INVALID(
+// IN ULONG SYSTEM_WIDE
+// );
+//
+// Routine Description:
+//
+// Prepare to make multiple valid PTEs invalid.
+// No action is required on x86.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKING_MULTIPLE_PTES_INVALID(SYSTEM_WIDE)
+
+
+
+//++
+//VOID
+//MI_MAKE_PROTECT_WRITE_COPY (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro makes a writable PTE a writeable-copy PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// NONE
+//
+//--
+
+#define MI_MAKE_PROTECT_WRITE_COPY(PTE) \
+ if ((PTE).u.Soft.Protection & MM_PROTECTION_WRITE_MASK) { \
+ (PTE).u.Long |= MM_PROTECTION_COPY_MASK << MM_PROTECT_FIELD_SHIFT; \
+ }
+
+
+//++
+//VOID
+//MI_SET_PAGE_DIRTY(
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit (and releases page file space).
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#if defined(NT_UP)
+#define MI_SET_PAGE_DIRTY(PPTE,VA,PFNHELD)
+#else
+#define MI_SET_PAGE_DIRTY(PPTE,VA,PFNHELD) \
+ if ((PPTE)->u.Hard.Dirty == 1) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ }
+#endif
+
+
+
+
+//++
+//VOID
+//MI_NO_FAULT_FOUND(
+// IN TEMP,
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro handles the case when a page fault is taken and no
+// PTE with the valid bit clear is found.
+//
+// Arguments
+//
+// TEMP - Supplies a temporary for usage.
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#if defined(NT_UP)
+#define MI_NO_FAULT_FOUND(TEMP,PPTE,VA,PFNHELD)
+#else
+#define MI_NO_FAULT_FOUND(TEMP,PPTE,VA,PFNHELD) \
+ if (StoreInstruction && ((PPTE)->u.Hard.Dirty == 0)) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ }
+#endif
+
+
+
+
+//++
+//ULONG
+//MI_CAPTURE_DIRTY_BIT_TO_PFN (
+// IN PMMPTE PPTE,
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro captures the state of the dirty bit into the PFN
+// element and frees any associated page file space if the PTE
+// has been modified.
+//
+// NOTE - THE PFN LOCK MUST BE HELD!
+//
+// Arguments
+//
+// PPTE - Supplies the PTE to operate upon.
+//
+// PPFN - Supplies a pointer to the PFN database element that corresponds
+// to the page mapped by the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_CAPTURE_DIRTY_BIT_TO_PFN(PPTE,PPFN) \
+ ASSERT (KeGetCurrentIrql() > APC_LEVEL); \
+ if (((PPFN)->u3.e1.Modified == 0) && \
+ ((PPTE)->u.Hard.Dirty != 0)) { \
+ (PPFN)->u3.e1.Modified = 1; \
+ if (((PPFN)->OriginalPte.u.Soft.Prototype == 0) && \
+ ((PPFN)->u3.e1.WriteInProgress == 0)) { \
+ MiReleasePageFileSpace ((PPFN)->OriginalPte); \
+ (PPFN)->OriginalPte.u.Soft.PageFileHigh = 0; \
+ } \
+ }
+
+
+//++
+//BOOLEAN
+//MI_IS_PHYSICAL_ADDRESS (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro determines if a given virtual address is really a
+// physical address.
+//
+// Arguments
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// FALSE if it is not a physical address, TRUE if it is.
+//
+//--
+
+
+#define MI_IS_PHYSICAL_ADDRESS(Va) \
+ (((ULONG)Va >= MM_KSEG0_BASE) && ((ULONG)Va < MM_KSEG2_BASE) && (MmKseg2Frame))
+
+
+//++
+//ULONG
+//MI_CONVERT_PHYSICAL_TO_PFN (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro converts a physical address (see MI_IS_PHYSICAL_ADDRESS)
+// to its corresponding physical frame number.
+//
+// Arguments
+//
+// VA - Supplies a pointer to the physical address.
+//
+// Return Value:
+//
+// Returns the PFN for the page.
+//
+//--
+
+
+#define MI_CONVERT_PHYSICAL_TO_PFN(Va) (((ULONG)Va << 3) >> 15)
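+
+//
+// Editorial note: shifting left 3 and then right 15 strips the three
+// KSEG address bits and divides by PAGE_SIZE in one step; e.g.
+// Va 0x80042000 -> (0x80042000 << 3) >> 15 = 0x42, page frame 0x42.
+//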
+
+
+typedef struct _MMCOLOR_TABLES {
+ ULONG Flink;
+ PVOID Blink;
+} MMCOLOR_TABLES, *PMMCOLOR_TABLES;
+
+typedef struct _MMPRIMARY_COLOR_TABLES {
+ LIST_ENTRY ListHead;
+} MMPRIMARY_COLOR_TABLES, *PMMPRIMARY_COLOR_TABLES;
+
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+extern MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+#endif
+
+extern PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+extern ULONG MmTotalPagesForPagingFile;
+
+
+//
+// A VALID Page Table Entry on an Intel 386/486 has the following definition.
+//
+
+typedef struct _MMPTE_SOFTWARE {
+ ULONG Valid : 1;
+ ULONG PageFileLow : 4;
+ ULONG Protection : 5;
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG PageFileHigh : 20;
+} MMPTE_SOFTWARE;
+
+typedef struct _MMPTE_TRANSITION {
+ ULONG Valid : 1;
+ ULONG Write : 1;
+ ULONG Owner : 1;
+ ULONG WriteThrough : 1;
+ ULONG CacheDisable : 1;
+ ULONG Protection : 5;
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG PageFrameNumber : 20;
+} MMPTE_TRANSITION;
+
+typedef struct _MMPTE_PROTOTYPE {
+ ULONG Valid : 1;
+ ULONG ProtoAddressLow : 7;
+ ULONG ReadOnly : 1; // if set allow read only access.
+ ULONG WhichPool : 1;
+ ULONG Prototype : 1;
+ ULONG ProtoAddressHigh : 21;
+} MMPTE_PROTOTYPE;
+
+typedef struct _MMPTE_SUBSECTION {
+ ULONG Valid : 1;
+ ULONG SubsectionAddressLow : 4;
+ ULONG Protection : 5;
+ ULONG Prototype : 1;
+ ULONG SubsectionAddressHigh : 20;
+ ULONG WhichPool : 1;
+} MMPTE_SUBSECTION;
+
+typedef struct _MMPTE_LIST {
+ ULONG Valid : 1;
+ ULONG OneEntry : 1;
+ ULONG filler10 : 10;
+ ULONG NextEntry : 20;
+} MMPTE_LIST;
+
+//
+// A Page Table Entry on an Intel 386/486 has the following definition.
+//
+
+#if defined(NT_UP)
+
+//
+// Uniprocessor version.
+//
+
+typedef struct _MMPTE_HARDWARE {
+ ULONG Valid : 1;
+ ULONG Write : 1; // UP version
+ ULONG Owner : 1;
+ ULONG WriteThrough : 1;
+ ULONG CacheDisable : 1;
+ ULONG Accessed : 1;
+ ULONG Dirty : 1;
+ ULONG LargePage : 1;
+ ULONG Global : 1;
+ ULONG CopyOnWrite : 1; // software field
+ ULONG Prototype : 1; // software field
+ ULONG reserved : 1; // software field
+ ULONG PageFrameNumber : 20;
+} MMPTE_HARDWARE, *PMMPTE_HARDWARE;
+
+#define HARDWARE_PTE_DIRTY_MASK 0x40
+
+#else
+
+//
+// MP version to avoid stalls when flushing TBs across processors.
+//
+
+typedef struct _MMPTE_HARDWARE {
+ ULONG Valid : 1;
+ ULONG Writable : 1; //changed for MP version
+ ULONG Owner : 1;
+ ULONG WriteThrough : 1;
+ ULONG CacheDisable : 1;
+ ULONG Accessed : 1;
+ ULONG Dirty : 1;
+ ULONG LargePage : 1;
+ ULONG Global : 1;
+ ULONG CopyOnWrite : 1; // software field
+ ULONG Prototype : 1; // software field
+ ULONG Write : 1; // software field - MP change
+ ULONG PageFrameNumber : 20;
+} MMPTE_HARDWARE, *PMMPTE_HARDWARE;
+
+#define HARDWARE_PTE_DIRTY_MASK 0x42
+
+#endif //NT_UP
+
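+//
+// Illustrative note (not part of the original source): the dirty masks
+// above select raw bits in the low byte of the PTE.  Bit 6 (0x40) is the
+// hardware Dirty bit; in the MP layout bit 1 (0x02) is the hardware
+// Writable bit, so 0x42 sets both together and the write-enable
+// transition never needs a cross-processor TB flush by itself.  A sketch
+// of how such a mask is applied (TempPte is hypothetical):
+//
+// TempPte.u.Long |= HARDWARE_PTE_DIRTY_MASK;
+//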
+
+typedef struct _MMPTE {
+ union {
+ ULONG Long;
+ MMPTE_HARDWARE Hard;
+ HARDWARE_PTE Flush;
+ MMPTE_PROTOTYPE Proto;
+ MMPTE_SOFTWARE Soft;
+ MMPTE_TRANSITION Trans;
+ MMPTE_SUBSECTION Subsect;
+ MMPTE_LIST List;
+ } u;
+} MMPTE;
+
+typedef MMPTE *PMMPTE;
+
+ULONG
+FASTCALL
+MiDetermineUserGlobalPteMask (
+ IN PMMPTE Pte
+ );
diff --git a/private/ntos/mm/i386/probewrt.c b/private/ntos/mm/i386/probewrt.c
new file mode 100644
index 000000000..52630c5ab
--- /dev/null
+++ b/private/ntos/mm/i386/probewrt.c
@@ -0,0 +1,138 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ probewrt.c
+
+Abstract:
+
+ This module contains the routine to support probe for write on
+ the Intel 386. The Intel 386 has the unique property that in
+ kernel mode the writable bit of the PTE is ignored. This allows
+    the kernel to write user mode pages which may be read only. Note
+    that copy-on-write pages are protected as read-only as well, hence
+ the kernel could write to a user-mode copy on write page and the
+ copy on write would not occur.
+
+
+Author:
+
+ Lou Perazzoli (loup) 6-Apr-1990
+
+Environment:
+
+ Kernel mode only. Non-paged.
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+VOID
+MmProbeForWrite (
+ IN PVOID Address,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This function probes an address for write accessibility on
+ the Intel 386.
+
+Arguments:
+
+ Address - Supplies a pointer to the structure to probe.
+
+ Length - Supplies the length of the structure.
+
+Return Value:
+
+ None. If the Address cannot be written an exception is raised.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ MMPTE PteContents;
+ CCHAR Temp;
+
+ //
+    // Loop on the copy on write case until the page is actually
+    // writable.
+ //
+
+ if (Address >= (PVOID)MM_HIGHEST_USER_ADDRESS) {
+ ExRaiseStatus(STATUS_ACCESS_VIOLATION);
+ }
+
+ PointerPte = MiGetPteAddress (Address);
+ LastPte = MiGetPteAddress ((PVOID)((ULONG)Address + Length - 1));
+
+ while (PointerPte <= LastPte) {
+
+ for (;;) {
+
+ //
+ // Touch the address as a byte to check for readability and
+ // get the PTE built.
+ //
+
+ do {
+ Temp = *(volatile CCHAR *)Address;
+ PteContents = *(volatile MMPTE *)PointerPte;
+ } while (PteContents.u.Hard.Valid == 0);
+
+ if (PteContents.u.Hard.Write == 1) {
+
+ //
+ // The PTE is writable and not copy on write.
+ //
+
+ break;
+ }
+
+ if (PteContents.u.Hard.CopyOnWrite == 1) {
+
+ //
+ // The PTE is copy on write. Call the pager and let
+ // it deal with this. Once the page fault is complete,
+ // this loop will again be repeated and the PTE will
+ // again be checked for write access and copy-on-write
+ // access. The PTE could still be copy-on-write even
+ // after the pager is called if the page table page
+ // was removed from the working set at this time (unlikely,
+ // but still possible).
+ //
+
+ if (!NT_SUCCESS (MmAccessFault (TRUE,
+ Address,
+ UserMode))) {
+
+ //
+ // Raise an access violation status.
+ //
+
+ ExRaiseStatus(STATUS_ACCESS_VIOLATION);
+
+ }
+ } else {
+
+ //
+ // Raise an access violation status.
+ //
+
+ ExRaiseStatus(STATUS_ACCESS_VIOLATION);
+
+ }
+ }
+ PointerPte += 1;
+ Address = (PVOID)((ULONG)Address + PAGE_SIZE);
+ }
+}
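+
+//
+// Illustrative usage sketch (not part of the original source): since
+// MmProbeForWrite reports failure by raising an exception, callers wrap
+// it in a structured exception handler.  The routine name, UserBuffer,
+// and Length below are hypothetical.
+//
+// NTSTATUS
+// MiCaptureExampleBuffer (
+//     IN PVOID UserBuffer,
+//     IN ULONG Length
+//     )
+// {
+//     try {
+//         MmProbeForWrite (UserBuffer, Length);
+//     } except (EXCEPTION_EXECUTE_HANDLER) {
+//         return GetExceptionCode ();
+//     }
+//     return STATUS_SUCCESS;
+// }
+//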
diff --git a/private/ntos/mm/i386/setmodfy.c b/private/ntos/mm/i386/setmodfy.c
new file mode 100644
index 000000000..e897ece48
--- /dev/null
+++ b/private/ntos/mm/i386/setmodfy.c
@@ -0,0 +1,242 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ setmodfy.c
+
+Abstract:
+
+    This module contains the modify bit setting routines for memory management.
+
+ i386 specific.
+
+Author:
+
+ 10-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+VOID
+MiSetModifyBit (
+ IN PMMPFN Pfn
+ )
+
+/*++
+
+Routine Description:
+
+ This routine sets the modify bit in the specified PFN element
+    and deallocates any allocated page file space.
+
+Arguments:
+
+ Pfn - Supplies the pointer to the PFN element to update.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working set mutex held and PFN mutex held.
+
+--*/
+
+{
+
+ //
+    // Set the modified field in the PFN database; also, if the physical
+ // page is currently in a paging file, free up the page file space
+ // as the contents are now worthless.
+ //
+
+ Pfn->u3.e1.Modified = 1;
+
+ if (Pfn->OriginalPte.u.Soft.Prototype == 0) {
+
+ //
+ // This page is in page file format, deallocate the page file space.
+ //
+
+ MiReleasePageFileSpace (Pfn->OriginalPte);
+
+ //
+ // Change original PTE to indicate no page file space is reserved,
+ // otherwise the space will be deallocated when the PTE is
+ // deleted.
+ //
+
+ Pfn->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+
+ return;
+}
+
+ULONG
+FASTCALL
+MiDetermineUserGlobalPteMask (
+ IN PMMPTE Pte
+ )
+
+/*++
+
+Routine Description:
+
+ Builds a mask to OR with the PTE frame field.
+ This mask has the valid and access bits set and
+ has the global and owner bits set based on the
+ address of the PTE.
+
+ ******************* NOTE *********************************************
+ THIS ROUTINE DOES NOT CHECK FOR PDE'S WHICH NEED TO BE
+    SET GLOBAL AS IT ASSUMES ALL PDES FOR SYSTEM SPACE ARE
+ PROPERLY SET AT INITIALIZATION TIME!
+
+Arguments:
+
+ Pte - Supplies a pointer to the PTE in which to fill.
+
+Return Value:
+
+ Mask to OR into the frame to make a valid PTE.
+
+Environment:
+
+ Kernel mode, 386 specific.
+
+--*/
+
+
+{
+ MMPTE Mask;
+
+ Mask.u.Long = 0;
+ Mask.u.Hard.Valid = 1;
+ Mask.u.Hard.Accessed = 1;
+
+ if ((Pte) <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) {
+ Mask.u.Hard.Owner = 1;
+ } else if (((Pte) < MiGetPteAddress (PTE_BASE)) ||
+ ((Pte) >= MiGetPteAddress (MM_SYSTEM_CACHE_WORKING_SET))) {
+ Mask.u.Hard.Global = MmPteGlobal;
+ } else if (((Pte) >= MiGetPdeAddress (NULL)) ||
+ ((Pte) <= MiGetPdeAddress (MM_HIGHEST_USER_ADDRESS))) {
+ Mask.u.Hard.Owner = 1;
+ }
+ return Mask.u.Long;
+}
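+
+//
+// Illustrative sketch (not part of the original source): a caller makes
+// a PTE valid by ORing the returned mask into the frame-shifted value.
+// PageFrameIndex and PointerPte are hypothetical here.
+//
+// PointerPte->u.Long = (PageFrameIndex << PAGE_SHIFT) |
+//                         MiDetermineUserGlobalPteMask (PointerPte);
+//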
+
+
+
+
+#if !defined(NT_UP)
+
+ULONG MmSetDirtyCount; //fixfix remove
+
+
+VOID
+MiSetDirtyBit (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN ULONG PfnHeld
+ )
+
+/*++
+
+Routine Description:
+
+    This routine sets the dirty bit in the specified PTE and the modify
+    bit in the corresponding PFN element. If any page file space is
+    allocated, it is deallocated.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies a pointer to the corresponding valid PTE.
+
+ PfnHeld - Supplies TRUE if the PFN mutex is already held.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working set mutex held.
+
+--*/
+
+{
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ //
+ // The page is NOT copy on write, update the PTE setting both the
+ // dirty bit and the accessed bit. Note, that as this PTE is in
+ // the TB, the TB must be flushed.
+ //
+
+ MmSetDirtyCount += 1; //fixfix - remove
+
+ TempPte = *PointerPte;
+ MI_SET_PTE_DIRTY (TempPte);
+ MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
+ *PointerPte = TempPte;
+
+ //
+ // Check state of PFN mutex and if not held, don't update PFN database.
+ //
+
+ if (PfnHeld) {
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ //
+        // Set the modified field in the PFN database; also, if the physical
+ // page is currently in a paging file, free up the page file space
+ // as the contents are now worthless.
+ //
+
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+
+ //
+ // This page is in page file format, deallocate the page file space.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+
+ //
+ // Change original PTE to indicate no page file space is reserved,
+ // otherwise the space will be deallocated when the PTE is
+ // deleted.
+ //
+
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+ Pfn1->u3.e1.Modified = 1;
+ }
+
+ //
+ // The TB entry must be flushed as the valid PTE with the dirty bit clear
+ // has been fetched into the TB. If it isn't flushed, another fault
+ // is generated as the dirty bit is not set in the cached TB entry.
+ //
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPte, FaultingAddress, TRUE);
+ return;
+}
+#endif
+
diff --git a/private/ntos/mm/i386/sources b/private/ntos/mm/i386/sources
new file mode 100644
index 000000000..eb7fffea6
--- /dev/null
+++ b/private/ntos/mm/i386/sources
@@ -0,0 +1,5 @@
+i386_SOURCES=..\i386\init386.c \
+ ..\i386\data386.c \
+ ..\i386\debugsup.c \
+ ..\i386\hypermap.c \
+ ..\i386\setmodfy.c
diff --git a/private/ntos/mm/iosup.c b/private/ntos/mm/iosup.c
new file mode 100644
index 000000000..4187e96f1
--- /dev/null
+++ b/private/ntos/mm/iosup.c
@@ -0,0 +1,4027 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ iosup.c
+
+Abstract:
+
+ This module contains routines which provide support for the I/O system.
+
+Author:
+
+ Lou Perazzoli (loup) 25-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#undef MmIsRecursiveIoFault
+
+BOOLEAN
+MmIsRecursiveIoFault(
+ VOID
+ );
+
+BOOLEAN
+MiCheckForContiguousMemory (
+ IN PVOID BaseAddress,
+ IN ULONG SizeInPages,
+ IN ULONG HighestPfn
+ );
+
+PVOID
+MiFindContiguousMemory (
+ IN ULONG HighestPfn,
+ IN ULONG SizeInPages
+ );
+
+PVOID
+MiMapLockedPagesInUserSpace (
+ IN PMDL MemoryDescriptorList,
+ IN PVOID StartingVa
+ );
+
+VOID
+MiUnmapLockedPagesInUserSpace (
+ IN PVOID BaseAddress,
+ IN PMDL MemoryDescriptorList
+ );
+
+VOID
+MiFlushTb (
+ VOID
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE, MmLockPagableDataSection)
+#pragma alloc_text(PAGE, MiLookupDataTableEntry)
+#pragma alloc_text(PAGE, MiMapLockedPagesInUserSpace)
+#pragma alloc_text(PAGE, MmSetBankedSection)
+#pragma alloc_text(PAGE, MmUnmapIoSpace)
+#pragma alloc_text(PAGE, MmMapVideoDisplay)
+#pragma alloc_text(PAGE, MmUnmapVideoDisplay)
+
+#pragma alloc_text(PAGELK, MiUnmapLockedPagesInUserSpace)
+#pragma alloc_text(PAGELK, MmAllocateNonCachedMemory)
+#pragma alloc_text(PAGELK, MmFreeNonCachedMemory)
+#pragma alloc_text(PAGELK, MiFindContiguousMemory)
+#pragma alloc_text(PAGELK, MmLockPagedPool)
+#pragma alloc_text(PAGELK, MmUnlockPagedPool)
+#endif
+
+extern POOL_DESCRIPTOR NonPagedPoolDescriptor;
+
+extern ULONG MmAllocatedNonPagedPool;
+
+extern ULONG MmDelayPageFaults;
+
+KEVENT MmCollidedLockEvent;
+ULONG MmCollidedLockWait;
+
+ULONG MmLockPagesCount;
+
+ULONG MmLockedCode;
+
+#ifdef LARGE_PAGES
+ULONG MmLargeVideoMapped;
+#endif
+
+#if DBG
+ULONG MmReferenceCountCheck = 75;
+#endif //DBG
+
+
+
+VOID
+MmProbeAndLockPages (
+ IN OUT PMDL MemoryDescriptorList,
+ IN KPROCESSOR_MODE AccessMode,
+ IN LOCK_OPERATION Operation
+ )
+
+/*++
+
+Routine Description:
+
+ This routine probes the specified pages, makes the pages resident and
+ locks the physical pages mapped by the virtual pages in memory. The
+ Memory descriptor list is updated to describe the physical pages.
+
+Arguments:
+
+ MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
+ (MDL). The supplied MDL must supply a virtual
+ address, byte offset and length field. The
+ physical page portion of the MDL is updated when
+ the pages are locked in memory.
+
+ AccessMode - Supplies the access mode in which to probe the arguments.
+ One of KernelMode or UserMode.
+
+ Operation - Supplies the operation type. One of IoReadAccess, IoWriteAccess
+ or IoModifyAccess.
+
+Return Value:
+
+ None - exceptions are raised.
+
+Environment:
+
+ Kernel mode. APC_LEVEL and below for pageable addresses,
+ DISPATCH_LEVEL and below for non-pageable addresses.
+
+--*/
+
+{
+ PULONG Page;
+ PMMPTE PointerPte;
+ PMMPTE PointerPte1;
+ PMMPTE PointerPde;
+ PVOID Va;
+ PVOID EndVa;
+ PMMPFN Pfn1 ;
+ ULONG PageFrameIndex;
+ PEPROCESS CurrentProcess;
+ KIRQL OldIrql;
+ ULONG NumberOfPagesToLock;
+ NTSTATUS status;
+
+ ASSERT (MemoryDescriptorList->ByteCount != 0);
+ ASSERT (((ULONG)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0);
+ ASSERT (((ULONG)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
+
+ ASSERT ((MemoryDescriptorList->MdlFlags & (
+ MDL_PAGES_LOCKED |
+ MDL_MAPPED_TO_SYSTEM_VA |
+ MDL_SOURCE_IS_NONPAGED_POOL |
+ MDL_PARTIAL |
+ MDL_SCATTER_GATHER_VA |
+ MDL_IO_SPACE)) == 0);
+
+ Page = (PULONG)(MemoryDescriptorList + 1);
+
+ Va = (PCHAR) MemoryDescriptorList->StartVa + MemoryDescriptorList->ByteOffset;
+
+ PointerPte = MiGetPteAddress (Va);
+ PointerPte1 = PointerPte;
+
+ //
+    // EndVa is one byte past the end of the buffer. If AccessMode is not
+    // kernel, make sure EndVa is in user space AND the byte count
+ // does not cause it to wrap.
+ //
+
+ EndVa = (PVOID)(((PCHAR)MemoryDescriptorList->StartVa +
+ MemoryDescriptorList->ByteOffset) +
+ MemoryDescriptorList->ByteCount);
+
+ if ((AccessMode != KernelMode) &&
+ ((EndVa > (PVOID)MM_USER_PROBE_ADDRESS) || (Va >= EndVa))) {
+ *Page = MM_EMPTY_LIST;
+ ExRaiseStatus (STATUS_ACCESS_VIOLATION);
+ return;
+ }
+
+ //
+ // There is an optimization which could be performed here. If
+ // the operation is for WriteAccess and the complete page is
+ // being modified, we can remove the current page, if it is not
+ // resident, and substitute a demand zero page.
+    // Note that analysis (marking the thread and then noting whether
+    // a page read was done) showed this rarely occurs.
+    //
+
+ MemoryDescriptorList->Process = (PEPROCESS)NULL;
+
+ if (!MI_IS_PHYSICAL_ADDRESS(Va)) {
+ do {
+
+ *Page = MM_EMPTY_LIST;
+ PointerPde = MiGetPdeAddress (Va);
+
+ //
+ // Make sure the page is resident.
+ //
+
+ if ((PointerPde->u.Hard.Valid == 0) ||
+ (PointerPte1->u.Hard.Valid == 0)) {
+
+ status = MmAccessFault (FALSE, Va, KernelMode);
+ }
+
+ //
+ // Touch the page in case the previous fault caused
+ // an access violation. This is quicker than checking
+ // the status code.
+ //
+
+ *(volatile CHAR *)Va;
+
+ if ((Operation != IoReadAccess) &&
+ (Va <= MM_HIGHEST_USER_ADDRESS)) {
+
+ //
+ // Probe for write access as well.
+ //
+
+ ProbeForWriteChar ((PCHAR)Va);
+ }
+
+ Va = (PVOID)(((ULONG)(PCHAR)Va + PAGE_SIZE) & ~(PAGE_SIZE - 1));
+ Page += 1;
+ PointerPte1 += 1;
+ } while (Va < EndVa);
+ }
+
+ Va = (PVOID)(MemoryDescriptorList->StartVa);
+ Page = (PULONG)(MemoryDescriptorList + 1);
+
+ //
+ // Indicate that this is a write operation.
+ //
+
+ if (Operation != IoReadAccess) {
+ MemoryDescriptorList->MdlFlags |= MDL_WRITE_OPERATION;
+ } else {
+ MemoryDescriptorList->MdlFlags &= ~(MDL_WRITE_OPERATION);
+ }
+
+ //
+ // Acquire the PFN database lock.
+ //
+
+ LOCK_PFN2 (OldIrql);
+
+ if (Va <= MM_HIGHEST_USER_ADDRESS) {
+
+ //
+        // These are addresses within user space; check to see if the
+ // working set size will allow these pages to be locked.
+ //
+
+ CurrentProcess = PsGetCurrentProcess ();
+ NumberOfPagesToLock =
+ (((ULONG)EndVa - ((ULONG)Va + 1)) >> PAGE_SHIFT) + 1;
+
+ PageFrameIndex = NumberOfPagesToLock + CurrentProcess->NumberOfLockedPages;
+
+ if ((PageFrameIndex >
+ (CurrentProcess->Vm.MaximumWorkingSetSize - MM_FLUID_WORKING_SET))
+ &&
+ ((MmLockPagesCount + NumberOfPagesToLock) > MmLockPagesLimit)) {
+
+ UNLOCK_PFN (OldIrql);
+ ExRaiseStatus (STATUS_WORKING_SET_QUOTA);
+ return;
+ }
+
+ CurrentProcess->NumberOfLockedPages = PageFrameIndex;
+ MmLockPagesCount += NumberOfPagesToLock;
+ MemoryDescriptorList->Process = CurrentProcess;
+ }
+
+ MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED;
+
+ do {
+
+ PointerPde = MiGetPdeAddress (Va);
+
+ if (MI_IS_PHYSICAL_ADDRESS(Va)) {
+
+ //
+            // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (Va);
+
+ } else {
+
+ while ((PointerPde->u.Hard.Valid == 0) ||
+ (PointerPte->u.Hard.Valid == 0)) {
+
+ //
+                // PDE is not resident; release the PFN lock, touch the
+                // page, and make it appear.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ status = MmAccessFault (FALSE, Va, KernelMode);
+
+ if (!NT_SUCCESS(status)) {
+
+ //
+ // An exception occurred. Unlock the pages locked
+ // so far.
+ //
+
+ MmUnlockPages (MemoryDescriptorList);
+
+ //
+ // Raise an exception of access violation to the caller.
+ //
+
+ ExRaiseStatus (status);
+ return;
+ }
+
+ LOCK_PFN (OldIrql);
+ }
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ }
+
+ if (PageFrameIndex > MmHighestPhysicalPage) {
+
+ //
+            // This is an I/O space address; don't allow operations
+ // on addresses not in the PFN database.
+ //
+
+ MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE;
+
+ } else {
+ ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0);
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ ASSERT (Pfn1->u3.e2.ReferenceCount < MmReferenceCountCheck);
+ }
+
+ *Page = PageFrameIndex;
+
+ Page += 1;
+ PointerPte += 1;
+ Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
+ } while (Va < EndVa);
+
+ UNLOCK_PFN2 (OldIrql);
+
+ return;
+}
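+
+//
+// Illustrative usage sketch (not part of the original source): the usual
+// driver pairing of MmProbeAndLockPages with MmUnlockPages, guarded by a
+// structured exception handler.  UserBuffer and Length are hypothetical.
+//
+// PMDL Mdl;
+//
+// Mdl = IoAllocateMdl (UserBuffer, Length, FALSE, FALSE, NULL);
+// if (Mdl != NULL) {
+//     try {
+//         MmProbeAndLockPages (Mdl, UserMode, IoWriteAccess);
+//
+//         ... perform the transfer described by the MDL ...
+//
+//         MmUnlockPages (Mdl);
+//     } except (EXCEPTION_EXECUTE_HANDLER) {
+//
+//         ... the probe raised; the pages were never locked ...
+//
+//     }
+//     IoFreeMdl (Mdl);
+// }
+//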
+
+NTKERNELAPI
+VOID
+MmProbeAndLockSelectedPages (
+ IN OUT PMDL MemoryDescriptorList,
+ IN PFILE_SEGMENT_ELEMENT SegmentArray,
+ IN KPROCESSOR_MODE AccessMode,
+ IN LOCK_OPERATION Operation
+ )
+
+/*++
+
+Routine Description:
+
+ This routine probes the specified pages, makes the pages resident and
+ locks the physical pages mapped by the virtual pages in memory. The
+ Memory descriptor list is updated to describe the physical pages.
+
+Arguments:
+
+ MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
+ (MDL). The MDL must supply the length. The
+ physical page portion of the MDL is updated when
+ the pages are locked in memory.
+
+ SegmentArray - Supplies a pointer to a list of buffer segments to be
+ probed and locked.
+
+ AccessMode - Supplies the access mode in which to probe the arguments.
+ One of KernelMode or UserMode.
+
+ Operation - Supplies the operation type. One of IoReadAccess, IoWriteAccess
+ or IoModifyAccess.
+
+Return Value:
+
+ None - exceptions are raised.
+
+Environment:
+
+ Kernel mode. APC_LEVEL and below.
+
+--*/
+
+{
+ PMDL TempMdl;
+ ULONG MdlHack[(sizeof(MDL)/4) + 1];
+ PULONG Page;
+ PFILE_SEGMENT_ELEMENT LastSegment;
+
+ PAGED_CODE();
+ ASSERT (MemoryDescriptorList->ByteCount != 0);
+ ASSERT (((ULONG)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
+
+ ASSERT ((MemoryDescriptorList->MdlFlags & (
+ MDL_PAGES_LOCKED |
+ MDL_MAPPED_TO_SYSTEM_VA |
+ MDL_SOURCE_IS_NONPAGED_POOL |
+ MDL_PARTIAL |
+ MDL_SCATTER_GATHER_VA |
+ MDL_IO_SPACE)) == 0);
+
+ //
+ // Initialize TempMdl.
+ //
+
+ TempMdl = (PMDL) &MdlHack;
+ MmInitializeMdl( TempMdl, NULL, PAGE_SIZE );
+
+ Page = (PULONG) (MemoryDescriptorList + 1);
+
+ //
+ // Calculate the end of the segment list.
+ //
+
+ LastSegment = SegmentArray +
+ BYTES_TO_PAGES(MemoryDescriptorList->ByteCount);
+
+ //
+    // Build a small MDL for each segment and call probe and lock pages.
+    // Then copy the PFNs to the real MDL.
+ //
+
+ while (SegmentArray < LastSegment) {
+
+ TempMdl->MdlFlags = 0;
+ TempMdl->StartVa = (PVOID) SegmentArray->Buffer;
+
+ SegmentArray++;
+ MmProbeAndLockPages( TempMdl, AccessMode, Operation );
+
+ *Page++ = *((PULONG) (TempMdl + 1));
+ }
+
+ //
+ // Copy the flags and process fields.
+ //
+
+ MemoryDescriptorList->MdlFlags = TempMdl->MdlFlags;
+ MemoryDescriptorList->Process = TempMdl->Process;
+
+#ifdef _MIPS_
+
+ //
+    // Because the caches are virtual on MIPS they need to be completely
+    // flushed.  Only the first level dcache needs to be swept; however,
+    // the only kernel interface to do this with is sweep I-cache range
+    // since it sweeps both the first level I and D caches.
+ //
+
+ KeSweepIcacheRange( TRUE, NULL, KeGetPcr()->FirstLevelDcacheSize );
+
+ //
+    // Set a flag in the MDL to indicate this MDL is a scatter/gather MDL.
+ //
+
+ MemoryDescriptorList->MdlFlags |= MDL_SCATTER_GATHER_VA;
+
+#endif
+}
+
+VOID
+MmUnlockPages (
+ IN OUT PMDL MemoryDescriptorList
+ )
+
+/*++
+
+Routine Description:
+
+ This routine unlocks physical pages which are described by a Memory
+ Descriptor List.
+
+Arguments:
+
+    MemoryDescriptorList - Supplies a pointer to a memory descriptor list
+                           (MDL). The supplied MDL must have been supplied
+                           to MmProbeAndLockPages to lock the pages down.
+                           As the pages are unlocked, the MDL is updated.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of DISPATCH_LEVEL or below.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ PULONG Page;
+ PVOID StartingVa;
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+
+ ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PAGES_LOCKED) != 0);
+ ASSERT ((MemoryDescriptorList->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
+ ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) == 0);
+ ASSERT (MemoryDescriptorList->ByteCount != 0);
+
+ if (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+
+ //
+ // This MDL has been mapped into system space, unmap now.
+ //
+
+ MmUnmapLockedPages (MemoryDescriptorList->MappedSystemVa,
+ MemoryDescriptorList);
+ }
+
+ StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
+ MemoryDescriptorList->ByteOffset);
+
+ Page = (PULONG)(MemoryDescriptorList + 1);
+ NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
+ MemoryDescriptorList->ByteCount);
+ ASSERT (NumberOfPages != 0);
+
+ LOCK_PFN2 (OldIrql);
+
+ if (MemoryDescriptorList->Process != NULL) {
+ MemoryDescriptorList->Process->NumberOfLockedPages -= NumberOfPages;
+ MmLockPagesCount -= NumberOfPages;
+ ASSERT ((LONG)MemoryDescriptorList->Process->NumberOfLockedPages >= 0);
+ }
+
+ if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) {
+
+ //
+ // Only unlock if not I/O space.
+ //
+
+ do {
+
+ if (*Page == MM_EMPTY_LIST) {
+
+ //
+ // There are no more locked pages.
+ //
+
+ UNLOCK_PFN2 (OldIrql);
+ return;
+ }
+ ASSERT ((*Page <= MmHighestPhysicalPage) &&
+ (*Page >= MmLowestPhysicalPage));
+
+ //
+ // If this was a write operation set the modified bit in the
+ // pfn database.
+ //
+
+ if (MemoryDescriptorList->MdlFlags & MDL_WRITE_OPERATION) {
+ Pfn1 = MI_PFN_ELEMENT (*Page);
+ Pfn1->u3.e1.Modified = 1;
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+ }
+
+ MiDecrementReferenceCount (*Page);
+ *Page = MM_EMPTY_LIST;
+ Page += 1;
+ NumberOfPages -= 1;
+ } while (NumberOfPages != 0);
+ }
+ UNLOCK_PFN2 (OldIrql);
+
+ MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED;
+
+ return;
+}
+
+VOID
+MmBuildMdlForNonPagedPool (
+ IN OUT PMDL MemoryDescriptorList
+ )
+
+/*++
+
+Routine Description:
+
+ This routine fills in the "pages" portion of the MDL using the PFN
+ numbers corresponding the the buffers which resides in non-paged pool.
+
+ Unlike MmProbeAndLockPages, there is no corresponding unlock as no
+ reference counts are incremented as the buffers being in nonpaged
+ pool are always resident.
+
+Arguments:
+
+ MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
+ (MDL). The supplied MDL must supply a virtual
+ address, byte offset and length field. The
+ physical page portion of the MDL is updated when
+ the pages are locked in memory. The virtual
+ address must be within the non-paged portion
+ of the system space.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of DISPATCH_LEVEL or below.
+
+--*/
+
+{
+ PULONG Page;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PVOID EndVa;
+ ULONG PageFrameIndex;
+
+ Page = (PULONG)(MemoryDescriptorList + 1);
+
+ ASSERT (MemoryDescriptorList->ByteCount != 0);
+ ASSERT ((MemoryDescriptorList->MdlFlags & (
+ MDL_PAGES_LOCKED |
+ MDL_MAPPED_TO_SYSTEM_VA |
+ MDL_SOURCE_IS_NONPAGED_POOL |
+ MDL_PARTIAL)) == 0);
+
+ MemoryDescriptorList->Process = (PEPROCESS)NULL;
+
+ //
+    // EndVa is the last byte of the buffer.
+ //
+
+ MemoryDescriptorList->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
+
+ MemoryDescriptorList->MappedSystemVa =
+ (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
+ MemoryDescriptorList->ByteOffset);
+
+ EndVa = (PVOID)(((PCHAR)MemoryDescriptorList->MappedSystemVa +
+ MemoryDescriptorList->ByteCount - 1));
+
+ LastPte = MiGetPteAddress (EndVa);
+
+ ASSERT (MmIsNonPagedSystemAddressValid (MemoryDescriptorList->StartVa));
+
+ PointerPte = MiGetPteAddress (MemoryDescriptorList->StartVa);
+
+ if (MI_IS_PHYSICAL_ADDRESS(EndVa)) {
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (
+ MemoryDescriptorList->StartVa);
+
+ do {
+ *Page = PageFrameIndex;
+ Page += 1;
+ PageFrameIndex += 1;
+ PointerPte += 1;
+ } while (PointerPte <= LastPte);
+ } else {
+ do {
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ *Page = PageFrameIndex;
+ Page += 1;
+ PointerPte += 1;
+ } while (PointerPte <= LastPte);
+ }
+
+ return;
+}
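+
+//
+// Illustrative usage sketch (not part of the original source): building
+// an MDL for a nonpaged pool buffer; no probe, lock, or unlock calls are
+// required.  The pool tag and size are hypothetical.
+//
+// PVOID Buffer;
+// PMDL Mdl;
+//
+// Buffer = ExAllocatePoolWithTag (NonPagedPool, PAGE_SIZE, 'xmpl');
+// if (Buffer != NULL) {
+//     Mdl = IoAllocateMdl (Buffer, PAGE_SIZE, FALSE, FALSE, NULL);
+//     if (Mdl != NULL) {
+//         MmBuildMdlForNonPagedPool (Mdl);
+//
+//         ... the MDL now describes resident physical pages ...
+//
+//         IoFreeMdl (Mdl);
+//     }
+//     ExFreePool (Buffer);
+// }
+//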
+
+PVOID
+MmMapLockedPages (
+ IN PMDL MemoryDescriptorList,
+ IN KPROCESSOR_MODE AccessMode
+ )
+
+/*++
+
+Routine Description:
+
+    This function maps physical pages described by a memory descriptor
+ list into the system virtual address space or the user portion of
+ the virtual address space.
+
+Arguments:
+
+ MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
+ been updated by MmProbeAndLockPages.
+
+
+ AccessMode - Supplies an indicator of where to map the pages;
+ KernelMode indicates that the pages should be mapped in the
+ system part of the address space, UserMode indicates the
+ pages should be mapped in the user part of the address space.
+
+Return Value:
+
+ Returns the base address where the pages are mapped. The base address
+ has the same offset as the virtual address in the MDL.
+
+ This routine will raise an exception if the processor mode is USER_MODE
+ and quota limits or VM limits are exceeded.
+
+Environment:
+
+ Kernel mode. DISPATCH_LEVEL or below if access mode is KernelMode,
+ APC_LEVEL or below if access mode is UserMode.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ ULONG SavedPageCount;
+ PULONG Page;
+ PMMPTE PointerPte;
+ PVOID BaseVa;
+ MMPTE TempPte;
+ PVOID StartingVa;
+ PMMPFN Pfn2;
+ KIRQL OldIrql;
+
+ StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
+ MemoryDescriptorList->ByteOffset);
+
+ ASSERT (MemoryDescriptorList->ByteCount != 0);
+
+ if (AccessMode == KernelMode) {
+
+ Page = (PULONG)(MemoryDescriptorList + 1);
+ NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa,
+ MemoryDescriptorList->ByteCount);
+ SavedPageCount = NumberOfPages;
+
+ //
+ // Map the pages into the system part of the address space as
+ // kernel read/write.
+ //
+
+ ASSERT ((MemoryDescriptorList->MdlFlags & (
+ MDL_MAPPED_TO_SYSTEM_VA |
+ MDL_SOURCE_IS_NONPAGED_POOL |
+ MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
+ ASSERT ((MemoryDescriptorList->MdlFlags & (
+ MDL_PAGES_LOCKED |
+ MDL_PARTIAL)) != 0);
+
+#if defined(_ALPHA_)
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if ((NumberOfPages == 1) &&
+ (*Page < ((1*1024*1024*1024) >> PAGE_SHIFT))) {
+ BaseVa = (PVOID)(KSEG0_BASE + (*Page << PAGE_SHIFT) +
+ MemoryDescriptorList->ByteOffset);
+ MemoryDescriptorList->MappedSystemVa = BaseVa;
+ MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+
+ goto Update;
+ }
+#endif //ALPHA
+
+#if defined(_MIPS_)
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if ((NumberOfPages == 1) &&
+ (MI_GET_PAGE_COLOR_FROM_VA (MemoryDescriptorList->StartVa) ==
+ (MM_COLOR_MASK & *Page)) &&
+ (*Page < ((512*1024*1024) >> PAGE_SHIFT))) {
+ BaseVa = (PVOID)(KSEG0_BASE + (*Page << PAGE_SHIFT) +
+ MemoryDescriptorList->ByteOffset);
+ MemoryDescriptorList->MappedSystemVa = BaseVa;
+ MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+
+ goto Update;
+ }
+#endif //MIPS
+
+
+#if defined(_X86_)
+
+ //
+ // See if KSEG0 can be used to map this.
+ //
+
+ if ((NumberOfPages == 1) &&
+ (*Page < MmKseg2Frame)) {
+ BaseVa = (PVOID)(MM_KSEG0_BASE + (*Page << PAGE_SHIFT) +
+ MemoryDescriptorList->ByteOffset);
+ MemoryDescriptorList->MappedSystemVa = BaseVa;
+ MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+
+ goto Update;
+ }
+#endif //X86
+
+ PointerPte = MiReserveSystemPtes (
+ NumberOfPages,
+ SystemPteSpace,
+ MM_COLOR_ALIGNMENT,
+ ((ULONG)MemoryDescriptorList->StartVa &
+ MM_COLOR_MASK_VIRTUAL),
+ MemoryDescriptorList->MdlFlags & MDL_MAPPING_CAN_FAIL ? 0 : 1);
+ if (PointerPte == NULL) {
+
+ //
+ // Not enough system PTES are available.
+ //
+
+ return NULL;
+ }
+ BaseVa = (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (PointerPte) +
+ MemoryDescriptorList->ByteOffset);
+
+ TempPte = ValidKernelPte;
+
+#if defined(_MIPS_)
+
+ //
+ // If this is a Scatter/Gather Mdl then disable caching since the
+ // page colors will be wrong in the MDL.
+ //
+
+ if (MemoryDescriptorList->MdlFlags & MDL_SCATTER_GATHER_VA) {
+ MI_DISABLE_CACHING( TempPte );
+ }
+#endif
+
+#if DBG
+ LOCK_PFN2 (OldIrql);
+#endif //DBG
+
+ do {
+
+ if (*Page == MM_EMPTY_LIST) {
+ break;
+ }
+ ASSERT (*Page != 0);
+ TempPte.u.Hard.PageFrameNumber = *Page;
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+#if DBG
+ if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0) {
+ Pfn2 = MI_PFN_ELEMENT (*Page);
+ ASSERT (Pfn2->u3.e2.ReferenceCount != 0);
+ Pfn2->u3.e2.ReferenceCount += 1;
+ ASSERT (Pfn2->u3.e2.ReferenceCount < MmReferenceCountCheck);
+ ASSERT ((((ULONG)PointerPte >> PTE_SHIFT) & MM_COLOR_MASK) ==
+ (((ULONG)Pfn2->u3.e1.PageColor)));
+ }
+#endif //DBG
+
+ *PointerPte = TempPte;
+ Page++;
+ PointerPte++;
+ NumberOfPages -= 1;
+ } while (NumberOfPages != 0);
+#if DBG
+ UNLOCK_PFN2 (OldIrql);
+#endif //DBG
+
+ ExAcquireSpinLock ( &MmSystemSpaceLock, &OldIrql );
+ if (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+
+        //
+ // Another thread must have already mapped this.
+ // Clean up the system ptes and release them.
+ //
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+#if DBG
+ if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE ) == 0) {
+ PMMPFN Pfn3;
+ ULONG j;
+ PULONG Page1;
+
+ Page1 = (PULONG)(MemoryDescriptorList + 1);
+ for (j = 0; j < SavedPageCount ; j++ ) {
+                if (*Page1 == MM_EMPTY_LIST) {
+ break;
+ }
+ Pfn3 = MI_PFN_ELEMENT (*Page1);
+ Page1 += 1;
+ ASSERT (Pfn3->u3.e2.ReferenceCount > 1);
+ Pfn3->u3.e2.ReferenceCount -= 1;
+ ASSERT (Pfn3->u3.e2.ReferenceCount < 256);
+ }
+ }
+#endif //DBG
+ PointerPte = MiGetPteAddress (BaseVa);
+
+ MiReleaseSystemPtes (PointerPte,
+ SavedPageCount,
+ SystemPteSpace);
+
+ return MemoryDescriptorList->MappedSystemVa;
+ }
+
+ MemoryDescriptorList->MappedSystemVa = BaseVa;
+ *(volatile ULONG *)&MmLockPagesCount; //need to force order.
+ MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+
+#if defined(_MIPS_) || defined(_ALPHA_) || defined (_X86_)
+Update:
+#endif
+
+ if ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) != 0) {
+ MemoryDescriptorList->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
+ }
+
+ return BaseVa;
+
+ } else {
+
+ return MiMapLockedPagesInUserSpace (MemoryDescriptorList, StartingVa);
+ }
+}
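+
+//
+// Illustrative usage sketch (not part of the original source): mapping a
+// locked MDL into system space and unmapping it when done.  Mdl is a
+// hypothetical MDL already locked with MmProbeAndLockPages.
+//
+// PVOID SystemVa;
+//
+// SystemVa = MmMapLockedPages (Mdl, KernelMode);
+//
+//     ... access the buffer through SystemVa ...
+//
+// MmUnmapLockedPages (SystemVa, Mdl);
+//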
+
+
+PVOID
+MiMapLockedPagesInUserSpace (
+ IN PMDL MemoryDescriptorList,
+ IN PVOID StartingVa
+ )
+
+/*++
+
+Routine Description:
+
+    This function maps physical pages described by a memory descriptor
+ list into the system virtual address space or the user portion of
+ the virtual address space.
+
+Arguments:
+
+ MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
+ been updated by MmProbeAndLockPages.
+
+
+ StartingVa - Supplies the starting address.
+
+Return Value:
+
+ Returns the base address where the pages are mapped. The base address
+ has the same offset as the virtual address in the MDL.
+
+ This routine will raise an exception if the processor mode is USER_MODE
+ and quota limits or VM limits are exceeded.
+
+Environment:
+
+ Kernel mode. APC_LEVEL or below.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ PULONG Page;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PVOID BaseVa;
+ MMPTE TempPte;
+ PVOID EndingAddress;
+ PMMVAD Vad;
+ PEPROCESS Process;
+ PMMPFN Pfn2;
+
+ PAGED_CODE ();
+ Page = (PULONG)(MemoryDescriptorList + 1);
+ NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa,
+ MemoryDescriptorList->ByteCount);
+
+ if (MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) {
+ ExRaiseStatus (STATUS_INVALID_ADDRESS);
+ }
+
+ //
+ // Map the pages into the user part of the address as user
+ // read/write no-delete.
+ //
+
+ TempPte = ValidUserPte;
+
+ Process = PsGetCurrentProcess ();
+
+ //
+ // Get the working set mutex and address creation mutex.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ try {
+
+ Vad = (PMMVAD)NULL;
+ BaseVa = MiFindEmptyAddressRange ( (NumberOfPages * PAGE_SIZE),
+ X64K,
+ 0 );
+
+ EndingAddress = (PVOID)((PCHAR)BaseVa + (NumberOfPages * PAGE_SIZE) - 1);
+
+ Vad = ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD), ' daV');
+
+ if (Vad == NULL) {
+ BaseVa = NULL;
+ goto Done;
+ }
+
+ Vad->StartingVa = BaseVa;
+ Vad->EndingVa = EndingAddress;
+ Vad->ControlArea = NULL;
+ Vad->FirstPrototypePte = NULL;
+ Vad->u.LongFlags = 0;
+ Vad->u.VadFlags.Protection = MM_READWRITE;
+ Vad->u.VadFlags.PhysicalMapping = 1;
+ Vad->u.VadFlags.PrivateMemory = 1;
+ Vad->Banked = NULL;
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ if (Vad != (PMMVAD)NULL) {
+ ExFreePool (Vad);
+ }
+ BaseVa = NULL;
+ goto Done;
+ }
+
+ //
+ // Get the working set mutex and address creation mutex.
+ //
+
+ PointerPte = MiGetPteAddress (BaseVa);
+
+ do {
+
+ if (*Page == MM_EMPTY_LIST) {
+ break;
+ }
+
+ ASSERT (*Page != 0);
+ ASSERT ((*Page <= MmHighestPhysicalPage) &&
+ (*Page >= MmLowestPhysicalPage));
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+ TempPte.u.Hard.PageFrameNumber = *Page;
+ *PointerPte = TempPte;
+
+ //
+ // A PTE just went from not present, not transition to
+ // present. The share count and valid count must be
+ // updated in the page table page which contains this
+ // Pte.
+ //
+
+ Pfn2 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
+ Pfn2->u2.ShareCount += 1;
+
+ //
+ // Another zeroed PTE has become non-zero.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries
+ [MiGetPteOffset(PointerPte)] += 1;
+
+ ASSERT (MmWorkingSetList->UsedPageTableEntries
+ [MiGetPteOffset(PointerPte)] <= PTE_PER_PAGE);
+
+ Page++;
+ PointerPte++;
+ NumberOfPages -= 1;
+ } while (NumberOfPages != 0);
+
+Done:
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+ if (BaseVa == NULL) {
+ ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
+ }
+
+ return BaseVa;
+}
+
+
+VOID
+MmUnmapLockedPages (
+ IN PVOID BaseAddress,
+ IN PMDL MemoryDescriptorList
+ )
+
+/*++
+
+Routine Description:
+
+ This routine unmaps locked pages which were previously mapped via
+ a MmMapLockedPages function.
+
+Arguments:
+
+ BaseAddress - Supplies the base address where the pages were previously
+ mapped.
+
+ MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
+ been updated by MmProbeAndLockPages.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode. DISPATCH_LEVEL or below if base address is within system space;
+ APC_LEVEL or below if base address is user space.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ ULONG i;
+ PULONG Page;
+ PMMPTE PointerPte;
+ PMMPTE PointerBase;
+ PVOID StartingVa;
+ KIRQL OldIrql;
+
+ ASSERT (MemoryDescriptorList->ByteCount != 0);
+ ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
+
+ if (MI_IS_PHYSICAL_ADDRESS (BaseAddress)) {
+
+ //
+ // MDL is not mapped into virtual space, just clear the fields
+ // and return.
+ //
+
+ MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
+ MDL_PARTIAL_HAS_BEEN_MAPPED);
+ return;
+ }
+
+ if (BaseAddress > MM_HIGHEST_USER_ADDRESS) {
+
+ StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
+ MemoryDescriptorList->ByteOffset);
+
+ NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa,
+ MemoryDescriptorList->ByteCount);
+
+ PointerBase = MiGetPteAddress (BaseAddress);
+
+
+ ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) != 0);
+
+
+#if DBG
+ PointerPte = PointerBase;
+ i = NumberOfPages;
+ Page = (PULONG)(MemoryDescriptorList + 1);
+ if ((MemoryDescriptorList->MdlFlags & MDL_LOCK_HELD) == 0) {
+ LOCK_PFN2 (OldIrql);
+ }
+
+ while (i != 0) {
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ ASSERT (*Page == PointerPte->u.Hard.PageFrameNumber);
+ if ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE ) == 0) {
+ PMMPFN Pfn3;
+ Pfn3 = MI_PFN_ELEMENT (*Page);
+ ASSERT (Pfn3->u3.e2.ReferenceCount > 1);
+ Pfn3->u3.e2.ReferenceCount -= 1;
+ ASSERT (Pfn3->u3.e2.ReferenceCount < 256);
+ }
+
+ Page += 1;
+ PointerPte++;
+ i -= 1;
+ }
+
+ if ((MemoryDescriptorList->MdlFlags & MDL_LOCK_HELD) == 0) {
+ UNLOCK_PFN2 (OldIrql);
+ }
+#endif //DBG
+
+ MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
+ MDL_PARTIAL_HAS_BEEN_MAPPED);
+
+ MiReleaseSystemPtes (PointerBase, NumberOfPages, SystemPteSpace);
+ return;
+
+ } else {
+
+ MiUnmapLockedPagesInUserSpace (BaseAddress,
+ MemoryDescriptorList);
+ }
+}
+
+
+VOID
+MiUnmapLockedPagesInUserSpace (
+ IN PVOID BaseAddress,
+ IN PMDL MemoryDescriptorList
+ )
+
+/*++
+
+Routine Description:
+
+ This routine unmaps locked pages which were previously mapped via
+ a MmMapLockedPages function.
+
+Arguments:
+
+ BaseAddress - Supplies the base address where the pages were previously
+ mapped.
+
+ MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
+ been updated by MmProbeAndLockPages.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode. DISPATCH_LEVEL or below if base address is within system space;
+ APC_LEVEL or below if base address is user space.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ PULONG Page;
+ PMMPTE PointerPte;
+ PMMPTE PointerBase;
+ PMMPTE PointerPde;
+ PVOID StartingVa;
+ KIRQL OldIrql;
+ PMMVAD Vad;
+ PVOID TempVa;
+ PEPROCESS Process;
+ CSHORT PageTableOffset;
+
+ MmLockPagableSectionByHandle (ExPageLockHandle);
+
+ StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
+ MemoryDescriptorList->ByteOffset);
+
+ Page = (PULONG)(MemoryDescriptorList + 1);
+ NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa,
+ MemoryDescriptorList->ByteCount);
+
+ PointerPte = MiGetPteAddress (BaseAddress);
+ PointerBase = PointerPte;
+
+ //
+ // This was mapped into the user portion of the address space and
+ // the corresponding virtual address descriptor must be deleted.
+ //
+
+ //
+ // Get the working set mutex and address creation mutex.
+ //
+
+ Process = PsGetCurrentProcess ();
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ Vad = MiLocateAddress (BaseAddress);
+ ASSERT (Vad != NULL);
+ MiRemoveVad (Vad);
+
+ //
+ // Get the PFN mutex so we can safely decrement share and valid
+ // counts on page table pages.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ do {
+
+ if (*Page == MM_EMPTY_LIST) {
+ break;
+ }
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+
+ (VOID)KeFlushSingleTb (BaseAddress,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ ZeroPte.u.Flush);
+
+ PointerPde = MiGetPteAddress(PointerPte);
+ MiDecrementShareAndValidCount (PointerPde->u.Hard.PageFrameNumber);
+
+ //
+ // Another Pte has become zero.
+ //
+
+ PageTableOffset = (CSHORT)MiGetPteOffset( PointerPte );
+ MmWorkingSetList->UsedPageTableEntries[PageTableOffset] -= 1;
+ ASSERT (MmWorkingSetList->UsedPageTableEntries[PageTableOffset]
+ < PTE_PER_PAGE);
+
+ //
+ // If all the entries have been eliminated from the previous
+ // page table page, delete the page table page itself.
+ //
+
+ if (MmWorkingSetList->UsedPageTableEntries[PageTableOffset] == 0) {
+
+ TempVa = MiGetVirtualAddressMappedByPte (PointerPde);
+ MiDeletePte (PointerPde,
+ TempVa,
+ FALSE,
+ Process,
+ (PMMPTE)NULL,
+ NULL);
+ }
+
+ Page++;
+ PointerPte++;
+ NumberOfPages -= 1;
+ BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
+ } while (NumberOfPages != 0);
+
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+ ExFreePool (Vad);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return;
+}
+
+
+PVOID
+MmMapIoSpace (
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN ULONG NumberOfBytes,
+ IN MEMORY_CACHING_TYPE CacheType
+ )
+
+/*++
+
+Routine Description:
+
+ This function maps the specified physical address into the non-pageable
+ portion of the system address space.
+
+Arguments:
+
+ PhysicalAddress - Supplies the starting physical address to map.
+
+ NumberOfBytes - Supplies the number of bytes to map.
+
+    CacheType - Supplies MmNonCached if the physical address is to be mapped
+ as non-cached, MmCached if the address should be cached, and
+ MmCacheFrameBuffer if the address should be cached as a frame
+ buffer. For I/O device registers, this is usually specified
+ as MmNonCached.
+
+Return Value:
+
+ Returns the virtual address which maps the specified physical addresses.
+ The value NULL is returned if sufficient virtual address space for
+ the mapping could not be found.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ ULONG PageFrameIndex;
+ PMMPTE PointerPte;
+ PVOID BaseVa;
+ MMPTE TempPte;
+ NTSTATUS Status;
+
+ //
+ // For compatibility for when CacheType used to be passed as a BOOLEAN
+ // mask off the upper bits (TRUE == MmCached, FALSE == MmNonCached).
+ //
+
+ CacheType &= 0xFF;
+
+ if (CacheType >= MmMaximumCacheType) {
+ return (NULL);
+ }
+
+#ifdef i386
+ ASSERT (PhysicalAddress.HighPart == 0);
+#endif
+#ifdef R4000
+ ASSERT (PhysicalAddress.HighPart < 16);
+#endif
+
+ //PAGED_CODE();
+
+
+ ASSERT (NumberOfBytes != 0);
+ NumberOfPages = COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart,
+ NumberOfBytes);
+
+ PointerPte = MiReserveSystemPtes(NumberOfPages,
+ SystemPteSpace,
+ MM_COLOR_ALIGNMENT,
+ (PhysicalAddress.LowPart &
+ MM_COLOR_MASK_VIRTUAL),
+ FALSE);
+ if (PointerPte == NULL) {
+ return(NULL);
+ }
+
+ BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+ BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart));
+
+ TempPte = ValidKernelPte;
+
+#ifdef i386
+ //
+ // Set physical range to proper caching type.
+ //
+
+ Status = KeSetPhysicalCacheTypeRange(
+ PhysicalAddress,
+ NumberOfBytes,
+ CacheType
+ );
+
+ //
+ // If range could not be set, determine what to do
+ //
+
+ if (!NT_SUCCESS(Status)) {
+
+ if ((Status == STATUS_NOT_SUPPORTED) &&
+ ((CacheType == MmNonCached) || (CacheType == MmCached))) {
+
+ //
+ // The range may not have been set into the proper cache
+ // type. If the range is either MmNonCached or MmCached just
+ // continue as the PTE will be marked properly.
+ //
+
+ } else if (Status == STATUS_UNSUCCESSFUL && CacheType == MmCached) {
+
+ //
+ // If setting a range to Cached was unsuccessful things are not
+ // optimal, but not fatal. The range can be returned to the
+ // caller and it will have whatever caching type it has - possibly
+ // something below fully cached.
+ //
+
+#if DBG
+ DbgPrint("MmMapIoSpace: Failed to set range to MmCached\n");
+#endif
+
+ } else {
+
+ //
+ // If there's still a problem, fail the request.
+ //
+#if DBG
+ DbgPrint("MmMapIoSpace: KeSetPhysicalCacheTypeRange failed\n");
+#endif
+
+ MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);
+
+ return(NULL);
+ }
+ }
+#endif
+
+ if (CacheType == MmNonCached) {
+ MI_DISABLE_CACHING (TempPte);
+ }
+
+ PageFrameIndex = (ULONG)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
+
+ do {
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+ PointerPte++;
+ PageFrameIndex += 1;
+ NumberOfPages -= 1;
+ } while (NumberOfPages != 0);
+
+ return BaseVa;
+}
+
+VOID
+MmUnmapIoSpace (
+ IN PVOID BaseAddress,
+ IN ULONG NumberOfBytes
+ )
+
+/*++
+
+Routine Description:
+
+ This function unmaps a range of physical address which were previously
+ mapped via an MmMapIoSpace function call.
+
+Arguments:
+
+ BaseAddress - Supplies the base virtual address where the physical
+ address was previously mapped.
+
+ NumberOfBytes - Supplies the number of bytes which were mapped.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ ULONG i;
+ PMMPTE FirstPte;
+
+ PAGED_CODE();
+ ASSERT (NumberOfBytes != 0);
+ NumberOfPages = COMPUTE_PAGES_SPANNED (BaseAddress, NumberOfBytes);
+ FirstPte = MiGetPteAddress (BaseAddress);
+ MiReleaseSystemPtes(FirstPte, NumberOfPages, SystemPteSpace);
+
+ return;
+}
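+
+//
+// Illustrative usage sketch (not part of the original source): mapping a
+// device register block noncached and unmapping it at unload time.  The
+// physical address and length below are hypothetical.
+//
+// PHYSICAL_ADDRESS DeviceRegisters;
+// PVOID MappedBase;
+//
+// DeviceRegisters.QuadPart = 0xD0000000;
+// MappedBase = MmMapIoSpace (DeviceRegisters, 0x1000, MmNonCached);
+// if (MappedBase != NULL) {
+//
+//     ... access the registers with READ_REGISTER_ULONG /
+//     WRITE_REGISTER_ULONG through MappedBase ...
+//
+//     MmUnmapIoSpace (MappedBase, 0x1000);
+// }
+//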
+
+PVOID
+MmAllocateContiguousMemory (
+ IN ULONG NumberOfBytes,
+ IN PHYSICAL_ADDRESS HighestAcceptableAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function allocates a range of physically contiguous non-paged
+ pool. It relies on the fact that non-paged pool is built at
+ system initialization time from a contiguous range of phyiscal
+ memory. It allocates the specified size of non-paged pool and
+ then checks to ensure it is contiguous as pool expansion does
+ not maintain the contiguous nature of non-paged pool.
+
+ This routine is designed to be used by a driver's initialization
+ routine to allocate a contiguous block of physical memory for
+ issuing DMA requests from.
+
+Arguments:
+
+ NumberOfBytes - Supplies the number of bytes to allocate.
+
+ HighestAcceptableAddress - Supplies the highest physical address
+ which is valid for the allocation. For
+ example, if the device can only reference
+                                 physical memory in the lower 16MB this
+ value would be set to 0xFFFFFF (16Mb - 1).
+
+Return Value:
+
+ NULL - a contiguous range could not be found to satisfy the request.
+
+ NON-NULL - Returns a pointer (virtual address in the nonpaged portion
+               of the system) to the allocated physically contiguous
+ memory.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PVOID BaseAddress;
+ ULONG SizeInPages;
+ ULONG HighestPfn;
+ ULONG i;
+
+ PAGED_CODE();
+
+ ASSERT (NumberOfBytes != 0);
+ BaseAddress = ExAllocatePoolWithTag (NonPagedPoolCacheAligned,
+ NumberOfBytes,
+ 'mCmM');
+
+ SizeInPages = BYTES_TO_PAGES (NumberOfBytes);
+ HighestPfn = (ULONG)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
+ if (BaseAddress != NULL) {
+ if (MiCheckForContiguousMemory( BaseAddress,
+ SizeInPages,
+ HighestPfn)) {
+
+ return BaseAddress;
+ } else {
+
+ //
+            // The allocation from pool does not meet the contiguous
+            // requirements. Free the allocation and see if any of the
+            // free pool pages meet the requirements.
+ //
+
+ ExFreePool (BaseAddress);
+ }
+ } else {
+
+ //
+ // No pool was available, return NULL.
+ //
+
+ return NULL;
+ }
+
+ if (KeGetCurrentIrql() > APC_LEVEL) {
+ return NULL;
+ }
+
+ BaseAddress = NULL;
+
+ i = 3;
+ for (; ; ) {
+ BaseAddress = MiFindContiguousMemory (HighestPfn, SizeInPages);
+ if ((BaseAddress != NULL) || (i == 0)) {
+ break;
+ }
+
+ MmDelayPageFaults = TRUE;
+
+ //
+ // Attempt to move pages to the standby list.
+ //
+
+ MiEmptyAllWorkingSets ();
+ MiFlushAllPages();
+
+ KeDelayExecutionThread (KernelMode,
+ FALSE,
+ &Mm30Milliseconds);
+
+ i -= 1;
+ }
+ MmDelayPageFaults = FALSE;
+ return BaseAddress;
+}
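+
+//
+// Illustrative usage sketch (not part of the original source): a driver
+// allocating a contiguous buffer below 16MB for an ISA DMA device.  The
+// buffer size is hypothetical.
+//
+// PHYSICAL_ADDRESS HighestAcceptable;
+// PVOID Buffer;
+//
+// HighestAcceptable.QuadPart = 0xFFFFFF;       // 16MB - 1
+// Buffer = MmAllocateContiguousMemory (0x10000, HighestAcceptable);
+// if (Buffer != NULL) {
+//
+//     ... program the DMA controller with
+//     MmGetPhysicalAddress (Buffer) ...
+//
+//     MmFreeContiguousMemory (Buffer);
+// }
+//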
+
+PVOID
+MiFindContiguousMemory (
+ IN ULONG HighestPfn,
+ IN ULONG SizeInPages
+ )
+
+/*++
+
+Routine Description:
+
+    This function searches nonpaged pool and the free, zeroed,
+ and standby lists for contiguous pages that satisfy the
+ request.
+
+Arguments:
+
+    HighestPfn - Supplies the highest acceptable physical page number.
+
+ SizeInPages - Supplies the number of pages to allocate.
+
+
+Return Value:
+
+ NULL - a contiguous range could not be found to satisfy the request.
+
+ NON-NULL - Returns a pointer (virtual address in the nonpaged portion
+               of the system) to the allocated physically contiguous
+ memory.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+{
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PVOID BaseAddress = NULL;
+ KIRQL OldIrql;
+ KIRQL OldIrql2;
+ PMMFREE_POOL_ENTRY FreePageInfo;
+ PLIST_ENTRY Entry;
+ ULONG start;
+ ULONG count;
+ ULONG Page;
+ ULONG found;
+ MMPTE TempPte;
+ ULONG PageColor;
+
+ PAGED_CODE ();
+
+ //
+ // A suitable pool page was not allocated via the pool allocator.
+    // Grab the pool lock and manually search for a page which meets
+ // the requirements.
+ //
+
+ MmLockPagableSectionByHandle (ExPageLockHandle);
+ OldIrql = ExLockPool (NonPagedPool);
+
+ //
+    // Trace through the page allocator's pool headers for a page which
+ // meets the requirements.
+ //
+
+ //
+ // NonPaged pool is linked together through the pages themselves.
+ //
+
+ Entry = MmNonPagedPoolFreeListHead.Flink;
+
+ while (Entry != &MmNonPagedPoolFreeListHead) {
+
+ //
+ // The list is not empty, see if this one meets the physical
+ // requirements.
+ //
+
+ FreePageInfo = CONTAINING_RECORD(Entry,
+ MMFREE_POOL_ENTRY,
+ List);
+
+ ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE);
+ if (FreePageInfo->Size >= SizeInPages) {
+
+ //
+ // This entry has sufficient space, check to see if the
+            // pages meet the physical requirements.
+ //
+
+ if (MiCheckForContiguousMemory( Entry,
+ SizeInPages,
+ HighestPfn)) {
+
+ //
+                // These pages meet the requirements; note that
+ // pages are being removed from the front of
+ // the list entry and the whole list entry
+ // will be removed and then the remainder inserted.
+ //
+
+ RemoveEntryList (&FreePageInfo->List);
+
+ //
+ // Adjust the number of free pages remaining in the pool.
+ //
+
+ MmNumberOfFreeNonPagedPool -= FreePageInfo->Size;
+ ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0);
+ NonPagedPoolDescriptor.TotalBigPages += FreePageInfo->Size;
+
+ //
+ // Mark start and end for the block at the top of the
+ // list.
+ //
+
+ Entry = PAGE_ALIGN(Entry);
+ if (MI_IS_PHYSICAL_ADDRESS(Entry)) {
+
+ //
+                    // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Entry));
+ } else {
+ PointerPte = MiGetPteAddress(Entry);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+
+ ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+
+ //
+ // Calculate the ending PFN address, note that since
+ // these pages are contiguous, just add to the PFN.
+ //
+
+ Pfn1 += SizeInPages - 1;
+ ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ MmAllocatedNonPagedPool += FreePageInfo->Size;
+ NonPagedPoolDescriptor.TotalBigPages += FreePageInfo->Size;
+
+ if (SizeInPages == FreePageInfo->Size) {
+
+ //
+ // Unlock the pool and return.
+ //
+ BaseAddress = (PVOID)Entry;
+ goto Done;
+ }
+
+ BaseAddress = (PVOID)((PCHAR)Entry + (SizeInPages << PAGE_SHIFT));
+
+ //
+ // Mark start and end of allocation in the PFN database.
+ //
+
+ if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) {
+
+ //
+                    // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress));
+ } else {
+ PointerPte = MiGetPteAddress(BaseAddress);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+
+ ASSERT (Pfn1->u3.e1.StartOfAllocation == 0);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+
+ //
+ // Calculate the ending PTE's address, can't depend on
+ // these pages being physically contiguous.
+ //
+
+ if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) {
+ Pfn1 += FreePageInfo->Size - (SizeInPages + 1);
+ } else {
+ PointerPte += FreePageInfo->Size - (SizeInPages + 1);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+ ASSERT (Pfn1->u3.e1.EndOfAllocation == 0);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ ASSERT (((ULONG)BaseAddress & (PAGE_SIZE -1)) == 0);
+
+ //
+ // Unlock the pool.
+ //
+
+ ExUnlockPool (NonPagedPool, OldIrql);
+
+ //
+ // Free the entry at BaseAddress back into the pool.
+ //
+
+ ExFreePool (BaseAddress);
+ BaseAddress = (PVOID)Entry;
+ goto Done1;
+ }
+ }
+ Entry = FreePageInfo->List.Flink;
+ }
+
+ //
+ // No entry was found that meets the requirements.
+ // Search the PFN database for pages that meet the
+ // requirements.
+ //
+
+ start = 0;
+ do {
+
+ count = MmPhysicalMemoryBlock->Run[start].PageCount;
+ Page = MmPhysicalMemoryBlock->Run[start].BasePage;
+
+ if ((Page <= (1 + HighestPfn - SizeInPages)) &&
+ (count >= SizeInPages)) {
+
+ //
+ // Check to see if these pages are on the right list.
+ //
+
+ found = 0;
+
+ Pfn1 = MI_PFN_ELEMENT (Page);
+ LOCK_PFN2 (OldIrql2);
+ do {
+
+ if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) ||
+ (Pfn1->u3.e1.PageLocation == FreePageList) ||
+ (Pfn1->u3.e1.PageLocation == StandbyPageList)) {
+
+ if ((Pfn1->u1.Flink != 0) && (Pfn1->u2.Blink != 0)) {
+ found += 1;
+ if (found == SizeInPages) {
+
+ //
+ // A match has been found, remove these
+ // pages, add them to the free pool and
+ // return.
+ //
+
+ Page = 1 + Page - found;
+
+ //
+ // Try to find system ptes to expand the pool into.
+ //
+
+ PointerPte = MiReserveSystemPtes (SizeInPages,
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ FALSE);
+
+ if (PointerPte == NULL) {
+ UNLOCK_PFN2 (OldIrql2);
+ goto Done;
+ }
+
+ MmResidentAvailablePages -= SizeInPages;
+ MiChargeCommitmentCantExpand (SizeInPages, TRUE);
+ BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+ PageColor = MI_GET_PAGE_COLOR_FROM_VA(BaseAddress);
+ TempPte = ValidKernelPte;
+ MmAllocatedNonPagedPool += SizeInPages;
+ NonPagedPoolDescriptor.TotalBigPages += SizeInPages;
+ Pfn1 = MI_PFN_ELEMENT (Page - 1);
+
+ do {
+ Pfn1 += 1;
+ if (Pfn1->u3.e1.PageLocation == StandbyPageList) {
+ MiUnlinkPageFromList (Pfn1);
+ MiRestoreTransitionPte (Page);
+ } else {
+ MiUnlinkFreeOrZeroedPage (Page);
+ }
+
+ MI_CHECK_PAGE_ALIGNMENT(Page,
+ PageColor & MM_COLOR_MASK);
+ Pfn1->u3.e1.PageColor = PageColor & MM_COLOR_MASK;
+ PageColor += 1;
+ TempPte.u.Hard.PageFrameNumber = Page;
+ *PointerPte = TempPte;
+
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u2.ShareCount = 1;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+ Pfn1->PteFrame = MiGetPteAddress(PointerPte)->u.Hard.PageFrameNumber;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+
+ if (found == SizeInPages) {
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ }
+ PointerPte += 1;
+ Page += 1;
+ found -= 1;
+ } while (found);
+
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ UNLOCK_PFN2 (OldIrql2);
+ goto Done;
+ }
+ } else {
+ found = 0;
+ }
+ } else {
+ found = 0;
+ }
+ Page += 1;
+ Pfn1 += 1;
+ count -= 1;
+
+ } while (count && (Page <= HighestPfn));
+ UNLOCK_PFN2 (OldIrql2);
+ }
+ start += 1;
+ } while (start != MmPhysicalMemoryBlock->NumberOfRuns);
+
+Done:
+
+ ExUnlockPool (NonPagedPool, OldIrql);
+
+Done1:
+
+ MmUnlockPagableImageSection (ExPageLockHandle);
+ return BaseAddress;
+}
+
+VOID
+MmFreeContiguousMemory (
+ IN PVOID BaseAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function deallocates a range of physically contiguous non-paged
+ pool which was allocated with the MmAllocateContiguousMemory function.
+
+Arguments:
+
+ BaseAddress - Supplies the base virtual address where the physical
+ address was previously mapped.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ PAGED_CODE();
+ ExFreePool (BaseAddress);
+ return;
+}
+
+PHYSICAL_ADDRESS
+MmGetPhysicalAddress (
+ IN PVOID BaseAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the corresponding physical address for a
+ valid virtual address.
+
+Arguments:
+
+ BaseAddress - Supplies the virtual address for which to return the
+ physical address.
+
+Return Value:
+
+ Returns the corresponding physical address.
+
+Environment:
+
+ Kernel mode. Any IRQL level.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PHYSICAL_ADDRESS PhysicalAddress;
+
+ if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) {
+ PhysicalAddress.LowPart = MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress);
+ } else {
+
+ PointerPte = MiGetPteAddress(BaseAddress);
+
+ if (PointerPte->u.Hard.Valid == 0) {
+ KdPrint(("MM:MmGetPhysicalAddressFailed base address was %lx",
+ BaseAddress));
+ ZERO_LARGE (PhysicalAddress);
+ return PhysicalAddress;
+ }
+ PhysicalAddress.LowPart = PointerPte->u.Hard.PageFrameNumber;
+ }
+
+ PhysicalAddress.HighPart = 0;
+ PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;
+ PhysicalAddress.LowPart += BYTE_OFFSET(BaseAddress);
+
+ return PhysicalAddress;
+}
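+
+//
+// Illustrative note (not part of the original source): the result is
+// composed as (PageFrameNumber << PAGE_SHIFT) + BYTE_OFFSET(Va).  For a
+// hypothetical virtual address whose PTE holds frame 0x41 and whose byte
+// offset is 0x123, the routine returns physical address
+// (0x41 << 12) + 0x123 == 0x41123.
+//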
+
+PVOID
+MmGetVirtualForPhysical (
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the corresponding virtual address for a physical
+ address whose primary virtual address is in system space.
+
+Arguments:
+
+ PhysicalAddress - Supplies the physical address for which to return the
+ virtual address.
+
+Return Value:
+
+ Returns the corresponding virtual address.
+
+Environment:
+
+ Kernel mode. Any IRQL level.
+
+--*/
+
+{
+ ULONG PageFrameIndex;
+ PMMPFN Pfn;
+
+ PageFrameIndex = (ULONG)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
+
+ Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+
+ return (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (Pfn->PteAddress) +
+ BYTE_OFFSET (PhysicalAddress.LowPart));
+}
+
+PVOID
+MmAllocateNonCachedMemory (
+ IN ULONG NumberOfBytes
+ )
+
+/*++
+
+Routine Description:
+
+ This function allocates a range of noncached memory in
+ the non-paged portion of the system address space.
+
+ This routine is designed to be used by a driver's initialization
+ routine to allocate a noncached block of virtual memory for
+ various device specific buffers.
+
+Arguments:
+
+ NumberOfBytes - Supplies the number of bytes to allocate.
+
+Return Value:
+
+ NULL - the specified request could not be satisfied.
+
+    NON-NULL - Returns a pointer (virtual address in the nonpaged portion
+                of the system) to the allocated noncached memory.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ ULONG NumberOfPages;
+ ULONG PageFrameIndex;
+ PVOID BaseAddress;
+ KIRQL OldIrql;
+
+ MmLockPagableSectionByHandle (ExPageLockHandle);
+
+ ASSERT (NumberOfBytes != 0);
+
+ //
+ // Acquire the PFN mutex to synchronize access to the pfn database.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ //
+    // Obtain enough pages to contain the allocation and enough PTEs from
+    // the system PTE pool. The system PTE pool contains non-paged PTEs
+    // which are currently empty.
+ //
+
+ NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
+
+ //
+    // Check to make sure the physical pages are available.
+ //
+
+ if (MmResidentAvailablePages <= (LONG)NumberOfPages) {
+ BaseAddress = NULL;
+ goto Done;
+ }
+
+ PointerPte = MiReserveSystemPtes (NumberOfPages,
+ SystemPteSpace,
+ 0,
+ 0,
+ FALSE);
+ if (PointerPte == NULL) {
+ BaseAddress = NULL;
+ goto Done;
+ }
+
+ MmResidentAvailablePages -= (LONG)NumberOfPages;
+ MiChargeCommitmentCantExpand (NumberOfPages, TRUE);
+
+ BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+
+ do {
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+ PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ MM_READWRITE,
+ PointerPte);
+
+ MI_SET_PTE_DIRTY (TempPte);
+ MI_DISABLE_CACHING (TempPte);
+ *PointerPte = TempPte;
+ MiInitializePfn (PageFrameIndex, PointerPte, 1);
+ PointerPte += 1;
+ NumberOfPages -= 1;
+ } while (NumberOfPages != 0);
+
+ //
+ // Flush any data for this page out of the dcaches.
+ //
+
+ KeSweepDcache (TRUE);
+
+Done:
+ UNLOCK_PFN (OldIrql);
+ MmUnlockPagableImageSection (ExPageLockHandle);
+
+ return BaseAddress;
+}
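+
+//
+// Illustrative usage sketch (hypothetical driver code): a noncached
+// buffer is allocated at initialization and released with
+// MmFreeNonCachedMemory; the same byte count must be passed to the
+// free routine.
+//
+//     PVOID NonCachedBuffer;
+//
+//     NonCachedBuffer = MmAllocateNonCachedMemory (PAGE_SIZE);
+//     if (NonCachedBuffer != NULL) {
+//         // ... share the buffer with the device ...
+//         MmFreeNonCachedMemory (NonCachedBuffer, PAGE_SIZE);
+//     }
+//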
+
+VOID
+MmFreeNonCachedMemory (
+ IN PVOID BaseAddress,
+ IN ULONG NumberOfBytes
+ )
+
+/*++
+
+Routine Description:
+
+ This function deallocates a range of noncached memory in
+ the non-paged portion of the system address space.
+
+Arguments:
+
+ BaseAddress - Supplies the base virtual address where the noncached
+ memory resides.
+
+    NumberOfBytes - Supplies the number of bytes allocated to the request.
+ This must be the same number that was obtained with
+ the MmAllocateNonCachedMemory call.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ ULONG NumberOfPages;
+ ULONG i;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+
+ ASSERT (NumberOfBytes != 0);
+ ASSERT (PAGE_ALIGN (BaseAddress) == BaseAddress);
+ MI_MAKING_MULTIPLE_PTES_INVALID (TRUE);
+
+ NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
+
+ PointerPte = MiGetPteAddress (BaseAddress);
+
+ MmLockPagableSectionByHandle (ExPageLockHandle);
+
+ LOCK_PFN (OldIrql);
+
+ i = NumberOfPages;
+
+ do {
+
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+        // Set the PFN's pointer to the PTE as empty so the page
+ // is deleted when the reference count goes to zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn1->u2.ShareCount == 1);
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+ MI_SET_PFN_DELETED (Pfn1);
+ MiDecrementShareCountOnly (PageFrameIndex);
+ PointerPte += 1;
+ i -= 1;
+ } while (i != 0);
+
+ PointerPte -= NumberOfPages;
+
+ MiReleaseSystemPtes (PointerPte, NumberOfPages, SystemPteSpace);
+
+ //
+ // Update the count of available resident pages.
+ //
+
+ MmResidentAvailablePages += NumberOfPages;
+ MiReturnCommitment (NumberOfPages);
+
+ UNLOCK_PFN (OldIrql);
+
+ MmUnlockPagableImageSection (ExPageLockHandle);
+ return;
+}
+
+ULONG
+MmSizeOfMdl (
+ IN PVOID Base,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the number of bytes required for an MDL for a
+ given buffer and size.
+
+Arguments:
+
+ Base - Supplies the base virtual address for the buffer.
+
+ Length - Supplies the size of the buffer in bytes.
+
+Return Value:
+
+ Returns the number of bytes required to contain the MDL.
+
+Environment:
+
+ Kernel mode. Any IRQL level.
+
+--*/
+
+{
+ return( sizeof( MDL ) +
+ (ADDRESS_AND_SIZE_TO_SPAN_PAGES( Base, Length ) *
+ sizeof( ULONG ))
+ );
+}
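+
+//
+// Illustrative arithmetic (hypothetical values): a buffer which starts
+// 0x100 bytes into a page and is 0x2000 bytes long spans three pages
+// when PAGE_SIZE is 0x1000, so the MDL needs room for three page
+// frame numbers.
+//
+//     ULONG MdlBytes;
+//
+//     MdlBytes = MmSizeOfMdl ((PVOID)0x80001100, 0x2000);
+//     ASSERT (MdlBytes == sizeof(MDL) + (3 * sizeof(ULONG)));
+//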
+
+
+
+PMDL
+MmCreateMdl (
+ IN PMDL MemoryDescriptorList OPTIONAL,
+ IN PVOID Base,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This function optionally allocates and initializes an MDL.
+
+Arguments:
+
+ MemoryDescriptorList - Optionally supplies the address of the MDL
+ to initialize. If this address is supplied as NULL an MDL is
+ allocated from non-paged pool and initialized.
+
+ Base - Supplies the base virtual address for the buffer.
+
+ Length - Supplies the size of the buffer in bytes.
+
+Return Value:
+
+ Returns the address of the initialized MDL.
+
+Environment:
+
+ Kernel mode, IRQL of DISPATCH_LEVEL or below.
+
+--*/
+
+{
+ ULONG MdlSize;
+
+ MdlSize = MmSizeOfMdl( Base, Length );
+
+ if (!ARGUMENT_PRESENT( MemoryDescriptorList )) {
+ MemoryDescriptorList = (PMDL)ExAllocatePoolWithTag (
+ NonPagedPoolMustSucceed,
+ MdlSize,
+ 'ldmM');
+ }
+
+ MmInitializeMdl( MemoryDescriptorList, Base, Length );
+ return ( MemoryDescriptorList );
+}
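+
+//
+// Illustrative usage sketch (hypothetical driver code): build an MDL
+// describing a nonpaged buffer and fill in its page frame numbers.
+//
+//     PMDL Mdl;
+//
+//     Mdl = MmCreateMdl (NULL, Buffer, PAGE_SIZE);
+//     if (Mdl != NULL) {
+//         MmBuildMdlForNonPagedPool (Mdl);
+//         // ... use the MDL for I/O, then ExFreePool (Mdl) ...
+//     }
+//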
+
+BOOLEAN
+MmSetAddressRangeModified (
+ IN PVOID Address,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This routine sets the modified bit in the PFN database for the
+ pages that correspond to the specified address range.
+
+ Note that the dirty bit in the PTE is cleared by this operation.
+
+Arguments:
+
+ Address - Supplies the address of the start of the range. This
+ range must reside within the system cache.
+
+ Length - Supplies the length of the range.
+
+Return Value:
+
+ TRUE if at least one PTE was dirty in the range, FALSE otherwise.
+
+Environment:
+
+ Kernel mode. APC_LEVEL and below for pageable addresses,
+ DISPATCH_LEVEL and below for non-pageable addresses.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPFN Pfn1;
+ PMMPTE FlushPte;
+ MMPTE PteContents;
+ MMPTE FlushContents;
+ KIRQL OldIrql;
+ PVOID VaFlushList[MM_MAXIMUM_FLUSH_COUNT];
+ ULONG Count = 0;
+ BOOLEAN Result = FALSE;
+
+ //
+ // Loop on the copy on write case until the page is only
+ // writable.
+ //
+
+ PointerPte = MiGetPteAddress (Address);
+ LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + Length - 1));
+
+ LOCK_PFN2 (OldIrql);
+
+ do {
+
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.Modified = 1;
+
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+#ifdef NT_UP
+ //
+ // On uniprocessor systems no need to flush if this processor
+ // doesn't think the PTE is dirty.
+ //
+
+ if (MI_IS_PTE_DIRTY (PteContents)) {
+ Result = TRUE;
+#else //NT_UP
+ Result |= (BOOLEAN)(MI_IS_PTE_DIRTY (PteContents));
+#endif //NT_UP
+ MI_SET_PTE_CLEAN (PteContents);
+ *PointerPte = PteContents;
+ FlushContents = PteContents;
+ FlushPte = PointerPte;
+
+ //
+ // Clear the write bit in the PTE so new writes can be tracked.
+ //
+
+ if (Count != MM_MAXIMUM_FLUSH_COUNT) {
+ VaFlushList[Count] = Address;
+ Count += 1;
+ }
+#ifdef NT_UP
+ }
+#endif //NT_UP
+ }
+ PointerPte += 1;
+ Address = (PVOID)((PCHAR)Address + PAGE_SIZE);
+ } while (PointerPte <= LastPte);
+
+ if (Count != 0) {
+ if (Count == 1) {
+
+ (VOID)KeFlushSingleTb (VaFlushList[0],
+ FALSE,
+ TRUE,
+ (PHARDWARE_PTE)FlushPte,
+ FlushContents.u.Flush);
+
+ } else if (Count != MM_MAXIMUM_FLUSH_COUNT) {
+
+ KeFlushMultipleTb (Count,
+ &VaFlushList[0],
+ FALSE,
+ TRUE,
+ NULL,
+ ZeroPte.u.Flush);
+
+ } else {
+ KeFlushEntireTb (FALSE, TRUE);
+ }
+ }
+ UNLOCK_PFN2 (OldIrql);
+ return Result;
+}
+
+
+BOOLEAN
+MiCheckForContiguousMemory (
+ IN PVOID BaseAddress,
+ IN ULONG SizeInPages,
+ IN ULONG HighestPfn
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to see if the physical memory mapped
+ by the specified BaseAddress for the specified size is
+ contiguous and the last page of the physical memory is
+ less than or equal to the specified HighestPfn.
+
+Arguments:
+
+ BaseAddress - Supplies the base address to start checking at.
+
+ SizeInPages - Supplies the number of pages in the range.
+
+ HighestPfn - Supplies the highest PFN acceptable as a physical page.
+
+Return Value:
+
+ Returns TRUE if the physical memory is contiguous and less than
+ or equal to the HighestPfn, FALSE otherwise.
+
+Environment:
+
+    Kernel mode, memory management internal.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ ULONG PageFrameIndex;
+
+ if (MI_IS_PHYSICAL_ADDRESS (BaseAddress)) {
+ if (HighestPfn >=
+ (MI_CONVERT_PHYSICAL_TO_PFN(BaseAddress) + SizeInPages - 1)) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+ } else {
+ PointerPte = MiGetPteAddress (BaseAddress);
+ LastPte = PointerPte + SizeInPages;
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber + 1;
+ PointerPte += 1;
+
+ //
+ // Check to see if the range of physical addresses is contiguous.
+ //
+
+ while (PointerPte < LastPte) {
+ if (PointerPte->u.Hard.PageFrameNumber != PageFrameIndex) {
+
+ //
+ // Memory is not physically contiguous.
+ //
+
+ return FALSE;
+ }
+ PageFrameIndex += 1;
+ PointerPte += 1;
+ }
+ }
+
+ if (PageFrameIndex <= HighestPfn) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+VOID
+MmLockPagableSectionByHandle (
+ IN PVOID ImageSectionHandle
+ )
+
+
+/*++
+
+Routine Description:
+
+    This routine checks to see if the specified pages are resident in
+    the process's working set and if so the reference count for the
+    page is incremented. This allows the virtual address to be accessed
+    without getting a hard page fault (i.e., having to go to the disk),
+    except for the extremely rare case when the page table page is
+    removed from the working set and migrates to the disk.
+
+    If the virtual address is that of the system wide global "cache" the
+    virtual address of the "locked" pages is always guaranteed to
+    be valid.
+
+ NOTE: This routine is not to be used for general locking of user
+ addresses - use MmProbeAndLockPages. This routine is intended for
+ well behaved system code like the file system caches which allocates
+ virtual addresses for mapping files AND guarantees that the mapping
+ will not be modified (deleted or changed) while the pages are locked.
+
+Arguments:
+
+ ImageSectionHandle - Supplies the value returned by a previous call
+ to MmLockPagableDataSection. This is a pointer to the Section
+ header for the image.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of DISPATCH_LEVEL or below.
+
+--*/
+
+{
+ PIMAGE_SECTION_HEADER NtSection;
+ PVOID BaseAddress;
+ ULONG SizeToLock;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ KIRQL OldIrql;
+ ULONG Collision;
+
+ if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) {
+
+ //
+ // No need to lock physical addresses.
+ //
+
+ return;
+ }
+
+ NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle;
+
+ BaseAddress = (PVOID)NtSection->PointerToLinenumbers;
+
+ ASSERT ((BaseAddress < (PVOID)MM_SYSTEM_CACHE_START) ||
+ (BaseAddress >= (PVOID)MM_SYSTEM_CACHE_END));
+ ASSERT (BaseAddress >= (PVOID)MM_SYSTEM_RANGE_START);
+
+ SizeToLock = NtSection->SizeOfRawData;
+ PointerPte = MiGetPteAddress(BaseAddress);
+ LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToLock - 1);
+
+ ASSERT (SizeToLock != 0);
+
+ //
+ // The address must be within the system space.
+ //
+
+RetryLock:
+
+ LOCK_PFN2 (OldIrql);
+
+ MiMakeSystemAddressValidPfn (&NtSection->NumberOfLinenumbers);
+
+ //
+ // The NumberOfLinenumbers field is used to store the
+ // lock count.
+ //
+ // Value of 0 means unlocked,
+ // Value of 1 means lock in progress by another thread.
+ // Value of 2 or more means locked.
+ //
+ // If the value is 1, this thread must block until the other thread's
+ // lock operation is complete.
+ //
+
+ NtSection->NumberOfLinenumbers += 1;
+
+ if (NtSection->NumberOfLinenumbers >= 3) {
+
+ //
+ // Already locked, increment counter and return.
+ //
+
+ UNLOCK_PFN2 (OldIrql);
+ return;
+
+ }
+
+ if (NtSection->NumberOfLinenumbers == 2) {
+
+ //
+ // A lock is in progress.
+        // Reset it back to 1 and wait.
+ //
+
+ NtSection->NumberOfLinenumbers = 1;
+ MmCollidedLockWait = TRUE;
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT (OldIrql);
+
+ KeWaitForSingleObject(&MmCollidedLockEvent,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+ goto RetryLock;
+ }
+
+ //
+ // Value was 0 when the lock was obtained. It is now 1 indicating
+ // a lock is in progress.
+ //
+
+ MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT);
+
+ //
+ // Set lock count to 2 (it was 1 when this started) and check
+ // to see if any other threads tried to lock while this was happening.
+ //
+
+ MiMakeSystemAddressValidPfn (&NtSection->NumberOfLinenumbers);
+ NtSection->NumberOfLinenumbers += 1;
+
+ ASSERT (NtSection->NumberOfLinenumbers == 2);
+
+ Collision = MmCollidedLockWait;
+ MmCollidedLockWait = FALSE;
+
+ UNLOCK_PFN2 (OldIrql);
+
+ if (Collision) {
+
+ //
+ // Wake up all waiters.
+ //
+
+ KePulseEvent (&MmCollidedLockEvent, 0, FALSE);
+ }
+
+ return;
+}
+
+
+VOID
+MiLockCode (
+ IN PMMPTE FirstPte,
+ IN PMMPTE LastPte,
+ IN ULONG LockType
+ )
+
+/*++
+
+Routine Description:
+
+    This routine checks to see if the specified pages are resident in
+    the process's working set and if so the reference count for the
+    page is incremented. This allows the virtual address to be accessed
+    without getting a hard page fault (i.e., having to go to the disk),
+    except for the extremely rare case when the page table page is
+    removed from the working set and migrates to the disk.
+
+    If the virtual address is that of the system wide global "cache" the
+    virtual address of the "locked" pages is always guaranteed to
+    be valid.
+
+ NOTE: This routine is not to be used for general locking of user
+ addresses - use MmProbeAndLockPages. This routine is intended for
+ well behaved system code like the file system caches which allocates
+ virtual addresses for mapping files AND guarantees that the mapping
+ will not be modified (deleted or changed) while the pages are locked.
+
+Arguments:
+
+ FirstPte - Supplies the base address to begin locking.
+
+ LastPte - The last PTE to lock.
+
+ LockType - Supplies either MM_LOCK_BY_REFCOUNT or MM_LOCK_NONPAGE.
+ LOCK_BY_REFCOUNT increments the reference count to keep
+ the page in memory, LOCK_NONPAGE removes the page from
+ the working set so it's locked just like nonpaged pool.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN LOCK held.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ MMPTE PteContents;
+ ULONG WorkingSetIndex;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql1;
+ KIRQL OldIrql;
+
+ MM_PFN_LOCK_ASSERT();
+
+ ASSERT (!MI_IS_PHYSICAL_ADDRESS(MiGetVirtualAddressMappedByPte(FirstPte)));
+ PointerPte = FirstPte;
+
+ MmLockedCode += 1 + LastPte - FirstPte;
+
+ do {
+
+ PteContents = *PointerPte;
+ ASSERT (PteContents.u.Long != ZeroKernelPte.u.Long);
+ if (PteContents.u.Hard.Valid == 0) {
+
+ ASSERT (PteContents.u.Soft.Prototype != 1);
+
+ if (PteContents.u.Soft.Transition == 1) {
+
+ PageFrameIndex = PteContents.u.Trans.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ if ((Pfn1->u3.e1.ReadInProgress) ||
+ (Pfn1->u3.e1.InPageError)) {
+
+ //
+ // Page read is ongoing, wait for the read to
+ // complete then retest.
+ //
+
+ OldIrql = APC_LEVEL;
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT (OldIrql);
+ KeWaitForSingleObject( Pfn1->u1.Event,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ //
+ // Need to delay so the faulting thread can
+ // perform the inpage completion.
+ //
+
+ KeDelayExecutionThread (KernelMode,
+ FALSE,
+ &MmShortTime);
+
+ LOCK_PFN (OldIrql);
+ continue;
+ }
+
+ MiUnlinkPageFromList (Pfn1);
+
+ //
+ // Set the reference count and share counts to 1.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->u2.ShareCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ Pfn1->OriginalPte.u.Soft.Protection,
+ PointerPte );
+
+ *PointerPte = TempPte;
+
+ //
+            // Increment the reference count: one for putting it in the
+            // working set list and one for locking it for I/O.
+ //
+
+ if (LockType == MM_LOCK_BY_REFCOUNT) {
+
+ //
+                // Lock the page in the working set by upping the
+                // reference count.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+
+ UNLOCK_PFN (APC_LEVEL);
+ LOCK_SYSTEM_WS (OldIrql);
+ WorkingSetIndex = MiLocateAndReserveWsle (&MmSystemCacheWs);
+
+ MiUpdateWsle (&WorkingSetIndex,
+ MiGetVirtualAddressMappedByPte (PointerPte),
+ MmSystemCacheWorkingSetList,
+ Pfn1);
+ UNLOCK_SYSTEM_WS (OldIrql);
+ LOCK_PFN (OldIrql);
+ } else {
+
+ //
+                    // The wsindex field is already zero, indicating that
+                    // the page is not in the system working set.
+ //
+
+ ASSERT (Pfn1->u1.WsIndex == 0);
+ }
+
+ } else {
+
+ //
+ // Page is not in memory.
+ //
+
+ MiMakeSystemAddressValidPfn (
+ MiGetVirtualAddressMappedByPte(PointerPte));
+
+ continue;
+ }
+
+ } else {
+
+ //
+ // This address is already in the system working set.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ //
+ // Up the reference count so the page cannot be released.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ if (LockType != MM_LOCK_BY_REFCOUNT) {
+
+ //
+ // If the page is in the system working set, remove it.
+ // The system working set lock MUST be owned to check to
+ // see if this page is in the working set or not. This
+                // is because the pager may have just released the PFN lock,
+ // acquired the system lock and is now trying to add the
+ // page to the system working set.
+ //
+
+ UNLOCK_PFN (APC_LEVEL);
+ LOCK_SYSTEM_WS (OldIrql1);
+
+ if (Pfn1->u1.WsIndex != 0) {
+ MiRemoveWsle (Pfn1->u1.WsIndex,
+ MmSystemCacheWorkingSetList );
+ MiReleaseWsle (Pfn1->u1.WsIndex, &MmSystemCacheWs);
+ Pfn1->u1.WsIndex = 0;
+ }
+ UNLOCK_SYSTEM_WS (OldIrql1);
+ LOCK_PFN (OldIrql);
+ ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
+ Pfn1->u3.e2.ReferenceCount -= 1;
+ }
+ }
+
+ PointerPte += 1;
+ } while (PointerPte <= LastPte);
+
+ return;
+}
+
+
+PVOID
+MmLockPagableDataSection(
+ IN PVOID AddressWithinSection
+ )
+
+/*++
+
+Routine Description:
+
+    This function locks the entire section that contains the specified
+    address in memory. This allows pagable code to be brought into
+ memory and to be used as if the code was not really pagable. This
+ should not be done with a high degree of frequency.
+
+Arguments:
+
+ AddressWithinSection - Supplies the address of a function
+ contained within a section that should be brought in and locked
+ in memory.
+
+Return Value:
+
+ This function returns a value to be used in a subsequent call to
+ MmUnlockPagableImageSection.
+
+--*/
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ ULONG i;
+ PIMAGE_NT_HEADERS NtHeaders;
+ PIMAGE_SECTION_HEADER NtSection;
+ PIMAGE_SECTION_HEADER FoundSection;
+ ULONG Rva;
+
+ PAGED_CODE();
+
+ if (MI_IS_PHYSICAL_ADDRESS(AddressWithinSection)) {
+
+ //
+ // Physical address, just return that as the handle.
+ //
+
+ return AddressWithinSection;
+ }
+
+ //
+ // Search the loaded module list for the data table entry that describes
+    // the DLL that contains this address. It is possible an entry is not
+    // in the list if a failure occurred at a point in loading the DLL just
+    // before the data table entry was generated.
+ //
+
+ FoundSection = NULL;
+
+ KeEnterCriticalRegion();
+ ExAcquireResourceShared (&PsLoadedModuleResource, TRUE);
+
+ DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE);
+
+ Rva = (ULONG)((PUCHAR)AddressWithinSection - (ULONG)DataTableEntry->DllBase);
+
+ NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(DataTableEntry->DllBase);
+
+ NtSection = (PIMAGE_SECTION_HEADER)((ULONG)NtHeaders +
+ sizeof(ULONG) +
+ sizeof(IMAGE_FILE_HEADER) +
+ NtHeaders->FileHeader.SizeOfOptionalHeader
+ );
+
+ for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i++) {
+
+ if ( Rva >= NtSection->VirtualAddress &&
+ Rva < NtSection->VirtualAddress + NtSection->SizeOfRawData ) {
+ FoundSection = NtSection;
+
+ if (NtSection->PointerToLinenumbers != (ULONG)((PUCHAR)DataTableEntry->DllBase +
+ NtSection->VirtualAddress)) {
+
+ //
+                // Stomp on the PointerToLinenumbers field so that it contains
+                // the Va of this section and NumberOfLinenumbers so it contains
+ // the Lock Count for the section.
+ //
+
+ NtSection->PointerToLinenumbers = (ULONG)((PUCHAR)DataTableEntry->DllBase +
+ NtSection->VirtualAddress);
+ NtSection->NumberOfLinenumbers = 0;
+ }
+
+ //
+ // Now lock in the code
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_LOCK_CODE) {
+ DbgPrint("MM Lock %wZ %8s 0x%08x -> 0x%8x : 0x%08x %3ld.\n",
+ &DataTableEntry->BaseDllName,
+ NtSection->Name,
+ AddressWithinSection,
+ NtSection,
+ NtSection->PointerToLinenumbers,
+ NtSection->NumberOfLinenumbers);
+ }
+#endif //DBG
+
+ MmLockPagableSectionByHandle ((PVOID)NtSection);
+
+ goto found_the_section;
+ }
+ NtSection++;
+ }
+
+found_the_section:
+
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+ if (!FoundSection) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x1234,
+ (ULONG)AddressWithinSection,
+ 0,
+ 0);
+ }
+ return (PVOID)FoundSection;
+}
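+
+//
+// Illustrative usage sketch (hypothetical driver code): the returned
+// handle balances each lock with an unlock, and can be reused with
+// MmLockPagableSectionByHandle to relock the same section cheaply.
+//
+//     PVOID LockHandle;
+//
+//     LockHandle = MmLockPagableDataSection ((PVOID)&PagedDataItem);
+//     // ... the section containing PagedDataItem is now resident ...
+//     MmUnlockPagableImageSection (LockHandle);
+//
+//     MmLockPagableSectionByHandle (LockHandle);
+//     // ... resident again ...
+//     MmUnlockPagableImageSection (LockHandle);
+//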
+
+
+PLDR_DATA_TABLE_ENTRY
+MiLookupDataTableEntry (
+ IN PVOID AddressWithinSection,
+ IN ULONG ResourceHeld
+ )
+
+/*++
+
+Routine Description:
+
+    This function searches the loaded module list for the data table
+    entry that describes the DLL which contains the specified address.
+
+Arguments:
+
+    AddressWithinSection - Supplies an address contained within a
+        loaded module.
+
+    ResourceHeld - Supplies true if the loaded module list resource
+        is already held.
+
+Return Value:
+
+    Returns the address of the data table entry that describes the
+    containing module, or NULL if the address is not within any
+    loaded module.
+
+--*/
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PLDR_DATA_TABLE_ENTRY FoundEntry = NULL;
+ PLIST_ENTRY NextEntry;
+
+ PAGED_CODE();
+
+ //
+ // Search the loaded module list for the data table entry that describes
+    // the DLL that contains this address. It is possible an entry is not in
+    // the list if a failure occurred at a point in loading the DLL just before
+ // the data table entry was generated.
+ //
+
+ if (!ResourceHeld) {
+ KeEnterCriticalRegion();
+ ExAcquireResourceShared (&PsLoadedModuleResource, TRUE);
+ }
+
+ NextEntry = PsLoadedModuleList.Flink;
+ do {
+
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ //
+ // Locate the loaded module that contains this address.
+ //
+
+ if ( AddressWithinSection >= DataTableEntry->DllBase &&
+ AddressWithinSection < (PVOID)((PUCHAR)DataTableEntry->DllBase+DataTableEntry->SizeOfImage) ) {
+
+ FoundEntry = DataTableEntry;
+ break;
+ }
+
+ NextEntry = NextEntry->Flink;
+ } while (NextEntry != &PsLoadedModuleList);
+
+ if (!ResourceHeld) {
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+ }
+ return FoundEntry;
+}
+
+VOID
+MmUnlockPagableImageSection(
+ IN PVOID ImageSectionHandle
+ )
+
+/*++
+
+Routine Description:
+
+ This function unlocks from memory, the pages locked by a preceding call to
+ MmLockPagableDataSection.
+
+Arguments:
+
+ ImageSectionHandle - Supplies the value returned by a previous call
+ to MmLockPagableDataSection.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PIMAGE_SECTION_HEADER NtSection;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ KIRQL OldIrql;
+ PVOID BaseAddress;
+ ULONG SizeToUnlock;
+ ULONG Collision;
+
+ if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) {
+
+ //
+ // No need to lock physical addresses.
+ //
+
+ return;
+ }
+
+ NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle;
+
+ BaseAddress = (PVOID)NtSection->PointerToLinenumbers;
+ SizeToUnlock = NtSection->SizeOfRawData;
+
+ //DbgPrint("MM Unlock %s 0x%08x\n",NtSection->Name,NtSection->PointerToLinenumbers);
+
+ PointerPte = MiGetPteAddress(BaseAddress);
+ LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToUnlock - 1);
+
+ //
+ // Address must be within the system cache.
+ //
+
+ LOCK_PFN2 (OldIrql);
+
+ //
+ // The NumberOfLinenumbers field is used to store the
+ // lock count.
+ //
+
+ ASSERT (NtSection->NumberOfLinenumbers >= 2);
+ NtSection->NumberOfLinenumbers -= 1;
+
+ if (NtSection->NumberOfLinenumbers != 1) {
+ UNLOCK_PFN2 (OldIrql);
+ return;
+ }
+
+ do {
+
+#if DBG
+ { PMMPFN Pfn;
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ ASSERT (Pfn->u3.e2.ReferenceCount > 1);
+ }
+#endif //DBG
+
+ MiDecrementReferenceCount (PointerPte->u.Hard.PageFrameNumber);
+ PointerPte += 1;
+ } while (PointerPte <= LastPte);
+
+ NtSection->NumberOfLinenumbers -= 1;
+ ASSERT (NtSection->NumberOfLinenumbers == 0);
+ Collision = MmCollidedLockWait;
+ MmCollidedLockWait = FALSE;
+ MmLockedCode -= SizeToUnlock;
+
+ UNLOCK_PFN2 (OldIrql);
+
+ if (Collision) {
+ KePulseEvent (&MmCollidedLockEvent, 0, FALSE);
+ }
+
+ return;
+}
+
+
+BOOLEAN
+MmIsRecursiveIoFault(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function examines the thread's page fault clustering information
+    and determines if the current page fault is occurring during an I/O
+ operation.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+    Returns TRUE if the fault is occurring during an I/O operation,
+ FALSE otherwise.
+
+--*/
+
+{
+ return PsGetCurrentThread()->DisablePageFaultClustering |
+ PsGetCurrentThread()->ForwardClusterOnly;
+}
+
+
+VOID
+MmMapMemoryDumpMdl(
+ IN OUT PMDL MemoryDumpMdl
+ )
+
+/*++
+
+Routine Description:
+
+    For use by the crash dump routine ONLY. Maps an MDL into a fixed
+    portion of the address space. Only one MDL can be mapped at a
+    time.
+
+Arguments:
+
+ MemoryDumpMdl - Supplies the MDL to map.
+
+Return Value:
+
+ None, fields in MDL updated.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ PMMPTE PointerPte;
+ PCHAR BaseVa;
+ MMPTE TempPte;
+ PULONG Page;
+
+ NumberOfPages = BYTES_TO_PAGES (MemoryDumpMdl->ByteCount + MemoryDumpMdl->ByteOffset);
+
+ PointerPte = MmCrashDumpPte;
+ BaseVa = (PCHAR)MiGetVirtualAddressMappedByPte(PointerPte);
+ MemoryDumpMdl->MappedSystemVa = (PCHAR)BaseVa + MemoryDumpMdl->ByteOffset;
+ TempPte = ValidKernelPte;
+ Page = (PULONG)(MemoryDumpMdl + 1);
+
+ do {
+
+ KiFlushSingleTb (TRUE, BaseVa);
+ ASSERT ((*Page <= MmHighestPhysicalPage) &&
+ (*Page >= MmLowestPhysicalPage));
+
+ TempPte.u.Hard.PageFrameNumber = *Page;
+ *PointerPte = TempPte;
+
+ Page++;
+ PointerPte++;
+ BaseVa += PAGE_SIZE;
+ NumberOfPages -= 1;
+ } while (NumberOfPages != 0);
+
+ PointerPte->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+ return;
+}
+
+
+NTSTATUS
+MmSetBankedSection (
+ IN HANDLE ProcessHandle,
+ IN PVOID VirtualAddress,
+ IN ULONG BankLength,
+ IN BOOLEAN ReadWriteBank,
+ IN PBANKED_SECTION_ROUTINE BankRoutine,
+ IN PVOID Context
+ )
+
+/*++
+
+Routine Description:
+
+ This function declares a mapped video buffer as a banked
+    section. This allows the system to support banked video devices
+    (i.e., even though the video controller has a megabyte or so of
+    memory, only a small bank (like 64k) can be mapped at any one time).
+
+ In order to overcome this problem, the pager handles faults
+ to this memory, unmaps the current bank, calls off to the
+ video driver and then maps in the new bank.
+
+    This function creates the necessary structures to allow the
+ video driver to be called from the pager.
+
+ ********************* NOTE NOTE NOTE *************************
+ At this time only read/write banks are supported!
+
+Arguments:
+
+ ProcessHandle - Supplies a handle to the process in which to
+ support the banked video function.
+
+ VirtualAddress - Supplies the virtual address where the video
+ buffer is mapped in the specified process.
+
+ BankLength - Supplies the size of the bank.
+
+ ReadWriteBank - Supplies TRUE if the bank is read and write.
+
+ BankRoutine - Supplies a pointer to the routine that should be
+ called by the pager.
+
+ Context - Supplies a context to be passed by the pager to the
+ BankRoutine.
+
+Return Value:
+
+ Returns the status of the function.
+
+Environment:
+
+ Kernel mode, APC_LEVEL or below.
+
+--*/
+
+{
+ NTSTATUS Status;
+ PEPROCESS Process;
+ PMMVAD Vad;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ MMPTE TempPte;
+ ULONG size;
+ LONG count;
+ ULONG NumberOfPtes;
+ PMMBANKED_SECTION Bank;
+
+ PAGED_CODE ();
+
+ //
+ // Reference the specified process handle for VM_OPERATION access.
+ //
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ KernelMode,
+ (PVOID *)&Process,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ KeAttachProcess (&Process->Pcb);
+
+ //
+ // Get the address creation mutex to block multiple threads from
+ // creating or deleting address space at the same time and
+ // get the working set mutex so virtual address descriptors can
+ // be inserted and walked. Block APCs so an APC which takes a page
+ // fault does not corrupt various structures.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (Process->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
+
+ Vad = MiLocateAddress (VirtualAddress);
+
+ if ((Vad == NULL) ||
+ (Vad->StartingVa != VirtualAddress) ||
+ (Vad->u.VadFlags.PhysicalMapping == 0)) {
+ Status = STATUS_NOT_MAPPED_DATA;
+ goto ErrorReturn;
+ }
+
+ size = 1 + (ULONG)Vad->EndingVa - (ULONG)Vad->StartingVa;
+ if ((size % BankLength) != 0) {
+ Status = STATUS_INVALID_VIEW_SIZE;
+ goto ErrorReturn;
+ }
+
+ count = -1;
+ NumberOfPtes = BankLength;
+
+ do {
+ NumberOfPtes = NumberOfPtes >> 1;
+ count += 1;
+ } while (NumberOfPtes != 0);
+
+ //
+ // Turn VAD into Banked VAD
+ //
+
+ NumberOfPtes = BankLength >> PAGE_SHIFT;
+
+ Bank = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof (MMBANKED_SECTION) +
+ (NumberOfPtes - 1) * sizeof(MMPTE),
+ ' mM');
+ if (Bank == NULL) {
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn;
+ }
+
+ Bank->BankShift = PTE_SHIFT + count - PAGE_SHIFT;
+
+ PointerPte = MiGetPteAddress(Vad->StartingVa);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+
+ Vad->Banked = Bank;
+ Bank->BasePhysicalPage = PointerPte->u.Hard.PageFrameNumber;
+ Bank->BasedPte = PointerPte;
+ Bank->BankSize = BankLength;
+ Bank->BankedRoutine = BankRoutine;
+ Bank->Context = Context;
+ Bank->CurrentMappedPte = PointerPte;
+
+ //
+    // Build the template PTEs in the structure.
+ //
+
+ count = 0;
+ TempPte = ZeroPte;
+
+ MI_MAKE_VALID_PTE (TempPte,
+ Bank->BasePhysicalPage,
+ MM_READWRITE,
+ PointerPte);
+
+ if (TempPte.u.Hard.Write) {
+ MI_SET_PTE_DIRTY (TempPte);
+ }
+
+ do {
+ Bank->BankTemplate[count] = TempPte;
+ TempPte.u.Hard.PageFrameNumber += 1;
+ count += 1;
+ } while ((ULONG)count < NumberOfPtes );
+
+ LastPte = MiGetPteAddress (Vad->EndingVa);
+
+ //
+ // Set all PTEs within this range to zero. Any faults within
+ // this range will call the banked routine before making the
+ // page valid.
+ //
+
+ RtlFillMemory (PointerPte,
+ (size >> (PAGE_SHIFT - PTE_SHIFT)),
+ (UCHAR)ZeroPte.u.Long);
+
+ MiFlushTb ();
+
+ Status = STATUS_SUCCESS;
+ErrorReturn:
+
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+ KeDetachProcess();
+ return Status;
+}
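+
+//
+// Illustrative sketch (hypothetical video driver code): the routine
+// registered here is called by the pager when a fault occurs in the
+// banked range; its signature must match PBANKED_SECTION_ROUTINE.
+//
+//     VOID
+//     HypotheticalBankRoutine (
+//         IN ULONG ReadBankBase,
+//         IN ULONG WriteBankBase,
+//         IN PVOID Context
+//         )
+//     {
+//         // Program the controller's bank register(s) so the bank
+//         // beginning at the given offset is addressable.
+//     }
+//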
+
+PVOID
+MmMapVideoDisplay (
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN ULONG NumberOfBytes,
+ IN MEMORY_CACHING_TYPE CacheType
+ )
+
+/*++
+
+Routine Description:
+
+ This function maps the specified physical address into the non-pageable
+ portion of the system address space.
+
+Arguments:
+
+ PhysicalAddress - Supplies the starting physical address to map.
+
+ NumberOfBytes - Supplies the number of bytes to map.
+
+    CacheType - Supplies MmNonCached if the physical address is to be mapped
+ as non-cached, MmCached if the address should be cached, and
+ MmCacheFrameBuffer if the address should be cached as a frame
+ buffer. For I/O device registers, this is usually specified
+ as MmNonCached.
+
+Return Value:
+
+ Returns the virtual address which maps the specified physical addresses.
+ The value NULL is returned if sufficient virtual address space for
+ the mapping could not be found.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ ULONG NumberOfPages;
+ ULONG PageFrameIndex;
+ PMMPTE PointerPte = NULL;
+ PVOID BaseVa;
+ MMPTE TempPte;
+#ifdef LARGE_PAGES
+ ULONG size;
+ PMMPTE protoPte;
+ PMMPTE largePte;
+ ULONG pageSize;
+ PSUBSECTION Subsection;
+ ULONG Alignment;
+#endif // LARGE_PAGES
+ ULONG LargePages = FALSE;
+
+#ifdef i386
+ ASSERT (PhysicalAddress.HighPart == 0);
+#endif
+#ifdef R4000
+ ASSERT (PhysicalAddress.HighPart < 16);
+#endif
+
+ PAGED_CODE();
+
+ ASSERT (NumberOfBytes != 0);
+
+#ifdef LARGE_PAGES
+ NumberOfPages = COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart,
+ NumberOfBytes);
+
+ TempPte = ValidKernelPte;
+ MI_DISABLE_CACHING (TempPte);
+ PageFrameIndex = (ULONG)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ if ((NumberOfBytes > X64K) && (!MmLargeVideoMapped)) {
+ size = (NumberOfBytes - 1) >> (PAGE_SHIFT + 1);
+ pageSize = PAGE_SIZE;
+
+ while (size != 0) {
+ size = size >> 2;
+ pageSize = pageSize << 2;
+ }
+
+ Alignment = pageSize << 1;
+ if (Alignment < MM_VA_MAPPED_BY_PDE) {
+ Alignment = MM_VA_MAPPED_BY_PDE;
+ }
+ NumberOfPages = Alignment >> PAGE_SHIFT;
+ PointerPte = MiReserveSystemPtes(NumberOfPages,
+ SystemPteSpace,
+ Alignment,
+ 0,
+ FALSE);
+ protoPte = ExAllocatePoolWithTag (PagedPool,
+ sizeof (MMPTE),
+ 'bSmM');
+ if ((PointerPte != NULL) && (protoPte != NULL)) {
+
+ RtlFillMemoryUlong (PointerPte,
+ Alignment >> (PAGE_SHIFT - PTE_SHIFT),
+ MM_ZERO_KERNEL_PTE);
+
+ //
+ // Build large page descriptor and fill in all the PTEs.
+ //
+
+ Subsection = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ sizeof(SUBSECTION) + (4 * sizeof(MMPTE)),
+ 'bSmM');
+
+ Subsection->StartingSector = pageSize;
+ Subsection->EndingSector = (ULONG)NumberOfPages;
+ Subsection->u.LongFlags = 0;
+ Subsection->u.SubsectionFlags.LargePages = 1;
+ Subsection->u.SubsectionFlags.Protection = MM_READWRITE | MM_NOCACHE;
+ Subsection->PtesInSubsection = Alignment;
+ Subsection->SubsectionBase = PointerPte;
+
+ largePte = (PMMPTE)(Subsection + 1);
+
+ //
+ // Build the first 2 ptes as entries for the TLB to
+ // map the specified physical address.
+ //
+
+ *largePte = TempPte;
+ largePte += 1;
+
+ if (NumberOfBytes > pageSize) {
+ *largePte = TempPte;
+ largePte->u.Hard.PageFrameNumber += (pageSize >> PAGE_SHIFT);
+ } else {
+ *largePte = ZeroKernelPte;
+ }
+
+ //
+ // Build the first prototype PTE as a paging file format PTE
+ // referring to the subsection.
+ //
+
+ protoPte->u.Long = (ULONG)MiGetSubsectionAddressForPte(Subsection);
+ protoPte->u.Soft.Prototype = 1;
+ protoPte->u.Soft.Protection = MM_READWRITE | MM_NOCACHE;
+
+ //
+ // Set the PTE up for all the user's PTE entries, proto pte
+ // format pointing to the 3rd prototype PTE.
+ //
+
+ TempPte.u.Long = MiProtoAddressForPte (protoPte);
+ MI_SET_GLOBAL_STATE (TempPte, 1);
+ LargePages = TRUE;
+ MmLargeVideoMapped = TRUE;
+ }
+ }
+ BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+ BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart));
+
+ if (PointerPte != NULL) {
+
+ do {
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+ *PointerPte = TempPte;
+ PointerPte++;
+ NumberOfPages -= 1;
+ } while (NumberOfPages != 0);
+ } else {
+#endif //LARGE_PAGES
+
+ BaseVa = MmMapIoSpace (PhysicalAddress,
+ NumberOfBytes,
+ CacheType);
+#ifdef LARGE_PAGES
+ }
+#endif //LARGE_PAGES
+
+ return BaseVa;
+}
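+
+//
+// Illustrative usage sketch (hypothetical frame buffer address): map a
+// video frame buffer noncached and release it with MmUnmapVideoDisplay.
+//
+//     PHYSICAL_ADDRESS FrameBuffer;
+//     PVOID MappedBase;
+//
+//     FrameBuffer.QuadPart = 0xA0000;
+//     MappedBase = MmMapVideoDisplay (FrameBuffer, X64K, MmNonCached);
+//     if (MappedBase != NULL) {
+//         // ... draw through MappedBase ...
+//         MmUnmapVideoDisplay (MappedBase, X64K);
+//     }
+//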
+
+VOID
+MmUnmapVideoDisplay (
+ IN PVOID BaseAddress,
+ IN ULONG NumberOfBytes
+ )
+
+/*++
+
+Routine Description:
+
+ This function unmaps a range of physical address which were previously
+ mapped via an MmMapVideoDisplay function call.
+
+Arguments:
+
+ BaseAddress - Supplies the base virtual address where the physical
+ address was previously mapped.
+
+ NumberOfBytes - Supplies the number of bytes which were mapped.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+
+#ifdef LARGE_PAGES
+ ULONG NumberOfPages;
+ ULONG i;
+ PMMPTE FirstPte;
+ KIRQL OldIrql;
+ PMMPTE LargePte;
+ PSUBSECTION Subsection;
+
+ PAGED_CODE();
+
+ ASSERT (NumberOfBytes != 0);
+ NumberOfPages = COMPUTE_PAGES_SPANNED (BaseAddress, NumberOfBytes);
+ FirstPte = MiGetPteAddress (BaseAddress);
+
+ if ((NumberOfBytes > X64K) && (FirstPte->u.Hard.Valid == 0)) {
+
+ ASSERT (MmLargeVideoMapped);
+ LargePte = MiPteToProto (FirstPte);
+ Subsection = MiGetSubsectionAddress (LargePte);
+ ASSERT (Subsection->SubsectionBase == FirstPte);
+ NumberOfPages = Subsection->PtesInSubsection;
+ ExFreePool (Subsection);
+ ExFreePool (LargePte);
+ MmLargeVideoMapped = FALSE;
+ KeFillFixedEntryTb ((PHARDWARE_PTE)FirstPte, (PVOID)KSEG0_BASE, LARGE_ENTRY);
+ }
+ MiReleaseSystemPtes(FirstPte, NumberOfPages, SystemPteSpace);
+ return;
+
+#else // LARGE_PAGES
+
+ MmUnmapIoSpace (BaseAddress, NumberOfBytes);
+ return;
+#endif //LARGE_PAGES
+}
+
+
+VOID
+MiFlushTb (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    Nonpagable wrapper which flushes the entire translation buffer.
+
+Arguments:
+
+    None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ KIRQL OldIrql;
+
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ KeFlushEntireTb (TRUE, TRUE);
+ KeLowerIrql (OldIrql);
+}
+
+
+VOID
+MmLockPagedPool (
+ IN PVOID Address,
+ IN ULONG Size
+ )
+
+/*++
+
+Routine Description:
+
+ Locks the specified address (which MUST reside in paged pool) into
+ memory until MmUnlockPagedPool is called.
+
+Arguments:
+
+ Address - Supplies the address in paged pool to lock.
+
+ Size - Supplies the size to lock.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ KIRQL OldIrql;
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+ PointerPte = MiGetPteAddress (Address);
+ LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (Size - 1)));
+ LOCK_PFN (OldIrql);
+ MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT);
+ UNLOCK_PFN (OldIrql);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return;
+}
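+
+//
+// Illustrative usage sketch (hypothetical buffer and size): bracket
+// accesses that must not fault, e.g. at DISPATCH_LEVEL, with the
+// lock/unlock pair.
+//
+//     MmLockPagedPool (PagedBuffer, BufferSize);
+//     // ... PagedBuffer may now be referenced without faulting ...
+//     MmUnlockPagedPool (PagedBuffer, BufferSize);
+//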
+
+NTKERNELAPI
+VOID
+MmUnlockPagedPool (
+ IN PVOID Address,
+ IN ULONG Size
+ )
+
+/*++
+
+Routine Description:
+
+ Unlocks paged pool that was locked with MmLockPagedPool.
+
+Arguments:
+
+ Address - Supplies the address in paged pool to unlock.
+
+ Size - Supplies the size to unlock.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, IRQL of APC_LEVEL or below.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ KIRQL OldIrql;
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+ PointerPte = MiGetPteAddress (Address);
+ LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (Size - 1)));
+ LOCK_PFN2 (OldIrql);
+
+ do {
+#if DBG
+ { PMMPFN Pfn;
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ ASSERT (Pfn->u3.e2.ReferenceCount > 1);
+ }
+#endif //DBG
+
+ MiDecrementReferenceCount (PointerPte->u.Hard.PageFrameNumber);
+ PointerPte += 1;
+ } while (PointerPte <= LastPte);
+
+ UNLOCK_PFN2 (OldIrql);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return;
+}
diff --git a/private/ntos/mm/lockvm.c b/private/ntos/mm/lockvm.c
new file mode 100644
index 000000000..cf48299a2
--- /dev/null
+++ b/private/ntos/mm/lockvm.c
@@ -0,0 +1,810 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ lockvm.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtLockVirtualMemory service.
+
+Author:
+
+ Lou Perazzoli (loup) 20-August-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtLockVirtualMemory)
+#pragma alloc_text(PAGE,NtUnlockVirtualMemory)
+#endif
+
+
+
+NTSTATUS
+NtLockVirtualMemory (
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PULONG RegionSize,
+ IN ULONG MapType
+ )
+
+/*++
+
+Routine Description:
+
+ This function locks a region of pages within the working set list
+ of a subject process.
+
+ The caller of this function must have PROCESS_VM_OPERATION access
+ to the target process. The caller must also have SeLockMemoryPrivilege.
+
+Arguments:
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - The base address of the region of pages
+ to be locked. This value is rounded down to the
+ next host page address boundary.
+
+ RegionSize - A pointer to a variable that will receive
+ the actual size in bytes of the locked region of
+ pages. The initial value of this argument is
+ rounded up to the next host page size boundary.
+
+ MapType - A set of flags that describe the type of locking to
+        perform. At least one of MAP_PROCESS or MAP_SYSTEM must be set.
+
+Return Value:
+
+ Returns the status
+
+ STATUS_PRIVILEGE_NOT_HELD - The caller did not have sufficient
+ privilege to perform the requested operation.
+
+ TBS
+
+
+--*/
+
+{
+ PVOID Va;
+ PVOID EndingAddress;
+ PMMPTE PointerPte;
+ PMMPTE PointerPte1;
+ PMMPFN Pfn1;
+ PMMPTE PointerPde;
+ ULONG CapturedRegionSize;
+ PVOID CapturedBase;
+ PEPROCESS TargetProcess;
+ NTSTATUS Status;
+ BOOLEAN WasLocked = FALSE;
+ KPROCESSOR_MODE PreviousMode;
+ ULONG Entry;
+ ULONG SwapEntry;
+ ULONG NumberOfAlreadyLocked;
+ ULONG NumberToLock;
+ ULONG WorkingSetIndex;
+
+ PAGED_CODE();
+
+ //
+ // Validate the flags in MapType.
+ //
+
+ if ((MapType & ~(MAP_PROCESS | MAP_SYSTEM)) != 0) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ if ((MapType & (MAP_PROCESS | MAP_SYSTEM)) == 0) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ PreviousMode = KeGetPreviousMode();
+
+ try {
+
+ if (PreviousMode != KernelMode) {
+
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (RegionSize);
+ }
+
+ //
+ // Capture the base address.
+ //
+
+ CapturedBase = *BaseAddress;
+
+ //
+ // Capture the region size.
+ //
+
+ CapturedRegionSize = *RegionSize;
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+ //
+ // Make sure the specified starting and ending addresses are
+ // within the user part of the virtual address space.
+ //
+
+ if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // Invalid base address.
+ //
+
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ if ((ULONG)MM_HIGHEST_USER_ADDRESS - (ULONG)CapturedBase <
+ CapturedRegionSize) {
+
+ //
+        // Invalid region size.
+ //
+
+ return STATUS_INVALID_PARAMETER;
+
+ }
+
+ if (CapturedRegionSize == 0) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ //
+ // Reference the specified process.
+ //
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&TargetProcess,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ if ((MapType & MAP_SYSTEM) != 0) {
+
+ //
+ // In addition to PROCESS_VM_OPERATION access to the target
+ // process, the caller must have SE_LOCK_MEMORY_PRIVILEGE.
+ //
+
+ if (!SeSinglePrivilegeCheck(
+ SeLockMemoryPrivilege,
+ PreviousMode
+ )) {
+
+ ObDereferenceObject( TargetProcess );
+ return( STATUS_PRIVILEGE_NOT_HELD );
+ }
+ }
+
+ //
+ // Attach to the specified process.
+ //
+
+ KeAttachProcess (&TargetProcess->Pcb);
+
+
+ //
+    // Get the address creation mutex; this prevents the
+ // address range from being modified while it is examined. Raise
+ // to APC level to prevent an APC routine from acquiring the
+ // address creation mutex. Get the working set mutex so the
+ // number of already locked pages in the request can be determined.
+ //
+
+ EndingAddress = PAGE_ALIGN((ULONG)CapturedBase + CapturedRegionSize - 1);
+ Va = PAGE_ALIGN (CapturedBase);
+ NumberOfAlreadyLocked = 0;
+ NumberToLock = ((ULONG)EndingAddress - (ULONG)Va) >> PAGE_SHIFT;
+
+ LOCK_WS_AND_ADDRESS_SPACE (TargetProcess);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (TargetProcess->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
+
+ if (NumberToLock + MM_FLUID_WORKING_SET >
+ TargetProcess->Vm.MinimumWorkingSetSize) {
+ Status = STATUS_WORKING_SET_QUOTA;
+ goto ErrorReturn;
+ }
+
+ EndingAddress = PAGE_ALIGN((ULONG)CapturedBase + CapturedRegionSize - 1);
+ Va = PAGE_ALIGN (CapturedBase);
+
+ while (Va <= EndingAddress) {
+ if (MmIsAddressValid (Va)) {
+
+ //
+ // The page is valid, therefore it is in the working set.
+ // Locate the WSLE for the page and see if it is locked.
+ //
+
+ PointerPte1 = MiGetPteAddress (Va);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte1->u.Hard.PageFrameNumber);
+
+ WorkingSetIndex = MiLocateWsle (Va,
+ MmWorkingSetList,
+ Pfn1->u1.WsIndex);
+
+ ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
+
+ if (WorkingSetIndex < MmWorkingSetList->FirstDynamic) {
+
+ //
+ // This page is locked in the working set.
+ //
+
+ NumberOfAlreadyLocked += 1;
+
+ //
+ // Check to see if the WAS_LOCKED status should be returned.
+ //
+
+ if ((MapType & MAP_PROCESS) &&
+ (MmWsle[WorkingSetIndex].u1.e1.LockedInWs == 1)) {
+ WasLocked = TRUE;
+ }
+
+ if ((MapType & MAP_SYSTEM) &&
+ (MmWsle[WorkingSetIndex].u1.e1.LockedInMemory == 1)) {
+ WasLocked = TRUE;
+ }
+ }
+ }
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ }
+
+ UNLOCK_WS (TargetProcess);
+
+ //
+ // Check to ensure the working set list is still fluid after
+ // the requested number of pages are locked.
+ //
+
+ if (TargetProcess->Vm.MinimumWorkingSetSize <
+ ((MmWorkingSetList->FirstDynamic + NumberToLock +
+ MM_FLUID_WORKING_SET) - NumberOfAlreadyLocked)) {
+
+ Status = STATUS_WORKING_SET_QUOTA;
+ goto ErrorReturn1;
+ }
+
+ Va = PAGE_ALIGN (CapturedBase);
+
+ //
+ // Set up an exception handler and touch each page in the specified
+ // range.
+ //
+
+ try {
+
+ while (Va <= EndingAddress) {
+ *(volatile ULONG *)Va;
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Status = GetExceptionCode();
+ goto ErrorReturn1;
+ }
+
+ //
+    // The complete address range is accessible; lock the pages into
+ // the working set.
+ //
+
+ PointerPte = MiGetPteAddress (CapturedBase);
+ Va = PAGE_ALIGN (CapturedBase);
+
+ //
+ // Acquire the working set mutex, no page faults are allowed.
+ //
+
+ LOCK_WS (TargetProcess);
+
+ while (Va <= EndingAddress) {
+
+ //
+ // Make sure the PDE is valid.
+ //
+
+ PointerPde = MiGetPdeAddress (Va);
+
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde, TargetProcess, FALSE);
+
+ //
+ // Make sure the page is in the working set.
+ //
+
+ while (PointerPte->u.Hard.Valid == 0) {
+
+ //
+ // Release the working set mutex and fault in the page.
+ //
+
+ UNLOCK_WS (TargetProcess);
+
+ //
+ // Page in the PDE and make the PTE valid.
+ //
+
+ *(volatile ULONG *)Va;
+
+ //
+ // Reacquire the working set mutex.
+ //
+
+ LOCK_WS (TargetProcess);
+
+ //
+ // Make sure the page table page is still valid. This could
+ // occur if the page that was just made valid was removed
+ // from the working set before the working set lock was
+ // acquired.
+ //
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde, TargetProcess, FALSE);
+ }
+
+ //
+ // The page is now in the working set, lock the page into
+ // the working set.
+ //
+
+ PointerPte1 = MiGetPteAddress (Va);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte1->u.Hard.PageFrameNumber);
+
+ Entry = MiLocateWsle (Va, MmWorkingSetList, Pfn1->u1.WsIndex);
+
+ if (Entry >= MmWorkingSetList->FirstDynamic) {
+
+ SwapEntry = MmWorkingSetList->FirstDynamic;
+
+ if (Entry != MmWorkingSetList->FirstDynamic) {
+
+ //
+ // Swap this entry with the one at first dynamic.
+ //
+
+ MiSwapWslEntries (Entry, SwapEntry, &TargetProcess->Vm);
+ }
+
+ MmWorkingSetList->FirstDynamic += 1;
+ } else {
+ SwapEntry = Entry;
+ }
+
+ //
+ // Indicate that the page is locked.
+ //
+
+ if (MapType & MAP_PROCESS) {
+ MmWsle[SwapEntry].u1.e1.LockedInWs = 1;
+ }
+
+ if (MapType & MAP_SYSTEM) {
+ MmWsle[SwapEntry].u1.e1.LockedInMemory = 1;
+ }
+
+ //
+ // Increment to the next va and PTE.
+ //
+
+ PointerPte += 1;
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ if (MmWorkingSetList->NextSlot < MmWorkingSetList->FirstDynamic) {
+ MmWorkingSetList->NextSlot = MmWorkingSetList->FirstDynamic;
+ }
+ }
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+ ObDereferenceObject (TargetProcess);
+
+ //
+ // Update return arguments.
+ //
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *RegionSize = ((ULONG)EndingAddress - (ULONG)PAGE_ALIGN(CapturedBase)) +
+ PAGE_SIZE;
+ *BaseAddress = PAGE_ALIGN(CapturedBase);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ if (WasLocked) {
+ return STATUS_WAS_LOCKED;
+ }
+
+ return STATUS_SUCCESS;
+
+ErrorReturn:
+ UNLOCK_WS (TargetProcess);
+ErrorReturn1:
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+ ObDereferenceObject (TargetProcess);
+ return Status;
+}
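+
+//
+// Illustrative arithmetic (hypothetical values): the base is rounded
+// down and the size rounded up to page boundaries, so locking one byte
+// at 0x20100 with a 0x1000 byte page size returns a base of 0x20000
+// and a region size of 0x1000.
+//
+//     PVOID Base = (PVOID)0x20100;
+//     ULONG Size = 1;
+//
+//     Status = NtLockVirtualMemory (ProcessHandle, &Base, &Size,
+//                                   MAP_PROCESS);
+//     // On success: Base == (PVOID)0x20000 and Size == 0x1000.
+//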
+
+NTSTATUS
+NtUnlockVirtualMemory (
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PULONG RegionSize,
+ IN ULONG MapType
+ )
+
+/*++
+
+Routine Description:
+
+ This function unlocks a region of pages within the working set list
+ of a subject process.
+
+ As a side effect, any pages which are not locked and are in the
+ process's working set are removed from the process's working set.
+ This allows NtUnlockVirtualMemory to remove a range of pages
+ from the working set.
+
+ The caller of this function must have PROCESS_VM_OPERATION access
+ to the target process.
+
+ The caller must also have SeLockMemoryPrivilege for MAP_SYSTEM.
+
+Arguments:
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - The base address of the region of pages
+ to be unlocked. This value is rounded down to the
+ next host page address boundary.
+
+ RegionSize - A pointer to a variable that will receive
+ the actual size in bytes of the unlocked region of
+ pages. The initial value of this argument is
+ rounded up to the next host page size boundary.
+
+ MapType - A set of flags that describe the type of unlocking to
+        perform. At least one of MAP_PROCESS or MAP_SYSTEM must be set.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
+
+{
+ PVOID Va;
+ PVOID EndingAddress;
+ ULONG CapturedRegionSize;
+ PVOID CapturedBase;
+ PEPROCESS TargetProcess;
+ NTSTATUS Status;
+ KPROCESSOR_MODE PreviousMode;
+ ULONG Entry;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+
+ PAGED_CODE();
+
+ //
+ // Validate the flags in MapType.
+ //
+
+ if ((MapType & ~(MAP_PROCESS | MAP_SYSTEM)) != 0) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ if ((MapType & (MAP_PROCESS | MAP_SYSTEM)) == 0) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ PreviousMode = KeGetPreviousMode();
+
+ try {
+
+ if (PreviousMode != KernelMode) {
+
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (RegionSize);
+ }
+
+ //
+ // Capture the base address.
+ //
+
+ CapturedBase = *BaseAddress;
+
+ //
+ // Capture the region size.
+ //
+
+ CapturedRegionSize = *RegionSize;
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+ //
+ // Make sure the specified starting and ending addresses are
+ // within the user part of the virtual address space.
+ //
+
+ if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // Invalid base address.
+ //
+
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ if ((ULONG)MM_HIGHEST_USER_ADDRESS - (ULONG)CapturedBase <
+ CapturedRegionSize) {
+
+ //
+        // Invalid region size.
+ //
+
+ return STATUS_INVALID_PARAMETER;
+
+ }
+
+ if (CapturedRegionSize == 0) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&TargetProcess,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ if ((MapType & MAP_SYSTEM) != 0) {
+
+ //
+ // In addition to PROCESS_VM_OPERATION access to the target
+ // process, the caller must have SE_LOCK_MEMORY_PRIVILEGE.
+ //
+
+ if (!SeSinglePrivilegeCheck(
+ SeLockMemoryPrivilege,
+ PreviousMode
+ )) {
+
+ ObDereferenceObject( TargetProcess );
+ return( STATUS_PRIVILEGE_NOT_HELD );
+ }
+ }
+
+ //
+ // Attach to the specified process.
+ //
+
+ KeAttachProcess (&TargetProcess->Pcb);
+
+ //
+    // Get the address creation mutex; this prevents the
+ // address range from being modified while it is examined.
+ // Block APCs so an APC routine can't get a page fault and
+ // corrupt the working set list, etc.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (TargetProcess);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (TargetProcess->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
+
+ EndingAddress = PAGE_ALIGN((ULONG)CapturedBase + CapturedRegionSize - 1);
+
+ Va = PAGE_ALIGN (CapturedBase);
+
+ while (Va <= EndingAddress) {
+
+ //
+ // Check to ensure all the specified pages are locked.
+ //
+
+ if (!MmIsAddressValid (Va)) {
+
+ //
+ // This page is not valid, therefore not in working set.
+ //
+
+ Status = STATUS_NOT_LOCKED;
+ } else {
+
+ PointerPte = MiGetPteAddress (Va);
+ ASSERT (PointerPte->u.Hard.Valid != 0);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Entry = MiLocateWsle (Va, MmWorkingSetList, Pfn1->u1.WsIndex);
+ ASSERT (Entry != WSLE_NULL_INDEX);
+
+ if ((MmWsle[Entry].u1.e1.LockedInWs == 0) &&
+ (MmWsle[Entry].u1.e1.LockedInMemory == 0)) {
+
+ //
+ // Not locked in memory or system, remove from working
+ // set.
+ //
+
+ MiTakePageFromWorkingSet (Entry,
+ &TargetProcess->Vm,
+ PointerPte);
+
+ Status = STATUS_NOT_LOCKED;
+
+ } else if (MapType & MAP_PROCESS) {
+ if (MmWsle[Entry].u1.e1.LockedInWs == 0) {
+
+ //
+ // This page is not locked.
+ //
+
+ Status = STATUS_NOT_LOCKED;
+ }
+ } else {
+ if (MmWsle[Entry].u1.e1.LockedInMemory == 0) {
+
+ //
+ // This page is not locked.
+ //
+
+ Status = STATUS_NOT_LOCKED;
+ }
+ }
+ }
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ } // end while
+
+ if (Status == STATUS_NOT_LOCKED) {
+ goto ErrorReturn;
+ }
+
+ //
+    // The complete address range is locked; unlock the pages.
+ //
+
+ Va = PAGE_ALIGN (CapturedBase);
+
+
+ while (Va <= EndingAddress) {
+
+ PointerPte = MiGetPteAddress (Va);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Entry = MiLocateWsle (Va, MmWorkingSetList, Pfn1->u1.WsIndex);
+
+ if (MapType & MAP_PROCESS) {
+ MmWsle[Entry].u1.e1.LockedInWs = 0;
+ }
+
+ if (MapType & MAP_SYSTEM) {
+ MmWsle[Entry].u1.e1.LockedInMemory = 0;
+ }
+
+ if ((MmWsle[Entry].u1.e1.LockedInMemory == 0) &&
+ MmWsle[Entry].u1.e1.LockedInWs == 0) {
+
+ //
+            // The page should no longer be locked; move
+            // it to the dynamic part of the working set.
+ //
+
+ MmWorkingSetList->FirstDynamic -= 1;
+
+ if (Entry != MmWorkingSetList->FirstDynamic) {
+
+ //
+ // Swap this element with the last locked page, making
+ // this element the new first dynamic entry.
+ //
+
+ MiSwapWslEntries (Entry,
+ MmWorkingSetList->FirstDynamic,
+ &TargetProcess->Vm);
+ }
+ }
+
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ }
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+ ObDereferenceObject (TargetProcess);
+
+ //
+ // Update return arguments.
+ //
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *RegionSize = ((ULONG)EndingAddress -
+ (ULONG)PAGE_ALIGN(CapturedBase)) + PAGE_SIZE;
+
+ *BaseAddress = PAGE_ALIGN(CapturedBase);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ return STATUS_SUCCESS;
+
+ErrorReturn:
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+ ObDereferenceObject (TargetProcess);
+ return Status;
+}
+
+
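+// For illustration only: a user-mode sketch (not part of this
+// module) of the page-granular semantics enforced above -- the
+// unlock request fails with STATUS_NOT_LOCKED, before any page is
+// unlocked, unless every page in the range is currently locked.
+//
+#if 0
+    SYSTEM_INFO Info;
+    PVOID Buffer;
+
+    GetSystemInfo (&Info);
+    Buffer = VirtualAlloc (NULL, 2 * Info.dwPageSize,
+                           MEM_COMMIT, PAGE_READWRITE);
+
+    VirtualLock (Buffer, Info.dwPageSize);      // lock page 0 only
+
+    //
+    // Fails: page 1 was never locked, so the whole range is
+    // rejected and page 0 remains locked.
+    //
+
+    VirtualUnlock (Buffer, 2 * Info.dwPageSize);
+
+    VirtualUnlock (Buffer, Info.dwPageSize);    // succeeds
+#endif
+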
diff --git a/private/ntos/mm/mapcache.c b/private/ntos/mm/mapcache.c
new file mode 100644
index 000000000..e287572c1
--- /dev/null
+++ b/private/ntos/mm/mapcache.c
@@ -0,0 +1,1677 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ Mapcache.c
+
+Abstract:
+
+ This module contains the routines which implement mapping views
+ of sections into the system-wide cache.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1990
+
+Revision History:
+
+--*/
+
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MiInitializeSystemCache )
+#endif
+
+extern ULONG MmFrontOfList;
+
+VOID
+MiFreeInPageSupportBlock (
+ IN PMMINPAGE_SUPPORT Support
+ );
+
+VOID
+MiRemoveMappedPtes (
+ IN PVOID BaseAddress,
+ IN ULONG NumberOfPtes,
+ IN PCONTROL_AREA ControlArea,
+ ULONG SystemCache
+ );
+
+#define X256K 0x40000
+
+ULONG MmFirstFreeSystemCache;
+
+ULONG MmLastFreeSystemCache;
+
+ULONG MmFlushSystemCache;
+
+PMMPTE MmSystemCachePtes;
+
+LONG
+MiMapCacheExceptionFilter (
+ IN PNTSTATUS Status,
+ IN PEXCEPTION_POINTERS ExceptionPointer
+ );
+
+NTSTATUS
+MmMapViewInSystemCache (
+ IN PVOID SectionToMap,
+ OUT PVOID *CapturedBase,
+ IN OUT PLARGE_INTEGER SectionOffset,
+ IN OUT PULONG CapturedViewSize
+ )
+
+/*++
+
+Routine Description:
+
+    This function maps a view of the specified section into the
+    system-wide cache.  The page protection is identical to that
+    of the prototype PTE.
+
+ This function is a kernel mode interface to allow LPC to map
+ a section given the section pointer to map.
+
+ This routine assumes all arguments have been probed and captured.
+
+Arguments:
+
+ SectionToMap - Supplies a pointer to the section object.
+
+    CapturedBase - Supplies a pointer to a variable that will
+                   receive the base address of the view within the
+                   system cache.
+
+ SectionOffset - Supplies the offset from the beginning of the
+ section to the view in bytes. This value must be a multiple
+ of 256k.
+
+ ViewSize - Supplies a pointer to a variable that will receive
+ the actual size in bytes of the view.
+ The initial values of this argument specifies the
+ size of the view in bytes and is rounded up to the
+ next host page size boundary and must be less than or equal
+ to 256k.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PSECTION Section;
+ ULONG PteOffset;
+ KIRQL OldIrql;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE ProtoPte;
+ PMMPTE LastProto;
+ PSUBSECTION Subsection;
+ PVOID EndingVa;
+ PCONTROL_AREA ControlArea;
+
+ Section = SectionToMap;
+
+ //
+    // Assert that the view size is no more than 256k and that the
+    // section offset is aligned on a 256k boundary.
+ //
+
+ ASSERT (*CapturedViewSize <= 256L*1024L);
+ ASSERT ((SectionOffset->LowPart & (256L*1024L - 1)) == 0);
+
+ //
+ // Make sure the section is not an image section or a page file
+ // backed section.
+ //
+
+ if (Section->u.Flags.Image) {
+ return STATUS_NOT_MAPPED_DATA;
+ }
+
+ ControlArea = Section->Segment->ControlArea;
+
+ ASSERT (*CapturedViewSize != 0);
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ LOCK_PFN (OldIrql);
+
+ ASSERT (ControlArea->u.Flags.BeingCreated == 0);
+ ASSERT (ControlArea->u.Flags.BeingDeleted == 0);
+ ASSERT (ControlArea->u.Flags.BeingPurged == 0);
+
+ //
+ // Find a free 256k base in the cache.
+ //
+
+ if (MmFirstFreeSystemCache == MM_EMPTY_PTE_LIST) {
+ UNLOCK_PFN (OldIrql);
+ return STATUS_NO_MEMORY;
+ }
+
+ if (MmFirstFreeSystemCache == MmFlushSystemCache) {
+
+ //
+ // All system cache PTEs have been used, flush the entire
+ // TB to remove any stale TB entries.
+ //
+
+ KeFlushEntireTb (TRUE, TRUE);
+ MmFlushSystemCache = 0;
+ }
+
+ *CapturedBase = (PVOID)((PCHAR)MmSystemCacheStart +
+ MmFirstFreeSystemCache * PAGE_SIZE);
+
+ //
+ // Update next free entry.
+ //
+
+ ASSERT (MmSystemCachePtes[MmFirstFreeSystemCache].u.Hard.Valid == 0);
+ MmFirstFreeSystemCache =
+ MmSystemCachePtes[MmFirstFreeSystemCache].u.Hard.PageFrameNumber;
+
+ ASSERT ((MmFirstFreeSystemCache == MM_EMPTY_PTE_LIST) ||
+ (MmFirstFreeSystemCache <= MmSizeOfSystemCacheInPages));
+
+ //
+ // Increment the count of the number of views for the
+ // section object. This requires the PFN mutex to be held.
+ //
+
+ ControlArea->NumberOfMappedViews += 1;
+ ControlArea->NumberOfSystemCacheViews += 1;
+ ASSERT (ControlArea->NumberOfSectionReferences != 0);
+
+ UNLOCK_PFN (OldIrql);
+
+ EndingVa = (PVOID)(((ULONG)*CapturedBase +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+
+ //
+    // An unoccupied address range has been found; set the PTEs in
+    // the range to prototype PTE format.
+ //
+
+ PointerPte = MiGetPteAddress (*CapturedBase);
+
+#if DBG
+
+ //
+ // Zero out the next pointer field.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = 0;
+#endif //DBG
+
+ LastPte = MiGetPteAddress (EndingVa);
+
+ //
+ // Calculate the first prototype PTE address.
+ //
+
+ PteOffset = (ULONG)(SectionOffset->QuadPart >> PAGE_SHIFT);
+
+ //
+ // Make sure the PTEs are not in the extended part of the
+ // segment.
+ //
+
+ while (PteOffset >= Subsection->PtesInSubsection) {
+ PteOffset -= Subsection->PtesInSubsection;
+ Subsection = Subsection->NextSubsection;
+ }
+
+ ProtoPte = &Subsection->SubsectionBase[PteOffset];
+
+ LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
+
+ while (PointerPte <= LastPte) {
+
+ if (ProtoPte >= LastProto) {
+
+ //
+ // Handle extended subsections.
+ //
+
+ Subsection = Subsection->NextSubsection;
+ ProtoPte = Subsection->SubsectionBase;
+ LastProto = &Subsection->SubsectionBase[
+ Subsection->PtesInSubsection];
+ }
+ ASSERT (PointerPte->u.Long == ZeroKernelPte.u.Long);
+ PointerPte->u.Long = MiProtoAddressForKernelPte (ProtoPte);
+
+ ASSERT (((ULONG)PointerPte & (MM_COLOR_MASK << PTE_SHIFT)) ==
+ (((ULONG)ProtoPte & (MM_COLOR_MASK << PTE_SHIFT))));
+
+ PointerPte += 1;
+ ProtoPte += 1;
+ }
+
+ return STATUS_SUCCESS;
+}
+
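+//
+// For illustration only: a hedged sketch of a hypothetical caller
+// (the cache manager is the intended client) mapping a 256k-aligned
+// view, referencing it, and unmapping it.  "Section" and
+// "FileOffset" are assumed inputs, not part of this module.
+//
+#if 0
+    PVOID Base;
+    ULONG ViewSize = X256K;
+    LARGE_INTEGER Offset;
+    NTSTATUS Status;
+
+    Offset.QuadPart = FileOffset & ~((LONGLONG)X256K - 1);
+
+    Status = MmMapViewInSystemCache (Section, &Base, &Offset, &ViewSize);
+    if (NT_SUCCESS (Status)) {
+
+        //
+        // Touch the data through Base; faults on these addresses
+        // resolve through the prototype PTEs installed above.
+        //
+
+        MmUnmapViewInSystemCache (Base, Section, FALSE);
+    }
+#endif
+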
+NTSTATUS
+MiAddMappedPtes (
+ IN PMMPTE FirstPte,
+ IN ULONG NumberOfPtes,
+ IN PCONTROL_AREA ControlArea,
+ IN ULONG PteOffset,
+ IN ULONG SystemCache
+ )
+
+/*++
+
+Routine Description:
+
+    This function points the supplied range of system PTEs at the
+    prototype PTEs of the specified control area, starting at the
+    specified PTE offset within the section.  The page protection
+    is identical to that of the prototype PTE.
+
+Arguments:
+
+    FirstPte - Supplies the first PTE in the range to be filled.
+
+    NumberOfPtes - Supplies the number of PTEs to fill.
+
+    ControlArea - Supplies the control area of the section being
+                  mapped.
+
+    PteOffset - Supplies the offset, in PTEs, from the start of the
+                section at which the view begins.
+
+    SystemCache - Supplies a nonzero value if the view is being
+                  mapped into the system cache, in which case the
+                  view counts on the control area are incremented.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ PMMPTE PointerPte;
+ PMMPTE ProtoPte;
+ PMMPTE LastProto;
+ PMMPTE LastPte;
+ PSUBSECTION Subsection;
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+    LOCK_PFN (OldIrql);  // NOTE: this lock could be released earlier; see the above routine.
+
+ ASSERT (ControlArea->u.Flags.BeingCreated == 0);
+ ASSERT (ControlArea->u.Flags.BeingDeleted == 0);
+ ASSERT (ControlArea->u.Flags.BeingPurged == 0);
+
+ PointerPte = FirstPte;
+ LastPte = FirstPte + NumberOfPtes - 1;
+
+#if DBG
+
+ //
+ // Zero out the next pointer field.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = 0;
+#endif //DBG
+
+ //
+ // Make sure the PTEs are not in the extended part of the
+ // segment.
+ //
+
+ while (PteOffset >= Subsection->PtesInSubsection) {
+ PteOffset -= Subsection->PtesInSubsection;
+ Subsection = Subsection->NextSubsection;
+ }
+
+ ProtoPte = &Subsection->SubsectionBase[PteOffset];
+
+ LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
+
+ while (PointerPte <= LastPte) {
+
+ if (ProtoPte >= LastProto) {
+
+ //
+ // Handle extended subsections.
+ //
+
+ Subsection = Subsection->NextSubsection;
+ ProtoPte = Subsection->SubsectionBase;
+ LastProto = &Subsection->SubsectionBase[
+ Subsection->PtesInSubsection];
+ }
+ ASSERT (PointerPte->u.Long == ZeroKernelPte.u.Long);
+ PointerPte->u.Long = MiProtoAddressForKernelPte (ProtoPte);
+
+ ASSERT (((ULONG)PointerPte & (MM_COLOR_MASK << PTE_SHIFT)) ==
+ (((ULONG)ProtoPte & (MM_COLOR_MASK << PTE_SHIFT))));
+
+ PointerPte += 1;
+ ProtoPte += 1;
+ }
+
+ if (SystemCache) {
+
+ //
+ // Increment the count of the number of views for the
+ // section object. This requires the PFN mutex to be held.
+ //
+
+ ControlArea->NumberOfMappedViews += 1;
+ ControlArea->NumberOfSystemCacheViews += 1;
+ ASSERT (ControlArea->NumberOfSectionReferences != 0);
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ return STATUS_SUCCESS;
+
+}
+
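+//
+// For illustration only: a hedged sketch of how a hypothetical
+// internal caller might pair MiAddMappedPtes with MiRemoveMappedPtes
+// for a view in system space (mapview.c, which declares
+// MiAddMappedPtes, is the presumed client).  "Base", "Ptes" and
+// "ControlArea" are assumed inputs.
+//
+#if 0
+    MiAddMappedPtes (MiGetPteAddress (Base), Ptes, ControlArea, 0, FALSE);
+
+    //
+    // ... use the view ...
+    //
+
+    MiRemoveMappedPtes (Base, Ptes, ControlArea, FALSE);
+#endif
+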
+VOID
+MmUnmapViewInSystemCache (
+ IN PVOID BaseAddress,
+ IN PVOID SectionToUnmap,
+ IN ULONG AddToFront
+ )
+
+/*++
+
+Routine Description:
+
+ This function unmaps a view from the system cache.
+
+ NOTE: When this function is called, no pages may be locked in
+ the cache for the specified view.
+
+Arguments:
+
+ BaseAddress - Supplies the base address of the section in the
+ system cache.
+
+ SectionToUnmap - Supplies a pointer to the section which the
+ base address maps.
+
+    AddToFront - Supplies TRUE if the unmapped pages should be
+                 added to the front of the standby list (i.e., their
+                 value in the cache is low); FALSE otherwise.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PMMPTE FirstPte;
+ MMPTE PteContents;
+ KIRQL OldIrql;
+ KIRQL OldIrqlWs;
+ ULONG i;
+ ULONG Entry;
+ ULONG WorkingSetIndex;
+ PCONTROL_AREA ControlArea;
+ ULONG DereferenceSegment = FALSE;
+ ULONG WsHeld = FALSE;
+ ULONG PdeFrameNumber;
+
+ ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
+
+ PointerPte = MiGetPteAddress (BaseAddress);
+ FirstPte = PointerPte;
+ Entry = PointerPte - MmSystemCachePtes;
+ ControlArea = ((PSECTION)SectionToUnmap)->Segment->ControlArea;
+ PdeFrameNumber = (MiGetPteAddress (PointerPte))->u.Hard.PageFrameNumber;
+
+ //
+ // Get the control area for the segment which is mapped here.
+ //
+
+ i = 0;
+
+ do {
+
+ //
+        // The cache is organized in chunks of 256k bytes; clear
+        // every PTE in the view's chunk.
+ //
+
+ //
+ // The page table page is always resident for the system cache.
+ // Check each PTE, it is in one of two states, either valid or
+ // prototype PTE format.
+ //
+
+ PteContents = *(volatile MMPTE *)PointerPte;
+ if (PteContents.u.Hard.Valid == 1) {
+
+ if (!WsHeld) {
+ WsHeld = TRUE;
+ LOCK_SYSTEM_WS (OldIrqlWs);
+ continue;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ WorkingSetIndex = MiLocateWsle (BaseAddress,
+ MmSystemCacheWorkingSetList,
+ Pfn1->u1.WsIndex );
+ MiRemoveWsle (WorkingSetIndex,
+ MmSystemCacheWorkingSetList );
+ MiReleaseWsle (WorkingSetIndex, &MmSystemCacheWs);
+
+ //
+ // The Pte is valid.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Capture the state of the modified bit for this
+ // pte.
+ //
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
+
+ //
+ // Decrement the share and valid counts of the page table
+ // page which maps this PTE.
+ //
+
+ MiDecrementShareAndValidCount (PdeFrameNumber);
+
+ //
+ // Decrement the share count for the physical page.
+ //
+
+#if DBG
+ if (ControlArea->NumberOfMappedViews == 1) {
+ PMMPFN Pfn;
+ Pfn = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+ ASSERT (Pfn->u2.ShareCount == 1);
+ }
+#endif //DBG
+
+
+ MmFrontOfList = AddToFront;
+ MiDecrementShareCount (PteContents.u.Hard.PageFrameNumber);
+ MmFrontOfList = FALSE;
+ UNLOCK_PFN (OldIrql);
+ } else {
+ if (WsHeld) {
+ UNLOCK_SYSTEM_WS (OldIrqlWs);
+ WsHeld = FALSE;
+ }
+
+ ASSERT ((PteContents.u.Long == ZeroKernelPte.u.Long) ||
+ (PteContents.u.Soft.Prototype == 1));
+ NOTHING;
+ }
+ *PointerPte = ZeroKernelPte;
+
+ PointerPte += 1;
+ BaseAddress = (PVOID)((ULONG)BaseAddress + PAGE_SIZE);
+ i += 1;
+ } while (i < (X256K / PAGE_SIZE));
+
+ if (WsHeld) {
+ UNLOCK_SYSTEM_WS (OldIrqlWs);
+ }
+
+ FirstPte->u.Hard.PageFrameNumber = MM_EMPTY_PTE_LIST;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Free this entry to the end of the list.
+ //
+
+ if (MmFlushSystemCache == 0) {
+
+ //
+ // If there is no entry marked to initiate a TB flush when
+ // reused, mark this entry as the one. This way the TB
+        // only needs to be flushed when the list wraps.
+ //
+
+ MmFlushSystemCache = Entry;
+ }
+
+ MmSystemCachePtes[MmLastFreeSystemCache].u.Hard.PageFrameNumber = Entry;
+ MmLastFreeSystemCache = Entry;
+
+ //
+ // Decrement the number of mapped views for the segment
+ // and check to see if the segment should be deleted.
+ //
+
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfSystemCacheViews -= 1;
+
+ //
+ // Check to see if the control area (segment) should be deleted.
+ // This routine releases the PFN lock.
+ //
+
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+
+ return;
+}
+
+
+VOID
+MiRemoveMappedPtes (
+ IN PVOID BaseAddress,
+ IN ULONG NumberOfPtes,
+ IN PCONTROL_AREA ControlArea,
+ ULONG SystemCache
+ )
+
+/*++
+
+Routine Description:
+
+    This function removes the mapping PTEs for a view, returning
+    them to their original state, and decrements the view counts
+    on the control area.
+
+    NOTE: When this function is called, no pages may be locked in
+    the cache for the specified view.
+
+Arguments:
+
+    BaseAddress - Supplies the base address of the view.
+
+    NumberOfPtes - Supplies the number of PTEs to remove.
+
+    ControlArea - Supplies the control area which the view maps.
+
+    SystemCache - Supplies a nonzero value if the view resides in
+                  the system cache rather than in system space.
+
+Return Value:
+
+    None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPFN Pfn1;
+ PMMPTE FirstPte;
+ MMPTE PteContents;
+ KIRQL OldIrql;
+ KIRQL OldIrqlWs;
+ ULONG i;
+ ULONG Entry;
+ ULONG WorkingSetIndex;
+ ULONG DereferenceSegment = FALSE;
+ MMPTE_FLUSH_LIST PteFlushList;
+ ULONG WsHeld = FALSE;
+
+ PteFlushList.Count = 0;
+ PointerPte = MiGetPteAddress (BaseAddress);
+ FirstPte = PointerPte;
+ Entry = PointerPte - MmSystemCachePtes;
+
+ //
+ // Get the control area for the segment which is mapped here.
+ //
+
+ while (NumberOfPtes) {
+
+ //
+        // Clear each PTE in the view.
+ //
+
+ //
+ // The page table page is always resident for the system cache.
+ // Check each PTE, it is in one of two states, either valid or
+ // prototype PTE format.
+ //
+
+ PteContents = *PointerPte;
+ if (PteContents.u.Hard.Valid == 1) {
+
+ if (!WsHeld) {
+ WsHeld = TRUE;
+ LOCK_SYSTEM_WS (OldIrqlWs);
+ continue;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ WorkingSetIndex = MiLocateWsle (BaseAddress,
+ MmSystemCacheWorkingSetList,
+ Pfn1->u1.WsIndex );
+ ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
+
+ MiRemoveWsle (WorkingSetIndex,
+ MmSystemCacheWorkingSetList );
+ MiReleaseWsle (WorkingSetIndex, &MmSystemCacheWs);
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // The Pte is valid.
+ //
+
+ //
+ // Capture the state of the modified bit for this
+ // pte.
+ //
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
+
+ //
+ // Flush the TB for this page.
+ //
+
+ if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) {
+ PteFlushList.FlushPte[PteFlushList.Count] = PointerPte;
+ PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
+ PteFlushList.Count += 1;
+ }
+
+ //
+ // Decrement the share and valid counts of the page table
+ // page which maps this PTE.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiDecrementShareAndValidCount (PointerPde->u.Hard.PageFrameNumber);
+
+ //
+ // Decrement the share count for the physical page.
+ //
+
+ MiDecrementShareCount (PteContents.u.Hard.PageFrameNumber);
+ UNLOCK_PFN (OldIrql);
+
+ } else {
+ if (WsHeld) {
+ UNLOCK_SYSTEM_WS (OldIrqlWs);
+ WsHeld = FALSE;
+ }
+
+ ASSERT ((PteContents.u.Long == ZeroKernelPte.u.Long) ||
+ (PteContents.u.Soft.Prototype == 1));
+ NOTHING;
+ }
+ *PointerPte = ZeroKernelPte;
+
+ PointerPte += 1;
+ BaseAddress = (PVOID)((ULONG)BaseAddress + PAGE_SIZE);
+ NumberOfPtes -= 1;
+ }
+
+ if (WsHeld) {
+ UNLOCK_SYSTEM_WS (OldIrqlWs);
+ }
+ LOCK_PFN (OldIrql);
+
+ MiFlushPteList (&PteFlushList, TRUE, ZeroKernelPte);
+
+ if (SystemCache) {
+
+ //
+ // Free this entry back to the list.
+ //
+
+ FirstPte->u.Hard.PageFrameNumber = MmFirstFreeSystemCache;
+ MmFirstFreeSystemCache = Entry;
+ ControlArea->NumberOfSystemCacheViews -= 1;
+ } else {
+ ControlArea->NumberOfUserReferences -= 1;
+ }
+
+ //
+ // Decrement the number of mapped views for the segment
+ // and check to see if the segment should be deleted.
+ //
+
+ ControlArea->NumberOfMappedViews -= 1;
+
+ //
+ // Check to see if the control area (segment) should be deleted.
+ // This routine releases the PFN lock.
+ //
+
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+
+ return;
+}
+
+ULONG
+MiInitializeSystemCache (
+ IN ULONG SizeOfSystemCacheInPages,
+ IN ULONG MinimumWorkingSet,
+ IN ULONG MaximumWorkingSet
+ )
+
+/*++
+
+Routine Description:
+
+ This routine initializes the system cache working set and
+ data management structures.
+
+Arguments:
+
+ SizeOfSystemCacheInPages - Supplies the size of the cache in pages.
+
+ MinimumWorkingSet - Supplies the minimum working set for the system
+ cache.
+
+ MaximumWorkingSet - Supplies the maximum working set size for the
+ system cache.
+
+Return Value:
+
+    Returns TRUE if the initialization succeeded.
+
+Environment:
+
+ Kernel mode, called only at phase 0 initialization.
+
+--*/
+
+{
+ ULONG HunksOf256KInCache;
+ PMMWSLE WslEntry;
+ ULONG NumberOfEntriesMapped;
+ ULONG i;
+ PMMPTE PointerPte;
+ ULONG NextFree;
+
+ PointerPte = MiGetPteAddress (MmSystemCacheWorkingSetList);
+
+ i = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+
+ *PointerPte = ValidKernelPte;
+ PointerPte->u.Hard.PageFrameNumber = i;
+
+ MiInitializePfn (i, PointerPte, 1L);
+
+ MmSystemCacheWsle =
+ (PMMWSLE)(&MmSystemCacheWorkingSetList->UsedPageTableEntries[0]);
+
+ MmSystemCacheWs.VmWorkingSetList = MmSystemCacheWorkingSetList;
+ MmSystemCacheWs.WorkingSetSize = 0;
+ MmSystemCacheWs.MinimumWorkingSetSize = MinimumWorkingSet;
+ MmSystemCacheWs.MaximumWorkingSetSize = MaximumWorkingSet;
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &MmSystemCacheWs.WorkingSetExpansionLinks);
+
+ MmSystemCacheWs.AllowWorkingSetAdjustment = TRUE;
+
+ //
+    // Don't use entry 0: a WsIndex of zero in the PFN database
+    // means that the page is not assigned to a slot.  This is not
+    // a problem for process working sets as page 0 is private.
+ //
+
+ MmSystemCacheWorkingSetList->FirstFree = 1;
+ MmSystemCacheWorkingSetList->FirstDynamic = 1;
+ MmSystemCacheWorkingSetList->NextSlot = 1;
+ MmSystemCacheWorkingSetList->LastEntry = MmSystemCacheWsMinimum;
+ MmSystemCacheWorkingSetList->Quota = MmSystemCacheWorkingSetList->LastEntry;
+ MmSystemCacheWorkingSetList->HashTable = NULL;
+ MmSystemCacheWorkingSetList->HashTableSize = 0;
+ MmSystemCacheWorkingSetList->Wsle = MmSystemCacheWsle;
+
+ NumberOfEntriesMapped = ((PMMWSLE)((ULONG)MmSystemCacheWorkingSetList +
+ PAGE_SIZE)) - MmSystemCacheWsle;
+
+ while (NumberOfEntriesMapped < MmSystemCacheWsMaximum) {
+
+ PointerPte += 1;
+ i = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+ *PointerPte = ValidKernelPte;
+ PointerPte->u.Hard.PageFrameNumber = i;
+ MiInitializePfn (i, PointerPte, 1L);
+ NumberOfEntriesMapped += PAGE_SIZE / sizeof(MMWSLE);
+ }
+
+ //
+ // Initialize the following slots as free.
+ //
+
+ WslEntry = MmSystemCacheWsle + 1;
+
+ for (i = 1; i < NumberOfEntriesMapped; i++) {
+
+ //
+        // Build the free list.  Note that the first working
+        // set entry (entry 0) is not on the free list; such
+        // entries are reserved for the pages which
+        // map the working set and the page which contains the PDE.
+ //
+
+ WslEntry->u1.Long = (i + 1) << MM_FREE_WSLE_SHIFT;
+ WslEntry += 1;
+ }
+
+ WslEntry -= 1;
+ WslEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; // End of list.
+
+ MmSystemCacheWorkingSetList->LastInitializedWsle = NumberOfEntriesMapped - 1;
+
+ //
+ // Build a free list structure in the PTEs for the system
+ // cache.
+ //
+
+ HunksOf256KInCache = SizeOfSystemCacheInPages / (X256K / PAGE_SIZE);
+
+ MmFirstFreeSystemCache = 0;
+ NextFree = 0;
+ MmSystemCachePtes = MiGetPteAddress (MmSystemCacheStart);
+
+ for (i = 0; i < HunksOf256KInCache; i++) {
+ MmSystemCachePtes[NextFree].u.Hard.PageFrameNumber =
+ NextFree + (X256K / PAGE_SIZE);
+ NextFree += X256K / PAGE_SIZE;
+ }
+
+ MmLastFreeSystemCache = NextFree - (X256K / PAGE_SIZE);
+ MmSystemCachePtes[MmLastFreeSystemCache].u.Hard.PageFrameNumber =
+ MM_EMPTY_PTE_LIST;
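+
+    //
+    // Illustration (assuming 4k pages): X256K / PAGE_SIZE is 64,
+    // so the first PTE of each free 256k chunk -- PTE indices 0,
+    // 64, 128, ... -- holds the index of the next free chunk in
+    // its PageFrameNumber field, and the last chunk holds
+    // MM_EMPTY_PTE_LIST to terminate the list.
+    //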
+
+ if (MaximumWorkingSet > ((1536*1024) >> PAGE_SHIFT)) {
+
+ //
+ // The working set list consists of more than a single page.
+ //
+
+ MiGrowWsleHash (&MmSystemCacheWs, FALSE);
+ }
+
+ return TRUE;
+}
+
+BOOLEAN
+MmCheckCachedPageState (
+ IN PVOID Address,
+ IN BOOLEAN SetToZero
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks the state of the specified page that is mapped in
+ the system cache. If the specified virtual address can be made valid
+ (i.e., the page is already in memory), it is made valid and the value
+ TRUE is returned.
+
+    If the page is not in memory and SetToZero is FALSE, the
+    value FALSE is returned.  However, if SetToZero is TRUE, a page
+    of zeroes is materialized for the specified virtual address, the
+    address is made valid, and the value TRUE is returned.
+
+ This routine is for usage by the cache manager.
+
+Arguments:
+
+ Address - Supplies the address of a page mapped in the system cache.
+
+ SetToZero - Supplies TRUE if a page of zeroes should be created in the
+ case where no page is already mapped.
+
+Return Value:
+
+    FALSE if touching this page would cause a page fault resulting
+    in a page read.
+
+ TRUE if there is a physical page in memory for this address.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE ProtoPte;
+ ULONG PageFrameIndex;
+ ULONG WorkingSetIndex;
+ MMPTE TempPte;
+ MMPTE ProtoPteContents;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ KIRQL OldIrql;
+
+ PointerPte = MiGetPteAddress (Address);
+
+ //
+    // Make the PTE valid if possible.
+ //
+
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ return TRUE;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ goto UnlockAndReturnTrue;
+ }
+
+ ASSERT (PointerPte->u.Soft.Prototype == 1);
+
+ ProtoPte = MiPteToProto (PointerPte);
+
+ //
+ // Pte is not valid, check the state of the prototype PTE.
+ //
+
+ if (MiMakeSystemAddressValidPfn (ProtoPte)) {
+
+ //
+ // If page fault occurred, recheck state of original PTE.
+ //
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ goto UnlockAndReturnTrue;
+ }
+ }
+
+ ProtoPteContents = *ProtoPte;
+
+ if (ProtoPteContents.u.Hard.Valid == 1) {
+
+ PageFrameIndex = ProtoPteContents.u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ //
+ // The prototype PTE is valid, make the cache PTE
+ // valid and add it to the working set.
+ //
+
+ TempPte = ProtoPteContents;
+
+ } else if ((ProtoPteContents.u.Soft.Transition == 1) &&
+ (ProtoPteContents.u.Soft.Prototype == 0)) {
+
+ //
+ // Prototype PTE is in the transition state. Remove the page
+ // from the page list and make it valid.
+ //
+
+ PageFrameIndex = ProtoPteContents.u.Trans.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ if ((Pfn1->u3.e1.ReadInProgress) ||
+ (Pfn1->u3.e1.InPageError)) {
+
+ //
+ // Collided page fault, return.
+ //
+
+ goto UnlockAndReturnTrue;
+ }
+
+ MiUnlinkPageFromList (Pfn1);
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ Pfn1->OriginalPte.u.Soft.Protection,
+ NULL );
+
+ *ProtoPte = TempPte;
+
+ //
+ // Increment the valid pte count for the page containing
+ // the prototype PTE.
+ //
+
+ Pfn2 = MI_PFN_ELEMENT (Pfn1->PteFrame);
+
+ } else {
+
+ //
+ // Page is not in memory, if a page of zeroes is requested,
+ // get a page of zeroes and make it valid.
+ //
+
+ if ((SetToZero == FALSE) || (MmAvailablePages < 8)) {
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Fault the page into memory.
+ //
+
+ MmAccessFault (FALSE, Address, KernelMode);
+ return FALSE;
+ }
+
+ //
+ // Increment the count of Pfn references for the control area
+ // corresponding to this file.
+ //
+
+ MiGetSubsectionAddress (
+ ProtoPte)->ControlArea->NumberOfPfnReferences += 1;
+
+ PageFrameIndex = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (ProtoPte));
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ MiInitializePfn (PageFrameIndex, ProtoPte, 1);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e1.PrototypePte = 1;
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ Pfn1->OriginalPte.u.Soft.Protection,
+ NULL );
+
+ *ProtoPte = TempPte;
+ }
+
+ //
+ // Increment the share count since the page is being put into a working
+ // set.
+ //
+
+ Pfn1->u2.ShareCount += 1;
+
+ if (Pfn1->u1.WsIndex == 0) {
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ }
+
+ //
+ // Increment the reference count of the page table
+ // page for this PTE.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
+
+ Pfn2->u2.ShareCount += 1;
+
+ MI_SET_GLOBAL_STATE (TempPte, 1);
+ *PointerPte = TempPte;
+
+ UNLOCK_PFN (OldIrql);
+
+ LOCK_SYSTEM_WS (OldIrql);
+
+ WorkingSetIndex = MiLocateAndReserveWsle (&MmSystemCacheWs);
+
+ MiUpdateWsle (&WorkingSetIndex,
+ MiGetVirtualAddressMappedByPte (PointerPte),
+ MmSystemCacheWorkingSetList,
+ Pfn1);
+
+ MmSystemCacheWsle[WorkingSetIndex].u1.e1.SameProtectAsProto = 1;
+
+ UNLOCK_SYSTEM_WS (OldIrql);
+
+ return TRUE;
+
+UnlockAndReturnTrue:
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+}
+
+
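+//
+// For illustration only: a hedged sketch of a hypothetical caller
+// probing whether a cache address can be touched without incurring
+// a page read, asking for a zeroed page to be materialized if the
+// data is not resident.  "CacheAddress" is an assumed input.
+//
+#if 0
+    if (MmCheckCachedPageState (CacheAddress, TRUE)) {
+
+        //
+        // The address is now valid and may be referenced without
+        // triggering an in-page read from the backing file.
+        //
+
+        RtlZeroMemory (CacheAddress, PAGE_SIZE);
+    }
+#endif
+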
+NTSTATUS
+MmCopyToCachedPage (
+ IN PVOID Address,
+ IN PVOID UserBuffer,
+ IN ULONG Offset,
+ IN ULONG CountInBytes,
+ IN BOOLEAN DontZero
+ )
+
+/*++
+
+Routine Description:
+
+    This routine copies the specified user buffer into the page
+    mapped at the specified address in the system cache.  If the
+    page is not resident, it is materialized (without reading it
+    from the backing file) and made valid; the parts of the page
+    outside the copied range are zeroed unless DontZero is TRUE.
+
+    This routine is for usage by the cache manager.
+
+Arguments:
+
+ Address - Supplies the address of a page mapped in the system cache.
+ This MUST be a page aligned address!
+
+ UserBuffer - Supplies the address of a user buffer to copy into the
+ system cache at the specified address + offset.
+
+    Offset - Supplies the offset into the cache page at which to copy the data.
+
+    CountInBytes - Supplies the number of bytes to copy from the user buffer.
+
+ DontZero - Supplies TRUE if the buffer should not be zeroed (the
+ caller will track zeroing). FALSE if it should be zeroed.
+
+Return Value:
+
+ Returns the status of the copy.
+
+Environment:
+
+ Kernel mode, <= APC_LEVEL.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE ProtoPte;
+ ULONG PageFrameIndex;
+ ULONG WorkingSetIndex;
+ MMPTE TempPte;
+ MMPTE ProtoPteContents;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ KIRQL OldIrql;
+ ULONG TransitionState = FALSE;
+ ULONG AddToWorkingSet = FALSE;
+ ULONG ShareCountUpped;
+ ULONG EndFill;
+ PVOID Buffer;
+ NTSTATUS status;
+ PMMINPAGE_SUPPORT Event;
+ PCONTROL_AREA ControlArea;
+ PETHREAD Thread;
+ ULONG SavedState;
+
+ ASSERT (((ULONG)Address & (PAGE_SIZE - 1)) == 0);
+ ASSERT ((CountInBytes + Offset) <= PAGE_SIZE);
+ ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
+
+ PointerPte = MiGetPteAddress (Address);
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ goto Copy;
+ }
+
+ //
+ // Touch the user's buffer to make it resident. This prevents a
+ // fatal problem if the user is mapping the file and doing I/O
+ // to the same offset into the file.
+ //
+
+ try {
+
+ *(volatile CHAR *)UserBuffer;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+    // Make the PTE valid if possible.
+ //
+
+ LOCK_PFN (OldIrql);
+
+Recheck:
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ goto UnlockAndCopy;
+ }
+
+ ASSERT (PointerPte->u.Soft.Prototype == 1);
+
+ ProtoPte = MiPteToProto (PointerPte);
+
+ //
+ // Pte is not valid, check the state of the prototype PTE.
+ //
+
+ if (MiMakeSystemAddressValidPfn (ProtoPte)) {
+
+ //
+ // If page fault occurred, recheck state of original PTE.
+ //
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ goto UnlockAndCopy;
+ }
+ }
+
+ ShareCountUpped = FALSE;
+ ProtoPteContents = *ProtoPte;
+
+ if (ProtoPteContents.u.Hard.Valid == 1) {
+
+ PageFrameIndex = ProtoPteContents.u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ //
+ // Increment the share count so the prototype PTE will remain
+ // valid until this can be added into the system's working set.
+ //
+
+ Pfn1->u2.ShareCount += 1;
+ ShareCountUpped = TRUE;
+
+ //
+ // The prototype PTE is valid, make the cache PTE
+ // valid and add it to the working set.
+ //
+
+ TempPte = ProtoPteContents;
+
+ } else if ((ProtoPteContents.u.Soft.Transition == 1) &&
+ (ProtoPteContents.u.Soft.Prototype == 0)) {
+
+ //
+ // Prototype PTE is in the transition state. Remove the page
+ // from the page list and make it valid.
+ //
+
+ PageFrameIndex = ProtoPteContents.u.Trans.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ if ((Pfn1->u3.e1.ReadInProgress) ||
+ (Pfn1->u3.e1.InPageError)) {
+
+ //
+            // Collided page fault or in-page error; try the copy
+            // operation, incurring a page fault.
+ //
+
+ goto UnlockAndCopy;
+ }
+
+ MiUnlinkPageFromList (Pfn1);
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.Modified = 1;
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ Pfn1->OriginalPte.u.Soft.Protection,
+ NULL );
+ MI_SET_PTE_DIRTY (TempPte);
+
+ *ProtoPte = TempPte;
+
+ //
+ // Increment the valid pte count for the page containing
+ // the prototype PTE.
+ //
+
+ } else {
+
+ //
+ // Page is not in memory, if a page of zeroes is requested,
+ // get a page of zeroes and make it valid.
+ //
+
+ if (MiEnsureAvailablePageOrWait (NULL, NULL)) {
+
+ //
+ // A wait operation occurred which could have changed the
+ // state of the PTE. Recheck the pte state.
+ //
+
+ goto Recheck;
+ }
+
+ Event = MiGetInPageSupportBlock (FALSE);
+ if (Event == NULL) {
+ goto Recheck;
+ }
+
+ //
+ // Increment the count of Pfn references for the control area
+ // corresponding to this file.
+ //
+
+ ControlArea = MiGetSubsectionAddress (ProtoPte)->ControlArea;
+ ControlArea->NumberOfPfnReferences += 1;
+ if (ControlArea->NumberOfUserReferences > 0) {
+
+ //
+ // There is a user reference to this file, always zero ahead.
+ //
+
+ DontZero = FALSE;
+ }
+
+ //
+ // Remove any page from the list and turn it into a transition
+ // page in the cache with read in progress set. This causes
+ // any other references to this page to block on the specified
+ // event while the copy operation to the cache is on-going.
+ //
+
+ PageFrameIndex = MiRemoveAnyPage(MI_GET_PAGE_COLOR_FROM_PTE (ProtoPte));
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ MiInitializeTransitionPfn (PageFrameIndex, ProtoPte, 0xFFFFFFFF);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PrototypePte = 1;
+ Pfn1->u3.e1.Modified = 1;
+ Pfn1->u3.e1.ReadInProgress = 1;
+ Pfn1->u1.Event = &Event->Event;
+ TransitionState = TRUE;
+
+ //
+ // Increment the valid pte count for the page containing
+ // the prototype PTE.
+ //
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ Pfn1->OriginalPte.u.Soft.Protection,
+ NULL);
+ MI_SET_PTE_DIRTY (TempPte);
+ }
+
+ //
+ // Increment the reference count of the page table
+ // page for this PTE.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
+
+ Pfn2->u2.ShareCount += 1;
+
+ MI_SET_GLOBAL_STATE (TempPte, 1);
+ *PointerPte = TempPte;
+
+ AddToWorkingSet = TRUE;
+
+UnlockAndCopy:
+
+ //
+ // Unlock the PFN database and perform the copy.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+Copy:
+
+ Thread = PsGetCurrentThread ();
+ MmSavePageFaultReadAhead( Thread, &SavedState );
+ MmSetPageFaultReadAhead( Thread, 0 );
+ status = STATUS_SUCCESS;
+
+ //
+ // Copy the user buffer into the cache under an exception handler.
+ //
+
+ try {
+
+ Buffer = (PVOID)((PCHAR)Address + Offset);
+ RtlCopyBytes (Buffer, UserBuffer, CountInBytes);
+
+ if (TransitionState) {
+
+ //
+ // Only zero the memory outside the range if a page was taken
+ // from the free list.
+ //
+
+ if (Offset != 0) {
+ RtlZeroMemory (Address, Offset);
+ }
+
+ if (DontZero == FALSE) {
+ EndFill = PAGE_SIZE - (Offset + CountInBytes);
+
+ if (EndFill != 0) {
+ Buffer = (PVOID)((PCHAR)Buffer + CountInBytes);
+ RtlZeroMemory (Buffer, EndFill);
+ }
+ }
+ }
+ } except (MiMapCacheExceptionFilter (&status, GetExceptionInformation())) {
+
+ //
+ // Zero out the page if it came from the free list.
+ //
+
+ if (TransitionState) {
+ RtlZeroMemory (Address, PAGE_SIZE);
+ }
+ }
+
+ MmResetPageFaultReadAhead(Thread, SavedState);
+
+ if (AddToWorkingSet) {
+
+ LOCK_PFN (OldIrql);
+
+ ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
+ ASSERT (Pfn1->PteAddress == ProtoPte);
+
+ if (TransitionState) {
+#if DBG
+ if (Pfn1->u2.ShareCount == 0) {
+ ASSERT (!ShareCountUpped);
+ } else {
+ ASSERT (Pfn1->u2.ShareCount == 1);
+ }
+ ASSERT (Pfn1->u1.Event == &Event->Event);
+#endif //DBG
+ MiMakeSystemAddressValidPfn (ProtoPte);
+ MI_SET_GLOBAL_STATE (TempPte, 0);
+ *ProtoPte = TempPte;
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ ASSERT (Pfn1->u3.e1.ReadInProgress == 1);
+ ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
+ Pfn1->u3.e1.ReadInProgress = 0;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ MiFreeInPageSupportBlock (Event);
+ if (DontZero != FALSE) {
+ Pfn1->u3.e2.ReferenceCount += 1;
+ status = STATUS_CACHE_PAGE_LOCKED;
+ }
+ } else {
+ if (Pfn1->u1.WsIndex == 0) {
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ }
+ }
+
+ //
+ // Increment the share count since the page is being put into a working
+ // set.
+ //
+
+ if (!ShareCountUpped) {
+ Pfn1->u2.ShareCount += 1;
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ LOCK_SYSTEM_WS (OldIrql);
+
+ WorkingSetIndex = MiLocateAndReserveWsle (&MmSystemCacheWs);
+
+ MiUpdateWsle (&WorkingSetIndex,
+ MiGetVirtualAddressMappedByPte (PointerPte),
+ MmSystemCacheWorkingSetList,
+ Pfn1);
+
+ MmSystemCacheWsle[WorkingSetIndex].u1.e1.SameProtectAsProto = 1;
+
+ UNLOCK_SYSTEM_WS (OldIrql);
+ }
+ return status;
+}
+
+
+LONG
+MiMapCacheExceptionFilter (
+ IN PNTSTATUS Status,
+ IN PEXCEPTION_POINTERS ExceptionPointer
+ )
+
+/*++
+
+Routine Description:
+
+    This routine is a filter for exceptions raised while copying
+    data from the user buffer to the system cache.  It stores the
+    status code from the exception record into the status argument:
+    in the case of an in-page I/O error it stores the actual
+    error code, and in the case of an access violation it stores
+    STATUS_INVALID_USER_BUFFER.
+
+Arguments:
+
+ Status - Returns the status from the exception record.
+
+    ExceptionPointer - Supplies a pointer to the exception information being checked.
+
+Return Value:
+
+ ULONG - returns EXCEPTION_EXECUTE_HANDLER
+
+--*/
+
+{
+ NTSTATUS local;
+ local = ExceptionPointer->ExceptionRecord->ExceptionCode;
+
+ //
+ // If the exception is STATUS_IN_PAGE_ERROR, get the I/O error code
+ // from the exception record.
+ //
+
+ if (local == STATUS_IN_PAGE_ERROR) {
+ if (ExceptionPointer->ExceptionRecord->NumberParameters >= 3) {
+ local = ExceptionPointer->ExceptionRecord->ExceptionInformation[2];
+ }
+ }
+
+ if (local == STATUS_ACCESS_VIOLATION) {
+ local = STATUS_INVALID_USER_BUFFER;
+ }
+
+ *Status = local;
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+
+VOID
+MmUnlockCachedPage (
+ IN PVOID AddressInCache
+ )
+
+/*++
+
+Routine Description:
+
+    This routine unlocks a previously locked cached page.
+
+Arguments:
+
+ AddressInCache - Supplies the address where the page was locked
+ in the system cache. This must be the same
+                     address that MmCopyToCachedPage was called with.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ PointerPte = MiGetPteAddress (AddressInCache);
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+ LOCK_PFN (OldIrql);
+
+ if (Pfn1->u3.e2.ReferenceCount <= 1) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x777,
+ PointerPte->u.Hard.PageFrameNumber,
+ Pfn1->u3.e2.ReferenceCount,
+ (ULONG)AddressInCache);
+ return;
+ }
+
+ Pfn1->u3.e2.ReferenceCount -= 1;
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
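+
+//
+// For illustration only: a hedged sketch of a hypothetical caller
+// using MmCopyToCachedPage with DontZero == TRUE.  When the page
+// had to be materialized, STATUS_CACHE_PAGE_LOCKED reports an extra
+// reference which the caller must later release with
+// MmUnlockCachedPage.  "CacheAddress", "UserBuffer", "Offset" and
+// "Count" are assumed inputs.
+//
+#if 0
+    NTSTATUS Status;
+
+    Status = MmCopyToCachedPage (CacheAddress, UserBuffer,
+                                 Offset, Count, TRUE);
+
+    if (Status == STATUS_CACHE_PAGE_LOCKED) {
+
+        //
+        // Finish filling the rest of the page, then release the
+        // extra reference taken on the caller's behalf.
+        //
+
+        MmUnlockCachedPage (CacheAddress);
+        Status = STATUS_SUCCESS;
+    }
+#endif
+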
diff --git a/private/ntos/mm/mapview.c b/private/ntos/mm/mapview.c
new file mode 100644
index 000000000..8274ff72d
--- /dev/null
+++ b/private/ntos/mm/mapview.c
@@ -0,0 +1,3388 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ Mapview.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtMapViewOfSection service.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+ULONG MMPPTE_NAME = 'tPmM'; //MmPt
+ULONG MMDB = 'bDmM';
+extern ULONG MMVADKEY;
+
+
+NTSTATUS
+MiMapViewOfPhysicalSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PLARGE_INTEGER SectionOffset,
+ IN PULONG CapturedViewSize,
+ IN ULONG ProtectionMask,
+ IN ULONG ZeroBits,
+ IN ULONG AllocationType,
+ OUT PBOOLEAN ReleasedWsMutex
+ );
+
+VOID
+MiSetPageModified (
+ IN PVOID Address
+ );
+
+
+extern LIST_ENTRY MmLoadedUserImageList;
+
+extern ULONG MmSharedCommit;
+
+#define X256MEG (256*1024*1024)
+
+#if DBG
+extern PEPROCESS MmWatchProcess;
+VOID MmFooBar(VOID);
+#endif // DBG
+
+
+VOID
+MiCheckPurgeAndUpMapCount (
+ IN PCONTROL_AREA ControlArea
+ );
+
+NTSTATUS
+MiMapViewOfImageSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PLARGE_INTEGER SectionOffset,
+ IN PULONG CapturedViewSize,
+ IN PSECTION Section,
+ IN SECTION_INHERIT InheritDisposition,
+ IN ULONG ZeroBits,
+ IN ULONG ImageCommitment,
+ OUT PBOOLEAN ReleasedWsMutex
+ );
+
+NTSTATUS
+MiMapViewOfDataSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PLARGE_INTEGER SectionOffset,
+ IN PULONG CapturedViewSize,
+ IN PSECTION Section,
+ IN SECTION_INHERIT InheritDisposition,
+ IN ULONG ProtectionMask,
+ IN ULONG CommitSize,
+ IN ULONG ZeroBits,
+ IN ULONG AllocationType,
+ OUT PBOOLEAN ReleasedWsMutex
+ );
+
+VOID
+VadTreeWalk (
+ PMMVAD Start
+ );
+
+#if DBG
+VOID
+MiDumpConflictingVad(
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad
+ );
+
+
+VOID
+MiDumpConflictingVad(
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad
+ )
+{
+ if (NtGlobalFlag & FLG_SHOW_LDR_SNAPS) {
+ DbgPrint( "MM: [%lX ... %lX) conflicted with Vad %lx\n",
+ StartingAddress, EndingAddress, Vad);
+ if ((Vad->u.VadFlags.PrivateMemory == 1) ||
+ (Vad->ControlArea == NULL)) {
+ return;
+ }
+ if (Vad->ControlArea->u.Flags.Image)
+ DbgPrint( " conflict with %Z image at [%lX .. %lX)\n",
+ &Vad->ControlArea->FilePointer->FileName,
+ Vad->StartingVa,
+ Vad->EndingVa
+ );
+ else
+ if (Vad->ControlArea->u.Flags.File)
+ DbgPrint( " conflict with %Z file at [%lX .. %lX)\n",
+ &Vad->ControlArea->FilePointer->FileName,
+ Vad->StartingVa,
+ Vad->EndingVa
+ );
+ else
+ DbgPrint( " conflict with section at [%lX .. %lX)\n",
+ Vad->StartingVa,
+ Vad->EndingVa
+ );
+ }
+}
+#endif //DBG
+
+
+ULONG
+CacheImageSymbols(
+ IN PVOID ImageBase
+ );
+
+PVOID
+MiInsertInSystemSpace (
+ IN ULONG SizeIn64k,
+ IN PCONTROL_AREA ControlArea
+ );
+
+ULONG
+MiRemoveFromSystemSpace (
+ IN PVOID Base,
+ OUT PCONTROL_AREA *ControlArea
+ );
+
+NTSTATUS
+MiAddMappedPtes (
+ IN PMMPTE FirstPte,
+ IN ULONG NumberOfPtes,
+ IN PCONTROL_AREA ControlArea,
+ IN ULONG PteOffset,
+ IN ULONG SystemCache
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MiInitializeSystemSpaceMap)
+
+#pragma alloc_text(PAGE,NtMapViewOfSection)
+#pragma alloc_text(PAGE,MmMapViewOfSection)
+#pragma alloc_text(PAGE,MmSecureVirtualMemory)
+#pragma alloc_text(PAGE,MmUnsecureVirtualMemory)
+#pragma alloc_text(PAGE,CacheImageSymbols)
+
+#pragma alloc_text(PAGELK,MiMapViewOfPhysicalSection)
+#pragma alloc_text(PAGELK,MmMapViewInSystemSpace)
+#pragma alloc_text(PAGELK,MmUnmapViewInSystemSpace)
+#pragma alloc_text(PAGELK,MiInsertInSystemSpace)
+#pragma alloc_text(PAGELK,MiRemoveFromSystemSpace)
+
+#endif
+
+
+NTSTATUS
+NtMapViewOfSection(
+ IN HANDLE SectionHandle,
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN ULONG ZeroBits,
+ IN ULONG CommitSize,
+ IN OUT PLARGE_INTEGER SectionOffset OPTIONAL,
+ IN OUT PULONG ViewSize,
+ IN SECTION_INHERIT InheritDisposition,
+ IN ULONG AllocationType,
+ IN ULONG Protect
+ )
+
+/*++
+
+Routine Description:
+
+ This function maps a view in the specified subject process to
+ the section object.
+
+Arguments:
+
+ SectionHandle - Supplies an open handle to a section object.
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - Supplies a pointer to a variable that will receive
+ the base address of the view. If the initial value
+ of this argument is not null, then the view will
+ be allocated starting at the specified virtual
+ address rounded down to the next 64kb address
+ boundary. If the initial value of this argument is
+ null, then the operating system will determine
+ where to allocate the view using the information
+ specified by the ZeroBits argument value and the
+ section allocation attributes (i.e. based and
+ tiled).
+
+ ZeroBits - Supplies the number of high order address bits that
+ must be zero in the base address of the section
+ view. The value of this argument must be less than
+ 21 and is only used when the operating system
+ determines where to allocate the view (i.e. when
+ BaseAddress is null).
+
+ CommitSize - Supplies the size of the initially committed region
+ of the view in bytes. This value is rounded up to
+ the next host page size boundary.
+
+ SectionOffset - Supplies the offset from the beginning of the
+ section to the view in bytes. This value is
+ rounded down to the next host page size boundary.
+
+ ViewSize - Supplies a pointer to a variable that will receive
+ the actual size in bytes of the view. If the value
+ of this argument is zero, then a view of the
+ section will be mapped starting at the specified
+ section offset and continuing to the end of the
+ section. Otherwise the initial value of this
+ argument specifies the size of the view in bytes
+ and is rounded up to the next host page size
+ boundary.
+
+ InheritDisposition - Supplies a value that specifies how the
+ view is to be shared by a child process created
+ with a create process operation.
+
+ InheritDisposition Values
+
+ ViewShare - Inherit view and share a single copy
+ of the committed pages with a child process
+ using the current protection value.
+
+ ViewUnmap - Do not map the view into a child
+ process.
+
+ AllocationType - Supplies the type of allocation.
+
+ MEM_TOP_DOWN
+ MEM_DOS_LIM
+ MEM_LARGE_PAGES
+
+ Protect - Supplies the protection desired for the region of
+ initially committed pages.
+
+ Protect Values
+
+
+ PAGE_NOACCESS - No access to the committed region
+ of pages is allowed. An attempt to read,
+ write, or execute the committed region
+ results in an access violation (i.e. a GP
+ fault).
+
+ PAGE_EXECUTE - Execute access to the committed
+ region of pages is allowed. An attempt to
+ read or write the committed region results in
+ an access violation.
+
+ PAGE_READONLY - Read only and execute access to the
+ committed region of pages is allowed. An
+ attempt to write the committed region results
+ in an access violation.
+
+ PAGE_READWRITE - Read, write, and execute access to
+ the region of committed pages is allowed. If
+ write access to the underlying section is
+ allowed, then a single copy of the pages are
+ shared. Otherwise the pages are shared read
+ only/copy on write.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+--*/
+
+{
+ PSECTION Section;
+ PEPROCESS Process;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+ PVOID CapturedBase;
+ ULONG CapturedViewSize;
+ LARGE_INTEGER TempViewSize;
+ LARGE_INTEGER CapturedOffset;
+ ACCESS_MASK DesiredSectionAccess;
+ ULONG ProtectMaskForAccess;
+
+ PAGED_CODE();
+
+ //
+ // Check the zero bits argument for correctness.
+ //
+
+ if (ZeroBits > 21) {
+ return STATUS_INVALID_PARAMETER_4;
+ }
+
+ //
+ // Check the inherit disposition flags.
+ //
+
+ if ((InheritDisposition > ViewUnmap) ||
+ (InheritDisposition < ViewShare)) {
+ return STATUS_INVALID_PARAMETER_8;
+ }
+
+ //
+ // Check the allocation type field.
+ //
+
+#ifdef i386
+
+ //
+    // Only allow DOS_LIM support for i386.  The MEM_DOS_LIM flag allows
+    // mapped views of data sections to be placed on 4k boundaries rather
+    // than 64k boundaries.
+ //
+
+ if ((AllocationType & ~(MEM_TOP_DOWN | MEM_LARGE_PAGES | MEM_DOS_LIM | SEC_NO_CHANGE)) != 0) {
+ return STATUS_INVALID_PARAMETER_9;
+ }
+#else
+ if ((AllocationType & ~(MEM_TOP_DOWN | MEM_LARGE_PAGES | SEC_NO_CHANGE)) != 0) {
+ return STATUS_INVALID_PARAMETER_9;
+ }
+
+#endif //i386
+
+ //
+ // Check the protection field. This could raise an exception.
+ //
+
+ try {
+ ProtectMaskForAccess = MiMakeProtectionMask (Protect) & 0x7;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ DesiredSectionAccess = MmMakeSectionAccess[ProtectMaskForAccess];
+
+ PreviousMode = KeGetPreviousMode();
+
+ //
+ // Establish an exception handler, probe the specified addresses
+ // for write access and capture the initial values.
+ //
+
+ try {
+ if (PreviousMode != KernelMode) {
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (ViewSize);
+
+ }
+
+ if (ARGUMENT_PRESENT (SectionOffset)) {
+ if (PreviousMode != KernelMode) {
+ ProbeForWrite (SectionOffset,
+ sizeof(LARGE_INTEGER),
+ sizeof(ULONG));
+ }
+ CapturedOffset = *SectionOffset;
+ } else {
+ ZERO_LARGE (CapturedOffset);
+ }
+
+ //
+ // Capture the base address.
+ //
+
+ CapturedBase = *BaseAddress;
+
+ //
+ // Capture the region size.
+ //
+
+ CapturedViewSize = *ViewSize;
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( !MmWatchProcess ) {
+ DbgPrint("mapview process handle %lx section %lx base address %lx zero bits %lx\n",
+ ProcessHandle, SectionHandle, CapturedBase, ZeroBits);
+ DbgPrint(" view size %lx offset %lx commitsize %lx protect %lx\n",
+ CapturedViewSize, CapturedOffset.LowPart, CommitSize, Protect);
+ DbgPrint(" Inheritdisp %lx Allocation type %lx\n",
+ InheritDisposition, AllocationType);
+ }
+ }
+#endif
+
+ //
+ // Make sure the specified starting and ending addresses are
+ // within the user part of the virtual address space.
+ //
+
+ if (CapturedBase > MM_HIGHEST_VAD_ADDRESS) {
+
+ //
+ // Invalid base address.
+ //
+
+ return STATUS_INVALID_PARAMETER_3;
+ }
+
+ if (((ULONG)MM_HIGHEST_VAD_ADDRESS - (ULONG)CapturedBase) <
+ CapturedViewSize) {
+
+ //
+        // Invalid region size.
+ //
+
+ return STATUS_INVALID_PARAMETER_3;
+
+ }
+
+ if (((ULONG)CapturedBase + CapturedViewSize) > (0xFFFFFFFF >> ZeroBits)) {
+
+ //
+ // Desired Base and zero_bits conflict.
+ //
+
+ return STATUS_INVALID_PARAMETER_4;
+ }
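+
+    //
+    // (For example, with ZeroBits == 2 the entire view must lie
+    // below 0x40000000, since 0xFFFFFFFF >> 2 == 0x3FFFFFFF.)
+    //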
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL );
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ //
+ // Reference the section object, if a view is mapped to the section
+ // object, the object is not dereferenced as the virtual address
+ // descriptor contains a pointer to the section object.
+ //
+
+ Status = ObReferenceObjectByHandle ( SectionHandle,
+ DesiredSectionAccess,
+ MmSectionObjectType,
+ PreviousMode,
+ (PVOID *)&Section,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ goto ErrorReturn1;
+ }
+
+ if (Section->u.Flags.Image == 0) {
+
+ //
+        // This is not an image section; make sure the section page
+        // protection is compatible with the specified page protection.
+ //
+
+ if (!MiIsProtectionCompatible (Section->InitialPageProtection,
+ Protect)) {
+ Status = STATUS_SECTION_PROTECTION;
+ goto ErrorReturn;
+ }
+ }
+
+ //
+    // Check to see if this section backs physical memory; if
+    // so, DON'T align the offset on a 64K boundary, just on a 4k boundary.
+ //
+
+ if (Section->Segment->ControlArea->u.Flags.PhysicalMemory) {
+ CapturedOffset.LowPart = (ULONG)PAGE_ALIGN (CapturedOffset.LowPart);
+ } else {
+
+ //
+ // Make sure alignments are correct for specified address
+ // and offset into the file.
+ //
+
+ if ((AllocationType & MEM_DOS_LIM) == 0) {
+ if (((ULONG)*BaseAddress & (X64K - 1)) != 0) {
+ Status = STATUS_MAPPED_ALIGNMENT;
+ goto ErrorReturn;
+ }
+
+ if ((ARGUMENT_PRESENT (SectionOffset)) &&
+ ((SectionOffset->LowPart & (X64K - 1)) != 0)) {
+ Status = STATUS_MAPPED_ALIGNMENT;
+ goto ErrorReturn;
+ }
+ }
+ }
+
+ //
+ // Check to make sure the section offset is within the section.
+ //
+
+ if ((CapturedOffset.QuadPart + CapturedViewSize) >
+ Section->SizeOfSection.QuadPart) {
+
+ Status = STATUS_INVALID_VIEW_SIZE;
+ goto ErrorReturn;
+ }
+
+ if (CapturedViewSize == 0) {
+
+ //
+ // Set the view size to be size of the section less the offset.
+ //
+
+ TempViewSize.QuadPart = Section->SizeOfSection.QuadPart -
+ CapturedOffset.QuadPart;
+
+ CapturedViewSize = TempViewSize.LowPart;
+
+ if ((TempViewSize.HighPart != 0) ||
+ (((ULONG)MM_HIGHEST_VAD_ADDRESS - (ULONG)CapturedBase) <
+ CapturedViewSize)) {
+
+ //
+            // Invalid region size.
+ //
+
+ Status = STATUS_INVALID_VIEW_SIZE;
+ goto ErrorReturn;
+ }
+
+ } else {
+
+ //
+ // Check to make sure the view size plus the offset is less
+ // than the size of the section.
+ //
+
+ if ((CapturedViewSize + CapturedOffset.QuadPart) >
+ Section->SizeOfSection.QuadPart) {
+
+ Status = STATUS_INVALID_VIEW_SIZE;
+ goto ErrorReturn;
+ }
+ }
+
+ //
+ // Check commit size.
+ //
+
+ if (CommitSize > CapturedViewSize) {
+ Status = STATUS_INVALID_PARAMETER_5;
+ goto ErrorReturn;
+ }
+
+ Status = MmMapViewOfSection ( (PVOID)Section,
+ Process,
+ &CapturedBase,
+ ZeroBits,
+ CommitSize,
+ &CapturedOffset,
+ &CapturedViewSize,
+ InheritDisposition,
+ AllocationType,
+ Protect);
+
+ if (!NT_SUCCESS(Status) ) {
+ if ( (Section->Segment->ControlArea->u.Flags.Image) &&
+ Process == PsGetCurrentProcess() ) {
+ if (Status == STATUS_CONFLICTING_ADDRESSES ) {
+ DbgkMapViewOfSection(
+ SectionHandle,
+ CapturedBase,
+ CapturedOffset.LowPart,
+ CapturedViewSize
+ );
+ }
+ }
+ goto ErrorReturn;
+ }
+
+ //
+ // Anytime the current process maps an image file,
+ // a potential debug event occurs. DbgkMapViewOfSection
+ // handles these events.
+ //
+
+ if ( (Section->Segment->ControlArea->u.Flags.Image) &&
+ Process == PsGetCurrentProcess() ) {
+ if (Status != STATUS_IMAGE_NOT_AT_BASE ) {
+ DbgkMapViewOfSection(
+ SectionHandle,
+ CapturedBase,
+ CapturedOffset.LowPart,
+ CapturedViewSize
+ );
+ }
+ }
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ *ViewSize = CapturedViewSize;
+ *BaseAddress = CapturedBase;
+
+ if (ARGUMENT_PRESENT(SectionOffset)) {
+ *SectionOffset = CapturedOffset;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto ErrorReturn;
+ }
+
+#if 0 // test code...
+ if ((Status == STATUS_SUCCESS) &&
+ (Section->u.Flags.Image == 0)) {
+
+ PVOID Base;
+ ULONG Size = 0;
+ NTSTATUS Status;
+
+ Status = MmMapViewInSystemSpace ((PVOID)Section,
+ &Base,
+ &Size);
+ if (Status == STATUS_SUCCESS) {
+ MmUnmapViewInSystemSpace (Base);
+ }
+ }
+#endif //0
+
+ {
+ErrorReturn:
+ ObDereferenceObject (Section);
+ErrorReturn1:
+ ObDereferenceObject (Process);
+ return Status;
+ }
+}
+
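+//
+// For illustration only: a hedged sketch of a hypothetical caller
+// mapping a view of a section it has already opened, letting the
+// system choose the base address.  "SectionHandle" is an assumed
+// input.
+//
+#if 0
+    PVOID Base = NULL;
+    ULONG ViewSize = 0;             // zero maps to the end of the section
+    LARGE_INTEGER Offset;
+    NTSTATUS Status;
+
+    ZERO_LARGE (Offset);
+
+    Status = NtMapViewOfSection (SectionHandle,
+                                 NtCurrentProcess(),
+                                 &Base,
+                                 0,             // ZeroBits
+                                 0,             // CommitSize
+                                 &Offset,
+                                 &ViewSize,
+                                 ViewUnmap,
+                                 0,             // AllocationType
+                                 PAGE_READONLY);
+#endif
+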
+NTSTATUS
+MmMapViewOfSection(
+ IN PVOID SectionToMap,
+ IN PEPROCESS Process,
+ IN OUT PVOID *CapturedBase,
+ IN ULONG ZeroBits,
+ IN ULONG CommitSize,
+ IN OUT PLARGE_INTEGER SectionOffset,
+ IN OUT PULONG CapturedViewSize,
+ IN SECTION_INHERIT InheritDisposition,
+ IN ULONG AllocationType,
+ IN ULONG Protect
+ )
+
+/*++
+
+Routine Description:
+
+ This function maps a view in the specified subject process to
+ the section object.
+
+ This function is a kernel mode interface to allow LPC to map
+ a section given the section pointer to map.
+
+ This routine assumes all arguments have been probed and captured.
+
+ ********************************************************************
+ ********************************************************************
+ ********************************************************************
+
+ NOTE:
+
+ CapturedViewSize, SectionOffset, and CapturedBase must be
+ captured in non-paged system space (i.e., kernel stack).
+
+ ********************************************************************
+ ********************************************************************
+ ********************************************************************
+
+Arguments:
+
+ SectionToMap - Supplies a pointer to the section object.
+
+ Process - Supplies a pointer to the process object.
+
+ BaseAddress - Supplies a pointer to a variable that will receive
+ the base address of the view. If the initial value
+ of this argument is not null, then the view will
+ be allocated starting at the specified virtual
+ address rounded down to the next 64kb address
+ boundary. If the initial value of this argument is
+ null, then the operating system will determine
+ where to allocate the view using the information
+ specified by the ZeroBits argument value and the
+ section allocation attributes (i.e. based and
+ tiled).
+
+ ZeroBits - Supplies the number of high order address bits that
+ must be zero in the base address of the section
+ view. The value of this argument must be less than
+ 21 and is only used when the operating system
+ determines where to allocate the view (i.e. when
+ BaseAddress is null).
+
+ CommitSize - Supplies the size of the initially committed region
+ of the view in bytes. This value is rounded up to
+ the next host page size boundary.
+
+ SectionOffset - Supplies the offset from the beginning of the
+ section to the view in bytes. This value is
+ rounded down to the next host page size boundary.
+
+ ViewSize - Supplies a pointer to a variable that will receive
+ the actual size in bytes of the view. If the value
+ of this argument is zero, then a view of the
+ section will be mapped starting at the specified
+ section offset and continuing to the end of the
+ section. Otherwise the initial value of this
+ argument specifies the size of the view in bytes
+ and is rounded up to the next host page size
+ boundary.
+
+ InheritDisposition - Supplies a value that specifies how the
+ view is to be shared by a child process created
+ with a create process operation.
+
+ AllocationType - Supplies the type of allocation.
+
+ Protect - Supplies the protection desired for the region of
+ initially committed pages.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
+{
+ BOOLEAN Attached = FALSE;
+ PSECTION Section;
+ PCONTROL_AREA ControlArea;
+ ULONG ProtectionMask;
+ NTSTATUS status;
+ BOOLEAN ReleasedWsMutex = TRUE;
+ ULONG ImageCommitment;
+
+ PAGED_CODE();
+
+ Section = (PSECTION)SectionToMap;
+
+ //
+ // Check to make sure the section is not smaller than the view size.
+ //
+
+ if (*CapturedViewSize > Section->SizeOfSection.LowPart) {
+ if ((LONGLONG)*CapturedViewSize >
+ Section->SizeOfSection.QuadPart) {
+
+ return STATUS_INVALID_VIEW_SIZE;
+ }
+ }
+
+    //
+    // If the section is marked no-cache, force the view to be mapped
+    // with the no-cache attribute.
+    //
+
+    if (Section->u.Flags.NoCache) {
+        Protect |= PAGE_NOCACHE;
+    }
+
+    //
+    // Check the protection field. This could raise an exception.
+    //
+
+    try {
+        ProtectionMask = MiMakeProtectionMask (Protect);
+    } except (EXCEPTION_EXECUTE_HANDLER) {
+        return GetExceptionCode();
+    }
+
+    ControlArea = Section->Segment->ControlArea;
+    ImageCommitment = Section->Segment->ImageCommitment;
+
+    //
+    // If the specified process is not the current process, attach
+    // to the specified process.
+    //
+
+    if (PsGetCurrentProcess() != Process) {
+        KeAttachProcess (&Process->Pcb);
+        Attached = TRUE;
+    }
+
+ //
+ // Get the address creation mutex to block multiple threads
+ // creating or deleting address space at the same time.
+ //
+
+ LOCK_ADDRESS_SPACE (Process);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (Process->AddressSpaceDeleted != 0) {
+ status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
+
+ //
+    // Map the view based on the type of section.
+ //
+
+ ReleasedWsMutex = FALSE;
+
+ if (ControlArea->u.Flags.PhysicalMemory) {
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+ status = MiMapViewOfPhysicalSection (ControlArea,
+ Process,
+ CapturedBase,
+ SectionOffset,
+ CapturedViewSize,
+ ProtectionMask,
+ ZeroBits,
+ AllocationType,
+ &ReleasedWsMutex);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+
+ } else if (ControlArea->u.Flags.Image) {
+
+ status = MiMapViewOfImageSection (
+ ControlArea,
+ Process,
+ CapturedBase,
+ SectionOffset,
+ CapturedViewSize,
+ Section,
+ InheritDisposition,
+ ZeroBits,
+ ImageCommitment,
+ &ReleasedWsMutex
+ );
+
+ } else {
+
+ //
+ // Not an image section, therefore it is a data section.
+ //
+
+ status = MiMapViewOfDataSection (ControlArea,
+ Process,
+ CapturedBase,
+ SectionOffset,
+ CapturedViewSize,
+ Section,
+ InheritDisposition,
+ ProtectionMask,
+ CommitSize,
+ ZeroBits,
+ AllocationType,
+ &ReleasedWsMutex
+ );
+ }
+
+ErrorReturn:
+ if (!ReleasedWsMutex) {
+ UNLOCK_WS (Process);
+ }
+ UNLOCK_ADDRESS_SPACE (Process);
+
+ if (Attached) {
+ KeDetachProcess();
+ }
+
+ return status;
+}
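+
+// For illustration only: a hypothetical kernel-mode caller (such as the
+// LPC path mentioned in the routine description) might use
+// MmMapViewOfSection as sketched below. The locals are invented for the
+// example; SectionPointer and Process are assumed to be referenced
+// already, and the three captured arguments live on the kernel stack as
+// the routine requires.
+//
+//     PVOID Base = NULL;              // NULL => let MM choose the address
+//     ULONG ViewSize = 0;             // 0 => map through the end of section
+//     LARGE_INTEGER Offset = {0};
+//     NTSTATUS St;
+//
+//     St = MmMapViewOfSection (SectionPointer, Process, &Base, 0, 0,
+//                              &Offset, &ViewSize, ViewUnmap,
+//                              0, PAGE_READWRITE);
+//
+// On success, Base and ViewSize return the actual view placement; the
+// view is torn down later with MmUnmapViewOfSection.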
+
+#ifndef _ALPHA_
+
+NTSTATUS
+MiMapViewOfPhysicalSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PLARGE_INTEGER SectionOffset,
+ IN PULONG CapturedViewSize,
+ IN ULONG ProtectionMask,
+ IN ULONG ZeroBits,
+ IN ULONG AllocationType,
+ OUT PBOOLEAN ReleasedWsMutex
+ )
+
+/*++
+
+Routine Description:
+
+    This routine maps the specified physical section into the
+ specified process's address space.
+
+Arguments:
+
+ see MmMapViewOfSection above...
+
+ ControlArea - Supplies the control area for the section.
+
+ Process - Supplies the process pointer which is receiving the section.
+
+    ProtectionMask - Supplies the initial page protection mask.
+
+ ReleasedWsMutex - Supplies FALSE. If the working set mutex is
+ not held when returning this must be set to TRUE
+ so the caller will release the mutex.
+
+Return Value:
+
+ Status of the map view operation.
+
+Environment:
+
+ Kernel Mode, address creation mutex held.
+
+--*/
+
+{
+ PMMVAD Vad;
+ PVOID StartingAddress;
+ PVOID EndingAddress;
+ KIRQL OldIrql;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ MMPTE TempPte;
+ PMMPFN Pfn2;
+ ULONG PhysicalViewSize;
+ ULONG Alignment;
+#ifdef LARGE_PAGES
+ ULONG size;
+ PMMPTE protoPte;
+ ULONG pageSize;
+ PSUBSECTION Subsection;
+#endif //LARGE_PAGES
+
+ //
+ // Physical memory section.
+ //
+
+ //
+ // If running on an R4000 and MEM_LARGE_PAGES is specified,
+ // set up the PTEs as a series of pointers to the same
+ // prototype PTE. This prototype PTE will reference a subsection
+ // that indicates large pages should be used.
+ //
+ // The R4000 supports pages of 4k, 16k, 64k, etc (powers of 4).
+ // Since the TB supports 2 entries, sizes of 8k, 32k etc can
+ // be mapped by 2 LargePages in a single TB entry. These 2 entries
+ // are maintained in the subsection structure pointed to by the
+ // prototype PTE.
+ //
+
+ Alignment = X64K;
+ LOCK_WS (Process);
+
+#ifdef LARGE_PAGES
+ if (AllocationType & MEM_LARGE_PAGES) {
+
+ //
+ // Determine the page size and the required alignment.
+ //
+
+ if ((SectionOffset->LowPart & (X64K - 1)) != 0) {
+ return STATUS_INVALID_PARAMETER_9;
+ }
+
+ size = (*CapturedViewSize - 1) >> (PAGE_SHIFT + 1);
+ pageSize = PAGE_SIZE;
+
+ while (size != 0) {
+ size = size >> 2;
+ pageSize = pageSize << 2;
+ }
+
+ Alignment = pageSize << 1;
+ if (Alignment < MM_VA_MAPPED_BY_PDE) {
+ Alignment = MM_VA_MAPPED_BY_PDE;
+ }
+ }
+#endif //LARGE_PAGES
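+
+    //
+    // For example, with PAGE_SIZE = 4k (PAGE_SHIFT = 12) and a view size
+    // of 24k, the large-page computation above yields:
+    //
+    //     size = (24k - 1) >> 13 = 2;      // then one loop iteration:
+    //     size = 0; pageSize = 16k;        // smallest power-of-4 fit
+    //
+    // giving Alignment = 32k (two 16k pages in one paired TB entry),
+    // which is then raised to MM_VA_MAPPED_BY_PDE as a minimum.
+    //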
+
+ if (*CapturedBase == NULL) {
+
+ //
+ // Attempt to locate address space. This could raise an
+ // exception.
+ //
+
+ try {
+
+ //
+ // Find a starting address on a 64k boundary.
+ //
+#ifdef i386
+ ASSERT (SectionOffset->HighPart == 0);
+#endif
+
+#ifdef LARGE_PAGES
+ if (AllocationType & MEM_LARGE_PAGES) {
+ PhysicalViewSize = Alignment;
+ } else {
+#endif //LARGE_PAGES
+
+ PhysicalViewSize = *CapturedViewSize +
+ (SectionOffset->LowPart & (X64K - 1));
+#ifdef LARGE_PAGES
+ }
+#endif //LARGE_PAGES
+
+ StartingAddress = MiFindEmptyAddressRange (PhysicalViewSize,
+ Alignment,
+ ZeroBits);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ return GetExceptionCode();
+ }
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ PhysicalViewSize - 1L) | (PAGE_SIZE - 1L));
+ StartingAddress = (PVOID)((ULONG)StartingAddress +
+ (SectionOffset->LowPart & (X64K - 1)));
+
+ if (ZeroBits > 0) {
+ if (EndingAddress > (PVOID)((ULONG)0xFFFFFFFF >> ZeroBits)) {
+ return STATUS_NO_MEMORY;
+ }
+ }
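+
+        //
+        // For example, ZeroBits = 2 requires the view to end at or below
+        // (0xFFFFFFFF >> 2) = 0x3FFFFFFF, i.e. the top two bits of every
+        // address in the view must be zero.
+        //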
+
+ } else {
+
+ //
+ // Check to make sure the specified base address to ending address
+ // is currently unused.
+ //
+
+ StartingAddress = (PVOID)((ULONG)MI_64K_ALIGN(*CapturedBase) +
+ (SectionOffset->LowPart & (X64K - 1)));
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+
+#ifdef LARGE_PAGES
+ if (AllocationType & MEM_LARGE_PAGES) {
+ if (((ULONG)StartingAddress & (Alignment - 1)) != 0) {
+ return STATUS_CONFLICTING_ADDRESSES;
+ }
+ EndingAddress = (PVOID)((ULONG)StartingAddress + Alignment);
+ }
+#endif //LARGE_PAGES
+
+ Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+
+ if (Vad != (PMMVAD)NULL) {
+#if DBG
+ MiDumpConflictingVad (StartingAddress, EndingAddress, Vad);
+#endif
+
+ return STATUS_CONFLICTING_ADDRESSES;
+ }
+ }
+
+ //
+    // An unoccupied address range has been found; build the virtual
+ // address descriptor to describe this range.
+ //
+
+#ifdef LARGE_PAGES
+ if (AllocationType & MEM_LARGE_PAGES) {
+ //
+ // Allocate a subsection and 4 prototype PTEs to hold
+ // the information for the large pages.
+ //
+
+ Subsection = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(SUBSECTION) + (4 * sizeof(MMPTE)),
+ MMPPTE_NAME);
+ if (Subsection == NULL) {
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+ }
+#endif //LARGE_PAGES
+
+ //
+ // Establish an exception handler and attempt to allocate
+ // the pool and charge quota. Note that the InsertVad routine
+ // will also charge quota which could raise an exception.
+ //
+
+ try {
+
+ Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMVAD),
+ MMVADKEY);
+ if (Vad == NULL) {
+ ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
+ }
+
+ Vad->StartingVa = StartingAddress;
+ Vad->EndingVa = EndingAddress;
+ Vad->ControlArea = ControlArea;
+ Vad->u.LongFlags = 0;
+ Vad->u2.LongFlags2 = 0;
+ Vad->u.VadFlags.Inherit = MM_VIEW_UNMAP;
+ Vad->u.VadFlags.PhysicalMapping = 1;
+ Vad->u.VadFlags.Protection = ProtectionMask;
+ Vad->Banked = NULL;
+
+ //
+ // Set the last contiguous PTE field in the Vad to the page frame
+ // number of the starting physical page.
+ //
+
+ Vad->LastContiguousPte = (PMMPTE)(ULONG)(
+ SectionOffset->QuadPart >> PAGE_SHIFT);
+#ifdef LARGE_PAGES
+ if (AllocationType & MEM_LARGE_PAGES) {
+ Vad->u.VadFlags.LargePages = 1;
+ Vad->FirstPrototypePte = (PMMPTE)Subsection;
+ } else {
+#endif //LARGE_PAGES
+ // Vad->u.VadFlags.LargePages = 0;
+ Vad->FirstPrototypePte = Vad->LastContiguousPte;
+#ifdef LARGE_PAGES
+ }
+#endif //LARGE_PAGES
+
+ //
+ // Insert the VAD. This could get an exception.
+ //
+
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ if (Vad != (PMMVAD)NULL) {
+
+ //
+            // The pool allocation succeeded, but the quota charge
+            // in MiInsertVad failed; deallocate the pool and return
+            // an error.
+ //
+
+ ExFreePool (Vad);
+#ifdef LARGE_PAGES
+ if (AllocationType & MEM_LARGE_PAGES) {
+ ExFreePool (Subsection);
+ }
+#endif //LARGE_PAGES
+ return GetExceptionCode();
+ }
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ //
+ // Increment the count of the number of views for the
+ // section object. This requires the PFN mutex to be held.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ ControlArea->NumberOfMappedViews += 1;
+ ControlArea->NumberOfUserReferences += 1;
+
+ ASSERT (ControlArea->NumberOfSectionReferences != 0);
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Build the PTEs in the address space.
+ //
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ (ULONG)Vad->LastContiguousPte,
+ ProtectionMask,
+ PointerPte);
+
+ if (TempPte.u.Hard.Write) {
+ MI_SET_PTE_DIRTY (TempPte);
+ }
+
+#ifdef LARGE_PAGES
+ if (AllocationType & MEM_LARGE_PAGES) {
+ Subsection->StartingSector = pageSize;
+ Subsection->EndingSector = (ULONG)StartingAddress;
+ Subsection->u.LongFlags = 0;
+ Subsection->u.SubsectionFlags.LargePages = 1;
+ protoPte = (PMMPTE)(Subsection + 1);
+
+ //
+ // Build the first 2 ptes as entries for the TLB to
+ // map the specified physical address.
+ //
+
+ *protoPte = TempPte;
+ protoPte += 1;
+
+ if (*CapturedViewSize > pageSize) {
+ *protoPte = TempPte;
+ protoPte->u.Hard.PageFrameNumber += (pageSize >> PAGE_SHIFT);
+ } else {
+ *protoPte = ZeroPte;
+ }
+ protoPte += 1;
+
+ //
+ // Build the first prototype PTE as a paging file format PTE
+ // referring to the subsection.
+ //
+
+ protoPte->u.Long = (ULONG)MiGetSubsectionAddressForPte(Subsection);
+ protoPte->u.Soft.Prototype = 1;
+ protoPte->u.Soft.Protection = ProtectionMask;
+
+ //
+ // Set the PTE up for all the user's PTE entries, proto pte
+ // format pointing to the 3rd prototype PTE.
+ //
+
+ TempPte.u.Long = MiProtoAddressForPte (protoPte);
+ }
+#endif // LARGE_PAGES
+
+#ifdef LARGE_PAGES
+ if (!(AllocationType & MEM_LARGE_PAGES)) {
+#endif //LARGE_PAGES
+
+ MiMakePdeExistAndMakeValid (PointerPde, Process, FALSE);
+ Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
+
+ while (PointerPte <= LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+ Pfn2 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
+ }
+
+ ASSERT (PointerPte->u.Long == 0);
+
+ *PointerPte = TempPte;
+ Pfn2->u2.ShareCount += 1;
+
+ //
+        // Increment the count of non-zero page table entries for this
+ // page table and the number of private pages for the process.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries
+ [MiGetPteOffset(PointerPte)] += 1;
+
+ PointerPte += 1;
+ TempPte.u.Hard.PageFrameNumber += 1;
+ }
+#ifdef LARGE_PAGES
+ }
+#endif //LARGE_PAGES
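+
+    //
+    // The loop above filled the range with valid PTEs whose frame numbers
+    // increase by one per page, starting at the frame captured in
+    // Vad->LastContiguousPte; i.e. virtual page n of the view maps
+    // physical frame (base frame + n).
+    //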
+
+ UNLOCK_WS (Process);
+ *ReleasedWsMutex = TRUE;
+
+ //
+ // Update the current virtual size in the process header.
+ //
+
+ *CapturedViewSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1L;
+ Process->VirtualSize += *CapturedViewSize;
+
+ if (Process->VirtualSize > Process->PeakVirtualSize) {
+ Process->PeakVirtualSize = Process->VirtualSize;
+ }
+
+ *CapturedBase = StartingAddress;
+
+ return STATUS_SUCCESS;
+}
+
+#endif //!_ALPHA_
+
+
+NTSTATUS
+MiMapViewOfImageSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PLARGE_INTEGER SectionOffset,
+ IN PULONG CapturedViewSize,
+ IN PSECTION Section,
+ IN SECTION_INHERIT InheritDisposition,
+ IN ULONG ZeroBits,
+ IN ULONG ImageCommitment,
+ IN OUT PBOOLEAN ReleasedWsMutex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps the specified Image section into the
+ specified process's address space.
+
+Arguments:
+
+ see MmMapViewOfSection above...
+
+ ControlArea - Supplies the control area for the section.
+
+ Process - Supplies the process pointer which is receiving the section.
+
+ ReleasedWsMutex - Supplies FALSE. If the working set mutex is
+ not held when returning this must be set to TRUE
+ so the caller will release the mutex.
+
+Return Value:
+
+ Status of the map view operation.
+
+Environment:
+
+    Kernel Mode, address creation mutex held.
+
+--*/
+
+{
+ PMMVAD Vad;
+ PVOID StartingAddress;
+ PVOID EndingAddress;
+ BOOLEAN Attached = FALSE;
+ KIRQL OldIrql;
+ PSUBSECTION Subsection;
+ ULONG PteOffset;
+ NTSTATUS ReturnedStatus;
+ PMMPTE ProtoPte;
+ PVOID BasedAddress;
+
+ //
+ // Image file.
+ //
+ // Locate the first subsection (text) and create a virtual
+ // address descriptor to map the entire image here.
+ //
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ if (ControlArea->u.Flags.ImageMappedInSystemSpace) {
+
+ //
+ // Mapping in system space as a driver, hence copy on write does
+ // not work. Don't allow user processes to map the image.
+ //
+
+ return STATUS_CONFLICTING_ADDRESSES;
+ }
+
+ //
+ // Check to see if a purge operation is in progress and if so, wait
+ // for the purge to complete. In addition, up the count of mapped
+ // views for this control area.
+ //
+
+ MiCheckPurgeAndUpMapCount (ControlArea);
+
+ //
+ // Capture the based address to the stack, to prevent page faults.
+ //
+
+ BasedAddress = ControlArea->Segment->BasedAddress;
+
+ if (*CapturedViewSize == 0) {
+ *CapturedViewSize = (ULONG)(Section->SizeOfSection.QuadPart -
+ SectionOffset->QuadPart);
+ }
+
+ LOCK_WS (Process);
+
+ ReturnedStatus = STATUS_SUCCESS;
+
+ //
+ // Determine if a specific base was specified.
+ //
+
+ if (*CapturedBase != NULL) {
+
+ //
+ // Check to make sure the specified base address to ending address
+ // is currently unused.
+ //
+
+ StartingAddress = MI_64K_ALIGN(*CapturedBase);
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+
+ Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+
+ if (Vad != NULL) {
+#if DBG
+ MiDumpConflictingVad (StartingAddress, EndingAddress, Vad);
+#endif
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ return STATUS_CONFLICTING_ADDRESSES;
+ }
+
+ if (((ULONG)StartingAddress +
+ (ULONG)MI_64K_ALIGN(SectionOffset->LowPart)) !=
+ (ULONG)BasedAddress) {
+
+ //
+ // Indicate the image does not reside at its base address.
+ //
+
+ ReturnedStatus = STATUS_IMAGE_NOT_AT_BASE;
+ }
+
+ } else {
+
+ //
+ // Captured base is NULL, attempt to base the image at its specified
+ // address.
+ //
+
+ StartingAddress = (PVOID)((ULONG)BasedAddress +
+ (ULONG)MI_64K_ALIGN(SectionOffset->LowPart));
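+
+        //
+        // For example (the addresses are illustrative only), for an image
+        // based at 0x77E80000 and a section offset of 0x14000,
+        // MI_64K_ALIGN yields 0x10000 and the view is attempted at
+        // 0x77E90000.
+        //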
+
+ //
+ // Check to make sure the specified base address to ending address
+ // is currently unused.
+ //
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+
+ if (*CapturedViewSize > (ULONG)PAGE_ALIGN((PVOID)MM_HIGHEST_VAD_ADDRESS)) {
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ return STATUS_NO_MEMORY;
+ }
+
+ if ((StartingAddress < MM_LOWEST_USER_ADDRESS) ||
+ (StartingAddress > MM_HIGHEST_VAD_ADDRESS) ||
+ (EndingAddress > MM_HIGHEST_VAD_ADDRESS)) {
+
+ //
+ // Indicate if the starting address is below the lowest address,
+ // or the ending address above the highest address, so that
+ // the address range will be searched for a valid address.
+ //
+
+ Vad = (PMMVAD)1; //not NULL
+ } else {
+
+#ifdef MIPS
+
+ //
+ // MIPS cannot have images cross a 256mb boundary because
+ // relative jumps are within 256mb.
+ //
+
+ if (((ULONG)StartingAddress & ~(X256MEG - 1)) !=
+ ((ULONG)EndingAddress & ~(X256MEG - 1))) {
+ Vad = (PMMVAD)1;
+ } else {
+ Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+ }
+
+#else
+ Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+#endif //MIPS
+ }
+
+ if (Vad != (PMMVAD)NULL) {
+
+ //
+            // The image could not be mapped at its natural base address;
+            // try to find another place to map it.
+ //
+#if DBG
+ MiDumpConflictingVad (StartingAddress, EndingAddress, Vad);
+#endif
+
+ ReturnedStatus = STATUS_IMAGE_NOT_AT_BASE;
+
+ try {
+
+ //
+ // Find a starting address on a 64k boundary.
+ //
+
+#ifdef MIPS
+ //
+                // MIPS images cannot span 256mb boundaries. Find twice
+                // the required size so that the image can be placed
+                // correctly.
+ //
+
+ StartingAddress = MiFindEmptyAddressRange (
+ *CapturedViewSize * 2 + X64K,
+ X64K,
+ ZeroBits);
+
+#else
+ StartingAddress = MiFindEmptyAddressRange (*CapturedViewSize,
+ X64K,
+ ZeroBits);
+#endif //MIPS
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ return GetExceptionCode();
+ }
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+#ifdef MIPS
+ if (((ULONG)StartingAddress & ~(X256MEG - 1)) !=
+ ((ULONG)EndingAddress & ~(X256MEG - 1))) {
+ //
+ // Not in the same 256mb. Up the start to a 256mb boundary.
+ //
+
+ StartingAddress = (PVOID)((ULONG)EndingAddress & ~(X256MEG - 1));
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+ }
+#endif //MIPS
+ }
+ }
+
+ //
+    // Allocate and initialize a virtual address descriptor to describe
+    // this range. Charging quota for the VAD could raise an exception.
+ //
+
+ try {
+
+ Vad = (PMMVAD)NULL;
+ Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD),
+ MMVADKEY);
+ if (Vad == NULL) {
+ ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
+ }
+
+ Vad->StartingVa = StartingAddress;
+ Vad->EndingVa = EndingAddress;
+ Vad->u.LongFlags = 0;
+ Vad->u2.LongFlags2 = 0;
+ Vad->u.VadFlags.Inherit = (InheritDisposition == ViewShare);
+ Vad->u.VadFlags.ImageMap = 1;
+
+ //
+ // Set the protection in the VAD as EXECUTE_WRITE_COPY.
+ //
+
+ Vad->u.VadFlags.Protection = MM_EXECUTE_WRITECOPY;
+ Vad->ControlArea = ControlArea;
+ Vad->Banked = NULL;
+
+ //
+ // Set the first prototype PTE field in the Vad.
+ //
+
+ SectionOffset->LowPart = (ULONG)MI_64K_ALIGN (SectionOffset->LowPart);
+ PteOffset = (ULONG)(SectionOffset->QuadPart >> PAGE_SHIFT);
+
+ Vad->FirstPrototypePte = &Subsection->SubsectionBase[PteOffset];
+ Vad->LastContiguousPte = MM_ALLOCATION_FILLS_VAD;
+
+ //
+ // NOTE: the full commitment is charged even if a partial map of an
+ // image is being done. This saves from having to run through the
+ // entire image (via prototype PTEs) and calculate the charge on
+ // a per page basis for the partial map.
+ //
+
+ Vad->u.VadFlags.CommitCharge = ImageCommitment;
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ if (Vad != (PMMVAD)NULL) {
+
+ //
+            // The pool allocation succeeded, but the quota charge
+            // in MiInsertVad failed; deallocate the pool and return
+            // an error.
+ //
+
+ ExFreePool (Vad);
+ return GetExceptionCode();
+ }
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ *CapturedViewSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1L;
+ *CapturedBase = StartingAddress;
+
+#if DBG
+ if (MmDebug & MM_DBG_WALK_VAD_TREE) {
+ DbgPrint("mapped image section vads\n");
+ VadTreeWalk(Process->VadRoot);
+ }
+#endif
+
+ //
+ // Update the current virtual size in the process header.
+ //
+
+ Process->VirtualSize += *CapturedViewSize;
+
+ if (Process->VirtualSize > Process->PeakVirtualSize) {
+ Process->PeakVirtualSize = Process->VirtualSize;
+ }
+
+ if (ControlArea->u.Flags.FloppyMedia) {
+
+ *ReleasedWsMutex = TRUE;
+ UNLOCK_WS (Process);
+
+ //
+        // The image resides on a floppy disk; in-page all
+        // pages from the floppy and mark them as modified so
+        // they migrate to the paging file rather than being reread
+        // from the floppy disk, which may have been removed.
+ //
+
+ ProtoPte = Vad->FirstPrototypePte;
+
+ //
+ // This could get an in-page error from the floppy.
+ //
+
+ while (StartingAddress < EndingAddress) {
+
+ //
+ // If the prototype PTE is valid, transition or
+ // in prototype PTE format, bring the page into
+ // memory and set the modified bit.
+ //
+
+ if ((ProtoPte->u.Hard.Valid == 1) ||
+ (ProtoPte->u.Soft.Prototype == 1) ||
+ (ProtoPte->u.Soft.Transition == 1)) {
+
+ try {
+
+ MiSetPageModified (StartingAddress);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+                    // An in-page error must have occurred while touching
+                    // the image; ignore the error and continue to the
+                    // next page.
+ //
+
+ NOTHING;
+ }
+ }
+ ProtoPte += 1;
+ StartingAddress = (PVOID)((ULONG)StartingAddress + PAGE_SIZE);
+ }
+ }
+
+ if (!*ReleasedWsMutex) {
+ *ReleasedWsMutex = TRUE;
+ UNLOCK_WS (Process);
+ }
+
+ if (NT_SUCCESS(ReturnedStatus)) {
+
+ //
+ // Check to see if this image is for the architecture of the current
+ // machine.
+ //
+
+ if (ControlArea->Segment->ImageInformation.ImageContainsCode &&
+ ((ControlArea->Segment->ImageInformation.Machine <
+ USER_SHARED_DATA->ImageNumberLow) ||
+ (ControlArea->Segment->ImageInformation.Machine >
+ USER_SHARED_DATA->ImageNumberHigh)
+ )
+ ) {
+ return STATUS_IMAGE_MACHINE_TYPE_MISMATCH;
+ }
+
+ StartingAddress = Vad->StartingVa;
+ if ((NtGlobalFlag & FLG_ENABLE_KDEBUG_SYMBOL_LOAD) &&
+ (ControlArea->u.Flags.Image) &&
+ (ReturnedStatus != STATUS_IMAGE_NOT_AT_BASE)) {
+ if (ControlArea->u.Flags.DebugSymbolsLoaded == 0) {
+ if (CacheImageSymbols (StartingAddress)) {
+
+ //
+ // TEMP TEMP TEMP rip out when debugger converted
+ //
+
+ PUNICODE_STRING FileName;
+ ANSI_STRING AnsiName;
+ NTSTATUS Status;
+
+ LOCK_PFN (OldIrql);
+ ControlArea->u.Flags.DebugSymbolsLoaded = 1;
+ UNLOCK_PFN (OldIrql);
+
+ FileName = (PUNICODE_STRING)&ControlArea->FilePointer->FileName;
+ if (FileName->Length != 0 && (NtGlobalFlag & FLG_ENABLE_KDEBUG_SYMBOL_LOAD)) {
+ PLIST_ENTRY Head, Next;
+ PLDR_DATA_TABLE_ENTRY Entry;
+
+ KeEnterCriticalRegion();
+ ExAcquireResourceExclusive (&PsLoadedModuleResource, TRUE);
+ Head = &MmLoadedUserImageList;
+ Next = Head->Flink;
+ while (Next != Head) {
+ Entry = CONTAINING_RECORD( Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks
+ );
+ if (Entry->DllBase == StartingAddress) {
+ Entry->LoadCount += 1;
+ break;
+ }
+ Next = Next->Flink;
+ }
+
+ if (Next == Head) {
+ Entry = ExAllocatePoolWithTag( NonPagedPool,
+ sizeof( *Entry ) +
+ FileName->Length +
+ sizeof( UNICODE_NULL ),
+ MMDB
+ );
+ if (Entry != NULL) {
+ PIMAGE_NT_HEADERS NtHeaders;
+
+ RtlZeroMemory( Entry, sizeof( *Entry ) );
+ NtHeaders = RtlImageNtHeader( StartingAddress );
+ if (NtHeaders != NULL) {
+ Entry->SizeOfImage = NtHeaders->OptionalHeader.SizeOfImage;
+ Entry->CheckSum = NtHeaders->OptionalHeader.CheckSum;
+ }
+ Entry->DllBase = StartingAddress;
+ Entry->FullDllName.Buffer = (PWSTR)(Entry+1);
+ Entry->FullDllName.Length = FileName->Length;
+ Entry->FullDllName.MaximumLength = (USHORT)
+ (Entry->FullDllName.Length + sizeof( UNICODE_NULL ));
+ RtlMoveMemory( Entry->FullDllName.Buffer,
+ FileName->Buffer,
+ FileName->Length
+ );
+ Entry->FullDllName.Buffer[ Entry->FullDllName.Length / sizeof( WCHAR )] = UNICODE_NULL;
+ Entry->LoadCount = 1;
+ InsertTailList( &MmLoadedUserImageList,
+ &Entry->InLoadOrderLinks
+ );
+ InitializeListHead( &Entry->InInitializationOrderLinks );
+ InitializeListHead( &Entry->InMemoryOrderLinks );
+ }
+ }
+
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+ }
+
+ Status = RtlUnicodeStringToAnsiString( &AnsiName,
+ FileName,
+ TRUE );
+
+ if (NT_SUCCESS( Status)) {
+ DbgLoadImageSymbols( &AnsiName,
+ StartingAddress,
+ (ULONG)Process
+ );
+ RtlFreeAnsiString( &AnsiName );
+ }
+ }
+ }
+ }
+ }
+
+ return ReturnedStatus;
+}
+
+NTSTATUS
+MiMapViewOfDataSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PLARGE_INTEGER SectionOffset,
+ IN PULONG CapturedViewSize,
+ IN PSECTION Section,
+ IN SECTION_INHERIT InheritDisposition,
+ IN ULONG ProtectionMask,
+ IN ULONG CommitSize,
+ IN ULONG ZeroBits,
+ IN ULONG AllocationType,
+ IN PBOOLEAN ReleasedWsMutex
+ )
+
+/*++
+
+Routine Description:
+
+    This routine maps the specified data section into the
+ specified process's address space.
+
+Arguments:
+
+ see MmMapViewOfSection above...
+
+ ControlArea - Supplies the control area for the section.
+
+ Process - Supplies the process pointer which is receiving the section.
+
+    ProtectionMask - Supplies the initial page protection mask.
+
+ ReleasedWsMutex - Supplies FALSE. If the working set mutex is
+ not held when returning this must be set to TRUE
+ so the caller will release the mutex.
+
+Return Value:
+
+ Status of the map view operation.
+
+Environment:
+
+    Kernel Mode, address creation mutex held.
+
+--*/
+
+{
+ PMMVAD Vad;
+ PVOID StartingAddress;
+ PVOID EndingAddress;
+ BOOLEAN Attached = FALSE;
+ KIRQL OldIrql;
+ PSUBSECTION Subsection;
+ ULONG PteOffset;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ MMPTE TempPte;
+ ULONG Alignment;
+ ULONG QuotaCharge = 0;
+ BOOLEAN ChargedQuota = FALSE;
+ PMMPTE TheFirstPrototypePte;
+ PVOID CapturedStartingVa;
+ ULONG CapturedCopyOnWrite;
+
+ //
+    // Determine the required view alignment. MEM_DOS_LIM views are page
+    // aligned; all other views are 64k aligned.
+ //
+
+ if ((AllocationType & MEM_DOS_LIM) != 0) {
+ if (*CapturedBase == NULL) {
+
+ //
+ // If MEM_DOS_LIM is specified, the address to map the
+ // view MUST be specified as well.
+ //
+
+ *ReleasedWsMutex = TRUE;
+ return STATUS_INVALID_PARAMETER_3;
+ }
+ Alignment = PAGE_SIZE;
+ } else {
+ Alignment = X64K;
+ }
+
+ //
+ // Check to see if a purge operation is in progress and if so, wait
+ // for the purge to complete. In addition, up the count of mapped
+ // views for this control area.
+ //
+
+ MiCheckPurgeAndUpMapCount (ControlArea);
+
+ if (*CapturedViewSize == 0) {
+
+ SectionOffset->LowPart = (ULONG)MI_ALIGN_TO_SIZE (SectionOffset->LowPart,
+ Alignment);
+
+ *CapturedViewSize = (ULONG)(Section->SizeOfSection.QuadPart -
+ SectionOffset->QuadPart);
+ } else {
+ *CapturedViewSize += SectionOffset->LowPart & (Alignment - 1);
+ SectionOffset->LowPart = (ULONG)MI_ALIGN_TO_SIZE (SectionOffset->LowPart,
+ Alignment);
+ }
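+
+    //
+    // For example, with the default 64k alignment a request for 0x5000
+    // bytes at section offset 0x23000 becomes a view of 0x8000 bytes at
+    // offset 0x20000, keeping the requested bytes inside the mapped view.
+    //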
+
+ if ((LONG)*CapturedViewSize <= 0) {
+
+ //
+ // Section offset or view size past size of section.
+ //
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ *ReleasedWsMutex = TRUE;
+ return STATUS_INVALID_VIEW_SIZE;
+ }
+
+ //
+    // Calculate the first prototype PTE field in the Vad.
+ //
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+ SectionOffset->LowPart = (ULONG)MI_ALIGN_TO_SIZE (
+ SectionOffset->LowPart,
+ Alignment);
+ PteOffset = (ULONG)(SectionOffset->QuadPart >> PAGE_SHIFT);
+
+ //
+ // Make sure the PTEs are not in the extended part of the
+ // segment.
+ //
+
+ while (PteOffset >= Subsection->PtesInSubsection) {
+ PteOffset -= Subsection->PtesInSubsection;
+ Subsection = Subsection->NextSubsection;
+ ASSERT (Subsection != NULL);
+ }
+
+ TheFirstPrototypePte = &Subsection->SubsectionBase[PteOffset];
+
+ //
+    // Calculate the quota for the specified pages.
+ //
+
+ if ((ControlArea->FilePointer == NULL) &&
+ (CommitSize != 0) &&
+ (ControlArea->Segment->NumberOfCommittedPages <
+ ControlArea->Segment->TotalNumberOfPtes)) {
+
+
+ ExAcquireFastMutex (&MmSectionCommitMutex);
+
+ PointerPte = TheFirstPrototypePte;
+ LastPte = PointerPte + BYTES_TO_PAGES(CommitSize);
+
+ while (PointerPte < LastPte) {
+ if (PointerPte->u.Long == 0) {
+ QuotaCharge += 1;
+ }
+ PointerPte += 1;
+ }
+ ExReleaseFastMutex (&MmSectionCommitMutex);
+ }
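+
+    //
+    // QuotaCharge now holds the number of not-yet-committed prototype
+    // PTEs in the commit range; for example, a CommitSize of three pages
+    // with one page already committed yields a QuotaCharge of 2. The
+    // charge is taken below just before the VAD is inserted, and the
+    // prototype PTEs are actually committed only after the insert
+    // succeeds.
+    //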
+
+ CapturedStartingVa = Section->Address.StartingVa;
+ CapturedCopyOnWrite = Section->u.Flags.CopyOnWrite;
+ LOCK_WS (Process);
+
+ if ((*CapturedBase == NULL) && (CapturedStartingVa == NULL)) {
+
+ //
+ // The section is not based, find an empty range.
+        // This could raise an exception.
+        //
+
+ try {
+
+ //
+ // Find a starting address on a 64k boundary.
+ //
+
+ if ( AllocationType & MEM_TOP_DOWN ) {
+ StartingAddress = MiFindEmptyAddressRangeDown (
+ *CapturedViewSize,
+ (PVOID)((ULONG)MM_HIGHEST_VAD_ADDRESS + 1),
+ Alignment
+ );
+ } else {
+ StartingAddress = MiFindEmptyAddressRange (*CapturedViewSize,
+ Alignment,
+ ZeroBits);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+
+ return GetExceptionCode();
+ }
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+
+ if (ZeroBits > 0) {
+ if (EndingAddress > (PVOID)((ULONG)0xFFFFFFFF >> ZeroBits)) {
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ return STATUS_NO_MEMORY;
+ }
+ }
+
+ } else {
+
+ if (*CapturedBase == NULL) {
+
+ //
+ // The section is based.
+ //
+
+ StartingAddress = (PVOID)((ULONG)CapturedStartingVa +
+ SectionOffset->LowPart);
+ } else {
+
+ StartingAddress = MI_ALIGN_TO_SIZE (*CapturedBase, Alignment);
+
+ }
+
+ //
+ // Check to make sure the specified base address to ending address
+ // is currently unused.
+ //
+
+ EndingAddress = (PVOID)(((ULONG)StartingAddress +
+ *CapturedViewSize - 1L) | (PAGE_SIZE - 1L));
+
+ Vad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+ if (Vad != (PMMVAD)NULL) {
+#if DBG
+ MiDumpConflictingVad (StartingAddress, EndingAddress, Vad);
+#endif
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ return STATUS_CONFLICTING_ADDRESSES;
+ }
+ }
+
+ //
+    // An unoccupied address range has been found; build the virtual
+ // address descriptor to describe this range.
+ //
+
+ try {
+
+ Vad = (PMMVAD)NULL;
+ Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMVAD),
+ MMVADKEY);
+ if (Vad == NULL) {
+ ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
+ }
+
+ Vad->StartingVa = StartingAddress;
+ Vad->EndingVa = EndingAddress;
+ Vad->FirstPrototypePte = TheFirstPrototypePte;
+
+ //
+        // Set the protection in the PTE template field of the VAD.
+ //
+
+ Vad->ControlArea = ControlArea;
+
+ Vad->u.LongFlags = 0;
+ Vad->u2.LongFlags2 = 0;
+ Vad->u.VadFlags.Inherit = (InheritDisposition == ViewShare);
+ Vad->u.VadFlags.Protection = ProtectionMask;
+ Vad->u.VadFlags.CopyOnWrite = CapturedCopyOnWrite;
+ Vad->Banked = NULL;
+
+ if ((AllocationType & SEC_NO_CHANGE) || (Section->u.Flags.NoChange)) {
+ Vad->u.VadFlags.NoChange = 1;
+ Vad->u2.VadFlags2.SecNoChange = 1;
+ }
+
+ //
+ // If the page protection is write-copy or execute-write-copy
+ // charge for each page in the view as it may become private.
+ //
+
+ if (MI_IS_PTE_PROTECTION_COPY_WRITE(ProtectionMask)) {
+ Vad->u.VadFlags.CommitCharge = (BYTES_TO_PAGES ((ULONG)EndingAddress -
+ (ULONG)StartingAddress));
+ }
+
+ //
+ // If this is a page file backed section, charge the process's page
+        // file quota as if all the pages have been committed. This avoids
+        // the problem where other processes commit all the pages and then
+        // go away, leaving one process around that was never charged the
+        // proper quota; charging everyone the maximum quota solves it.
+ //
+//
+// commented out for commitment charging.
+//
+
+#if 0
+ if (ControlArea->FilePointer == NULL) {
+
+ //
+ // This is a page file backed section. Charge for all the pages.
+ //
+
+ Vad->CommitCharge += (BYTES_TO_PAGES ((ULONG)EndingAddress -
+ (ULONG)StartingAddress));
+ }
+#endif
+
+
+        PteOffset +=
+            (((ULONG)Vad->EndingVa - (ULONG)Vad->StartingVa) >> PAGE_SHIFT);
+
+ if (PteOffset < Subsection->PtesInSubsection ) {
+ Vad->LastContiguousPte = &Subsection->SubsectionBase[PteOffset];
+
+ } else {
+ Vad->LastContiguousPte = &Subsection->SubsectionBase[
+ Subsection->PtesInSubsection - 1];
+ }
+
+ if (QuotaCharge != 0) {
+ MiChargeCommitment (QuotaCharge, Process);
+ ChargedQuota = TRUE;
+ }
+
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+
+ if (Vad != (PMMVAD)NULL) {
+
+ //
+ // The pool allocation suceeded, but the quota charge
+ // in InsertVad failed, deallocate the pool and return
+ // and error.
+ //
+
+ ExFreePool (Vad);
+ if (ChargedQuota) {
+ MiReturnCommitment (QuotaCharge);
+ }
+ return GetExceptionCode();
+ }
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ *ReleasedWsMutex = TRUE;
+ UNLOCK_WS (Process);
+
+#if DBG
+ if (((ULONG)EndingAddress - (ULONG)StartingAddress) >
+ ROUND_TO_PAGES(Section->Segment->SizeOfSegment.LowPart)) {
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ ASSERT(((ULONG)EndingAddress - (ULONG)StartingAddress) <=
+ ROUND_TO_PAGES(Section->Segment->SizeOfSegment.LowPart));
+
+ //
+ // If a commit size was specified, make sure those pages are committed.
+ //
+
+ if (QuotaCharge != 0) {
+
+ ExAcquireFastMutex (&MmSectionCommitMutex);
+
+ PointerPte = Vad->FirstPrototypePte;
+ LastPte = PointerPte + BYTES_TO_PAGES(CommitSize);
+ TempPte = ControlArea->Segment->SegmentPteTemplate;
+
+ while (PointerPte < LastPte) {
+
+ if (PointerPte->u.Long == 0) {
+
+ *PointerPte = TempPte;
+ }
+ PointerPte += 1;
+ }
+
+ ControlArea->Segment->NumberOfCommittedPages += QuotaCharge;
+
+ ASSERT (ControlArea->Segment->NumberOfCommittedPages <=
+ ControlArea->Segment->TotalNumberOfPtes);
+ MmSharedCommit += QuotaCharge;
+
+ ExReleaseFastMutex (&MmSectionCommitMutex);
+ }
+
+ //
+ // Update the current virtual size in the process header.
+ //
+
+ *CapturedViewSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1L;
+ Process->VirtualSize += *CapturedViewSize;
+
+ if (Process->VirtualSize > Process->PeakVirtualSize) {
+ Process->PeakVirtualSize = Process->VirtualSize;
+ }
+
+ *CapturedBase = StartingAddress;
+
+ return STATUS_SUCCESS;
+}
+
+VOID
+MiCheckPurgeAndUpMapCount (
+ IN PCONTROL_AREA ControlArea
+ )
+
+/*++
+
+Routine Description:
+
+    This routine synchronizes with any ongoing purge operations
+    on the same segment (identified via the control area). If
+    another purge operation is occurring, the function blocks until
+    it has completed.
+
+    When this function returns, the NumberOfMappedViews and
+    NumberOfUserReferences counts for the control area will have been
+    incremented, thereby referencing the control area.
+
+Arguments:
+
+ ControlArea - Supplies the control area for the segment to be purged.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ PEVENT_COUNTER PurgedEvent = NULL;
+ PEVENT_COUNTER WaitEvent;
+ ULONG OldRef = 1;
+
+ LOCK_PFN (OldIrql);
+
+ while (ControlArea->u.Flags.BeingPurged != 0) {
+
+ //
+ // A purge operation is in progress.
+ //
+
+ if (PurgedEvent == NULL) {
+
+ //
+ // Release the locks and allocate pool for the event.
+ //
+
+ PurgedEvent = MiGetEventCounter ();
+ continue;
+ }
+
+ if (ControlArea->WaitingForDeletion == NULL) {
+ ControlArea->WaitingForDeletion = PurgedEvent;
+ WaitEvent = PurgedEvent;
+ PurgedEvent = NULL;
+ } else {
+ WaitEvent = ControlArea->WaitingForDeletion;
+ WaitEvent->RefCount += 1;
+ }
+
+ //
+ // Release the pfn lock and wait for the event.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(&WaitEvent->Event,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+ LOCK_PFN (OldIrql);
+ MiFreeEventCounter (WaitEvent, FALSE);
+ }
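+
+    //
+    // No purge is in progress at this point. At most one event counter
+    // was allocated above; if another waiter had already published one in
+    // WaitingForDeletion, it was shared (bumping its RefCount), and any
+    // event counter left over in PurgedEvent is freed below.
+    //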
+
+ //
+ // Indicate another file is mapped for the segment.
+ //
+
+ ControlArea->NumberOfMappedViews += 1;
+ ControlArea->NumberOfUserReferences += 1;
+ ControlArea->u.Flags.HadUserReference = 1;
+ ASSERT (ControlArea->NumberOfSectionReferences != 0);
+
+ if (PurgedEvent != NULL) {
+ MiFreeEventCounter (PurgedEvent, TRUE);
+ }
+ UNLOCK_PFN (OldIrql);
+
+ return;
+}
+
+typedef struct _NTSYM {
+ struct _NTSYM *Next;
+ PVOID SymbolTable;
+ ULONG NumberOfSymbols;
+ PVOID StringTable;
+ USHORT Flags;
+ USHORT EntrySize;
+ ULONG MinimumVa;
+ ULONG MaximumVa;
+ PCHAR MapName;
+ ULONG MapNameLen;
+} NTSYM, *PNTSYM;
+
+ULONG
+CacheImageSymbols(
+ IN PVOID ImageBase
+ )
+{
+ PIMAGE_DEBUG_DIRECTORY DebugDirectory;
+ ULONG DebugSize;
+
+ PAGED_CODE();
+
+ try {
+ DebugDirectory = (PIMAGE_DEBUG_DIRECTORY)
+ RtlImageDirectoryEntryToData( ImageBase,
+ TRUE,
+ IMAGE_DIRECTORY_ENTRY_DEBUG,
+ &DebugSize
+ );
+ if (!DebugDirectory) {
+ return FALSE;
+ }
+
+ //
+ // If using remote KD, ImageBase is what it wants to see.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+VOID
+MiSetPageModified (
+ IN PVOID Address
+ )
+
+/*++
+
+Routine Description:
+
+ This routine sets the modified bit in the PFN database for the
+ pages that correspond to the specified address range.
+
+ Note that the dirty bit in the PTE is cleared by this operation.
+
+Arguments:
+
+ Address - Supplies the address of the start of the range. This
+ range must reside within the system cache.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode. APC_LEVEL and below for pageable addresses,
+ DISPATCH_LEVEL and below for non-pageable addresses.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ MMPTE PteContents;
+ KIRQL OldIrql;
+
+ //
+ // Loop on the copy on write case until the page is only
+ // writable.
+ //
+
+ PointerPte = MiGetPteAddress (Address);
+
+ *(volatile CCHAR *)Address;
+
+ LOCK_PFN (OldIrql);
+
+ PteContents = *(volatile MMPTE *)PointerPte;
+
+ if (PteContents.u.Hard.Valid == 0) {
+
+ //
+ // Page is no longer valid.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ *(volatile CCHAR *)Address;
+ LOCK_PFN (OldIrql);
+ PteContents = *(volatile MMPTE *)PointerPte;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.Modified = 1;
+
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+#ifdef NT_UP
+ if (MI_IS_PTE_DIRTY (PteContents)) {
+#endif //NT_UP
+ MI_SET_PTE_CLEAN (PteContents);
+
+ //
+ // Clear the dirty bit in the PTE so new writes can be tracked.
+ //
+
+ (VOID)KeFlushSingleTb (Address,
+ FALSE,
+ TRUE,
+ (PHARDWARE_PTE)PointerPte,
+ PteContents.u.Flush);
+#ifdef NT_UP
+ }
+#endif //NT_UP
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+
+
+typedef struct _MMVIEW {
+ ULONG Entry;
+ PCONTROL_AREA ControlArea;
+} MMVIEW, *PMMVIEW;
+
+
+PMMVIEW MmSystemSpaceViewTable;
+ULONG MmSystemSpaceHashSize;
+ULONG MmSystemSpaceHashEntries;
+ULONG MmSystemSpaceHashKey;
+PRTL_BITMAP MmSystemSpaceBitMap;
+PCHAR MmSystemSpaceViewStart;
+
+VOID
+MiRemoveMappedPtes (
+ IN PVOID BaseAddress,
+ IN ULONG NumberOfPtes,
+ IN PCONTROL_AREA ControlArea,
+ BOOLEAN SystemCache
+ );
+
+FAST_MUTEX MmSystemSpaceViewLock;
+
+#define MMLOCK_SYSTEM_SPACE() \
+ ExAcquireFastMutex( &MmSystemSpaceViewLock)
+
+#define MMUNLOCK_SYSTEM_SPACE() \
+ ExReleaseFastMutex(&MmSystemSpaceViewLock)
+
+
+
+NTSTATUS
+MmMapViewInSystemSpace (
+ IN PVOID Section,
+ OUT PVOID *MappedBase,
+ IN OUT PULONG ViewSize
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps the specified section into the system's address space.
+
+Arguments:
+
+ Section - Supplies a pointer to the section to map.
+
+ *MappedBase - Returns the address where the section was mapped.
+
+ ViewSize - Supplies the size of the view to map. If this
+ is specified as zero, the whole section is mapped.
+ Returns the actual size mapped.
+
+Return Value:
+
+ Status of the map view operation.
+
+Environment:
+
+    Kernel Mode, APC_LEVEL or below.
+
+--*/
+
+{
+ PVOID Base;
+ KIRQL OldIrql;
+ PSUBSECTION Subsection;
+ PCONTROL_AREA ControlArea;
+ PMMPTE LastPte;
+ MMPTE TempPte;
+ ULONG StartBit;
+ ULONG SizeIn64k;
+ PMMPTE BasePte;
+ ULONG NumberOfPtes;
+ PMMPTE FirstPde;
+ PMMPTE LastPde;
+ PMMPTE FirstSystemPde;
+ PMMPTE LastSystemPde;
+ ULONG PageFrameIndex;
+
+ PAGED_CODE();
+
+ //
+ // Check to see if a purge operation is in progress and if so, wait
+ // for the purge to complete. In addition, up the count of mapped
+ // views for this control area.
+ //
+
+ ControlArea = ((PSECTION)Section)->Segment->ControlArea;
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
+ MiCheckPurgeAndUpMapCount (ControlArea);
+
+ if (*ViewSize == 0) {
+
+ *ViewSize = ((PSECTION)Section)->SizeOfSection.LowPart;
+
+ } else if (*ViewSize > ((PSECTION)Section)->SizeOfSection.LowPart) {
+
+ //
+ // Section offset or view size past size of section.
+ //
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return STATUS_INVALID_VIEW_SIZE;
+ }
+
+ //
+    // Calculate the size of the view in 64k units.
+ //
+
+ SizeIn64k = (*ViewSize / X64K) + ((*ViewSize & (X64K - 1)) != 0);
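+
+    //
+    // For example, a view of 0x18000 bytes spans one full 64k block plus
+    // a remainder, so SizeIn64k = 1 + 1 = 2.
+    //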
+
+ Base = MiInsertInSystemSpace (SizeIn64k, ControlArea);
+
+ if (Base == NULL) {
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return STATUS_NO_MEMORY;
+ }
+
+ BasePte = MiGetPteAddress (Base);
+ NumberOfPtes = BYTES_TO_PAGES (*ViewSize);
+
+ FirstPde = MiGetPdeAddress (Base);
+ LastPde = MiGetPdeAddress ((PVOID)(((PCHAR)Base) +
+ (SizeIn64k * X64K) - 1));
+ FirstSystemPde = &MmSystemPagePtes[((ULONG)FirstPde &
+ ((PDE_PER_PAGE * sizeof(MMPTE)) - 1)) / sizeof(MMPTE) ];
+ LastSystemPde = &MmSystemPagePtes[((ULONG)LastPde &
+ ((PDE_PER_PAGE * sizeof(MMPTE)) - 1)) / sizeof(MMPTE) ];
+
+ do {
+ if (FirstSystemPde->u.Hard.Valid == 0) {
+
+ //
+ // No page table page exists, get a page and map it in.
+ //
+
+ TempPte = ValidKernelPde;
+
+ LOCK_PFN (OldIrql);
+
+ if (((volatile MMPTE *)FirstSystemPde)->u.Hard.Valid == 0) {
+
+ if (MiEnsureAvailablePageOrWait (NULL, FirstPde)) {
+
+ //
+ // PFN_LOCK was dropped, redo this loop as another process
+ // could have made this PDE valid.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ continue;
+ }
+
+ MiChargeCommitmentCantExpand (1, TRUE);
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (FirstSystemPde));
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *FirstSystemPde = TempPte;
+ *FirstPde = TempPte;
+
+ MiInitializePfnForOtherProcess (PageFrameIndex,
+ FirstPde,
+ MmSystemPageDirectory);
+
+ RtlFillMemoryUlong (MiGetVirtualAddressMappedByPte (FirstPde),
+ PAGE_SIZE,
+ MM_ZERO_KERNEL_PTE);
+ }
+ UNLOCK_PFN (OldIrql);
+ }
+
+ FirstSystemPde += 1;
+ FirstPde += 1;
+ } while (FirstPde <= LastPde );
+
+ //
+ // Setup PTEs to point to prototype PTEs.
+ //
+
+ if (((PSECTION)Section)->u.Flags.Image) {
+        LOCK_PFN (OldIrql);
+ ((PSECTION)Section)->Segment->ControlArea->u.Flags.ImageMappedInSystemSpace = 1;
+ UNLOCK_PFN (OldIrql);
+ }
+ MiAddMappedPtes (BasePte,
+ NumberOfPtes,
+ ControlArea,
+ 0,
+ FALSE);
+
+ *MappedBase = Base;
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+MmUnmapViewInSystemSpace (
+ IN PVOID MappedBase
+ )
+
+/*++
+
+Routine Description:
+
+ This routine unmaps the specified section from the system's address space.
+
+Arguments:
+
+ MappedBase - Supplies the address of the view to unmap.
+
+Return Value:
+
+ Status of the map view operation.
+
+Environment:
+
+    Kernel Mode, APC_LEVEL or below.
+
+--*/
+
+{
+ ULONG StartBit;
+ ULONG Size;
+ PCONTROL_AREA ControlArea;
+
+ PAGED_CODE();
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+ StartBit = ((ULONG)MappedBase - (ULONG)MmSystemSpaceViewStart) >> 16;
+
+ MMLOCK_SYSTEM_SPACE ();
+
+ Size = MiRemoveFromSystemSpace (MappedBase, &ControlArea);
+
+ RtlClearBits (MmSystemSpaceBitMap,
+ StartBit,
+ Size);
+
+ //
+ // Zero PTEs.
+ //
+
+ Size = Size * (X64K >> PAGE_SHIFT);
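+
+    //
+    // For example, with 4k pages X64K >> PAGE_SHIFT is 16, so a view of
+    // two 64k blocks (Size = 2) has 32 PTEs removed.
+    //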
+
+ MiRemoveMappedPtes (MappedBase, Size, ControlArea, FALSE);
+
+ MMUNLOCK_SYSTEM_SPACE ();
+
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return STATUS_SUCCESS;
+}
+
+
+PVOID
+MiInsertInSystemSpace (
+ IN ULONG SizeIn64k,
+ IN PCONTROL_AREA ControlArea
+ )
+
+/*++
+
+Routine Description:
+
+ This routine creates a view in system space for the specified control
+ area (file mapping).
+
+Arguments:
+
+ SizeIn64k - Supplies the size of the view to be created.
+
+ ControlArea - Supplies a pointer to the control area for this view.
+
+Return Value:
+
+ Base address where the view was mapped, NULL if the view could not be
+ mapped.
+
+Environment:
+
+ Kernel Mode.
+
+--*/
+
+{
+
+ PVOID Base;
+ ULONG Entry;
+ ULONG Hash;
+ ULONG i;
+ ULONG AllocSize;
+ PMMVIEW OldTable;
+ ULONG StartBit;
+
+ PAGED_CODE();
+
+ //
+ // CODE IS ALREADY LOCKED BY CALLER.
+ //
+
+ MMLOCK_SYSTEM_SPACE ();
+
+ StartBit = RtlFindClearBitsAndSet (MmSystemSpaceBitMap,
+ SizeIn64k,
+ 0);
+
+ if (StartBit == 0xFFFFFFFF) {
+ MMUNLOCK_SYSTEM_SPACE ();
+ return NULL;
+ }
+
+ Base = (PVOID)((PCHAR)MmSystemSpaceViewStart + (StartBit * X64K));
+
+ Entry = ((ULONG)Base & 0xFFFF0000) + SizeIn64k;
+
+ Hash = (Entry >> 16) % MmSystemSpaceHashKey;
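+
+    //
+    // Each table entry packs the 64k-aligned base address into the high
+    // 16 bits and the size in 64k units into the low 16 bits. For
+    // example, a (hypothetical) Base of 0xC1230000 with SizeIn64k = 3
+    // yields Entry = 0xC1230003 and an initial probe at
+    // (0xC123 % MmSystemSpaceHashKey), with linear probing below on
+    // collision.
+    //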
+
+ while (MmSystemSpaceViewTable[Hash].Entry != 0) {
+ Hash += 1;
+ if (Hash >= MmSystemSpaceHashSize) {
+ Hash = 0;
+ }
+ }
+
+ MmSystemSpaceHashEntries += 1;
+
+ MmSystemSpaceViewTable[Hash].Entry = Entry;
+ MmSystemSpaceViewTable[Hash].ControlArea = ControlArea;
+
+ if (MmSystemSpaceHashSize < (MmSystemSpaceHashEntries + 8)) {
+
+ //
+ // Less than 8 free slots, reallocate and rehash.
+ //
+
+ MmSystemSpaceHashSize += MmSystemSpaceHashSize;
+
+ AllocSize = sizeof(MMVIEW) * MmSystemSpaceHashSize;
+ ASSERT (AllocSize < PAGE_SIZE);
+
+ MmSystemSpaceHashKey = MmSystemSpaceHashSize - 1;
+ OldTable = MmSystemSpaceViewTable;
+
+ MmSystemSpaceViewTable = ExAllocatePoolWithTag (PagedPool,
+ AllocSize,
+ ' mM');
+
+ if (MmSystemSpaceViewTable == NULL) {
+ MmSystemSpaceViewTable = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ AllocSize,
+ ' mM');
+ }
+
+ RtlZeroMemory (MmSystemSpaceViewTable, AllocSize);
+
+ for (i = 0; i < (MmSystemSpaceHashSize / 2); i++) {
+ if (OldTable[i].Entry != 0) {
+ Hash = (OldTable[i].Entry >> 16) % MmSystemSpaceHashKey;
+
+ while (MmSystemSpaceViewTable[Hash].Entry != 0) {
+ Hash += 1;
+ if (Hash >= MmSystemSpaceHashSize) {
+ Hash = 0;
+ }
+ }
+ MmSystemSpaceViewTable[Hash] = OldTable[i];
+ }
+ }
+ ExFreePool (OldTable);
+ }
+
+ MMUNLOCK_SYSTEM_SPACE ();
+ return Base;
+}
+
+
+ULONG
+MiRemoveFromSystemSpace (
+ IN PVOID Base,
+ OUT PCONTROL_AREA *ControlArea
+ )
+
+/*++
+
+Routine Description:
+
+ This routine looks up the specified view in the system space hash
+ table and unmaps the view from system space and the table.
+
+Arguments:
+
+ Base - Supplies the base address for the view. If this address is
+ NOT found in the hash table, the system bugchecks.
+
+    ControlArea - Returns the control area corresponding to the base
+ address.
+
+Return Value:
+
+ Size of the view divided by 64k.
+
+Environment:
+
+ Kernel Mode, system view hash table locked.
+
+--*/
+
+{
+ ULONG Base16;
+ ULONG Hash;
+ ULONG Size;
+ ULONG count = 0;
+
+ PAGED_CODE();
+
+ //
+ // CODE IS ALREADY LOCKED BY CALLER.
+ //
+
+ Base16 = (ULONG)Base >> 16;
+ Hash = Base16 % MmSystemSpaceHashKey;
+
+ while ((MmSystemSpaceViewTable[Hash].Entry >> 16) != Base16) {
+ Hash += 1;
+ if (Hash >= MmSystemSpaceHashSize) {
+ Hash = 0;
+ count += 1;
+ if (count == 2) {
+ KeBugCheckEx (MEMORY_MANAGEMENT, 787, 0, 0, 0);
+ }
+ }
+ }
+
+ MmSystemSpaceHashEntries -= 1;
+ Size = MmSystemSpaceViewTable[Hash].Entry & 0xFFFF;
+ MmSystemSpaceViewTable[Hash].Entry = 0;
+ *ControlArea = MmSystemSpaceViewTable[Hash].ControlArea;
+ return Size;
+}
+
+
+VOID
+MiInitializeSystemSpaceMap (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine initializes the tables for mapping views into system space.
+    Views are kept in multiples of 64k bytes in a growable hashed table.
+
+Arguments:
+
+ none.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel Mode, initialization.
+
+--*/
+
+{
+ ULONG AllocSize;
+ ULONG Size;
+
+ Size = MM_SYSTEM_VIEW_SIZE;
+
+ ExInitializeFastMutex(&MmSystemSpaceViewLock);
+
+ MmSystemSpaceViewStart = (PCHAR)MM_SYSTEM_VIEW_START;
+ MiCreateBitMap (&MmSystemSpaceBitMap, Size / X64K, NonPagedPool);
+ RtlClearAllBits (MmSystemSpaceBitMap);
+
+ //
+ // Build the view table.
+ //
+
+ MmSystemSpaceHashSize = 31;
+ MmSystemSpaceHashKey = MmSystemSpaceHashSize - 1;
+
+ AllocSize = sizeof(MMVIEW) * MmSystemSpaceHashSize;
+ ASSERT (AllocSize < PAGE_SIZE);
+
+ MmSystemSpaceViewTable = ExAllocatePoolWithTag (PagedPool,
+ AllocSize,
+ ' mM');
+
+ ASSERT (MmSystemSpaceViewTable != NULL);
+ RtlZeroMemory (MmSystemSpaceViewTable, AllocSize);
+
+ return;
+}
+
+
+HANDLE
+MmSecureVirtualMemory (
+ IN PVOID Address,
+ IN ULONG Size,
+ IN ULONG ProbeMode
+ )
+
+/*++
+
+Routine Description:
+
+ This routine probes the requested address range and protects
+    the specified address range from having its protection made
+    more restrictive and from being deleted.
+
+ MmUnsecureVirtualMemory is used to allow the range to return
+ to a normal state.
+
+Arguments:
+
+ Address - Supplies the base address to probe and secure.
+
+ Size - Supplies the size of the range to secure.
+
+    ProbeMode - Supplies one of PAGE_READONLY or PAGE_READWRITE.
+
+Return Value:
+
+ Returns a handle to be used to unsecure the range.
+ If the range could not be locked because of protection
+    problems or noncommitted memory, the value (HANDLE)0
+ is returned.
+
+Environment:
+
+ Kernel Mode.
+
+--*/
+
+{
+ ULONG EndAddress;
+ PVOID StartAddress;
+ CHAR Temp;
+ ULONG Probe;
+ HANDLE Handle = (HANDLE)0;
+ PMMVAD Vad;
+ PMMVAD NewVad;
+ PMMSECURE_ENTRY Secure;
+ PEPROCESS Process;
+
+ PAGED_CODE();
+
+ if (Address > MM_HIGHEST_USER_ADDRESS) {
+ return (HANDLE)0;
+ }
+
+ Probe = (ProbeMode == PAGE_READONLY);
+
+ Process = PsGetCurrentProcess();
+ StartAddress = Address;
+
+ LOCK_ADDRESS_SPACE (Process);
+ try {
+
+ if (ProbeMode == PAGE_READONLY) {
+
+ EndAddress = (ULONG)Address + Size - 1;
+ EndAddress = (EndAddress & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
+
+ do {
+ Temp = *(volatile CHAR *)Address;
+ Address = (PVOID)(((ULONG)Address & ~(PAGE_SIZE - 1)) + PAGE_SIZE);
+ } while ((ULONG)Address != EndAddress);
+ } else {
+ ProbeForWrite (Address, Size, 1);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto Return1;
+ }
+
+ //
+ // Locate VAD and add in secure descriptor.
+ //
+
+ EndAddress = (ULONG)StartAddress + Size - 1;
+ Vad = MiLocateAddress (StartAddress);
+
+ if (Vad == NULL) {
+ goto Return1;
+ }
+
+ if ((StartAddress < Vad->StartingVa) ||
+ ((PVOID)EndAddress > Vad->EndingVa)) {
+
+ //
+        // Not within the section's virtual address descriptor;
+ // return an error.
+ //
+
+ goto Return1;
+ }
+
+ //
+ // If this is a short VAD, it needs to be reallocated as a large
+ // VAD.
+ //
+
+ if ((Vad->u.VadFlags.PrivateMemory) && (!Vad->u.VadFlags.NoChange)) {
+
+ NewVad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMVAD),
+ MMVADKEY);
+ if (NewVad == NULL) {
+ goto Return1;
+ }
+
+ RtlCopyMemory (NewVad, Vad, sizeof(MMVAD_SHORT));
+ NewVad->u.VadFlags.NoChange = 1;
+ NewVad->u2.LongFlags2 = 0;
+ NewVad->u2.VadFlags2.OneSecured = 1;
+ NewVad->u2.VadFlags2.StoredInVad = 1;
+ NewVad->u2.VadFlags2.ReadOnly = Probe;
+ NewVad->u3.Secured.StartVa = StartAddress;
+ NewVad->u3.Secured.EndVa = (PVOID)EndAddress;
+ NewVad->Banked = NULL;
+
+ //
+ // Replace the current VAD with this expanded VAD.
+ //
+
+ LOCK_WS (Process);
+ if (Vad->Parent) {
+ if (Vad->Parent->RightChild == Vad) {
+ Vad->Parent->RightChild = NewVad;
+ } else {
+ ASSERT (Vad->Parent->LeftChild == Vad);
+ Vad->Parent->LeftChild = NewVad;
+ }
+ } else {
+ Process->VadRoot = NewVad;
+ }
+ if (Vad->LeftChild) {
+ Vad->LeftChild->Parent = NewVad;
+ }
+ if (Vad->RightChild) {
+ Vad->RightChild->Parent = NewVad;
+ }
+ if (Process->VadHint == Vad) {
+ Process->VadHint = NewVad;
+ }
+ if (Process->VadFreeHint == Vad) {
+ Process->VadFreeHint = NewVad;
+ }
+ UNLOCK_WS (Process);
+ ExFreePool (Vad);
+ Handle = (HANDLE)&NewVad->u2.LongFlags2;
+ goto Return1;
+ }
+
+ ASSERT (Vad->u2.VadFlags2.Reserved == 0);
+
+ //
+ // This is already a large VAD, add the secure entry.
+ //
+
+ if (Vad->u2.VadFlags2.OneSecured) {
+
+ //
+        // This VAD is already secured. Move the info out of the
+ // block into pool.
+ //
+
+ Secure = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof (MMSECURE_ENTRY),
+ 'eSmM');
+ if (Secure == NULL) {
+ goto Return1;
+ }
+
+ ASSERT (Vad->u.VadFlags.NoChange == 1);
+ Vad->u2.VadFlags2.OneSecured = 0;
+ Vad->u2.VadFlags2.MultipleSecured = 1;
+ Secure->u2.LongFlags2 = Vad->u.LongFlags;
+ Secure->u2.VadFlags2.StoredInVad = 0;
+ Secure->StartVa = StartAddress;
+ Secure->EndVa = (PVOID)EndAddress;
+
+ InitializeListHead (&Vad->u3.List);
+ InsertTailList (&Vad->u3.List,
+ &Secure->List);
+ }
+
+ if (Vad->u2.VadFlags2.MultipleSecured) {
+
+ //
+        // This VAD already has a secured element in its list; allocate
+        // and
+ // add in the new secured element.
+ //
+
+ Secure = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof (MMSECURE_ENTRY),
+ 'eSmM');
+ if (Secure == NULL) {
+ goto Return1;
+ }
+
+ Secure->u2.LongFlags2 = 0;
+ Secure->u2.VadFlags2.ReadOnly = Probe;
+ Secure->StartVa = StartAddress;
+ Secure->EndVa = (PVOID)EndAddress;
+
+ InsertTailList (&Vad->u3.List,
+ &Secure->List);
+ Handle = (HANDLE)Secure;
+
+ } else {
+
+ //
+ // This list does not have a secure element. Put it in the VAD.
+ //
+
+ Vad->u.VadFlags.NoChange = 1;
+ Vad->u2.VadFlags2.OneSecured = 1;
+ Vad->u2.VadFlags2.StoredInVad = 1;
+ Vad->u2.VadFlags2.ReadOnly = Probe;
+ Vad->u3.Secured.StartVa = StartAddress;
+ Vad->u3.Secured.EndVa = (PVOID)EndAddress;
+ Handle = (HANDLE)&Vad->u2.LongFlags2;
+ }
+
+Return1:
+ UNLOCK_ADDRESS_SPACE (Process);
+ return Handle;
+}
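+
+// For illustration only: a hypothetical caller secures a user buffer
+// around an operation and releases it afterwards (Buffer and Length are
+// invented for the example):
+//
+//     HANDLE SecureHandle;
+//
+//     SecureHandle = MmSecureVirtualMemory (Buffer, Length,
+//                                           PAGE_READWRITE);
+//     if (SecureHandle != (HANDLE)0) {
+//
+//         //
+//         // The range can neither be deleted nor made more restrictive
+//         // while secured.
+//         //
+//
+//         MmUnsecureVirtualMemory (SecureHandle);
+//     }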
+
+
+VOID
+MmUnsecureVirtualMemory (
+ IN HANDLE SecureHandle
+ )
+
+/*++
+
+Routine Description:
+
+    This routine unsecures memory previously secured via a call to
+ MmSecureVirtualMemory.
+
+Arguments:
+
+ SecureHandle - Supplies the handle returned in MmSecureVirtualMemory.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode.
+
+--*/
+
+{
+ PMMSECURE_ENTRY Secure;
+ PEPROCESS Process;
+ PMMVAD Vad;
+
+ PAGED_CODE();
+
+ Secure = (PMMSECURE_ENTRY)SecureHandle;
+ Process = PsGetCurrentProcess ();
+ LOCK_ADDRESS_SPACE (Process);
+
+ if (Secure->u2.VadFlags2.StoredInVad) {
+ Vad = CONTAINING_RECORD( Secure,
+ MMVAD,
+ u2.LongFlags2);
+ } else {
+ Vad = MiLocateAddress (Secure->StartVa);
+ }
+
+ ASSERT (Vad);
+ ASSERT (Vad->u.VadFlags.NoChange == 1);
+
+ if (Vad->u2.VadFlags2.OneSecured) {
+ ASSERT (Secure == (PMMSECURE_ENTRY)&Vad->u2.LongFlags2);
+ Vad->u2.VadFlags2.OneSecured = 0;
+ ASSERT (Vad->u2.VadFlags2.MultipleSecured == 0);
+ if (Vad->u2.VadFlags2.SecNoChange == 0) {
+
+ //
+ // No more secure entries in this list, remove the state.
+ //
+
+ Vad->u.VadFlags.NoChange = 0;
+ }
+ } else {
+ ASSERT (Vad->u2.VadFlags2.MultipleSecured == 1);
+
+ if (Secure == (PMMSECURE_ENTRY)&Vad->u2.LongFlags2) {
+
+ //
+ // This was a single block that got converted into a list.
+ // Reset the entry.
+ //
+
+ Secure = CONTAINING_RECORD (Vad->u3.List.Flink,
+ MMSECURE_ENTRY,
+ List);
+ }
+ RemoveEntryList (&Secure->List);
+ ExFreePool (Secure);
+ if (IsListEmpty (&Vad->u3.List)) {
+
+ //
+ // No more secure entries, reset the state.
+ //
+
+ Vad->u2.VadFlags2.MultipleSecured = 0;
+
+ if ((Vad->u2.VadFlags2.SecNoChange == 0) &&
+ (Vad->u.VadFlags.PrivateMemory == 0)) {
+
+ //
+                // No more secure entries in this list; remove the state
+                // if and only if this VAD is not private. If this VAD
+                // is private, clearing the NoChange flag would indicate
+                // that this is a short VAD, which it no longer is.
+ //
+
+ Vad->u.VadFlags.NoChange = 0;
+ }
+ }
+ }
+
+ UNLOCK_ADDRESS_SPACE (Process);
+ return;
+}
+
diff --git a/private/ntos/mm/mi.h b/private/ntos/mm/mi.h
new file mode 100644
index 000000000..73c4aef26
--- /dev/null
+++ b/private/ntos/mm/mi.h
@@ -0,0 +1,3301 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ mi.h
+
+Abstract:
+
+ This module contains the private data structures and procedure
+ prototypes for the memory management system.
+
+Author:
+
+ Lou Perazzoli (loup) 20-Mar-1989
+
+Revision History:
+
+--*/
+
+#ifndef _MI_
+#define _MI_
+
+#include "ntos.h"
+#include "ntimage.h"
+#include "ki.h"
+#include "fsrtl.h"
+#include "zwapi.h"
+#include "pool.h"
+#include "ntiodump.h"
+#include "stdio.h"
+#include "string.h"
+
+#if defined(_X86_)
+#include "..\mm\i386\mi386.h"
+
+#elif defined(_MIPS_)
+#include "..\mm\mips\mir4000.h"
+
+#elif defined(_ALPHA_)
+#include "..\mm\alpha\mialpha.h"
+
+#elif defined(_PPC_)
+#include "..\mm\ppc\mippc.h"
+
+#else
+#error "mm: a target architecture must be defined."
+#endif
+
+#define MM_EMPTY_LIST ((ULONG)0xFFFFFFFF)
+
+#define MM_EMPTY_PTE_LIST ((ULONG)0xFFFFF)
+
+// #define MM_DELETED_PFN ((PMMPTE)0xFFFFFFFF)
+
+#define MM_FREE_WSLE_SHIFT 4
+
+#define WSLE_NULL_INDEX ((ULONG)0xFFFFFFF)
+
+#define MM_FREE_POOL_SIGNATURE (0x50554F4C)
+
+#define MM_MINIMUM_PAGED_POOL_NTAS ((ULONG)(48*1024*1024))
+
+#define MM_ALLOCATION_FILLS_VAD ((PMMPTE)0xFFFFFFFC)
+
+#define MM_WORKING_SET_LIST_SEARCH 17
+
+#define MM_FLUID_WORKING_SET 8
+
+#define MM_FLUID_PHYSICAL_PAGES 32 //see MmResidentAvailablePages below.
+
+#define MM_USABLE_PAGES_FREE 32
+
+#define MM_WSLE_MAX_HASH_SIZE \
+ (((MM_WORKING_SET_END - (ULONG)(PAGE_SIZE + (ULONG)WORKING_SET_LIST \
+ + sizeof(MMWSL) + \
+ ((ULONG)MM_MAXIMUM_WORKING_SET * sizeof(MMWSLE)))) & ~(PAGE_SIZE - 1)) / \
+ sizeof(MMWSLE_HASH))
+
+#define X64K (ULONG)65536
+
+#define SEC_PHYSICAL_MEMORY (ULONG)0x80000000
+
+#define MM_HIGHEST_VAD_ADDRESS ((PVOID)((ULONG)MM_HIGHEST_USER_ADDRESS - (64*1024)))
+
+#define MM_NO_WS_EXPANSION ((PLIST_ENTRY)0)
+#define MM_WS_EXPANSION_IN_PROGRESS ((PLIST_ENTRY)35)
+#define MM_WS_SWAPPED_OUT ((PLIST_ENTRY)37)
+#define MM_IO_IN_PROGRESS ((PLIST_ENTRY)97) // MUST HAVE THE HIGHEST VALUE
+
+#define MM_PAGES_REQUIRED_FOR_MAPPED_IO 7
+
+#define MMSECTOR_SHIFT 9 //MUST BE LESS THAN OR EQUAL TO PAGE_SHIFT
+
+#define MMSECTOR_MASK 0x1ff
+
+#define MM_LOCK_BY_REFCOUNT 0
+
+#define MM_LOCK_BY_NONPAGE 1
+
+#define MM_FORCE_TRIM 6
+
+#define MM_GROW_WSLE_HASH 20
+
+#define MM_MAXIMUM_WRITE_CLUSTER (MM_MAXIMUM_DISK_IO_SIZE / PAGE_SIZE)
+
+//
+// Number of PTEs to flush singularly before flushing the entire TB.
+//
+
+#define MM_MAXIMUM_FLUSH_COUNT (FLUSH_MULTIPLE_MAXIMUM-1)
+
+//
+// Page protections
+//
+
+#define MM_ZERO_ACCESS 0 // this value is not used.
+#define MM_READONLY 1
+#define MM_EXECUTE 2
+#define MM_EXECUTE_READ 3
+#define MM_READWRITE 4 // bit 2 is set if this is writeable.
+#define MM_WRITECOPY 5
+#define MM_EXECUTE_READWRITE 6
+#define MM_EXECUTE_WRITECOPY 7
+
+#define MM_NOCACHE 0x8
+#define MM_GUARD_PAGE 0x10
+#define MM_DECOMMIT 0x10 //NO_ACCESS, Guard page
+#define MM_NOACCESS 0x18 //no_access, guard_page, nocache.
+#define MM_UNKNOWN_PROTECTION 0x100 //bigger than 5 bits!
+#define MM_LARGE_PAGES 0x111
+
+#define MM_PROTECTION_WRITE_MASK 4
+#define MM_PROTECTION_COPY_MASK 1
+#define MM_PROTECTION_OPERATION_MASK 7 // mask off guard page and nocache.
+#define MM_PROTECTION_EXECUTE_MASK 2
+
+#define MM_SECURE_DELETE_CHECK 0x55
+
+//
+// Debug flags
+//
+
+#define MM_DBG_WRITEFAULT 0x1
+#define MM_DBG_PTE_UPDATE 0x2
+#define MM_DBG_DUMP_WSL 0x4
+#define MM_DBG_PAGEFAULT 0x8
+#define MM_DBG_WS_EXPANSION 0x10
+#define MM_DBG_MOD_WRITE 0x20
+#define MM_DBG_CHECK_PTE 0x40
+#define MM_DBG_VAD_CONFLICT 0x80
+#define MM_DBG_SECTIONS 0x100
+#define MM_DBG_SYS_PTES 0x400
+#define MM_DBG_CLEAN_PROCESS 0x800
+#define MM_DBG_COLLIDED_PAGE 0x1000
+#define MM_DBG_DUMP_BOOT_PTES 0x2000
+#define MM_DBG_FORK 0x4000
+#define MM_DBG_DIR_BASE 0x8000
+#define MM_DBG_FLUSH_SECTION 0x10000
+#define MM_DBG_PRINTS_MODWRITES 0x20000
+#define MM_DBG_PAGE_IN_LIST 0x40000
+#define MM_DBG_CHECK_PFN_LOCK 0x80000
+#define MM_DBG_PRIVATE_PAGES 0x100000
+#define MM_DBG_WALK_VAD_TREE 0x200000
+#define MM_DBG_SWAP_PROCESS 0x400000
+#define MM_DBG_LOCK_CODE 0x800000
+#define MM_DBG_STOP_ON_ACCVIO 0x1000000
+#define MM_DBG_PAGE_REF_COUNT 0x2000000
+#define MM_DBG_SHOW_NT_CALLS 0x10000000
+#define MM_DBG_SHOW_FAULTS 0x40000000
+
+//
+// if the PTE.protection & MM_COPY_ON_WRITE_MASK == MM_COPY_ON_WRITE_MASK
+// then the pte is copy on write.
+//
+
+#define MM_COPY_ON_WRITE_MASK 5
+
+extern ULONG MmProtectToValue[32];
+extern ULONG MmProtectToPteMask[32];
+extern ULONG MmMakeProtectNotWriteCopy[32];
+extern ACCESS_MASK MmMakeSectionAccess[8];
+extern ACCESS_MASK MmMakeFileAccess[8];
+
+
+//
+// Time constants
+//
+
+extern LARGE_INTEGER MmSevenMinutes;
+extern LARGE_INTEGER MmWorkingSetProtectionTime;
+extern LARGE_INTEGER MmOneSecond;
+extern LARGE_INTEGER MmTwentySeconds;
+extern LARGE_INTEGER MmShortTime;
+extern LARGE_INTEGER MmHalfSecond;
+extern LARGE_INTEGER Mm30Milliseconds;
+extern LARGE_INTEGER MmCriticalSectionTimeout;
+
+//
+// A month's worth.
+//
+
+extern ULONG MmCritsectTimeoutSeconds;
+
+
+//++
+//
+// ULONG
+// MI_CONVERT_FROM_PTE_PROTECTION (
+// IN ULONG PROTECTION_MASK
+// )
+//
+// Routine Description:
+//
+//    This routine converts a PTE protection mask into a Protect value.
+//
+// Arguments:
+//
+//    PROTECTION_MASK - Supplies the PTE protection mask to convert.
+//
+// Return Value:
+//
+//    Returns the corresponding page protection value.
+//
+//--
+
+#define MI_CONVERT_FROM_PTE_PROTECTION(PROTECTION_MASK) \
+ (MmProtectToValue[PROTECTION_MASK])
+
+#define MI_MASK_TO_PTE(PMASK) MmProtectToPteMask[PMASK]
+
+
+#define MI_IS_PTE_PROTECTION_COPY_WRITE(PROTECTION_MASK) \
+ (((PROTECTION_MASK) & MM_COPY_ON_WRITE_MASK) == MM_COPY_ON_WRITE_MASK)
+
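+//
+// For example, MM_WRITECOPY (5) and MM_EXECUTE_WRITECOPY (7) both
+// satisfy (mask & 5) == 5 and are therefore copy on write, while
+// MM_READWRITE (4) yields (4 & 5) == 4 and is not.
+//
+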
+//++
+//
+// ULONG
+// MI_ROUND_TO_64K (
+// IN ULONG LENGTH
+// )
+//
+// Routine Description:
+//
+//
+//    The MI_ROUND_TO_64K macro takes a LENGTH in bytes and rounds it up to a multiple
+// of 64K.
+//
+// Arguments:
+//
+// LENGTH - LENGTH in bytes to round up to 64k.
+//
+// Return Value:
+//
+// Returns the LENGTH rounded up to a multiple of 64k.
+//
+//--
+
+#define MI_ROUND_TO_64K(LENGTH) (((ULONG)(LENGTH) + X64K - 1) & ~(X64K - 1))
+
+//++
+//
+// ULONG
+// MI_ROUND_TO_SIZE (
+// IN ULONG LENGTH,
+// IN ULONG ALIGNMENT
+// )
+//
+// Routine Description:
+//
+//
+//    The MI_ROUND_TO_SIZE macro takes a LENGTH in bytes and rounds it up to a
+// multiple of the alignment.
+//
+// Arguments:
+//
+// LENGTH - LENGTH in bytes to round up to.
+//
+//    ALIGNMENT - alignment to round to; must be a power of 2, i.e., 2**n.
+//
+// Return Value:
+//
+//    Returns the LENGTH rounded up to a multiple of the alignment.
+//
+//--
+
+#define MI_ROUND_TO_SIZE(LENGTH,ALIGNMENT) \
+ (((ULONG)(LENGTH) + (ALIGNMENT) - 1) & ~((ALIGNMENT) - 1))
+
+//++
+//
+// PVOID
+// MI_64K_ALIGN (
+// IN PVOID VA
+// )
+//
+// Routine Description:
+//
+//
+// The MI_64K_ALIGN macro takes a virtual address and returns a 64k-aligned
+// virtual address for that page.
+//
+// Arguments:
+//
+// VA - Virtual address.
+//
+// Return Value:
+//
+// Returns the 64k aligned virtual address.
+//
+//--
+
+#define MI_64K_ALIGN(VA) ((PVOID)((ULONG)(VA) & ~(X64K - 1)))
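+
+//
+// A short worked example of the rounding and alignment macros above,
+// assuming 4k pages; the input values are illustrative.
+//
+#if 0
+    ULONG Size;
+    PVOID Va;
+
+    Size = MI_ROUND_TO_64K (0x1234);              // yields 0x10000
+    Size = MI_ROUND_TO_SIZE (0x1234, PAGE_SIZE);  // yields 0x2000 with 4k pages
+    Va = MI_64K_ALIGN ((PVOID)0x12345678);        // yields 0x12340000
+#endif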
+
+//++
+//
+// PVOID
+// MI_ALIGN_TO_SIZE (
+// IN PVOID VA
+// IN ULONG ALIGNMENT
+// )
+//
+// Routine Description:
+//
+//
+// The MI_ALIGN_TO_SIZE macro takes a virtual address and returns a
+// virtual address for that page with the specified alignment.
+//
+// Arguments:
+//
+// VA - Virtual address.
+//
+//    ALIGNMENT - alignment to round to; must be a power of 2, i.e., 2**n.
+//
+// Return Value:
+//
+// Returns the aligned virtual address.
+//
+//--
+
+#define MI_ALIGN_TO_SIZE(VA,ALIGNMENT) ((PVOID)((ULONG)(VA) & ~(ALIGNMENT - 1)))
+
+
+//++
+//
+// LONGLONG
+// MI_STARTING_OFFSET (
+// IN PSUBSECTION SUBSECT
+// IN PMMPTE PTE
+// )
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PTE within a subsection and a pointer
+// to that subsection and calculates the offset for that PTE within the
+// file.
+//
+// Arguments:
+//
+// PTE - PTE within subsection.
+//
+// SUBSECT - Subsection
+//
+// Return Value:
+//
+// Offset for issuing I/O from.
+//
+//--
+
+#define MI_STARTING_OFFSET(SUBSECT,PTE) \
+    (((LONGLONG)((ULONG)((PTE) - ((SUBSECT)->SubsectionBase))) << PAGE_SHIFT) + \
+     ((LONGLONG)((SUBSECT)->StartingSector) << MMSECTOR_SHIFT))
+
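+//
+// For example, with 4k pages and 512-byte sectors, a PTE three entries
+// past SubsectionBase in a subsection whose StartingSector is 8 yields
+// (3 << 12) + (8 << 9) = 0x3000 + 0x1000 = 0x4000 into the file.
+//
+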
+
+// PVOID
+// MiFindEmptyAddressRangeDown (
+// IN ULONG SizeOfRange,
+// IN PVOID HighestAddressToEndAt,
+// IN ULONG Alignment
+// )
+//
+//
+// Routine Description:
+//
+// The function examines the virtual address descriptors to locate
+// an unused range of the specified size and returns the starting
+// address of the range. This routine looks from the top down.
+//
+// Arguments:
+//
+// SizeOfRange - Supplies the size in bytes of the range to locate.
+//
+// HighestAddressToEndAt - Supplies the virtual address to begin looking
+// at.
+//
+// Alignment - Supplies the alignment for the address. Must be
+//       a power of 2 and greater than the page size.
+//
+//Return Value:
+//
+// Returns the starting address of a suitable range.
+//
+
+#define MiFindEmptyAddressRangeDown(SizeOfRange,HighestAddressToEndAt,Alignment) \
+ (MiFindEmptyAddressRangeDownTree( \
+ (SizeOfRange), \
+ (HighestAddressToEndAt), \
+ (Alignment), \
+ (PMMADDRESS_NODE)(PsGetCurrentProcess()->VadRoot)))
+
+// PMMVAD
+// MiGetPreviousVad (
+// IN PMMVAD Vad
+// )
+//
+// Routine Description:
+//
+// This function locates the virtual address descriptor which contains
+// the address range which logically precedes the specified virtual
+// address descriptor.
+//
+// Arguments:
+//
+// Vad - Supplies a pointer to a virtual address descriptor.
+//
+// Return Value:
+//
+//     Returns a pointer to the virtual address descriptor containing the
+//     previous address range, NULL if none.
+//
+//
+
+#define MiGetPreviousVad(VAD) ((PMMVAD)MiGetPreviousNode((PMMADDRESS_NODE)(VAD)))
+
+
+// PMMVAD
+// MiGetNextVad (
+// IN PMMVAD Vad
+// )
+//
+// Routine Description:
+//
+// This function locates the virtual address descriptor which contains
+// the address range which logically follows the specified address range.
+//
+// Arguments:
+//
+// VAD - Supplies a pointer to a virtual address descriptor.
+//
+// Return Value:
+//
+// Returns a pointer to the virtual address descriptor containing the
+// next address range, NULL if none.
+//
+
+#define MiGetNextVad(VAD) ((PMMVAD)MiGetNextNode((PMMADDRESS_NODE)(VAD)))
+
+
+
+// PMMVAD
+// MiGetFirstVad (
+// Process
+// )
+//
+// Routine Description:
+//
+// This function locates the virtual address descriptor which contains
+// the address range which logically is first within the address space.
+//
+// Arguments:
+//
+// Process - Specifies the process in which to locate the VAD.
+//
+// Return Value:
+//
+// Returns a pointer to the virtual address descriptor containing the
+// first address range, NULL if none.
+
+#define MiGetFirstVad(Process) \
+ ((PMMVAD)MiGetFirstNode((PMMADDRESS_NODE)(Process->VadRoot)))
+
+
+
+// PMMVAD
+// MiCheckForConflictingVad (
+// IN PVOID StartingAddress,
+// IN PVOID EndingAddress
+// )
+//
+// Routine Description:
+//
+//     The function determines if any addresses between a given starting and
+//     ending address are contained within a virtual address descriptor.
+//
+// Arguments:
+//
+// StartingAddress - Supplies the virtual address to locate a containing
+// descriptor.
+//
+// EndingAddress - Supplies the virtual address to locate a containing
+// descriptor.
+//
+// Return Value:
+//
+// Returns a pointer to the first conflicting virtual address descriptor
+//     if one is found, otherwise a NULL value is returned.
+//
+
+#define MiCheckForConflictingVad(StartingAddress,EndingAddress) \
+ ((PMMVAD)MiCheckForConflictingNode( \
+ (StartingAddress), \
+ (EndingAddress), \
+ (PMMADDRESS_NODE)(PsGetCurrentProcess()->VadRoot)))
+
+// PMMCLONE_DESCRIPTOR
+// MiGetNextClone (
+// IN PMMCLONE_DESCRIPTOR Clone
+// )
+//
+// Routine Description:
+//
+//     This function locates the clone descriptor which contains
+//     the address range which logically follows the specified address range.
+//
+// Arguments:
+//
+//     Clone - Supplies a pointer to a clone descriptor.
+//
+// Return Value:
+//
+//     Returns a pointer to the clone descriptor containing the
+//     next address range, NULL if none.
+//
+//
+
+#define MiGetNextClone(CLONE) \
+ ((PMMCLONE_DESCRIPTOR)MiGetNextNode((PMMADDRESS_NODE)(CLONE)))
+
+
+
+// PMMCLONE_DESCRIPTOR
+// MiGetPreviousClone (
+// IN PMMCLONE_DESCRIPTOR Clone
+// )
+//
+// Routine Description:
+//
+//     This function locates the clone descriptor which contains
+//     the address range which logically precedes the specified clone
+//     descriptor.
+//
+// Arguments:
+//
+//     Clone - Supplies a pointer to a clone descriptor.
+//
+// Return Value:
+//
+//     Returns a pointer to the clone descriptor containing the
+//     previous address range, NULL if none.
+
+
+#define MiGetPreviousClone(CLONE) \
+ ((PMMCLONE_DESCRIPTOR)MiGetPreviousNode((PMMADDRESS_NODE)(CLONE)))
+
+
+
+// PMMCLONE_DESCRIPTOR
+// MiGetFirstClone (
+// )
+//
+// Routine Description:
+//
+//     This function locates the clone descriptor which contains
+//     the address range which logically is first within the address space.
+//
+// Arguments:
+//
+//     None.
+//
+// Return Value:
+//
+//     Returns a pointer to the clone descriptor containing the
+//     first address range, NULL if none.
+//
+
+
+#define MiGetFirstClone() \
+ ((PMMCLONE_DESCRIPTOR)MiGetFirstNode((PMMADDRESS_NODE)(PsGetCurrentProcess()->CloneRoot)))
+
+
+
+// VOID
+// MiInsertClone (
+// IN PMMCLONE_DESCRIPTOR Clone
+// )
+//
+// Routine Description:
+//
+//     This function inserts a clone descriptor into the clone tree and
+//     reorders the splay tree as appropriate.
+//
+// Arguments:
+//
+//     Clone - Supplies a pointer to a clone descriptor.
+//
+//
+// Return Value:
+//
+// None.
+//
+
+#define MiInsertClone(CLONE) \
+ { \
+ ASSERT ((CLONE)->NumberOfPtes != 0); \
+ MiInsertNode(((PMMADDRESS_NODE)(CLONE)),(PMMADDRESS_NODE *)&(PsGetCurrentProcess()->CloneRoot)); \
+ }
+
+
+
+
+// VOID
+// MiRemoveClone (
+// IN PMMCLONE_DESCRIPTOR Clone
+// )
+//
+// Routine Description:
+//
+//     This function removes a clone descriptor from the clone tree and
+//     reorders the splay tree as appropriate.
+//
+// Arguments:
+//
+//     Clone - Supplies a pointer to a clone descriptor.
+//
+// Return Value:
+//
+// None.
+//
+
+#define MiRemoveClone(CLONE) \
+ MiRemoveNode((PMMADDRESS_NODE)(CLONE),(PMMADDRESS_NODE *)&(PsGetCurrentProcess()->CloneRoot));
+
+
+
+// PMMCLONE_DESCRIPTOR
+// MiLocateCloneAddress (
+// IN PVOID VirtualAddress
+// )
+//
+//
+//
+// Routine Description:
+//
+//     The function locates the clone descriptor which describes
+// a given address.
+//
+// Arguments:
+//
+// VirtualAddress - Supplies the virtual address to locate a descriptor
+// for.
+//
+// Return Value:
+//
+//     Returns a pointer to the clone descriptor which contains
+// the supplied virtual address or NULL if none was located.
+//
+
+#define MiLocateCloneAddress(VA) \
+ (PsGetCurrentProcess()->CloneRoot ? \
+ ((PMMCLONE_DESCRIPTOR)MiLocateAddressInTree((VA), \
+ (PMMADDRESS_NODE *)&(PsGetCurrentProcess()->CloneRoot))) : \
+ ((PMMCLONE_DESCRIPTOR)NULL))
+
+
+
+// PMMCLONE_DESCRIPTOR
+// MiCheckForConflictingClone (
+// IN PVOID StartingAddress,
+// IN PVOID EndingAddress
+// )
+//
+// Routine Description:
+//
+//     The function determines if any addresses between a given starting and
+//     ending address are contained within a clone descriptor.
+//
+// Arguments:
+//
+// StartingAddress - Supplies the virtual address to locate a containing
+// descriptor.
+//
+// EndingAddress - Supplies the virtual address to locate a containing
+// descriptor.
+//
+// Return Value:
+//
+//     Returns a pointer to the first conflicting clone descriptor
+//     if one is found, otherwise a NULL value is returned.
+//
+
+#define MiCheckForConflictingClone(START,END) \
+ ((PMMCLONE_DESCRIPTOR)(MiCheckForConflictingNode(START,END, \
+ (PMMADDRESS_NODE)(PsGetCurrentProcess()->CloneRoot))))
+
+
+//
+// MiGetVirtualPageNumber returns the virtual page number
+// for a given address.
+//
+
+#define MiGetVirtualPageNumber(va) ((ULONG)(va) >> PAGE_SHIFT)
+
+#define MI_VA_TO_PAGE(va) ((ULONG)(va) >> PAGE_SHIFT)
+
+#define MI_BYTES_TO_64K_PAGES(Size)  (((ULONG)(Size) + X64K - 1) >> 16)
+
+
+#define MiGetByteOffset(va) ((ULONG)(va) & (PAGE_SIZE - 1))
+
+//
+// In order to avoid using the multiply unit to calculate pfn database
+// elements the following macro is used. Note that it assumes
+// that each PFN database element is 24 bytes in size.
+//
+
+#define MI_PFN_ELEMENT(index) ((PMMPFN)(((PUCHAR)(MmPfnDatabase)) + \
+ (((ULONG)(index)) << 3) + (((ULONG)(index)) << 4)))
+
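+//
+// The shift-and-add form above is simply a multiply by sizeof(MMPFN):
+// (index << 3) + (index << 4) == index*8 + index*16 == index*24.
+// An equivalent (but multiply-based) form would be:
+//
+// #define MI_PFN_ELEMENT(index) (&MmPfnDatabase[index])
+//
+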
+//
+// Make a write-copy PTE, only writable.
+//
+
+#define MI_MAKE_PROTECT_NOT_WRITE_COPY(PROTECT) \
+ (MmMakeProtectNotWriteCopy[PROTECT])
+
+// #define LOCK_PFN KeWaitForSingleObject(&MmPfnMutex,
+// FreePage,
+// KernelMode,
+// FALSE,
+// (PLARGE_INTEGER)NULL)
+//
+// #define UNLOCK_PFN KeReleaseMutex(&MmPfnMutex,FALSE)
+//
+// #define UNLOCK_PFN_AND_THEN_WAIT KeReleaseMutex(&MmPfnMutex,TRUE)
+ //if ((MmDebug) && ((MmInfoCounters.PageFaultCount & 0xf) == 0)) KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime);
+
+#if DBG
+#define LOCK_PFN(OLDIRQL) ASSERT (KeGetCurrentIrql() <= APC_LEVEL); \
+ ExAcquireSpinLock ( &MmPfnLock, &OLDIRQL );
+#else
+#define LOCK_PFN(OLDIRQL) ExAcquireSpinLock ( &MmPfnLock, &OLDIRQL );
+#endif //DBG
+
+#define LOCK_PFN_WITH_TRY(OLDIRQL) \
+ ASSERT (KeGetCurrentIrql() <= APC_LEVEL); \
+ do { \
+ } while (KeTryToAcquireSpinLock(&MmPfnLock, &OLDIRQL) == FALSE)
+
+#define UNLOCK_PFN(OLDIRQL) ExReleaseSpinLock ( &MmPfnLock, OLDIRQL ); \
+ ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
+
+
+#define UNLOCK_PFN_AND_THEN_WAIT(OLDIRQL) \
+ { \
+ KIRQL XXX; \
+ ASSERT (KeGetCurrentIrql() == 2); \
+ ASSERT (OLDIRQL <= APC_LEVEL); \
+ KeAcquireSpinLock (&KiDispatcherLock,&XXX); \
+ KiReleaseSpinLock (&MmPfnLock); \
+ (KeGetCurrentThread())->WaitIrql = OLDIRQL; \
+ (KeGetCurrentThread())->WaitNext = TRUE; \
+ }
+
+#define LOCK_PFN2(OLDIRQL) ASSERT (KeGetCurrentIrql() <= DISPATCH_LEVEL); \
+ ExAcquireSpinLock ( &MmPfnLock, &OLDIRQL );
+
+#define UNLOCK_PFN2(OLDIRQL) ExReleaseSpinLock (&MmPfnLock, OLDIRQL); \
+ ASSERT (KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+#if DBG
+#define MM_PFN_LOCK_ASSERT() \
+ if (MmDebug & 0x80000) { \
+ ASSERT (KeGetCurrentIrql() == 2); \
+ }
+#else
+#define MM_PFN_LOCK_ASSERT()
+#endif //DBG
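+
+//
+// A minimal sketch of the PFN lock discipline, assuming a caller at or
+// below APC_LEVEL; Pfn1 and PageFrameIndex are illustrative locals.
+//
+#if 0
+    KIRQL OldIrql;
+    PMMPFN Pfn1;
+
+    LOCK_PFN (OldIrql);
+    MM_PFN_LOCK_ASSERT ();
+
+    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+    Pfn1->u3.e2.ReferenceCount += 1;
+
+    UNLOCK_PFN (OldIrql);
+#endif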
+
+
+#define LOCK_EXPANSION(OLDIRQL) ASSERT (KeGetCurrentIrql() <= APC_LEVEL); \
+ ExAcquireSpinLock ( &MmExpansionLock, &OLDIRQL );
+
+
+
+#define UNLOCK_EXPANSION(OLDIRQL) ExReleaseSpinLock ( &MmExpansionLock, OLDIRQL ); \
+ ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
+
+#define UNLOCK_EXPANSION_AND_THEN_WAIT(OLDIRQL) \
+ { \
+ KIRQL XXX; \
+ ASSERT (KeGetCurrentIrql() == 2); \
+ ASSERT (OLDIRQL <= APC_LEVEL); \
+ KeAcquireSpinLock (&KiDispatcherLock,&XXX); \
+ KiReleaseSpinLock (&MmExpansionLock); \
+ (KeGetCurrentThread())->WaitIrql = OLDIRQL; \
+ (KeGetCurrentThread())->WaitNext = TRUE; \
+ }
+
+#ifdef _ALPHA_
+#define LOCK_EXPANSION_IF_ALPHA(OLDIRQL) \
+ ExAcquireSpinLock ( &MmExpansionLock, &OLDIRQL )
+#else
+#define LOCK_EXPANSION_IF_ALPHA(OLDIRQL)
+#endif //ALPHA
+
+
+#ifdef _ALPHA_
+#define UNLOCK_EXPANSION_IF_ALPHA(OLDIRQL) \
+ ExReleaseSpinLock ( &MmExpansionLock, OLDIRQL )
+#else
+#define UNLOCK_EXPANSION_IF_ALPHA(OLDIRQL)
+#endif //ALPHA
+
+
+extern PETHREAD MmSystemLockOwner;
+
+#if DBG
+#define LOCK_SYSTEM_WS(OLDIRQL) ASSERT (KeGetCurrentIrql() <= APC_LEVEL); \
+ KeRaiseIrql(APC_LEVEL,&OLDIRQL); \
+ ExAcquireResourceExclusive(&MmSystemWsLock,TRUE); \
+ ASSERT (MmSystemLockOwner == NULL); \
+ MmSystemLockOwner = PsGetCurrentThread();
+#else
+#define LOCK_SYSTEM_WS(OLDIRQL) \
+ KeRaiseIrql(APC_LEVEL,&OLDIRQL); \
+ ExAcquireResourceExclusive(&MmSystemWsLock,TRUE); \
+ MmSystemLockOwner = PsGetCurrentThread();
+#endif //DBG
+
+#if DBG
+#define UNLOCK_SYSTEM_WS(OLDIRQL) \
+ ASSERT (MmSystemLockOwner == PsGetCurrentThread()); \
+ MmSystemLockOwner = NULL; \
+ ExReleaseResource (&MmSystemWsLock); \
+ KeLowerIrql (OLDIRQL); \
+ ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
+#else
+#define UNLOCK_SYSTEM_WS(OLDIRQL) \
+ MmSystemLockOwner = NULL; \
+ ExReleaseResource (&MmSystemWsLock); \
+ KeLowerIrql (OLDIRQL);
+#endif //DBG
+
+#if DBG
+#define MM_SYSTEM_WS_LOCK_ASSERT() \
+ //ASSERT (PsGetCurrentThread() == MmSystemLockOwner);
+#else
+#define MM_SYSTEM_WS_LOCK_ASSERT()
+#endif //DBG
+
+#define LOCK_HYPERSPACE(OLDIRQL) \
+ ExAcquireSpinLock ( &(PsGetCurrentProcess())->HyperSpaceLock, OLDIRQL );
+
+
+#define UNLOCK_HYPERSPACE(OLDIRQL) \
+ ExReleaseSpinLock ( &(PsGetCurrentProcess())->HyperSpaceLock, OLDIRQL );
+
+#define LOCK_WS(PROCESS) \
+ ExAcquireFastMutex( &((PROCESS)->WorkingSetLock))
+
+
+#define UNLOCK_WS(PROCESS) \
+ ExReleaseFastMutex(&((PROCESS)->WorkingSetLock))
+
+
+#define LOCK_ADDRESS_SPACE(PROCESS) \
+ ExAcquireFastMutex( &((PROCESS)->AddressCreationLock))
+
+
+#define LOCK_WS_AND_ADDRESS_SPACE(PROCESS) \
+ LOCK_ADDRESS_SPACE(PROCESS); \
+ LOCK_WS(PROCESS);
+
+#define UNLOCK_ADDRESS_SPACE(PROCESS) \
+ ExReleaseFastMutex( &((PROCESS)->AddressCreationLock))
+
+
+#define ZERO_LARGE(LargeInteger) \
+ (LargeInteger).LowPart = 0; \
+ (LargeInteger).HighPart = 0;
+
+//++
+//
+// ULONG
+// MI_CHECK_BIT (
+// IN PULONG ARRAY
+// IN ULONG BIT
+// )
+//
+// Routine Description:
+//
+// The MI_CHECK_BIT macro checks to see if the specified bit is
+// set within the specified array.
+//
+// Arguments:
+//
+// ARRAY - First element of the array to check.
+//
+// BIT - bit number (first bit is 0) to check.
+//
+// Return Value:
+//
+// Returns the value of the bit (0 or 1).
+//
+//--
+
+#define MI_CHECK_BIT(ARRAY,BIT) \
+ (((ULONG)ARRAY[(BIT) / (sizeof(ULONG)*8)] >> ((BIT) & 0x1F)) & 1)
+
+
+//++
+//
+// VOID
+// MI_SET_BIT (
+// IN PULONG ARRAY
+// IN ULONG BIT
+// )
+//
+// Routine Description:
+//
+// The MI_SET_BIT macro sets the specified bit within the
+// specified array.
+//
+// Arguments:
+//
+// ARRAY - First element of the array to set.
+//
+// BIT - bit number.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_BIT(ARRAY,BIT) \
+ (ULONG)ARRAY[(BIT) / (sizeof(ULONG)*8)] |= (1 << ((BIT) & 0x1F))
+
+
+//++
+//
+// VOID
+// MI_CLEAR_BIT (
+// IN PULONG ARRAY
+// IN ULONG BIT
+// )
+//
+// Routine Description:
+//
+// The MI_CLEAR_BIT macro sets the specified bit within the
+// specified array.
+//
+// Arguments:
+//
+// ARRAY - First element of the array to clear.
+//
+// BIT - bit number.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_CLEAR_BIT(ARRAY,BIT) \
+ (ULONG)ARRAY[(BIT) / (sizeof(ULONG)*8)] &= ~(1 << ((BIT) & 0x1F))
+
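+//
+// A short usage sketch of the bit array macros above; Committed is an
+// illustrative two-ULONG array holding 64 bits.
+//
+#if 0
+    ULONG Committed[2] = {0, 0};
+
+    MI_SET_BIT (Committed, 33);
+    ASSERT (MI_CHECK_BIT (Committed, 33) == 1);
+    MI_CLEAR_BIT (Committed, 33);
+    ASSERT (MI_CHECK_BIT (Committed, 33) == 0);
+#endif
+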
+
+
+//
+// PFN database element.
+//
+
+//
+// Define pseudo fields for start and end of allocation.
+//
+
+#define StartOfAllocation ReadInProgress
+
+#define EndOfAllocation WriteInProgress
+
+//
+// The PteFrame field size determines the largest physical page that
+// can be supported on the system. On a 4k page sized machine, 20 bits
+// limits it to 4GBs.
+//
+
+typedef struct _MMPFNENTRY {
+ ULONG Modified : 1;
+ ULONG ReadInProgress : 1;
+ ULONG WriteInProgress : 1;
+ ULONG PrototypePte: 1;
+ ULONG PageColor : 3;
+ ULONG ParityError : 1;
+ ULONG PageLocation : 3;
+ ULONG InPageError : 1;
+ ULONG Reserved : 4;
+ ULONG DontUse : 16; //overlays USHORT for reference count field.
+} MMPFNENTRY;
+
+typedef struct _MMPFN {
+ union {
+ ULONG Flink;
+ ULONG WsIndex;
+ PKEVENT Event;
+ NTSTATUS ReadStatus;
+ struct _MMPFN *NextStackPfn;
+ } u1;
+ PMMPTE PteAddress;
+ union {
+ ULONG Blink;
+ ULONG ShareCount;
+ ULONG SecondaryColorFlink;
+ } u2;
+ union {
+ MMPFNENTRY e1;
+ struct {
+ USHORT ShortFlags;
+ USHORT ReferenceCount;
+ } e2;
+ } u3;
+ MMPTE OriginalPte;
+ ULONG PteFrame;
+} MMPFN;
+
+typedef MMPFN *PMMPFN;
+
+
+typedef enum _MMSHARE_TYPE {
+ Normal,
+ ShareCountOnly,
+ AndValid
+} MMSHARE_TYPE;
+
+typedef struct _MMWSLE_HASH {
+ ULONG Key;
+ ULONG Index;
+} MMWSLE_HASH, *PMMWSLE_HASH;
+
+//
+// Working Set List Entry.
+//
+
+typedef struct _MMWSLENTRY {
+ ULONG Valid : 1;
+ ULONG LockedInWs : 1;
+ ULONG LockedInMemory : 1;
+ ULONG WasInTree : 1;
+ ULONG Protection : 5;
+ ULONG SameProtectAsProto : 1;
+ ULONG Direct : 1;
+ ULONG Filler : (32 - (MM_VIRTUAL_PAGE_SHIFT + 11));
+ ULONG VirtualPageNumber : MM_VIRTUAL_PAGE_SHIFT;
+ } MMWSLENTRY;
+
+typedef struct _MMWSLE {
+ union {
+ PVOID VirtualAddress;
+ ULONG Long;
+ MMWSLENTRY e1;
+ } u1;
+} MMWSLE;
+
+typedef MMWSLE *PMMWSLE;
+
+//
+// Working Set List. Must be quadword sized.
+//
+
+typedef struct _MMWSL {
+ ULONG Quota;
+ ULONG FirstFree;
+ ULONG FirstDynamic;
+ ULONG LastEntry;
+ ULONG NextSlot;
+ PMMWSLE Wsle;
+ ULONG NumberOfCommittedPageTables;
+ ULONG LastInitializedWsle;
+ ULONG NonDirectCount;
+ PMMWSLE_HASH HashTable;
+ ULONG HashTableSize;
+ PKEVENT WaitingForImageMapping;
+
+    //MUST BE QUADWORD ALIGNED AT THIS POINT!
+
+ USHORT UsedPageTableEntries[MM_USER_PAGE_TABLE_PAGES]; //this must be at
+ // the end.
+ // not used in system cache
+ // working set list.
+ ULONG CommittedPageTables[MM_USER_PAGE_TABLE_PAGES/(sizeof(ULONG)*8)];
+
+ } MMWSL;
+
+typedef MMWSL *PMMWSL;
+
+//
+// Memory Management Object structures.
+//
+
+
+typedef enum _SECTION_CHECK_TYPE {
+ CheckDataSection,
+ CheckImageSection,
+ CheckUserDataSection,
+ CheckBothSection
+} SECTION_CHECK_TYPE;
+
+typedef struct _SEGMENT {
+ PVOID SegmentBaseAddress;
+ ULONG TotalNumberOfPtes;
+ LARGE_INTEGER SizeOfSegment;
+ ULONG NonExtendedPtes;
+ ULONG ImageCommitment;
+ struct _CONTROL_AREA *ControlArea;
+ SECTION_IMAGE_INFORMATION ImageInformation;
+ PVOID SystemImageBase;
+ ULONG NumberOfCommittedPages;
+ MMPTE SegmentPteTemplate;
+ PVOID BasedAddress;
+ PMMPTE PrototypePte;
+ MMPTE ThePtes[MM_PROTO_PTE_ALIGNMENT / PAGE_SIZE];
+
+} SEGMENT, *PSEGMENT;
+
+typedef struct _EVENT_COUNTER {
+ ULONG RefCount;
+ KEVENT Event;
+ LIST_ENTRY ListEntry;
+} EVENT_COUNTER, *PEVENT_COUNTER;
+
+typedef struct _MMSECTION_FLAGS {
+ unsigned BeingDeleted : 1;
+ unsigned BeingCreated : 1;
+ unsigned BeingPurged : 1;
+ unsigned NoModifiedWriting : 1;
+ unsigned FailAllIo : 1;
+ unsigned Image : 1;
+ unsigned Based : 1;
+ unsigned File : 1;
+ unsigned Networked : 1;
+ unsigned NoCache : 1;
+ unsigned PhysicalMemory : 1;
+ unsigned CopyOnWrite : 1;
+ unsigned Reserve : 1; // not a spare bit!
+ unsigned Commit : 1;
+ unsigned FloppyMedia : 1;
+ unsigned WasPurged : 1;
+ unsigned UserReference : 1;
+ unsigned GlobalMemory : 1;
+ unsigned DeleteOnClose : 1;
+ unsigned FilePointerNull : 1;
+ unsigned DebugSymbolsLoaded : 1;
+ unsigned SetMappedFileIoComplete : 1;
+ unsigned CollidedFlush : 1;
+ unsigned NoChange : 1;
+ unsigned HadUserReference : 1;
+ unsigned ImageMappedInSystemSpace : 1;
+ unsigned filler : 6;
+} MMSECTION_FLAGS;
+
+typedef struct _CONTROL_AREA { // must be quadword sized.
+ PSEGMENT Segment;
+ LIST_ENTRY DereferenceList;
+ ULONG NumberOfSectionReferences;
+ ULONG NumberOfPfnReferences;
+ ULONG NumberOfMappedViews;
+ USHORT NumberOfSubsections;
+ USHORT FlushInProgressCount;
+ ULONG NumberOfUserReferences;
+ union {
+ ULONG LongFlags;
+ MMSECTION_FLAGS Flags;
+ } u;
+ PFILE_OBJECT FilePointer;
+ PEVENT_COUNTER WaitingForDeletion;
+ USHORT ModifiedWriteCount;
+ USHORT NumberOfSystemCacheViews;
+} CONTROL_AREA;
+
+typedef CONTROL_AREA *PCONTROL_AREA;
+
+typedef struct _MMSUBSECTION_FLAGS {
+ unsigned ReadOnly : 1;
+ unsigned ReadWrite : 1;
+ unsigned CopyOnWrite : 1;
+ unsigned GlobalMemory: 1;
+ unsigned Protection : 5;
+ unsigned LargePages : 1;
+ unsigned filler1 : 6;
+ unsigned SectorEndOffset : 9;
+ unsigned filler2: 7;
+} MMSUBSECTION_FLAGS;
+
+typedef struct _SUBSECTION { // Must start on quadword boundary and be quad sized
+ PCONTROL_AREA ControlArea;
+ union {
+ ULONG LongFlags;
+ MMSUBSECTION_FLAGS SubsectionFlags;
+ } u;
+ ULONG StartingSector;
+ ULONG EndingSector;
+ PMMPTE SubsectionBase;
+ ULONG UnusedPtes;
+ ULONG PtesInSubsection;
+ struct _SUBSECTION *NextSubsection;
+} SUBSECTION;
+
+typedef SUBSECTION *PSUBSECTION;
+
+typedef struct _MMDEREFERENCE_SEGMENT_HEADER {
+ KSPIN_LOCK Lock;
+ KSEMAPHORE Semaphore;
+ LIST_ENTRY ListHead;
+} MMDEREFERENCE_SEGMENT_HEADER;
+
+//
+// This entry is used for calling the segment dereference thread
+// to perform page file expansion. It has a similar structure
+// to a control area to allow either a control area or a page file
+// expansion entry to be placed on the list. Note that for a control
+// area the segment pointer is valid whereas for page file expansion
+// it is null.
+//
+
+typedef struct _MMPAGE_FILE_EXPANSION {
+ PSEGMENT Segment;
+ LIST_ENTRY DereferenceList;
+ ULONG RequestedExpansionSize;
+ ULONG ActualExpansion;
+ KEVENT Event;
+ ULONG InProgress;
+} MMPAGE_FILE_EXPANSION;
+
+typedef MMPAGE_FILE_EXPANSION *PMMPAGE_FILE_EXPANSION;
+
+
+typedef struct _MMWORKING_SET_EXPANSION_HEAD {
+ LIST_ENTRY ListHead;
+} MMWORKING_SET_EXPANSION_HEAD;
+
+#define SUBSECTION_READ_ONLY 1L
+#define SUBSECTION_READ_WRITE 2L
+#define SUBSECTION_COPY_ON_WRITE 4L
+#define SUBSECTION_SHARE_ALLOW 8L
+
+typedef struct _MMFLUSH_BLOCK {
+ LARGE_INTEGER ErrorOffset;
+ IO_STATUS_BLOCK IoStatus;
+ KEVENT IoEvent;
+ ULONG IoCount;
+} MMFLUSH_BLOCK, *PMMFLUSH_BLOCK;
+
+typedef struct _MMINPAGE_SUPPORT {
+ KEVENT Event;
+ IO_STATUS_BLOCK IoStatus;
+ LARGE_INTEGER ReadOffset;
+ ULONG WaitCount;
+ union {
+ PETHREAD Thread;
+ PMMFLUSH_BLOCK Flush;
+ } u;
+ PFILE_OBJECT FilePointer;
+ PMMPTE BasePte;
+ PMMPFN Pfn;
+ MDL Mdl;
+ ULONG Page[MM_MAXIMUM_READ_CLUSTER_SIZE + 1];
+ LIST_ENTRY ListEntry;
+} MMINPAGE_SUPPORT;
+
+typedef MMINPAGE_SUPPORT *PMMINPAGE_SUPPORT;
+
+typedef struct _MMPAGE_READ {
+ LARGE_INTEGER ReadOffset;
+ PFILE_OBJECT FilePointer;
+ PMMPTE BasePte;
+ PMMPFN Pfn;
+ MDL Mdl;
+ ULONG Page[MM_MAXIMUM_READ_CLUSTER_SIZE + 1];
+} MMPAGE_READ, *PMMPAGE_READ;
+
+//
+// Address Node.
+//
+
+typedef struct _MMADDRESS_NODE {
+ PVOID StartingVa;
+ PVOID EndingVa;
+ struct _MMADDRESS_NODE *Parent;
+ struct _MMADDRESS_NODE *LeftChild;
+ struct _MMADDRESS_NODE *RightChild;
+} MMADDRESS_NODE;
+
+typedef MMADDRESS_NODE *PMMADDRESS_NODE;
+
+typedef struct _SECTION {
+ MMADDRESS_NODE Address;
+ PSEGMENT Segment;
+ LARGE_INTEGER SizeOfSection;
+ union {
+ ULONG LongFlags;
+ MMSECTION_FLAGS Flags;
+ } u;
+ ULONG InitialPageProtection;
+} SECTION;
+
+
+typedef SECTION *PSECTION;
+
+//
+// Banked memory descriptor.  Pointed to by a VAD which has
+// the PhysicalMemory flag set and the Banked pointer field
+// non-NULL.
+//
+
+
+typedef struct _MMBANKED_SECTION {
+ ULONG BasePhysicalPage;
+ PMMPTE BasedPte;
+ ULONG BankSize;
+ ULONG BankShift; //shift for PTEs to calculate bank number
+ PBANKED_SECTION_ROUTINE BankedRoutine;
+ PVOID Context;
+ PMMPTE CurrentMappedPte;
+ MMPTE BankTemplate[1];
+} MMBANKED_SECTION, *PMMBANKED_SECTION;
+
+
+//
+// Virtual address descriptor
+//
+// ***** NOTE **********
+// The first part of a virtual address descriptor is a MMADDRESS_NODE!!!
+//
+
+#define COMMIT_SIZE 19
+
+#if ((COMMIT_SIZE + PAGE_SHIFT) < 31)
+#error COMMIT_SIZE too small
+#endif
+
+#define MM_MAX_COMMIT ((1 << COMMIT_SIZE) - 1)
+
+#define MM_VIEW_UNMAP 0
+#define MM_VIEW_SHARE 1
+
+typedef struct _MMVAD_FLAGS {
+ unsigned CommitCharge : COMMIT_SIZE; //limits system to 4k pages or bigger!
+ unsigned PhysicalMapping : 1;
+ unsigned ImageMap : 1;
+ unsigned Inherit : 1; //1 = ViewShare, 0 = ViewUnmap
+ unsigned NoChange : 1;
+ unsigned CopyOnWrite : 1;
+ unsigned Protection : 5;
+ unsigned LargePages : 1;
+ unsigned MemCommit: 1;
+ unsigned PrivateMemory : 1; //used to tell VAD from VAD_SHORT
+} MMVAD_FLAGS;
+
+typedef struct _MMVAD_FLAGS2 {
+ unsigned SecNoChange : 1; // set if SEC_NOCHANGE specified
+ unsigned OneSecured : 1; // set if u3 field is a range
+ unsigned MultipleSecured : 1; // set if u3 field is a list head
+ unsigned ReadOnly : 1; // protected as ReadOnly
+ unsigned StoredInVad : 1; // set if secure is stored in VAD
+ unsigned Reserved : 27;
+} MMVAD_FLAGS2;
+
+typedef struct _MMADDRESS_LIST {
+ PVOID StartVa;
+ PVOID EndVa;
+} MMADDRESS_LIST, *PMMADDRESS_LIST;
+
+typedef struct _MMSECURE_ENTRY {
+ union {
+ ULONG LongFlags2;
+ MMVAD_FLAGS2 VadFlags2;
+ } u2;
+ PVOID StartVa;
+ PVOID EndVa;
+ LIST_ENTRY List;
+} MMSECURE_ENTRY, *PMMSECURE_ENTRY;
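+
+//
+// Note that u2 is the first field of an MMSECURE_ENTRY and matches the
+// layout of the u2 field in an MMVAD (below).  When a VAD holds a single
+// secured range (OneSecured), the secure handle is &Vad->u2.LongFlags2,
+// and MmUnsecureVirtualMemory recovers the VAD from such a handle with
+//
+//      Vad = CONTAINING_RECORD (Secure, MMVAD, u2.LongFlags2);
+//
+// The StoredInVad bit distinguishes that case from a pool-allocated
+// entry linked on the VAD's u3.List.
+//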
+
+typedef struct _MMVAD {
+ PVOID StartingVa;
+ PVOID EndingVa;
+ struct _MMVAD *Parent;
+ struct _MMVAD *LeftChild;
+ struct _MMVAD *RightChild;
+ union {
+ ULONG LongFlags;
+ MMVAD_FLAGS VadFlags;
+ } u;
+ PCONTROL_AREA ControlArea;
+ PMMPTE FirstPrototypePte;
+ PMMPTE LastContiguousPte;
+ union {
+ ULONG LongFlags2;
+ MMVAD_FLAGS2 VadFlags2;
+ } u2;
+ union {
+ LIST_ENTRY List;
+ MMADDRESS_LIST Secured;
+ } u3;
+ PMMBANKED_SECTION Banked;
+} MMVAD, *PMMVAD;
+
+
+typedef struct _MMVAD_SHORT {
+ PVOID StartingVa;
+ PVOID EndingVa;
+ struct _MMVAD *Parent;
+ struct _MMVAD *LeftChild;
+ struct _MMVAD *RightChild;
+ union {
+ ULONG LongFlags;
+ MMVAD_FLAGS VadFlags;
+ } u;
+} MMVAD_SHORT, *PMMVAD_SHORT;
+
+
+//
+// Stuff for support of POSIX Fork.
+//
+
+
+typedef struct _MMCLONE_BLOCK {
+ MMPTE ProtoPte;
+ LONG CloneRefCount;
+} MMCLONE_BLOCK;
+
+typedef MMCLONE_BLOCK *PMMCLONE_BLOCK;
+
+typedef struct _MMCLONE_HEADER {
+ ULONG NumberOfPtes;
+ ULONG NumberOfProcessReferences;
+ PMMCLONE_BLOCK ClonePtes;
+} MMCLONE_HEADER;
+
+typedef MMCLONE_HEADER *PMMCLONE_HEADER;
+
+
+typedef struct _MMCLONE_DESCRIPTOR {
+ PVOID StartingVa;
+ PVOID EndingVa;
+ struct _MMCLONE_DESCRIPTOR *Parent;
+ struct _MMCLONE_DESCRIPTOR *LeftChild;
+ struct _MMCLONE_DESCRIPTOR *RightChild;
+ PMMCLONE_HEADER CloneHeader;
+ ULONG NumberOfPtes;
+ ULONG NumberOfReferences;
+ ULONG PagedPoolQuotaCharge;
+} MMCLONE_DESCRIPTOR;
+
+typedef MMCLONE_DESCRIPTOR *PMMCLONE_DESCRIPTOR;
+
+//
+// The following macro will allocate and initialize a bitmap from the
+// specified pool of the specified size
+//
+// VOID
+// MiCreateBitMap (
+// OUT PRTL_BITMAP *BitMapHeader,
+// IN ULONG SizeOfBitMap,
+// IN POOL_TYPE PoolType
+// );
+//
+
+#define MiCreateBitMap(BMH,S,P) { \
+ ULONG _S; \
+ _S = sizeof(RTL_BITMAP) + ((((S) + 31) / 32) * 4); \
+ *(BMH) = (PRTL_BITMAP)ExAllocatePoolWithTag( (P), _S, ' mM'); \
+ RtlInitializeBitMap( *(BMH), (PULONG)((*(BMH))+1), S); \
+}
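+
+//
+// A minimal usage sketch, assuming the pool allocation succeeds (the
+// macro does not check for failure); the 1024-bit size is illustrative.
+// The bitmap buffer is allocated immediately after the RTL_BITMAP
+// header, so a single ExFreePool of the header releases both.
+//
+#if 0
+    PRTL_BITMAP BitMap;
+
+    MiCreateBitMap (&BitMap, 1024, PagedPool);
+    RtlClearAllBits (BitMap);
+    RtlSetBits (BitMap, 0, 10);
+    ExFreePool (BitMap);
+#endif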
+
+#define MI_INITIALIZE_ZERO_MDL(MDL) { \
+ MDL->Next = (PMDL) NULL; \
+ MDL->MdlFlags = 0; \
+ MDL->StartVa = NULL; \
+ MDL->ByteOffset = 0; \
+ MDL->ByteCount = 0; \
+ }
+
+//
+// Page File structures.
+//
+
+typedef struct _MMMOD_WRITER_LISTHEAD {
+ LIST_ENTRY ListHead;
+ KEVENT Event;
+} MMMOD_WRITER_LISTHEAD, *PMMMOD_WRITER_LISTHEAD;
+
+typedef struct _MMMOD_WRITER_MDL_ENTRY {
+ LIST_ENTRY Links;
+ LARGE_INTEGER WriteOffset;
+ union {
+ IO_STATUS_BLOCK IoStatus;
+ LARGE_INTEGER LastByte;
+ } u;
+ PIRP Irp;
+ ULONG LastPageToWrite;
+ PMMMOD_WRITER_LISTHEAD PagingListHead;
+ PLIST_ENTRY CurrentList;
+ struct _MMPAGING_FILE *PagingFile;
+ PFILE_OBJECT File;
+ PCONTROL_AREA ControlArea;
+ PERESOURCE FileResource;
+ MDL Mdl;
+ ULONG Page[1];
+} MMMOD_WRITER_MDL_ENTRY, *PMMMOD_WRITER_MDL_ENTRY;
+
+
+#define MM_PAGING_FILE_MDLS 2
+
+typedef struct _MMPAGING_FILE {
+ ULONG Size;
+ ULONG MaximumSize;
+ ULONG MinimumSize;
+ ULONG FreeSpace;
+ ULONG CurrentUsage;
+ ULONG PeakUsage;
+ ULONG Hint;
+ ULONG HighestPage;
+ PMMMOD_WRITER_MDL_ENTRY Entry[MM_PAGING_FILE_MDLS];
+ PRTL_BITMAP Bitmap;
+ PFILE_OBJECT File;
+ ULONG PageFileNumber;
+ UNICODE_STRING PageFileName;
+ BOOLEAN Extended;
+ BOOLEAN HintSetToZero;
+ } MMPAGING_FILE, *PMMPAGING_FILE;
+
+typedef struct _MMINPAGE_SUPPORT_LIST {
+ LIST_ENTRY ListHead;
+ ULONG Count;
+} MMINPAGE_SUPPORT_LIST, *PMMINPAGE_SUPPORT_LIST;
+
+typedef struct _MMEVENT_COUNT_LIST {
+ LIST_ENTRY ListHead;
+ ULONG Count;
+} MMEVENT_COUNT_LIST, *PMMEVENT_COUNT_LIST;
+
+//
+// System PTE structures.
+//
+
+#define MM_SYS_PTE_TABLES_MAX 5
+
+typedef enum _MMSYSTEM_PTE_POOL_TYPE {
+ SystemPteSpace,
+ NonPagedPoolExpansion,
+ MaximumPtePoolTypes
+ } MMSYSTEM_PTE_POOL_TYPE;
+
+typedef struct _MMFREE_POOL_ENTRY {
+ LIST_ENTRY List;
+ ULONG Size;
+ ULONG Signature;
+ struct _MMFREE_POOL_ENTRY *Owner;
+} MMFREE_POOL_ENTRY, *PMMFREE_POOL_ENTRY;
+
+//
+// List for flushing TBs singularly.
+//
+
+typedef struct _MMPTE_FLUSH_LIST {
+ ULONG Count;
+ PMMPTE FlushPte[MM_MAXIMUM_FLUSH_COUNT];
+ PVOID FlushVa[MM_MAXIMUM_FLUSH_COUNT];
+} MMPTE_FLUSH_LIST, *PMMPTE_FLUSH_LIST;
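+
+//
+// A sketch of how a flush list is typically driven, assuming the caller
+// is deleting a run of PTEs with the appropriate locks already held;
+// PointerPte, LastPte, VirtualAddress and CurrentProcess are
+// illustrative, and MiDeletePte/MiFlushPteList are declared later in
+// this header.
+//
+#if 0
+    MMPTE_FLUSH_LIST PteFlushList;
+
+    PteFlushList.Count = 0;
+    while (PointerPte <= LastPte) {
+        MiDeletePte (PointerPte, VirtualAddress, FALSE,
+                     CurrentProcess, NULL, &PteFlushList);
+        PointerPte += 1;
+        VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
+    }
+    MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
+#endif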
+
+
+
+VOID
+MiInitMachineDependent (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ );
+
+VOID
+MiBuildPagedPool (
+ VOID
+ );
+
+VOID
+MiInitializeNonPagedPool (
+ PVOID StartOfNonPagedPool
+ );
+
+VOID
+MiInitializeSystemSpaceMap (
+ VOID
+ );
+
+VOID
+MiFindInitializationCode (
+ OUT PVOID *StartVa,
+ OUT PVOID *EndVa
+ );
+
+VOID
+MiFreeInitializationCode (
+ IN PVOID StartVa,
+ IN PVOID EndVa
+ );
+
+
+ULONG
+MiSectionInitialization (
+ VOID
+ );
+
+VOID
+FASTCALL
+MiDecrementReferenceCount (
+ IN ULONG PageFrameIndex
+ );
+
+VOID
+FASTCALL
+MiDecrementShareCount2 (
+ IN ULONG PageFrameIndex
+ );
+
+#define MiDecrementShareCount(P) MiDecrementShareCount2(P)
+
+#define MiDecrementShareCountOnly(P) MiDecrementShareCount2(P)
+
+#define MiDecrementShareAndValidCount(P) MiDecrementShareCount2(P)
+
+//
+// Routines which operate on the Page Frame Database Lists
+//
+
+VOID
+FASTCALL
+MiInsertPageInList (
+ IN PMMPFNLIST ListHead,
+ IN ULONG PageFrameIndex
+ );
+
+VOID
+FASTCALL
+MiInsertStandbyListAtFront (
+ IN ULONG PageFrameIndex
+ );
+
+ULONG //PageFrameIndex
+FASTCALL
+MiRemovePageFromList (
+ IN PMMPFNLIST ListHead
+ );
+
+VOID
+FASTCALL
+MiUnlinkPageFromList (
+ IN PMMPFN Pfn
+ );
+
+VOID
+MiUnlinkFreeOrZeroedPage (
+ IN ULONG Page
+ );
+
+VOID
+FASTCALL
+MiInsertFrontModifiedNoWrite (
+ IN ULONG PageFrameIndex
+ );
+
+ULONG
+FASTCALL
+MiEnsureAvailablePageOrWait (
+ IN PEPROCESS Process,
+ IN PVOID VirtualAddress
+ );
+
+ULONG //PageFrameIndex
+FASTCALL
+MiRemoveZeroPage (
+ IN ULONG PageColor
+ );
+
+#define MiRemoveZeroPageIfAny(COLOR) \
+ (MmFreePagesByColor[ZeroedPageList][COLOR].Flink != MM_EMPTY_LIST) ? \
+ MiRemoveZeroPage(COLOR) : 0
+
+
+ULONG //PageFrameIndex
+FASTCALL
+MiRemoveAnyPage (
+ IN ULONG PageColor
+ );
+
+//
+// Routines which operate on the page frame database entry.
+//
+
+VOID
+MiInitializePfn (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG ModifiedState
+ );
+
+VOID
+MiInitializePfnForOtherProcess (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG ContainingPageFrame
+ );
+
+VOID
+MiInitializeCopyOnWritePfn (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG WorkingSetIndex
+ );
+
+VOID
+MiInitializeTransitionPfn (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG WorkingSetIndex
+ );
+
+VOID
+MiFlushInPageSupportBlock (
+    VOID
+    );
+
+VOID
+MiFreeInPageSupportBlock (
+ IN PMMINPAGE_SUPPORT Support
+ );
+
+PMMINPAGE_SUPPORT
+MiGetInPageSupportBlock (
+ ULONG OkToReleasePfn
+ );
+
+//
+// Routines which require a physical page to be mapped into hyperspace
+// within the current process.
+//
+
+VOID
+FASTCALL
+MiZeroPhysicalPage (
+ IN ULONG PageFrameIndex,
+ IN ULONG Color
+ );
+
+VOID
+FASTCALL
+MiRestoreTransitionPte (
+ IN ULONG PageFrameIndex
+ );
+
+PSUBSECTION
+MiGetSubsectionAndProtoFromPte (
+ IN PMMPTE PointerPte,
+ IN PMMPTE *ProtoPte,
+ IN PEPROCESS Process
+ );
+
+PVOID
+MiMapPageInHyperSpace (
+ IN ULONG PageFrameIndex,
+ OUT PKIRQL OldIrql
+ );
+
+#define MiUnmapPageInHyperSpace(OLDIRQL) UNLOCK_HYPERSPACE(OLDIRQL)
+
+
+PVOID
+MiMapImageHeaderInHyperSpace (
+ IN ULONG PageFrameIndex
+ );
+
+VOID
+MiUnmapImageHeaderInHyperSpace (
+ VOID
+ );
+
+VOID
+MiUpdateImageHeaderPage (
+ IN PMMPTE PointerPte,
+ IN ULONG PageFrameNumber,
+ IN PCONTROL_AREA ControlArea
+ );
+
+ULONG
+MiGetPageForHeader (
+ VOID
+ );
+
+VOID
+MiRemoveImageHeaderPage (
+ IN ULONG PageFrameNumber
+ );
+
+PVOID
+MiMapPageToZeroInHyperSpace (
+ IN ULONG PageFrameIndex
+ );
+
+
+//
+// Routines to obtain and release system PTEs.
+//
+
+PMMPTE
+MiReserveSystemPtes (
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPteType,
+ IN ULONG Alignment,
+ IN ULONG Offset,
+ IN ULONG BugCheckOnFailure
+ );
+
+VOID
+MiReleaseSystemPtes (
+ IN PMMPTE StartingPte,
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPteType
+ );
+
+VOID
+MiInitializeSystemPtes (
+ IN PMMPTE StartingPte,
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPteType
+ );
+
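+//
+// A sketch of the reserve/map/release pattern for system PTEs, assuming
+// the MiGetVirtualAddressMappedByPte macro from the architecture header
+// and a PageFrameIndex already in hand; PTE template handling is
+// simplified.
+//
+#if 0
+    PMMPTE PointerPte;
+    MMPTE TempPte;
+    PVOID BaseVa;
+
+    PointerPte = MiReserveSystemPtes (1, SystemPteSpace, 0, 0, TRUE);
+    BaseVa = MiGetVirtualAddressMappedByPte (PointerPte);
+
+    TempPte = ValidKernelPte;
+    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+    *PointerPte = TempPte;
+
+    // ... use BaseVa ...
+
+    MiReleaseSystemPtes (PointerPte, 1, SystemPteSpace);
+#endif
+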
+//
+// Access Fault routines.
+//
+
+NTSTATUS
+MiDispatchFault (
+    IN BOOLEAN StoreInstruction,
+    IN PVOID VirtualAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerProtoPte,
+ IN PEPROCESS Process
+ );
+
+NTSTATUS
+MiResolveDemandZeroFault (
+ IN PVOID VirtualAddress,
+ IN PMMPTE PointerPte,
+ IN PEPROCESS Process,
+ IN ULONG PrototypePte
+ );
+
+NTSTATUS
+MiResolveTransitionFault (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PEPROCESS Process,
+ IN ULONG PfnLockHeld
+ );
+
+NTSTATUS
+MiResolvePageFileFault (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PMMINPAGE_SUPPORT *ReadBlock,
+ IN PEPROCESS Process
+ );
+
+NTSTATUS
+MiResolveProtoPteFault (
+ IN BOOLEAN StoreInstruction,
+ IN PVOID VirtualAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerProtoPte,
+ IN PMMINPAGE_SUPPORT *ReadBlock,
+ IN PEPROCESS Process
+ );
+
+
+NTSTATUS
+MiResolveMappedFileFault (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PMMINPAGE_SUPPORT *ReadBlock,
+ IN PEPROCESS Process
+ );
+
+VOID
+MiAddValidPageToWorkingSet (
+ IN PVOID VirtualAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPFN Pfn1,
+ IN ULONG WsleMask
+ );
+
+NTSTATUS
+MiWaitForInPageComplete (
+ IN PMMPFN Pfn,
+ IN PMMPTE PointerPte,
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPteContents,
+ IN PMMINPAGE_SUPPORT InPageSupport,
+ IN PEPROCESS CurrentProcess
+ );
+
+NTSTATUS
+FASTCALL
+MiCopyOnWrite (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte
+ );
+
+VOID
+MiSetDirtyBit (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN ULONG PfnHeld
+ );
+
+VOID
+MiSetModifyBit (
+ IN PMMPFN Pfn
+ );
+
+PMMPTE
+MiFindActualFaultingPte (
+ IN PVOID FaultingAddress
+ );
+
+VOID
+MiInitializeReadInProgressPfn (
+ IN PMDL Mdl,
+ IN PMMPTE BasePte,
+ IN PKEVENT Event,
+ IN ULONG WorkingSetIndex
+ );
+
+NTSTATUS
+MiAccessCheck (
+ IN PMMPTE PointerPte,
+ IN BOOLEAN WriteOperation,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN ULONG Protection
+ );
+
+NTSTATUS
+FASTCALL
+MiCheckForUserStackOverflow (
+ IN PVOID FaultingAddress
+ );
+
+PMMPTE
+MiCheckVirtualAddress (
+ IN PVOID VirtualAddress,
+ OUT PULONG ProtectCode
+ );
+
+NTSTATUS
+FASTCALL
+MiCheckPdeForPagedPool (
+ IN PVOID VirtualAddress
+ );
+
+VOID
+MiInitializeMustSucceedPool (
+ VOID
+ );
+
+//
+// Routines which operate on an address tree.
+//
+
+PMMADDRESS_NODE
+FASTCALL
+MiGetNextNode (
+ IN PMMADDRESS_NODE Node
+ );
+
+PMMADDRESS_NODE
+FASTCALL
+MiGetPreviousNode (
+ IN PMMADDRESS_NODE Node
+ );
+
+
+PMMADDRESS_NODE
+FASTCALL
+MiGetFirstNode (
+ IN PMMADDRESS_NODE Root
+ );
+
+PMMADDRESS_NODE
+MiGetLastNode (
+ IN PMMADDRESS_NODE Root
+ );
+
+VOID
+FASTCALL
+MiInsertNode (
+ IN PMMADDRESS_NODE Node,
+ IN OUT PMMADDRESS_NODE *Root
+ );
+
+VOID
+FASTCALL
+MiRemoveNode (
+ IN PMMADDRESS_NODE Node,
+ IN OUT PMMADDRESS_NODE *Root
+ );
+
+PMMADDRESS_NODE
+FASTCALL
+MiLocateAddressInTree (
+ IN PVOID VirtualAddress,
+ IN PMMADDRESS_NODE *Root
+ );
+
+PMMADDRESS_NODE
+MiCheckForConflictingNode (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMADDRESS_NODE Root
+ );
+
+PVOID
+MiFindEmptyAddressRangeInTree (
+ IN ULONG SizeOfRange,
+ IN ULONG Alignment,
+ IN PMMADDRESS_NODE Root,
+ OUT PMMADDRESS_NODE *PreviousVad
+ );
+
+PVOID
+MiFindEmptyAddressRangeDownTree (
+ IN ULONG SizeOfRange,
+ IN PVOID HighestAddressToEndAt,
+ IN ULONG Alignment,
+ IN PMMADDRESS_NODE Root
+ );
+
+VOID
+NodeTreeWalk (
+ PMMADDRESS_NODE Start
+ );
+
+//
+// Routines which operate on the tree of virtual address descriptors.
+//
+
+VOID
+MiInsertVad (
+ IN PMMVAD Vad
+ );
+
+VOID
+MiRemoveVad (
+ IN PMMVAD Vad
+ );
+
+PMMVAD
+FASTCALL
+MiLocateAddress (
+ IN PVOID Vad
+ );
+
+PVOID
+MiFindEmptyAddressRange (
+ IN ULONG SizeOfRange,
+ IN ULONG Alignment,
+ IN ULONG QuickCheck
+ );
+
+//
+// Routines which operate on the clone tree structure.
+//
+
+
+NTSTATUS
+MiCloneProcessAddressSpace (
+ IN PEPROCESS ProcessToClone,
+ IN PEPROCESS ProcessToInitialize,
+ IN ULONG PdePhysicalPage,
+ IN ULONG HyperPhysicalPage
+ );
+
+
+ULONG
+MiDecrementCloneBlockReference (
+ IN PMMCLONE_DESCRIPTOR CloneDescriptor,
+ IN PMMCLONE_BLOCK CloneBlock,
+ IN PEPROCESS CurrentProcess
+ );
+
+VOID
+MiWaitForForkToComplete (
+ IN PEPROCESS CurrentProcess
+ );
+
+//
+// Routines which operate on the working set list.
+//
+
+ULONG
+MiLocateAndReserveWsle (
+ IN PMMSUPPORT WsInfo
+ );
+
+VOID
+MiReleaseWsle (
+ IN ULONG WorkingSetIndex,
+ IN PMMSUPPORT WsInfo
+ );
+
+VOID
+MiUpdateWsle (
+ IN PULONG DesiredIndex,
+ IN PVOID VirtualAddress,
+ IN PMMWSL WorkingSetList,
+ IN PMMPFN Pfn
+ );
+
+VOID
+MiInitializeWorkingSetList (
+ IN PEPROCESS CurrentProcess
+ );
+
+VOID
+MiGrowWsleHash (
+ IN PMMSUPPORT WsInfo,
+ IN ULONG PfnLockHeld
+ );
+
+ULONG
+MiTrimWorkingSet (
+ ULONG Reduction,
+ IN PMMSUPPORT WsInfo,
+ IN ULONG ForcedReduction
+ );
+
+VOID
+FASTCALL
+MiInsertWsle (
+ IN ULONG Entry,
+ IN PMMWSL WorkingSetList
+ );
+
+VOID
+FASTCALL
+MiRemoveWsle (
+ IN ULONG Entry,
+ IN PMMWSL WorkingSetList
+ );
+
+VOID
+MiFreeWorkingSetRange (
+ IN PVOID StartVa,
+ IN PVOID EndVa,
+ IN PMMSUPPORT WsInfo
+ );
+
+ULONG
+FASTCALL
+MiLocateWsle (
+ IN PVOID VirtualAddress,
+ IN PMMWSL WorkingSetList,
+ IN ULONG WsPfnIndex
+ );
+
+ULONG
+MiFreeWsle (
+ IN ULONG WorkingSetIndex,
+ IN PMMSUPPORT WsInfo,
+ IN PMMPTE PointerPte
+ );
+
+VOID
+MiSwapWslEntries (
+ IN ULONG SwapEntry,
+ IN ULONG Entry,
+ IN PMMSUPPORT WsInfo
+ );
+
+VOID
+MiRemoveWsleFromFreeList (
+ IN ULONG Entry,
+ IN PMMWSLE Wsle,
+ IN PMMWSL WorkingSetList
+ );
+
+ULONG
+MiRemovePageFromWorkingSet (
+ IN PMMPTE PointerPte,
+ IN PMMPFN Pfn1,
+ IN PMMSUPPORT WsInfo
+ );
+
+VOID
+MiTakePageFromWorkingSet (
+ IN ULONG Entry,
+ IN PMMSUPPORT WsInfo,
+ IN PMMPTE PointerPte
+ );
+
+NTSTATUS
+MiEmptyWorkingSet (
+ IN PMMSUPPORT WsInfo
+ );
+
+ULONG
+MiDeleteSystemPagableVm (
+ IN PMMPTE PointerPte,
+ IN ULONG NumberOfPtes,
+ IN ULONG NewPteValue,
+ OUT PULONG ResidentPages
+ );
+
+VOID
+MiLockCode (
+ IN PMMPTE FirstPte,
+ IN PMMPTE LastPte,
+ IN ULONG LockType
+ );
+
+PLDR_DATA_TABLE_ENTRY
+MiLookupDataTableEntry (
+ IN PVOID AddressWithinSection,
+ IN ULONG ResourceHeld
+ );
+
+//
+// Routines which perform working set management.
+//
+
+VOID
+MiObtainFreePages (
+ VOID
+ );
+
+VOID
+MiModifiedPageWriter (
+ IN PVOID StartContext
+ );
+
+ULONG
+MiExtendPagingFiles (
+ IN ULONG ExtendSize
+ );
+
+VOID
+MiContractPagingFiles (
+ VOID
+ );
+
+VOID
+MiAttemptPageFileReduction (
+ VOID
+ );
+
+//
+// Routines to delete address space.
+//
+
+VOID
+MiDeleteVirtualAddresses (
+ IN PUCHAR StartingAddress,
+ IN PUCHAR EndingAddress,
+ IN ULONG AddressSpaceDeletion,
+ IN PMMVAD Vad
+ );
+
+VOID
+MiDeletePte (
+ IN PMMPTE PointerPte,
+ IN PVOID VirtualAddress,
+ IN ULONG AddressSpaceDeletion,
+ IN PEPROCESS CurrentProcess,
+ IN PMMPTE PrototypePte,
+ IN PMMPTE_FLUSH_LIST PteFlushList OPTIONAL
+ );
+
+VOID
+MiFlushPteList (
+ IN PMMPTE_FLUSH_LIST PteFlushList,
+ IN ULONG AllProcessors,
+ IN MMPTE FillPte
+ );
+
+
+ULONG
+FASTCALL
+MiReleasePageFileSpace (
+ IN MMPTE PteContents
+ );
+
+VOID
+FASTCALL
+MiUpdateModifiedWriterMdls (
+ IN ULONG PageFileNumber
+ );
+
+
+//
+// General support routines.
+//
+
+ULONG
+MiDoesPdeExistAndMakeValid (
+ IN PMMPTE PointerPde,
+ IN PEPROCESS TargetProcess,
+ IN ULONG PfnMutexHeld
+ );
+
+ULONG
+MiMakePdeExistAndMakeValid (
+ IN PMMPTE PointerPde,
+ IN PEPROCESS TargetProcess,
+ IN ULONG PfnMutexHeld
+ );
+
+ULONG
+FASTCALL
+MiMakeSystemAddressValid (
+ IN PVOID VirtualAddress,
+ IN PEPROCESS CurrentProcess
+ );
+
+ULONG
+FASTCALL
+MiMakeSystemAddressValidPfnWs (
+ IN PVOID VirtualAddress,
+ IN PEPROCESS CurrentProcess OPTIONAL
+ );
+
+ULONG
+FASTCALL
+MiMakeSystemAddressValidPfn (
+ IN PVOID VirtualAddress
+ );
+
+ULONG
+FASTCALL
+MiLockPagedAddress (
+ IN PVOID VirtualAddress,
+ IN ULONG PfnLockHeld
+ );
+
+VOID
+FASTCALL
+MiUnlockPagedAddress (
+ IN PVOID VirtualAddress,
+ IN ULONG PfnLockHeld
+ );
+
+ULONG
+FASTCALL
+MiIsPteDecommittedPage (
+ IN PMMPTE PointerPte
+ );
+
+ULONG
+FASTCALL
+MiIsProtectionCompatible (
+ IN ULONG OldProtect,
+ IN ULONG NewProtect
+ );
+
+ULONG
+FASTCALL
+MiMakeProtectionMask (
+ IN ULONG Protect
+ );
+
+ULONG
+MiIsEntireRangeCommitted (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ );
+
+ULONG
+MiIsEntireRangeDecommitted (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ );
+
+PMMPTE
+FASTCALL
+MiGetProtoPteAddressExtended (
+ IN PMMVAD Vad,
+ IN PVOID VirtualAddress
+ );
+
+PSUBSECTION
+FASTCALL
+MiLocateSubsection (
+ IN PMMVAD Vad,
+ IN PVOID VirtualAddress
+ );
+
+ULONG
+MiInitializeSystemCache (
+ IN ULONG SizeOfSystemCacheInPages,
+ IN ULONG MinimumWorkingSet,
+ IN ULONG MaximumWorkingSet
+ );
+
+VOID
+MiAdjustWorkingSetManagerParameters(
+ BOOLEAN WorkStation
+ );
+
+//
+// Section support
+//
+
+VOID
+FASTCALL
+MiInsertBasedSection (
+ IN PSECTION Section
+ );
+
+VOID
+FASTCALL
+MiRemoveBasedSection (
+ IN PSECTION Section
+ );
+
+VOID
+MiRemoveMappedView (
+ IN PEPROCESS CurrentProcess,
+ IN PMMVAD Vad
+ );
+
+PVOID
+MiFindEmptySectionBaseDown (
+ IN ULONG SizeOfRange,
+ IN PVOID HighestAddressToEndAt
+ );
+
+VOID
+MiSegmentDelete (
+ PSEGMENT Segment
+ );
+
+VOID
+MiSectionDelete (
+ PVOID Object
+ );
+
+VOID
+MiDereferenceSegmentThread (
+ IN PVOID StartContext
+ );
+
+NTSTATUS
+MiCreateImageFileMap (
+ IN PFILE_OBJECT File,
+ OUT PSEGMENT *Segment
+ );
+
+NTSTATUS
+MiCreateDataFileMap (
+ IN PFILE_OBJECT File,
+ OUT PSEGMENT *Segment,
+ IN PLARGE_INTEGER MaximumSize,
+ IN ULONG SectionPageProtection,
+ IN ULONG AllocationAttributes,
+ IN ULONG IgnoreFileSizing
+ );
+
+NTSTATUS
+MiCreatePagingFileMap (
+ OUT PSEGMENT *Segment,
+ IN PLARGE_INTEGER MaximumSize,
+ IN ULONG ProtectionMask,
+ IN ULONG AllocationAttributes
+ );
+
+VOID
+MiPurgeSubsectionInternal (
+ IN PSUBSECTION Subsection,
+ IN ULONG PteOffset
+ );
+
+VOID
+MiPurgeImageSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process
+ );
+
+VOID
+MiCleanSection (
+ IN PCONTROL_AREA ControlArea
+ );
+
+VOID
+MiCheckControlArea (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS CurrentProcess,
+ IN KIRQL PreviousIrql
+ );
+
+VOID
+MiCheckForControlAreaDeletion (
+ IN PCONTROL_AREA ControlArea
+ );
+
+ULONG
+MiCheckControlAreaStatus (
+ IN SECTION_CHECK_TYPE SectionCheckType,
+ IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
+ IN ULONG DelayClose,
+ OUT PCONTROL_AREA *ControlArea,
+ OUT PKIRQL OldIrql
+ );
+
+PEVENT_COUNTER
+MiGetEventCounter (
+    VOID
+    );
+
+VOID
+MiFlushEventCounter (
+    VOID
+    );
+
+VOID
+MiFreeEventCounter (
+ IN PEVENT_COUNTER Support,
+ IN ULONG Flush
+ );
+
+ULONG
+MmCanFileBeTruncatedInternal (
+ IN PSECTION_OBJECT_POINTERS SectionPointer,
+ IN PLARGE_INTEGER NewFileSize OPTIONAL,
+ OUT PKIRQL PreviousIrql
+ );
+
+
+//
+// protection stuff...
+//
+
+NTSTATUS
+MiProtectVirtualMemory (
+ IN PEPROCESS Process,
+ IN PVOID *CapturedBase,
+ IN PULONG CapturedRegionSize,
+ IN ULONG Protect,
+ IN PULONG LastProtect
+ );
+
+ULONG
+MiGetPageProtection (
+ IN PMMPTE PointerPte,
+ IN PEPROCESS Process
+ );
+
+ULONG
+MiSetProtectionOnSection (
+ IN PEPROCESS Process,
+ IN PMMVAD Vad,
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN ULONG NewProtect,
+ OUT PULONG CapturedOldProtect,
+ IN ULONG DontCharge
+ );
+
+NTSTATUS
+MiCheckSecuredVad (
+ IN PMMVAD Vad,
+ IN PVOID Base,
+ IN ULONG Size,
+ IN ULONG ProtectionMask
+ );
+
+ULONG
+MiChangeNoAccessForkPte (
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask
+ );
+
+//
+// Routines for charging quota and commitment.
+//
+
+ULONG
+FASTCALL
+MiChargePageFileQuota (
+ IN ULONG QuotaCharge,
+ IN PEPROCESS CurrentProcess
+ );
+
+VOID
+MiReturnPageFileQuota (
+ IN ULONG QuotaCharge,
+ IN PEPROCESS CurrentProcess
+ );
+
+VOID
+FASTCALL
+MiChargeCommitment (
+ IN ULONG QuotaCharge,
+ IN PEPROCESS Process OPTIONAL
+ );
+
+VOID
+FASTCALL
+MiChargeCommitmentCantExpand (
+ IN ULONG QuotaCharge,
+ IN ULONG MustSucceed
+ );
+
+VOID
+FASTCALL
+MiReturnCommitment (
+ IN ULONG QuotaCharge
+ );
+
+ULONG
+MiCalculatePageCommitment (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ );
+
+VOID
+MiReturnPageTablePageCommitment (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PEPROCESS CurrentProcess,
+ IN PMMVAD PreviousVad,
+ IN PMMVAD NextVad
+ );
+
+VOID
+MiEmptyAllWorkingSets (
+ VOID
+ );
+
+
+VOID
+MiFlushAllPages (
+ VOID
+ );
+
+
+//
+// hack stuff for testing.
+//
+
+VOID
+MiDumpValidAddresses (
+ VOID
+ );
+
+VOID
+MiDumpPfn ( VOID );
+
+VOID
+MiDumpWsl ( VOID );
+
+
+VOID
+MiFormatPte (
+ IN PMMPTE PointerPte
+ );
+
+VOID
+MiCheckPfn ( VOID );
+
+VOID
+MiCheckPte ( VOID );
+
+VOID
+MiFormatPfn (
+ IN PMMPFN PointerPfn
+ );
+
+
+
+
+extern MMPTE ZeroPte;
+
+extern MMPTE ZeroKernelPte;
+
+extern MMPTE ValidKernelPte;
+
+extern MMPTE ValidKernelPde;
+
+extern MMPTE ValidUserPte;
+
+extern MMPTE ValidPtePte;
+
+extern MMPTE ValidPdePde;
+
+extern MMPTE DemandZeroPde;
+
+extern MMPTE DemandZeroPte;
+
+extern MMPTE KernelPrototypePte;
+
+extern MMPTE TransitionPde;
+
+extern MMPTE PrototypePte;
+
+extern MMPTE NoAccessPte;
+
+extern ULONG MmSubsectionBase;
+
+extern ULONG MmSubsectionTopPage;
+
+// extern MMPTE UserNoCommitPte;
+
+//
+// Virtual alignment for PTEs (machine specific); minimum value is
+// 4k, maximum value is 64k.  The maximum value can be raised by
+// changing the MM_PROTO_PTE_ALIGNMENT constant and adding more
+// reserved mapping PTEs in hyperspace.
+//
+
+//
+// Total number of physical pages on the system.
+//
+
+extern ULONG MmNumberOfPhysicalPages;
+
+//
+// Lowest physical page number on the system.
+//
+
+extern ULONG MmLowestPhysicalPage;
+
+//
+// Highest physical page number on the system.
+//
+
+extern ULONG MmHighestPhysicalPage;
+
+//
+// Total number of available pages on the system. This
+// is the sum of the pages on the zeroed, free and standby lists.
+//
+
+extern ULONG MmAvailablePages;
+
+//
+// Total number of free pages to base working set trimming on.
+//
+
+extern ULONG MmMoreThanEnoughFreePages;
+
+//
+// System wide count of the number of page faults.
+//
+
+//extern ULONG MmPageFaultCount;
+
+//
+// Total number of physical pages which would be usable if every process
+// was at its minimum working set size.  This value is initialized
+// at system initialization to MmAvailablePages - MM_FLUID_PHYSICAL_PAGES.
+// Every time a thread is created, the kernel stack is subtracted from
+// this, and every time a process is created, the minimum working set
+// is subtracted from this.  If the value would become negative, the
+// operation (create process/kernel stack/adjust working set) fails.
+// The PFN LOCK must be owned to manipulate this value.
+//
+
+extern LONG MmResidentAvailablePages;
+
+//
+// The total number of pages which would be removed from working sets
+// if every working set was at its minimum.
+//
+
+extern ULONG MmPagesAboveWsMinimum;
+
+//
+// The total number of pages which would be removed from working sets
+// if every working set above its maximum was at its maximum.
+//
+
+extern ULONG MmPagesAboveWsMaximum;
+
+//
+// If memory is becoming short and MmPagesAboveWsMinimum is
+// greater than MmPagesAboveWsThreshold, trim working sets.
+//
+
+extern ULONG MmPagesAboveWsThreshold;
+
+//
+// The number of pages to add to a working set if there are ample
+// available pages and the working set is below its maximum.
+//
+
+
+extern ULONG MmWorkingSetSizeIncrement;
+
+//
+// The number of pages to extend the maximum working set size by
+// if the working set at its maximum and there are ample available pages.
+
+extern ULONG MmWorkingSetSizeExpansion;
+
+//
+// The number of pages required to be freed by working set reduction
+// before working set reduction is attempted.
+//
+
+extern ULONG MmWsAdjustThreshold;
+
+//
+// The number of pages available to allow the working set to be
+// expanded above its maximum.
+//
+
+extern ULONG MmWsExpandThreshold;
+
+//
+// The total number of pages to reduce by working set trimming.
+//
+
+extern ULONG MmWsTrimReductionGoal;
+
+extern PMMPFN MmPfnDatabase;
+
+extern MMPFNLIST MmZeroedPageListHead;
+
+extern MMPFNLIST MmFreePageListHead;
+
+extern MMPFNLIST MmStandbyPageListHead;
+
+extern MMPFNLIST MmStandbyPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS];
+
+extern MMPFNLIST MmModifiedPageListHead;
+
+extern MMPFNLIST MmModifiedNoWritePageListHead;
+
+extern MMPFNLIST MmBadPageListHead;
+
+extern PMMPFNLIST MmPageLocationList[NUMBER_OF_PAGE_LISTS];
+
+extern MMPFNLIST MmModifiedPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS];
+
+extern ULONG MmModNoWriteInsert;
+
+//
+// Event for available pages, set means pages are available.
+//
+
+extern KEVENT MmAvailablePagesEvent;
+
+extern KEVENT MmAvailablePagesEventHigh;
+
+//
+// Event for the zeroing page thread.
+//
+
+extern KEVENT MmZeroingPageEvent;
+
+//
+// Boolean to indicate if the zeroing page thread is currently
+// active. This is set to true when the zeroing page event is
+// set and set to false when the zeroing page thread is done
+// zeroing all the pages on the free list.
+//
+
+extern BOOLEAN MmZeroingPageThreadActive;
+
+//
+// Minimum number of free pages before zeroing page thread starts.
+//
+
+extern ULONG MmMinimumFreePagesToZero;
+
+//
+// Global event to synchronize mapped writing with cleaning segments.
+//
+
+extern KEVENT MmMappedFileIoComplete;
+
+//
+// Hyper space items.
+//
+
+extern PMMPTE MmFirstReservedMappingPte;
+
+extern PMMPTE MmLastReservedMappingPte;
+
+//
+// System space sizes - MmNonPagedSystemStart to MM_NON_PAGED_SYSTEM_END
+// defines the range of PDEs which must be copied into a new process's
+// address space.
+//
+
+extern PVOID MmNonPagedSystemStart;
+
+extern PCHAR MmSystemSpaceViewStart;
+
+//
+// Pool sizes.
+//
+
+extern ULONG MmSizeOfNonPagedPoolInBytes;
+
+extern ULONG MmMinimumNonPagedPoolSize;
+
+extern ULONG MmDefaultMaximumNonPagedPool;
+
+extern ULONG MmMinAdditionNonPagedPoolPerMb;
+
+extern ULONG MmMaxAdditionNonPagedPoolPerMb;
+
+extern ULONG MmSizeOfPagedPoolInBytes;
+
+extern ULONG MmMaximumNonPagedPoolInBytes;
+
+extern ULONG MmSizeOfNonPagedMustSucceed;
+
+extern PVOID MmNonPagedPoolExpansionStart;
+
+extern ULONG MmExpandedPoolBitPosition;
+
+extern ULONG MmNumberOfFreeNonPagedPool;
+
+extern ULONG MmMustSucceedPoolBitPosition;
+
+extern ULONG MmNumberOfSystemPtes;
+
+extern ULONG MmTotalFreeSystemPtes[MaximumPtePoolTypes];
+
+extern ULONG MmLockLimitInBytes;
+
+extern ULONG MmLockPagesLimit;
+
+extern PMMPTE MmFirstPteForPagedPool;
+
+extern PMMPTE MmLastPteForPagedPool;
+
+extern PMMPTE MmSystemPagePtes;
+
+extern ULONG MmSystemPageDirectory;
+
+extern PMMPTE MmPagedPoolBasePde;
+
+extern LIST_ENTRY MmNonPagedPoolFreeListHead;
+
+//
+// Counter for flushes of the entire TB.
+//
+
+extern MMPTE MmFlushCounter;
+
+//
+// Pool start and end.
+//
+
+extern PVOID MmNonPagedPoolStart;
+
+extern PVOID MmNonPagedPoolEnd;
+
+extern PVOID MmPagedPoolStart;
+
+extern PVOID MmPagedPoolEnd;
+
+extern PVOID MmNonPagedMustSucceed;
+
+//
+// Pool bit maps and other related structures.
+//
+
+extern PRTL_BITMAP MmPagedPoolAllocationMap;
+
+extern PRTL_BITMAP MmEndOfPagedPoolBitmap;
+
+extern PVOID MmPageAlignedPoolBase[2];
+
+//
+// MmFirstFreeSystemPte contains the offset from the
+// Nonpaged system base to the first free system PTE.
+// Note that an offset of zero indicates an empty list.
+//
+
+extern MMPTE MmFirstFreeSystemPte[MaximumPtePoolTypes];
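+
+//
+// Illustrative only: recovering the list head from the stored offset.
+// The field name u.List.NextEntry and the index SystemPteSpace are
+// assumed here; they are not declared in this excerpt:
+//
+//     FreePte = MmSystemPteBase +
+//                   MmFirstFreeSystemPte[SystemPteSpace].u.List.NextEntry;
+//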
+
+extern PMMPTE MmNextPteForPagedPoolExpansion;
+
+//
+// System cache sizes.
+//
+
+//extern MMSUPPORT MmSystemCacheWs;
+
+extern PMMWSL MmSystemCacheWorkingSetList;
+
+extern PMMWSLE MmSystemCacheWsle;
+
+extern PVOID MmSystemCacheStart;
+
+extern PVOID MmSystemCacheEnd;
+
+extern PRTL_BITMAP MmSystemCacheAllocationMap;
+
+extern PRTL_BITMAP MmSystemCacheEndingMap;
+
+extern ULONG MmSystemCacheBitMapHint;
+
+extern ULONG MmSizeOfSystemCacheInPages;
+
+extern ULONG MmSystemCacheWsMinimum;
+
+extern ULONG MmSystemCacheWsMaximum;
+
+//
+// Virtual alignment for PTEs (machine specific). The minimum value is
+// 0 (no alignment) and the maximum value is 64k. The maximum value can be raised by
+// changing the MM_PROTO_PTE_ALIGMENT constant and adding more
+// reserved mapping PTEs in hyperspace.
+//
+
+extern ULONG MmAliasAlignment;
+
+//
+// Mask to AND with virtual address to get an offset to go
+// with the alignment. This value is page aligned.
+//
+
+extern ULONG MmAliasAlignmentOffset;
+
+//
+// Mask to AND with PTEs to determine if the alias mapping is compatible.
+// This value is usually (MmAliasAlignment - 1).
+//
+
+extern ULONG MmAliasAlignmentMask;
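+
+//
+// Illustrative use of the mask above (a sketch, not code from this
+// module): two virtual addresses can share an alias mapping only if
+// their alignment bits agree:
+//
+//     if (((ULONG)Va1 & MmAliasAlignmentMask) ==
+//         ((ULONG)Va2 & MmAliasAlignmentMask)) {
+//         // the alias mapping is compatible
+//     }
+//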
+
+//
+// Cells to track unused thread kernel stacks to avoid TB flushes
+// every time a thread terminates.
+//
+
+extern ULONG MmNumberDeadKernelStacks;
+extern ULONG MmMaximumDeadKernelStacks;
+extern PMMPFN MmFirstDeadKernelStack;
+
+//
+// MmSystemPteBase contains the address of 1 PTE before
+// the first free system PTE (zero indicates an empty list).
+// The value of this field does not change once set.
+//
+
+extern PMMPTE MmSystemPteBase;
+
+extern PMMWSL MmWorkingSetList;
+
+extern PMMWSLE MmWsle;
+
+//
+// Root of system space virtual address descriptors. These define
+// the pageable portion of the system.
+//
+
+extern PMMVAD MmVirtualAddressDescriptorRoot;
+
+extern PMMADDRESS_NODE MmSectionBasedRoot;
+
+extern PVOID MmHighSectionBase;
+
+//
+// Section commit mutex.
+//
+
+extern FAST_MUTEX MmSectionCommitMutex;
+
+//
+// Section base address mutex.
+//
+
+extern FAST_MUTEX MmSectionBasedMutex;
+
+//
+// Resource for section extension.
+//
+
+extern ERESOURCE MmSectionExtendResource;
+extern ERESOURCE MmSectionExtendSetResource;
+
+//
+// Event to synchronize threads within a process mapping images via hyperspace.
+//
+
+extern KEVENT MmImageMappingPteEvent;
+
+//
+// Inpage cluster sizes for executable pages (set based on memory size).
+//
+
+extern ULONG MmDataClusterSize;
+
+extern ULONG MmCodeClusterSize;
+
+//
+// Pagefile creation mutex.
+//
+
+extern FAST_MUTEX MmPageFileCreationLock;
+
+//
+// Event to set when first paging file is created.
+//
+
+extern PKEVENT MmPagingFileCreated;
+
+//
+// Spinlock which guards PFN database. This spinlock is used by
+// memory management for accessing the PFN database. The I/O
+// system makes use of it for unlocking pages during I/O complete.
+//
+
+extern KSPIN_LOCK MmPfnLock;
+
+//
+// Spinlock which guards the working set list for the system shared
+// address space (paged pool, system cache, pageable drivers).
+//
+
+extern ERESOURCE MmSystemWsLock;
+
+//
+// Spin lock for allocating non-paged PTEs from system space.
+//
+
+extern KSPIN_LOCK MmSystemSpaceLock;
+
+//
+// Spin lock for operating on page file commit charges.
+//
+
+extern KSPIN_LOCK MmChargeCommitmentLock;
+
+//
+// Spin lock for allowing working set expansion.
+//
+
+extern KSPIN_LOCK MmExpansionLock;
+
+//
+// To prevent optimizations.
+//
+
+extern MMPTE GlobalPte;
+
+//
+// Page color for system working set.
+//
+
+extern ULONG MmSystemPageColor;
+
+extern ULONG MmSecondaryColors;
+
+extern ULONG MmProcessColorSeed;
+
+//
+// Set from ntos\config\CMDAT3.C. Used by customers to disable paging
+// of the executive on machines with lots of memory. Worth a few TPS on a
+// database server.
+//
+
+extern ULONG MmDisablePagingExecutive;
+
+//
+// For debugging.
+//
+
+#if DBG
+extern ULONG MmDebug;
+#endif
+
+//
+// List heads
+//
+
+extern MMDEREFERENCE_SEGMENT_HEADER MmDereferenceSegmentHeader;
+
+extern LIST_ENTRY MmUnusedSegmentList;
+
+extern ULONG MmUnusedSegmentCount;
+
+extern KEVENT MmUnusedSegmentCleanup;
+
+extern ULONG MmUnusedSegmentCountMaximum;
+
+extern ULONG MmUnusedSegmentCountGoal;
+
+extern MMWORKING_SET_EXPANSION_HEAD MmWorkingSetExpansionHead;
+
+extern MMPAGE_FILE_EXPANSION MmAttemptForCantExtend;
+
+//
+// Paging files
+//
+
+extern MMMOD_WRITER_LISTHEAD MmPagingFileHeader;
+
+extern MMMOD_WRITER_LISTHEAD MmMappedFileHeader;
+
+extern PMMPAGING_FILE MmPagingFile[MAX_PAGE_FILES];
+
+#define MM_MAPPED_FILE_MDLS 4
+
+
+extern PMMMOD_WRITER_MDL_ENTRY MmMappedFileMdl[MM_MAPPED_FILE_MDLS];
+
+extern LIST_ENTRY MmFreePagingSpaceLow;
+
+extern ULONG MmNumberOfActiveMdlEntries;
+
+extern ULONG MmNumberOfPagingFiles;
+
+extern KEVENT MmModifiedPageWriterEvent;
+
+extern KEVENT MmCollidedFlushEvent;
+
+extern KEVENT MmCollidedLockEvent;
+
+//
+// Total number of committed pages.
+//
+
+extern ULONG MmTotalCommittedPages;
+
+extern ULONG MmTotalCommitLimit;
+
+extern ULONG MmOverCommit;
+
+//
+// Modified page writer.
+//
+
+extern ULONG MmMinimumFreePages;
+
+extern ULONG MmFreeGoal;
+
+extern ULONG MmModifiedPageMaximum;
+
+extern ULONG MmModifiedPageMinimum;
+
+extern ULONG MmModifiedWriteClusterSize;
+
+extern ULONG MmMinimumFreeDiskSpace;
+
+extern ULONG MmPageFileExtension;
+
+extern ULONG MmMinimumPageFileReduction;
+
+//
+// System process working set sizes.
+//
+
+extern ULONG MmSystemProcessWorkingSetMin;
+
+extern ULONG MmSystemProcessWorkingSetMax;
+
+extern ULONG MmMinimumWorkingSetSize;
+
+//
+// Support for the debugger's mapping of physical memory.
+//
+
+extern PMMPTE MmDebugPte;
+
+extern PMMPTE MmCrashDumpPte;
+
+extern ULONG MiOverCommitCallCount;
+
+#if DBG
+
+extern PRTL_EVENT_ID_INFO MiAllocVmEventId;
+extern PRTL_EVENT_ID_INFO MiFreeVmEventId;
+
+#endif // DBG
+
+#endif // MI
diff --git a/private/ntos/mm/miglobal.c b/private/ntos/mm/miglobal.c
new file mode 100644
index 000000000..e88e51f4a
--- /dev/null
+++ b/private/ntos/mm/miglobal.c
@@ -0,0 +1,795 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ miglobal.c
+
+Abstract:
+
+ This module contains the private global storage for the memory
+ management subsystem.
+
+Author:
+
+ Lou Perazzoli (loup) 6-Apr-1989
+
+Revision History:
+
+--*/
+#include "mi.h"
+
+//
+// Number of colors for pages in the system.
+//
+
+ULONG MmNumberOfColors;
+
+//
+// Number of secondary colors, based on the level 2 data cache size.
+//
+
+ULONG MmSecondaryColors;
+
+//
+// The starting color index seed, incremented at each process creation.
+//
+
+ULONG MmProcessColorSeed = 0x12345678;
+
+//
+// Total number of physical pages available on the system.
+//
+
+ULONG MmNumberOfPhysicalPages;
+
+//
+// Lowest physical page number on the system.
+//
+
+ULONG MmLowestPhysicalPage = 0xFFFFFFFF;
+
+//
+// Highest physical page number on the system.
+//
+
+ULONG MmHighestPhysicalPage;
+
+//
+// Total number of available pages on the system. This
+// is the sum of the pages on the zeroed, free and standby lists.
+//
+
+ULONG MmAvailablePages;
+ULONG MmThrottleTop;
+ULONG MmThrottleBottom;
+
+//
+// System wide memory management statistics block.
+//
+
+MMINFO_COUNTERS MmInfoCounters;
+
+//
+// Total number of physical pages which would be usable if every process
+// was at its minimum working set size. This value is initialized
+// at system initialization to MmAvailablePages - MM_FLUID_PHYSICAL_PAGES.
+// Every time a thread is created, the kernel stack is subtracted from
+// this and every time a process is created, the minimum working set
+// is subtracted from this. If the value would become negative, the
+// operation (create process/kernel stack/adjust working set) fails.
+// The PFN LOCK must be owned to manipulate this value.
+//
+
+LONG MmResidentAvailablePages;
+
+//
+// The total number of pages which would be removed from working sets
+// if every working set was at its minimum.
+//
+
+ULONG MmPagesAboveWsMinimum;
+
+//
+// The total number of pages which would be removed from working sets
+// if every working set above its maximum was at its maximum.
+//
+
+ULONG MmPagesAboveWsMaximum;
+
+//
+// If memory is becoming short and MmPagesAboveWsMinimum is
+// greater than MmPagesAboveWsThreshold, trim working sets.
+//
+
+ULONG MmPagesAboveWsThreshold = 37;
+
+//
+// The number of pages to add to a working set if there are ample
+// available pages and the working set is below its maximum.
+//
+
+ULONG MmWorkingSetSizeIncrement = 6;
+
+//
+// The number of pages to extend the maximum working set size by
+// if the working set is at its maximum and there are ample available pages.
+//
+
+ULONG MmWorkingSetSizeExpansion = 20;
+
+//
+// The number of pages required to be freed by working set reduction
+// before working set reduction is attempted.
+//
+
+ULONG MmWsAdjustThreshold = 45;
+
+//
+// The number of pages available to allow the working set to be
+// expanded above its maximum.
+//
+
+ULONG MmWsExpandThreshold = 90;
+
+//
+// The total number of pages to reduce by working set trimming.
+//
+
+ULONG MmWsTrimReductionGoal = 29;
+
+PMMPFN MmPfnDatabase;
+
+MMPFNLIST MmZeroedPageListHead = {
+ 0, // Total
+ ZeroedPageList, // ListName
+ MM_EMPTY_LIST, //Flink
+ MM_EMPTY_LIST // Blink
+ };
+
+MMPFNLIST MmFreePageListHead = {
+ 0, // Total
+ FreePageList, // ListName
+ MM_EMPTY_LIST, //Flink
+ MM_EMPTY_LIST // Blink
+ };
+
+MMPFNLIST MmStandbyPageListHead = {
+ 0, // Total
+ StandbyPageList, // ListName
+ MM_EMPTY_LIST, //Flink
+ MM_EMPTY_LIST // Blink
+ };
+
+MMPFNLIST MmModifiedPageListHead = {
+ 0, // Total
+ ModifiedPageList, // ListName
+ MM_EMPTY_LIST, //Flink
+ MM_EMPTY_LIST // Blink
+ };
+
+MMPFNLIST MmModifiedNoWritePageListHead = {
+ 0, // Total
+ ModifiedNoWritePageList, // ListName
+ MM_EMPTY_LIST, //Flink
+ MM_EMPTY_LIST // Blink
+ };
+
+MMPFNLIST MmBadPageListHead = {
+ 0, // Total
+ BadPageList, // ListName
+ MM_EMPTY_LIST, //Flink
+ MM_EMPTY_LIST // Blink
+ };
+
+PMMPFNLIST MmPageLocationList[NUMBER_OF_PAGE_LISTS] = {
+ &MmZeroedPageListHead,
+ &MmFreePageListHead,
+ &MmStandbyPageListHead,
+ &MmModifiedPageListHead,
+ &MmModifiedNoWritePageListHead,
+ &MmBadPageListHead,
+ NULL,
+ NULL };
+
+
+// PMMPFNLIST MmPageLocationList[FreePageList] = &MmFreePageListHead;
+//
+// PMMPFNLIST MmPageLocationList[ZeroedPageList] = &MmZeroedPageListHead;
+//
+// PMMPFNLIST MmPageLocationList[StandbyPageList] = &MmStandbyPageListHead;
+//
+// PMMPFNLIST MmPageLocationList[ModifiedPageList] = &MmModifiedPageListHead;
+//
+// PMMPFNLIST MmPageLocationList[ModifiedNoWritePageList] = &MmModifiedNoWritePageListHead;
+//
+// PMMPFNLIST MmPageLocationList[BadPageList] = &MmBadPageListHead;
+//
+// PMMPFNLIST MmPageLocationList[ActiveAndValid] = NULL;
+//
+// PMMPFNLIST MmPageLocationList[TransitionPage] = NULL;
+
+//
+// Hyper space items.
+//
+
+PMMPTE MmFirstReservedMappingPte;
+
+PMMPTE MmLastReservedMappingPte;
+
+PMMWSL MmWorkingSetList;
+
+PMMWSLE MmWsle;
+
+//
+// Event for available pages, set means pages are available.
+//
+
+KEVENT MmAvailablePagesEvent;
+
+//
+// Event for the zeroing page thread.
+//
+
+KEVENT MmZeroingPageEvent;
+
+//
+// Boolean to indicate if the zeroing page thread is currently
+// active. This is set to true when the zeroing page event is
+// set and set to false when the zeroing page thread is done
+// zeroing all the pages on the free list.
+//
+
+BOOLEAN MmZeroingPageThreadActive;
+
+//
+// Minimum number of free pages before zeroing page thread starts.
+//
+
+ULONG MmMinimumFreePagesToZero = 8;
+
+//
+// System space sizes - MmNonPagedSystemStart to MM_NON_PAGED_SYSTEM_END
+// defines the range of PDEs which must be copied into a new process's
+// address space.
+//
+
+PVOID MmNonPagedSystemStart;
+
+//
+// Pool sizes.
+//
+
+ULONG MmSizeOfNonPagedPoolInBytes;
+
+ULONG MmMaximumNonPagedPoolInBytes;
+
+ULONG MmMinimumNonPagedPoolSize = 256 * 1024; // 256k
+
+ULONG MmMinAdditionNonPagedPoolPerMb = 32 * 1024; // 32k
+
+ULONG MmDefaultMaximumNonPagedPool = 1024 * 1024; // 1mb
+
+ULONG MmMaxAdditionNonPagedPoolPerMb = 400 * 1024; //400k
+
+ULONG MmSizeOfPagedPoolInBytes = 32 * 1024 * 1024; // 32 MB.
+
+ULONG MmSizeOfNonPagedMustSucceed = 4 * PAGE_SIZE; // 4 pages
+
+ULONG MmNumberOfSystemPtes;
+
+ULONG MmLockLimitInBytes = 512 * 1024;
+
+ULONG MmLockPagesLimit;
+
+PMMPTE MmFirstPteForPagedPool;
+
+PMMPTE MmLastPteForPagedPool;
+
+PMMPTE MmPagedPoolBasePde;
+
+//
+// Pool bit maps and other related structures.
+//
+
+PRTL_BITMAP MmPagedPoolAllocationMap;
+
+PRTL_BITMAP MmEndOfPagedPoolBitmap;
+
+PVOID MmPageAlignedPoolBase[2];
+
+PVOID MmNonPagedMustSucceed;
+
+ULONG MmExpandedPoolBitPosition;
+
+ULONG MmNumberOfFreeNonPagedPool;
+
+ULONG MmMustSucceedPoolBitPosition;
+
+//
+// MmFirstFreeSystemPte contains the offset from the
+// Nonpaged system base to the first free system PTE.
+// Note that an offset of FFFFF indicates an empty list.
+//
+
+MMPTE MmFirstFreeSystemPte[MaximumPtePoolTypes];
+
+PMMPTE MmNextPteForPagedPoolExpansion;
+
+//
+// System cache sizes.
+//
+
+PMMWSL MmSystemCacheWorkingSetList = (PMMWSL)MM_SYSTEM_CACHE_WORKING_SET;
+
+MMSUPPORT MmSystemCacheWs;
+
+PMMWSLE MmSystemCacheWsle;
+
+PVOID MmSystemCacheStart = (PVOID)MM_SYSTEM_CACHE_START;
+
+PVOID MmSystemCacheEnd;
+
+PRTL_BITMAP MmSystemCacheAllocationMap;
+
+PRTL_BITMAP MmSystemCacheEndingMap;
+
+ULONG MmSystemCacheBitMapHint;
+
+//
+// This value should not be greater than 256MB in a system with 1GB of
+// system space.
+//
+
+ULONG MmSizeOfSystemCacheInPages = 64 * 256; //64MB.
+
+//
+// Default sizes for the system cache.
+//
+
+ULONG MmSystemCacheWsMinimum = 288;
+
+ULONG MmSystemCacheWsMaximum = 350;
+
+//
+// Cells to track unused thread kernel stacks to avoid TB flushes
+// every time a thread terminates.
+//
+
+ULONG MmNumberDeadKernelStacks;
+ULONG MmMaximumDeadKernelStacks = 5;
+PMMPFN MmFirstDeadKernelStack = (PMMPFN)NULL;
+
+//
+// MmSystemPteBase contains the address of 1 PTE before
+// the first free system PTE (zero indicates an empty list).
+// The value of this field does not change once set.
+//
+
+PMMPTE MmSystemPteBase;
+
+PMMWSL MmWorkingSetList;
+
+PMMWSLE MmWsle;
+
+PMMADDRESS_NODE MmSectionBasedRoot;
+
+PVOID MmHighSectionBase = (PVOID)((ULONG)MM_HIGHEST_USER_ADDRESS - 0x800000);
+
+//
+// Section object type.
+//
+
+POBJECT_TYPE MmSectionObjectType;
+
+//
+// Section commit mutex.
+//
+
+FAST_MUTEX MmSectionCommitMutex;
+
+//
+// Section base address mutex.
+//
+
+FAST_MUTEX MmSectionBasedMutex;
+
+//
+// Resource for section extension.
+//
+
+ERESOURCE MmSectionExtendResource;
+ERESOURCE MmSectionExtendSetResource;
+
+//
+// Pagefile creation lock.
+//
+
+FAST_MUTEX MmPageFileCreationLock;
+
+//
+// Event to set when first paging file is created.
+//
+
+PKEVENT MmPagingFileCreated;
+
+MMPTE GlobalPte;
+
+MMDEREFERENCE_SEGMENT_HEADER MmDereferenceSegmentHeader;
+
+LIST_ENTRY MmUnusedSegmentList;
+
+KEVENT MmUnusedSegmentCleanup;
+
+ULONG MmUnusedSegmentCount;
+
+//
+// The maximum number of unused segments to accumulate before reduction
+// begins.
+//
+
+ULONG MmUnusedSegmentCountMaximum = 1000;
+
+//
+// The number of unused segments to have when reduction is complete.
+//
+
+ULONG MmUnusedSegmentCountGoal = 800;
+
+MMWORKING_SET_EXPANSION_HEAD MmWorkingSetExpansionHead;
+
+MMPAGE_FILE_EXPANSION MmAttemptForCantExtend;
+
+//
+// Paging files
+//
+
+MMMOD_WRITER_LISTHEAD MmPagingFileHeader;
+
+MMMOD_WRITER_LISTHEAD MmMappedFileHeader;
+
+PMMMOD_WRITER_MDL_ENTRY MmMappedFileMdl[MM_MAPPED_FILE_MDLS];
+
+LIST_ENTRY MmFreePagingSpaceLow;
+
+ULONG MmNumberOfActiveMdlEntries;
+
+PMMPAGING_FILE MmPagingFile[MAX_PAGE_FILES];
+
+ULONG MmNumberOfPagingFiles;
+
+KEVENT MmModifiedPageWriterEvent;
+
+KEVENT MmWorkingSetManagerEvent;
+
+KEVENT MmCollidedFlushEvent;
+
+//
+// Total number of committed pages.
+//
+
+ULONG MmTotalCommittedPages;
+
+//
+// Limit on committed pages. When MmTotalCommittedPages would become
+// greater than or equal to this number the paging files must be expanded.
+//
+
+ULONG MmTotalCommitLimit;
+
+//
+// Number of pages to overcommit without expanding the paging file.
+// MmTotalCommitLimit = (total paging file space) + MmOverCommit.
+//
+
+ULONG MmOverCommit;
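+
+//
+// Illustrative only: the relationship stated above, written as the
+// check a commit charge implies (the real charge path, which also
+// triggers paging file expansion, is not in this file):
+//
+//     // MmTotalCommitLimit = (total paging file space) + MmOverCommit
+//     if (MmTotalCommittedPages + PageCharge >= MmTotalCommitLimit) {
+//         // a paging file must be extended for the charge to succeed
+//     }
+//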
+
+//
+// Modified page writer.
+//
+
+//
+// Minimum number of free pages before working set trimming and
+// aggressive modified page writing are started.
+//
+
+ULONG MmMinimumFreePages = 26;
+
+//
+// Stop writing modified pages when MmFreeGoal pages exist.
+//
+
+ULONG MmFreeGoal = 100;
+
+//
+// Start writing pages if more than this number of pages
+// is on the modified page list.
+//
+
+ULONG MmModifiedPageMaximum;
+
+//
+// Minimum number of modified pages required before the modified
+// page writer is started.
+//
+
+ULONG MmModifiedPageMinimum;
+
+//
+// Amount of disk space that must be free after the paging file is
+// extended.
+//
+
+ULONG MmMinimumFreeDiskSpace = 1024 * 1024;
+
+//
+// Size to extend the paging file by.
+//
+
+ULONG MmPageFileExtension = 128; //128 pages
+
+//
+// Size to reduce the paging file by.
+//
+
+ULONG MmMinimumPageFileReduction = 256; //256 pages (1mb)
+
+//
+// Number of pages to write in a single I/O.
+//
+
+ULONG MmModifiedWriteClusterSize = MM_MAXIMUM_WRITE_CLUSTER;
+
+//
+// Number of pages to read in a single I/O if possible.
+//
+
+ULONG MmReadClusterSize = 7;
+
+//
+// Spin locks.
+//
+
+//
+// Spinlock which guards PFN database. This spinlock is used by
+// memory management for accessing the PFN database. The I/O
+// system makes use of it for unlocking pages during I/O complete.
+//
+
+// KSPIN_LOCK MmPfnLock;
+
+//
+// Spinlock which guards the working set list for the system shared
+// address space (paged pool, system cache, pageable drivers).
+//
+
+ERESOURCE MmSystemWsLock;
+
+PETHREAD MmSystemLockOwner;
+
+//
+// Spin lock for allocating non-paged PTEs from system space.
+//
+
+// KSPIN_LOCK MmSystemSpaceLock;
+
+//
+// Spin lock for operating on page file commit charges.
+//
+
+// KSPIN_LOCK MmChargeCommitmentLock;
+
+//
+// Spin lock for allowing working set expansion.
+//
+
+KSPIN_LOCK MmExpansionLock;
+
+//
+// Spin lock for protecting hyper space access.
+//
+
+//
+// System process working set sizes.
+//
+
+ULONG MmSystemProcessWorkingSetMin = 50;
+
+ULONG MmSystemProcessWorkingSetMax = 450;
+
+ULONG MmMaximumWorkingSetSize;
+
+ULONG MmMinimumWorkingSetSize = 20;
+
+
+//
+// Page color for system working set.
+//
+
+ULONG MmSystemPageColor;
+
+//
+// Time constants
+//
+
+LARGE_INTEGER MmSevenMinutes = {0, -1};
+
+//
+// Note that the following constant is initialized to five seconds,
+// but is set to 3 seconds on very small workstations. The constant used to
+// be called MmFiveSecondsAbsolute, but since its value changes depending on
+// the system type and size, I decided to change the name to reflect this.
+//
+LARGE_INTEGER MmWorkingSetProtectionTime = {5 * 1000 * 1000 * 10, 0};
+
+LARGE_INTEGER MmOneSecond = {(ULONG)(-1 * 1000 * 1000 * 10), -1};
+LARGE_INTEGER MmTwentySeconds = {(ULONG)(-20 * 1000 * 1000 * 10), -1};
+LARGE_INTEGER MmShortTime = {(ULONG)(-10 * 1000 * 10), -1}; // 10 milliseconds
+LARGE_INTEGER MmHalfSecond = {(ULONG)(-5 * 100 * 1000 * 10), -1};
+LARGE_INTEGER Mm30Milliseconds = {(ULONG)(-30 * 1000 * 10), -1};
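+
+//
+// Note on the initializers above: kernel timeout values are expressed
+// in 100-nanosecond units, and negative values denote relative
+// intervals, so (-30 * 1000 * 10) is 30 milliseconds and
+// (-1 * 1000 * 1000 * 10) is one second; the -1 high part simply
+// sign-extends the 64-bit value.
+//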
+
+//
+// Parameters for user mode passed up via PEB in MmCreatePeb
+//
+ULONG MmCritsectTimeoutSeconds = 2592000;
+LARGE_INTEGER MmCriticalSectionTimeout; // Fill in by miinit.c
+ULONG MmHeapSegmentReserve = 1024 * 1024;
+ULONG MmHeapSegmentCommit = PAGE_SIZE * 2;
+ULONG MmHeapDeCommitTotalFreeThreshold = 64 * 1024;
+ULONG MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
+
+//
+// Set from ntos\config\CMDAT3.C. Used by customers to disable paging
+// of the executive on machines with lots of memory. Worth a few TPS on a
+// database server.
+//
+
+ULONG MmDisablePagingExecutive;
+
+#if DBG
+ULONG MmDebug;
+#endif
+
+//
+// Map a page protection from the Pte.Protect field into a protection mask.
+//
+
+ULONG MmProtectToValue[32] = {
+ PAGE_NOACCESS,
+ PAGE_READONLY,
+ PAGE_EXECUTE,
+ PAGE_EXECUTE_READ,
+ PAGE_READWRITE,
+ PAGE_WRITECOPY,
+ PAGE_EXECUTE_READWRITE,
+ PAGE_EXECUTE_WRITECOPY,
+ PAGE_NOACCESS,
+ PAGE_NOCACHE | PAGE_READONLY,
+ PAGE_NOCACHE | PAGE_EXECUTE,
+ PAGE_NOCACHE | PAGE_EXECUTE_READ,
+ PAGE_NOCACHE | PAGE_READWRITE,
+ PAGE_NOCACHE | PAGE_WRITECOPY,
+ PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
+ PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
+ PAGE_NOACCESS,
+ PAGE_GUARD | PAGE_READONLY,
+ PAGE_GUARD | PAGE_EXECUTE,
+ PAGE_GUARD | PAGE_EXECUTE_READ,
+ PAGE_GUARD | PAGE_READWRITE,
+ PAGE_GUARD | PAGE_WRITECOPY,
+ PAGE_GUARD | PAGE_EXECUTE_READWRITE,
+ PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
+ PAGE_NOACCESS,
+ PAGE_NOCACHE | PAGE_GUARD | PAGE_READONLY,
+ PAGE_NOCACHE | PAGE_GUARD | PAGE_EXECUTE,
+ PAGE_NOCACHE | PAGE_GUARD | PAGE_EXECUTE_READ,
+ PAGE_NOCACHE | PAGE_GUARD | PAGE_READWRITE,
+ PAGE_NOCACHE | PAGE_GUARD | PAGE_WRITECOPY,
+ PAGE_NOCACHE | PAGE_GUARD | PAGE_EXECUTE_READWRITE,
+ PAGE_NOCACHE | PAGE_GUARD | PAGE_EXECUTE_WRITECOPY
+ };
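+
+//
+// Illustrative only: the table is indexed by the 5-bit protection
+// code, so, for example, index 20 (guard bank at 16 plus read/write
+// at 4) yields PAGE_GUARD | PAGE_READWRITE above:
+//
+//     Win32Protection = MmProtectToValue[ProtectionCode & 0x1F];
+//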
+
+ULONG MmProtectToPteMask[32] = {
+ MM_PTE_NOACCESS,
+ MM_PTE_READONLY | MM_PTE_CACHE,
+ MM_PTE_EXECUTE | MM_PTE_CACHE,
+ MM_PTE_EXECUTE_READ | MM_PTE_CACHE,
+ MM_PTE_READWRITE | MM_PTE_CACHE,
+ MM_PTE_WRITECOPY | MM_PTE_CACHE,
+ MM_PTE_EXECUTE_READWRITE | MM_PTE_CACHE,
+ MM_PTE_EXECUTE_WRITECOPY | MM_PTE_CACHE,
+ MM_PTE_NOACCESS,
+ MM_PTE_NOCACHE | MM_PTE_READONLY,
+ MM_PTE_NOCACHE | MM_PTE_EXECUTE,
+ MM_PTE_NOCACHE | MM_PTE_EXECUTE_READ,
+ MM_PTE_NOCACHE | MM_PTE_READWRITE,
+ MM_PTE_NOCACHE | MM_PTE_WRITECOPY,
+ MM_PTE_NOCACHE | MM_PTE_EXECUTE_READWRITE,
+ MM_PTE_NOCACHE | MM_PTE_EXECUTE_WRITECOPY,
+ MM_PTE_NOACCESS,
+ MM_PTE_GUARD | MM_PTE_READONLY | MM_PTE_CACHE,
+ MM_PTE_GUARD | MM_PTE_EXECUTE | MM_PTE_CACHE,
+ MM_PTE_GUARD | MM_PTE_EXECUTE_READ | MM_PTE_CACHE,
+ MM_PTE_GUARD | MM_PTE_READWRITE | MM_PTE_CACHE,
+ MM_PTE_GUARD | MM_PTE_WRITECOPY | MM_PTE_CACHE,
+ MM_PTE_GUARD | MM_PTE_EXECUTE_READWRITE | MM_PTE_CACHE,
+ MM_PTE_GUARD | MM_PTE_EXECUTE_WRITECOPY | MM_PTE_CACHE,
+ MM_PTE_NOACCESS,
+ MM_PTE_NOCACHE | MM_PTE_GUARD | MM_PTE_READONLY,
+ MM_PTE_NOCACHE | MM_PTE_GUARD | MM_PTE_EXECUTE,
+ MM_PTE_NOCACHE | MM_PTE_GUARD | MM_PTE_EXECUTE_READ,
+ MM_PTE_NOCACHE | MM_PTE_GUARD | MM_PTE_READWRITE,
+ MM_PTE_NOCACHE | MM_PTE_GUARD | MM_PTE_WRITECOPY,
+ MM_PTE_NOCACHE | MM_PTE_GUARD | MM_PTE_EXECUTE_READWRITE,
+ MM_PTE_NOCACHE | MM_PTE_GUARD | MM_PTE_EXECUTE_WRITECOPY
+ };
+
+//
+// Conversion which takes a Pte.Protect and builds a new Pte.Protect which
+// is not copy-on-write.
+//
+
+ULONG MmMakeProtectNotWriteCopy[32] = {
+ MM_NOACCESS,
+ MM_READONLY,
+ MM_EXECUTE,
+ MM_EXECUTE_READ,
+ MM_READWRITE,
+ MM_READWRITE, //not copy
+ MM_EXECUTE_READWRITE,
+ MM_EXECUTE_READWRITE,
+ MM_NOACCESS,
+ MM_NOCACHE | MM_READONLY,
+ MM_NOCACHE | MM_EXECUTE,
+ MM_NOCACHE | MM_EXECUTE_READ,
+ MM_NOCACHE | MM_READWRITE,
+ MM_NOCACHE | MM_READWRITE,
+ MM_NOCACHE | MM_EXECUTE_READWRITE,
+ MM_NOCACHE | MM_EXECUTE_READWRITE,
+ MM_NOACCESS,
+ MM_GUARD_PAGE | MM_READONLY,
+ MM_GUARD_PAGE | MM_EXECUTE,
+ MM_GUARD_PAGE | MM_EXECUTE_READ,
+ MM_GUARD_PAGE | MM_READWRITE,
+ MM_GUARD_PAGE | MM_READWRITE,
+ MM_GUARD_PAGE | MM_EXECUTE_READWRITE,
+ MM_GUARD_PAGE | MM_EXECUTE_READWRITE,
+ MM_NOACCESS,
+ MM_NOCACHE | MM_GUARD_PAGE | MM_READONLY,
+ MM_NOCACHE | MM_GUARD_PAGE | MM_EXECUTE,
+ MM_NOCACHE | MM_GUARD_PAGE | MM_EXECUTE_READ,
+ MM_NOCACHE | MM_GUARD_PAGE | MM_READWRITE,
+ MM_NOCACHE | MM_GUARD_PAGE | MM_READWRITE,
+ MM_NOCACHE | MM_GUARD_PAGE | MM_EXECUTE_READWRITE,
+ MM_NOCACHE | MM_GUARD_PAGE | MM_EXECUTE_READWRITE
+ };
+
+//
+// Converts a protection code to an access right for section access.
+// This uses only the lower 3 bits of the 5-bit protection code.
+//
+
+ACCESS_MASK MmMakeSectionAccess[8] = { SECTION_MAP_READ,
+ SECTION_MAP_READ,
+ SECTION_MAP_EXECUTE,
+ SECTION_MAP_EXECUTE | SECTION_MAP_READ,
+ SECTION_MAP_WRITE,
+ SECTION_MAP_READ,
+ SECTION_MAP_EXECUTE | SECTION_MAP_WRITE,
+ SECTION_MAP_EXECUTE | SECTION_MAP_READ };
+
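+//
+// Illustrative only, per the comment above: the index is the low three
+// bits of the protection code, so the copy-on-write variants collapse
+// onto the corresponding read rights:
+//
+//     DesiredAccess = MmMakeSectionAccess[ProtectionCode & 0x7];
+//
+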
+//
+// Converts a protection code to an access right for file access.
+// This uses only the lower 3 bits of the 5-bit protection code.
+//
+
+ACCESS_MASK MmMakeFileAccess[8] = { FILE_READ_DATA,
+ FILE_READ_DATA,
+ FILE_EXECUTE,
+ FILE_EXECUTE | FILE_READ_DATA,
+ FILE_WRITE_DATA | FILE_READ_DATA,
+ FILE_READ_DATA,
+ FILE_EXECUTE | FILE_WRITE_DATA | FILE_READ_DATA,
+ FILE_EXECUTE | FILE_READ_DATA };
+
diff --git a/private/ntos/mm/mips/datamips.c b/private/ntos/mm/mips/datamips.c
new file mode 100644
index 000000000..1ca65a475
--- /dev/null
+++ b/private/ntos/mm/mips/datamips.c
@@ -0,0 +1,191 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ datamips.c
+
+Abstract:
+
+ This module contains the private hardware specific global storage for
+ the memory management subsystem.
+
+Author:
+
+ Lou Perazzoli (loup) 27-Mar-1990
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//
+// A zero Pte.
+//
+
+MMPTE ZeroPte = { 0 };
+
+//
+// A kernel zero PTE.
+//
+
+#ifdef R3000
+MMPTE ZeroKernelPte = { 0 };
+#endif //R3000
+
+#ifdef R4000
+MMPTE ZeroKernelPte = { MM_PTE_GLOBAL_MASK };
+#endif //R4000
+
+#ifdef R3000
+MMPTE ValidKernelPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_GLOBAL_MASK };
+#endif //R3000
+
+#ifdef R4000
+MMPTE ValidKernelPte = { MM_PTE_VALID_MASK |
+ MM_PTE_CACHE_ENABLE_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_GLOBAL_MASK };
+#endif //R4000
+
+
+MMPTE ValidUserPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_CACHE_ENABLE_MASK |
+ MM_PTE_DIRTY_MASK };
+
+MMPTE ValidPtePte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_CACHE_ENABLE_MASK |
+ MM_PTE_DIRTY_MASK };
+
+MMPTE ValidPdePde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_CACHE_ENABLE_MASK |
+ MM_PTE_DIRTY_MASK };
+
+MMPTE ValidKernelPde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK |
+ MM_PTE_CACHE_ENABLE_MASK |
+ MM_PTE_DIRTY_MASK |
+ MM_PTE_GLOBAL_MASK };
+
+#ifdef R3000
+MMPTE DemandZeroPde = { MM_READWRITE << 4 };
+#endif //R3000
+
+#ifdef R4000
+MMPTE DemandZeroPde = { MM_READWRITE << 3 };
+#endif //R4000
+
+#ifdef R3000
+MMPTE DemandZeroPte = { MM_READWRITE << 4 };
+#endif //R3000
+
+
+#ifdef R4000
+MMPTE DemandZeroPte = { MM_READWRITE << 3 };
+#endif //R4000
+
+#ifdef R3000
+MMPTE TransitionPde = { 0x2 | (MM_READWRITE << 4) };
+#endif //R3000
+
+#ifdef R4000
+MMPTE TransitionPde = { MM_PTE_TRANSITION_MASK | (MM_READWRITE << 3) };
+#endif //R4000
+
+#ifdef R3000
+MMPTE PrototypePte = { 0xFFFFF000 | (MM_READWRITE << 4) | MM_PTE_PROTOTYPE_MASK };
+#endif //R3000
+
+#ifdef R4000
+MMPTE PrototypePte = { 0xFFFFF000 | (MM_READWRITE << 3) | MM_PTE_PROTOTYPE_MASK };
+#endif //R4000
+
+//
+// PTE which generates an access violation when referenced.
+//
+
+#ifdef R3000
+MMPTE NoAccessPte = {MM_NOACCESS << 4};
+#endif //R3000
+
+#ifdef R4000
+MMPTE NoAccessPte = {MM_NOACCESS << 3};
+#endif //R4000
+
+
+//
+// Pool start and end.
+//
+
+PVOID MmNonPagedPoolStart;
+
+PVOID MmNonPagedPoolEnd = ((PVOID)MM_NONPAGED_POOL_END);
+
+PVOID MmPagedPoolStart = (PVOID)0xE1000000;
+
+PVOID MmPagedPoolEnd;
+
+
+//
+// Color tables for free and zeroed pages.
+//
+
+MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+
+PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+MMPFNLIST MmStandbyPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST
+ };
+
+
+//
+// Color tables for modified pages destined for the paging file.
+//
+
+MMPFNLIST MmModifiedPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST};
+
+ULONG MmSecondaryColorMask;
+
+//
+// Count of the number of modified pages destined for the paging file.
+//
+
+ULONG MmTotalPagesForPagingFile;
+
+
+//
+// PTE reserved for mapping physical data for debugger.
+//
+
+PMMPTE MmDebugPte = (MiGetPteAddress((PVOID)MM_NONPAGED_POOL_END));
+
+//
+// 17 PTEs reserved for mapping MDLs (64k max) + 1 to ensure g-bits right.
+//
+
+PMMPTE MmCrashDumpPte = (MiGetPteAddress((PVOID)MM_NONPAGED_POOL_END));
diff --git a/private/ntos/mm/mips/debugsup.c b/private/ntos/mm/mips/debugsup.c
new file mode 100644
index 000000000..eb9ee3be3
--- /dev/null
+++ b/private/ntos/mm/mips/debugsup.c
@@ -0,0 +1,207 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ debugsup.c
+
+Abstract:
+
+ This module contains routines which provide support for the
+ kernel debugger.
+
+Author:
+
+ Lou Perazzoli (loup) 02-Aug-90
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+PVOID
+MmDbgReadCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ MIPS implementation specific:
+
+ This routine returns the virtual address which is valid (mapped)
+ for read access.
+
+    If the address is valid and readable and not within KSEG0 or KSEG1,
+    the physical address within KSEG0 is returned. If the address
+    is within KSEG0 or KSEG1, then the supplied address is returned.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+    Returns NULL if the address is not valid or readable, otherwise
+    returns an address through which the value can be read.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ if (KiProbeEntryTb(VirtualAddress)) {
+ return VirtualAddress;
+ }
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
+
+PVOID
+MmDbgWriteCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ MIPS implementation specific:
+
+    This routine returns the physical address for a virtual address
+ which is valid (mapped) for write access.
+
+    If the address is valid and writable and not within KSEG0 or KSEG1,
+    the physical address within KSEG0 is returned. If the address
+    is within KSEG0 or KSEG1, then the supplied address is returned.
+
+    NOTE: The physical address must only be used while the interrupt
+ level on ALL processors is above DISPATCH_LEVEL, otherwise the
+ binding between the virtual address and the physical address can
+ change due to paging.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+    Returns NULL if the address is not valid or writable, otherwise
+    returns an address through which the value can be written.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+
+ //
+ // need to check write
+ //
+
+ if (KiProbeEntryTb(VirtualAddress)) {
+ return VirtualAddress;
+ }
+ return NULL;
+ }
+
+ PointerPte = MiGetPteAddress (VirtualAddress);
+
+ if ((ULONG) VirtualAddress < KSEG0_BASE && PointerPte->u.Hard.Dirty == 0) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
+
+
+PVOID
+MmDbgTranslatePhysicalAddress (
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ MIPS implementation specific:
+
+ This routine maps the specified physical address and returns
+ the virtual address which maps the physical address.
+
+    The next call to MmDbgTranslatePhysicalAddress removes the
+    previous physical address translation, hence only a single
+    physical address can be examined at a time (a mapping cannot
+    cross a page boundary).
+
+Arguments:
+
+    PhysicalAddress - Supplies the physical address to map and translate.
+
+Return Value:
+
+    The virtual address which corresponds to the physical address.
+
+ NULL if the physical address was bogus.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PVOID BaseAddress;
+ PMMPTE BasePte;
+ PMMPFN Pfn1;
+ ULONG Page;
+
+ BasePte = MmDebugPte + (MM_NUMBER_OF_COLORS - 1);
+ BasePte = (PMMPTE)((ULONG)BasePte & ~(MM_COLOR_MASK << PTE_SHIFT));
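+
+    //
+    // The two lines above align MmDebugPte up to the start of a group
+    // of MM_NUMBER_OF_COLORS mapping PTEs, so that adding the page's
+    // color below selects a PTE whose virtual color matches the
+    // physical page.
+    //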
+
+ Page = (ULONG)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
+
+ if ((Page > (LONGLONG)MmHighestPhysicalPage) ||
+ (Page < (LONGLONG)MmLowestPhysicalPage)) {
+ return NULL;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (Page);
+
+ if (!MmIsAddressValid (Pfn1)) {
+ return NULL;
+ }
+
+ BasePte = BasePte + Pfn1->u3.e1.PageColor;
+
+ BaseAddress = MiGetVirtualAddressMappedByPte (BasePte);
+
+ KiFlushSingleTb (TRUE, BaseAddress);
+
+ *BasePte = ValidKernelPte;
+ BasePte->u.Hard.PageFrameNumber = Page;
+ return (PVOID)((ULONG)BaseAddress + BYTE_OFFSET(PhysicalAddress.LowPart));
+}
diff --git a/private/ntos/mm/mips/hypermap.c b/private/ntos/mm/mips/hypermap.c
new file mode 100644
index 000000000..99a9ff7fe
--- /dev/null
+++ b/private/ntos/mm/mips/hypermap.c
@@ -0,0 +1,325 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ hypermap.c
+
+Abstract:
+
+ This module contains the routines which map physical pages into
+ reserved PTEs within hyper space.
+
+    This module is machine dependent. This version is targeted
+ for MIPS Rxxxx and uses KSEG0 to map the pages at their physical
+ addresses.
+
+Author:
+
+ Lou Perazzoli (loup) 5-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+PVOID
+MiMapPageInHyperSpace (
+ IN ULONG PageFrameIndex,
+ IN PKIRQL OldIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns the physical address of the page.
+
+ ************************************
+ * *
+ * Returns with a spin lock held!!! *
+ * *
+ ************************************
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the address where the requested page was mapped.
+
+ RETURNS WITH THE HYPERSPACE SPIN LOCK HELD!!!!
+
+ The routine MiUnmapHyperSpaceMap MUST be called to release the lock!!!!
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ ULONG i;
+ PMMPTE PointerPte;
+ PMMPTE NextPte;
+ MMPTE TempPte;
+ ULONG LastEntry;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ //
+ // Pages must be aligned on their natural boundaries.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ i = Pfn1->u3.e1.PageColor;
+ if ((i == (PageFrameIndex & MM_COLOR_MASK)) &&
+ (PageFrameIndex < MM_PAGES_IN_KSEG0)) {
+
+ //
+ // Virtual and physical alignment match, return the KSEG0 address
+ // for this page.
+ //
+
+ LOCK_HYPERSPACE (OldIrql);
+ return (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT));
+ }
+
+ //
+ // Find the proper location in hyper space and map the page there.
+ //
+
+ LOCK_HYPERSPACE (OldIrql);
+ PointerPte = MmFirstReservedMappingPte + i;
+    if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+        // All the PTEs reserved for mapping have been used,
+ // flush the TB and reinitialize the pages.
+ //
+
+        RtlZeroMemory ((PVOID)MmFirstReservedMappingPte,
+                       (NUMBER_OF_MAPPING_PTES + 1) * sizeof (MMPTE));
+ KeFlushEntireTb (TRUE, FALSE);
+
+ LastEntry = NUMBER_OF_MAPPING_PTES - MM_COLOR_MASK;
+ NextPte = MmFirstReservedMappingPte;
+ while (NextPte <= (MmFirstReservedMappingPte + MM_COLOR_MASK)) {
+ NextPte->u.Hard.PageFrameNumber = LastEntry;
+ NextPte += 1;
+ }
+ }
+
+ //
+ // Locate next entry in list and reset the next entry in the
+ // list. The list is organized thusly:
+ //
+ // The first N elements corresponding to the alignment mask + 1
+ // contain in their page frame number fields the value of the
+ // last free mapping PTE with this alignment. However, if
+ // the valid bit is set, this PTE has been used and the TB
+ // must be flushed and the list reinitialized.
+ //
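+
+    //
+    // Worked example (illustrative, taking MM_COLOR_MASK == 1, i.e.
+    // two colors): PTEs 0 and 1 hold, in their page frame number
+    // fields, the offset (from themselves) to the last free mapping
+    // PTE of matching color. Each allocation maps the PTE at that
+    // offset and then steps the offset back by MM_COLOR_MASK + 1,
+    // one full stride of colors.
+    //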
+
+ //
+ // Get the offset to the first free PTE.
+ //
+
+ i = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Change the offset for the next time through.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = i - (MM_COLOR_MASK + 1);
+
+ //
+ // Point to the free entry and make it valid.
+ //
+
+ PointerPte += i;
+
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+
+ ASSERT ((((ULONG)PointerPte >> PTE_SHIFT) & MM_COLOR_MASK) ==
+ (((ULONG)Pfn1->u3.e1.PageColor)));
+
+ return MiGetVirtualAddressMappedByPte (PointerPte);
+}
+
+PVOID
+MiMapImageHeaderInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ The physical address of the specified page is returned.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the virtual address where the specified physical page was
+ mapped.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ //
+ // Avoid address aliasing problem on r4000.
+ //
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ LOCK_PFN (OldIrql);
+
+ while (PointerPte->u.Long != 0) {
+
+ //
+ // If there is no event specified, set one up.
+ //
+
+ if (MmWorkingSetList->WaitingForImageMapping == (PKEVENT)NULL) {
+
+ //
+ // Set the global event into the field and wait for it.
+ //
+
+ MmWorkingSetList->WaitingForImageMapping = &MmImageMappingPteEvent;
+ }
+
+ //
+ // Release the PFN lock and wait on the event in an
+ // atomic operation.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(MmWorkingSetList->WaitingForImageMapping,
+ Executive,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ LOCK_PFN (OldIrql);
+ }
+
+ ASSERT (PointerPte->u.Long == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ UNLOCK_PFN (OldIrql);
+
+ return (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+}
+
+VOID
+MiUnmapImageHeaderInHyperSpace (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure unmaps the PTE reserved for mapping the image
+ header, flushes the TB, and, if the WaitingForImageMapping field
+ is not NULL, sets the specified event.
+
+    On the MIPS series, no action is required as the physical address
+ of the page is used.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+ PKEVENT Event;
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ TempPte.u.Long = 0;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Capture the current state of the event field and clear it out.
+ //
+
+ Event = MmWorkingSetList->WaitingForImageMapping;
+
+ MmWorkingSetList->WaitingForImageMapping = (PKEVENT)NULL;
+
+ ASSERT (PointerPte->u.Long != 0);
+
+ KeFlushSingleTb (IMAGE_MAPPING_PTE,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Hard);
+
+ UNLOCK_PFN (OldIrql);
+
+ if (Event != (PKEVENT)NULL) {
+
+ //
+ // If there was an event specified, set the event.
+ //
+
+ KePulseEvent (Event, 0, FALSE);
+ }
+
+ return;
+}
diff --git a/private/ntos/mm/mips/initmips.c b/private/ntos/mm/mips/initmips.c
new file mode 100644
index 000000000..033ebefcf
--- /dev/null
+++ b/private/ntos/mm/mips/initmips.c
@@ -0,0 +1,1047 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ initmips.c
+
+Abstract:
+
+ This module contains the machine dependent initialization for the
+ memory management component. It is specifically tailored to the
+ MIPS environment (both r3000 and r4000).
+
+Author:
+
+ Lou Perazzoli (loup) 3-Apr-1990
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+VOID
+MiInitMachineDependent (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This routine performs the necessary operations to enable virtual
+ memory. This includes building the page directory page, building
+    page table pages to map the code section, the data section, the
+ stack section and the trap handler.
+
+ It also initializes the PFN database and populates the free list.
+
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ PMMPFN BasePfn;
+ PMMPFN BottomPfn;
+ PMMPFN TopPfn;
+ BOOLEAN PfnInKseg0;
+ ULONG i, j;
+ ULONG HighPage;
+ ULONG PagesLeft;
+ ULONG PageNumber;
+ ULONG PdePageNumber;
+ ULONG PdePage;
+ ULONG PageFrameIndex;
+ ULONG NextPhysicalPage;
+ ULONG PfnAllocation;
+ ULONG MaxPool;
+ KIRQL OldIrql;
+ PEPROCESS CurrentProcess;
+ ULONG DirBase;
+ PVOID SpinLockPage;
+ ULONG MostFreePage = 0;
+ PLIST_ENTRY NextMd;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor;
+ PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
+ MMPTE TempPte;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE CacheStackPage;
+ PMMPTE Pde;
+ PMMPTE StartPde;
+ PMMPTE EndPde;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PULONG PointerLong;
+ PVOID NonPagedPoolStartVirtual;
+ ULONG Range;
+
+ //
+ // Initialize color tables and cache policy fields of static data if R4000.
+ //
+
+ ValidKernelPte.u.Hard.CachePolicy = PCR->CachePolicy;
+ ValidUserPte.u.Hard.CachePolicy = PCR->CachePolicy;
+ ValidPtePte.u.Hard.CachePolicy = PCR->CachePolicy;
+ ValidPdePde.u.Hard.CachePolicy = PCR->CachePolicy;
+    ValidKernelPde.u.Hard.CachePolicy = PCR->CachePolicy;
+
+ MmProtectToPteMask[MM_READONLY] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_EXECUTE] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_EXECUTE_READ] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_READWRITE] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_WRITECOPY] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_EXECUTE_READWRITE] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_EXECUTE_WRITECOPY] |= PCR->AlignedCachePolicy;
+
+ MmProtectToPteMask[MM_GUARD_PAGE | MM_READONLY] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_GUARD_PAGE | MM_EXECUTE] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_GUARD_PAGE | MM_EXECUTE_READ] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_GUARD_PAGE | MM_READWRITE] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_GUARD_PAGE | MM_WRITECOPY] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_GUARD_PAGE | MM_EXECUTE_READWRITE] |= PCR->AlignedCachePolicy;
+ MmProtectToPteMask[MM_GUARD_PAGE | MM_EXECUTE_WRITECOPY] |= PCR->AlignedCachePolicy;
+
+ PointerPte = MiGetPdeAddress (PDE_BASE);
+
+ PointerPte->u.Hard.Dirty = 1;
+ PointerPte->u.Hard.Valid = 1;
+ PointerPte->u.Hard.Global = 1;
+ PointerPte->u.Hard.Write = 0;
+
+ PdePageNumber = PointerPte->u.Hard.PageFrameNumber;
+
+ PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PointerPte->u.Long;
+
+ KeSweepDcache (FALSE);
+
+ //
+ // Get the lower bound of the free physical memory and the
+ // number of physical pages by walking the memory descriptor lists.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
+ if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
+ MmLowestPhysicalPage = MemoryDescriptor->BasePage;
+ }
+
+ //
+ // If the memory range described by the descriptor is larger
+ // than the previous largest range and the descriptor describes
+ // memory that is in KSEG0, then record the address of the
+ // descriptor.
+ //
+
+ HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount - 1;
+ if (MemoryDescriptor->MemoryType == LoaderFree) {
+ if ((MemoryDescriptor->PageCount > MostFreePage) &&
+ (HighPage < MM_PAGES_IN_KSEG0)) {
+ MostFreePage = MemoryDescriptor->PageCount;
+ FreeDescriptor = MemoryDescriptor;
+ }
+ }
+
+ if (HighPage > MmHighestPhysicalPage) {
+ MmHighestPhysicalPage = HighPage;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+ // If the number of physical pages is less than 1024, then bug check.
+ //
+
+ if (MmNumberOfPhysicalPages < 1024) {
+ KeBugCheckEx (INSTALL_MORE_MEMORY,
+ MmNumberOfPhysicalPages,
+ MmLowestPhysicalPage,
+ MmHighestPhysicalPage,
+ 0);
+ }
+
+ //
+    // Build non-paged pool using the physical pages following the
+    // data pages. Non-paged pool grows from the high range of the
+    // virtual address space and expands downward.
+ //
+ // At this time non-paged pool is constructed so virtual addresses
+ // are also physically contiguous.
+ //
+
+    if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
+                        (7 * (MmNumberOfPhysicalPages >> 3))) {
+
+ //
+ // More than 7/8 of memory allocated to nonpagedpool, reset to 0.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = 0;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
+
+ //
+ // Calculate the size of nonpaged pool.
+        // Use the minimum size, then for every MB above 4mb add extra
+ // pages.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
+
+ MmSizeOfNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMinAdditionNonPagedPoolPerMb;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
+ MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
+ }
+
+ //
+ // Align to page size boundary.
+ //
+
+ MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
+
+ //
+ // Calculate the maximum size of pool.
+ //
+
+ if (MmMaximumNonPagedPoolInBytes == 0) {
+
+ //
+        // Calculate the size of nonpaged pool. If 4mb or less, use
+        // the minimum size, then for every MB above 4mb add extra
+ // pages.
+ //
+
+ MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
+
+ //
+ // Make sure enough expansion for pfn database exists.
+ //
+
+ MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ MmMaximumNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMaxAdditionNonPagedPoolPerMb;
+ }
+
+ MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
+ (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ if (MmMaximumNonPagedPoolInBytes < MaxPool) {
+ MmMaximumNonPagedPoolInBytes = MaxPool;
+ }
+
+ if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
+ MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
+ }
+
+ MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
+ - (MmMaximumNonPagedPoolInBytes - 1));
+
+ MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
+ NonPagedPoolStartVirtual = MmNonPagedPoolStart;
+
+
+ //
+ // Calculate the starting PDE for the system PTE pool which is
+ // right below the nonpaged pool.
+ //
+
+ MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
+ (~PAGE_DIRECTORY_MASK));
+
+ if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
+ MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
+ MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart -
+ (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
+ ASSERT (MmNumberOfSystemPtes > 1000);
+ }
+
+ //
+ // Set the global bit in all PDE's for system space.
+ //
+
+ StartPde = MiGetPdeAddress (MM_SYSTEM_SPACE_START);
+ EndPde = MiGetPdeAddress (MM_SYSTEM_SPACE_END);
+
+ while (StartPde <= EndPde) {
+ if (StartPde->u.Hard.Global == 0) {
+
+ //
+ // Set the Global bit.
+ //
+
+ TempPte = *StartPde;
+ TempPte.u.Hard.Global = 1;
+ *StartPde = TempPte;
+ }
+ StartPde += 1;
+ }
+
+ StartPde = MiGetPdeAddress (MmNonPagedSystemStart);
+
+ EndPde = MiGetPdeAddress((PVOID)((PCHAR)MmNonPagedPoolEnd - 1));
+
+ ASSERT ((EndPde - StartPde) < (LONG)FreeDescriptor->PageCount);
+
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ TempPte = ValidKernelPte;
+
+ while (StartPde <= EndPde) {
+ if (StartPde->u.Hard.Valid == 0) {
+
+ //
+ // Map in a page directory page.
+ //
+
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ *StartPde = TempPte;
+
+ }
+ StartPde += 1;
+ }
+
+ //
+ // Zero the PTEs before nonpaged pool.
+ //
+
+ StartPde = MiGetPteAddress (MmNonPagedSystemStart);
+ PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
+
+ RtlZeroMemory (StartPde, ((ULONG)PointerPte - (ULONG)StartPde));
+
+ //
+ // Fill in the PTEs for non-paged pool.
+ //
+
+ LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes - 1);
+ while (PointerPte <= LastPte) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ *PointerPte = TempPte;
+ PointerPte++;
+ }
+
+ ASSERT (NextPhysicalPage <
+ (FreeDescriptor->BasePage + FreeDescriptor->PageCount));
+
+ //
+ // Zero the remaining PTEs (if any).
+ //
+
+ StartPde = (PMMPTE)((ULONG)MiGetPteAddress (MmNonPagedPoolEnd) | (PAGE_SIZE - sizeof(MMPTE)));
+ while (PointerPte <= StartPde) {
+ *PointerPte = ZeroKernelPte;
+ PointerPte++;
+ }
+
+ PointerPte = MiGetPteAddress (MmNonPagedPoolStart);
+ MmNonPagedPoolStart =
+ (PVOID)(KSEG0_BASE | (PointerPte->u.Hard.PageFrameNumber << PAGE_SHIFT));
+
+ MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
+
+ MmSubsectionBase = (ULONG)MmNonPagedPoolStart;
+ if (NextPhysicalPage < (MM_SUBSECTION_MAP >> PAGE_SHIFT)) {
+ MmSubsectionBase = KSEG0_BASE;
+ MmSubsectionTopPage = MM_SUBSECTION_MAP >> PAGE_SHIFT;
+ }
+
+ //
+ // Non-paged pages now exist, build the pool structures.
+ //
+
+ MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual +
+ MmSizeOfNonPagedPoolInBytes);
+ MiInitializeNonPagedPool (NonPagedPoolStartVirtual);
+
+ //
+ // Before Non-paged pool can be used, the PFN database must
+ // be built. This is due to the fact that the start and end of
+ // allocation bits for nonpaged pool are maintained in the
+ // PFN elements for the corresponding pages.
+ //
+
+ //
+ // Calculate the number of pages required from page zero to
+ // the highest page.
+ //
+
+ //
+    // Get the number of secondary colors and add the array for tracking
+ // secondary colors to the end of the PFN database.
+ //
+ // Get secondary color value from registry.
+ //
+
+ if (MmSecondaryColors == 0) {
+ MmSecondaryColors = PCR->SecondLevelDcacheSize;
+ }
+
+ MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
+
+ //
+ // Make sure value is power of two and within limits.
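+    // ((x & (x - 1)) != 0) rejects any value with more than one bit
+    // set: a power of two has exactly one set bit, which subtracting
+    // one clears.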
+ //
+
+ if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) ||
+ (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
+ (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
+ MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
+ }
+
+ MmSecondaryColorMask = (MmSecondaryColors - 1) & ~MM_COLOR_MASK;
+
+ PfnAllocation = 1 + ((((MmHighestPhysicalPage + 1) * sizeof(MMPFN)) +
+ (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
+ >> PAGE_SHIFT);
+
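+    //
+    // That is: one MMPFN entry for every physical page from zero
+    // through the highest page, plus the two per-color table arrays,
+    // converted to pages; the leading 1 covers the rounding.
+    //
+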
+ //
+ // If the number of pages remaining in the current descriptor is
+ // greater than the number of pages needed for the PFN database,
+ // then allocate the PFN database from the current free descriptor.
+ //
+
+ HighPage = FreeDescriptor->BasePage + FreeDescriptor->PageCount;
+ PagesLeft = HighPage - NextPhysicalPage;
+ if (PagesLeft >= PfnAllocation) {
+
+ //
+ // Allocate the PFN database in kseg0.
+ //
+ // Compute the address of the PFN by allocating the appropriate
+ // number of pages from the end of the free descriptor.
+ //
+
+ PfnInKseg0 = TRUE;
+ MmPfnDatabase = (PMMPFN)(KSEG0_BASE |
+ ((HighPage - PfnAllocation) << PAGE_SHIFT));
+
+ RtlZeroMemory(MmPfnDatabase, PfnAllocation * PAGE_SIZE);
+ FreeDescriptor->PageCount -= PfnAllocation;
+
+ } else {
+
+ //
+ // Allocate the PFN database in virtual memory.
+ //
+        // Calculate the start of the Pfn Database (it starts at physical
+        // page zero, even if the lowest physical page is not zero).
+ //
+
+ PfnInKseg0 = FALSE;
+ PointerPte = MiReserveSystemPtes (PfnAllocation,
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ TRUE);
+
+ MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
+
+ //
+ // Go through the memory descriptors and for each physical page
+        // make sure the PFN database has a valid PTE to map it. This allows
+ // machines with sparse physical memory to have a minimal PFN
+ // database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage));
+
+ LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage +
+ MemoryDescriptor->PageCount))) - 1);
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+ }
+
+ //
+ // Initialize support for colored pages.
+ //
+
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
+ &MmPfnDatabase[MmHighestPhysicalPage + 1];
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ //
+ // Make sure the PTEs are mapped.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0])) {
+ PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
+
+ LastPte = MiGetPteAddress (
+ (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ }
+
+ for (i = 0; i < MmSecondaryColors; i++) {
+ MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ }
+
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList;
+ MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST;
+ }
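+
+    //
+    // Editorial sketch (not part of the original source): a by-color
+    // allocation conceptually consults these list heads, e.g.
+    //
+    //     Page = MmFreePagesByColor[ZeroedPageList][Color].Flink;
+    //     if (Page == MM_EMPTY_LIST) {
+    //         // fall back to another color or to the general lists
+    //     }
+    //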
+
+ //
+ // Go through the page table entries and for any page which is
+ // valid, update the corresponding PFN database element.
+ //
+
+ PointerPde = MiGetPdeAddress (PTE_BASE);
+
+ PdePage = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PdePage);
+ Pfn1->PteFrame = PdePage;
+ Pfn1->PteAddress = PointerPde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_PAGE_COLOR_FROM_PTE (PointerPde);
+
+ //
+ // Add the pages which were used to construct nonpaged pool to
+ // the pfn database.
+ //
+
+ Pde = MiGetPdeAddress ((ULONG)NonPagedPoolStartVirtual -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE));
+
+ EndPde = MiGetPdeAddress(MmNonPagedPoolEnd);
+
+ while (Pde <= EndPde) {
+ if (Pde->u.Hard.Valid == 1) {
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PdePage);
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_PAGE_COLOR_FROM_PTE (Pde);
+
+ PointerPte = MiGetVirtualAddressMappedByPte (Pde);
+ for (j = 0 ; j < PTE_PER_PAGE; j++) {
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn2->PteFrame = PdePage;
+ Pfn2->u2.ShareCount += 1;
+ Pfn2->u3.e2.ReferenceCount = 1;
+ Pfn2->u3.e1.PageLocation = ActiveAndValid;
+ Pfn2->PteAddress =
+ (PMMPTE)(KSEG0_BASE | (PageFrameIndex << PTE_SHIFT));
+
+ Pfn2->u3.e1.PageColor =
+ MI_GET_PAGE_COLOR_FROM_PTE (Pfn2->PteAddress);
+ }
+ PointerPte++;
+ }
+ }
+ Pde++;
+ }
+
+ //
+ // If page zero is still unused, mark it as in use. This is
+ // temporary as we want to find bugs where a physical page
+ // is specified as zero.
+ //
+
+ Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Make the reference count non-zero and point it into a
+ // page directory.
+ //
+
+ Pde = MiGetPdeAddress (0xb0000000);
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_PAGE_COLOR_FROM_PTE (Pde);
+ }
+
+    // End of temporary marking of physical page zero as in use.
+
+
+    //
+ // Walk through the memory descriptors and add pages to the
+ // free list in the PFN database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ i = MemoryDescriptor->PageCount;
+ NextPhysicalPage = MemoryDescriptor->BasePage;
+
+ switch (MemoryDescriptor->MemoryType) {
+ case LoaderBad:
+ while (i != 0) {
+ MiInsertPageInList (MmPageLocationList[BadPageList],
+ NextPhysicalPage);
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ case LoaderFree:
+ case LoaderLoadedProgram:
+ case LoaderFirmwareTemporary:
+ case LoaderOsloaderStack:
+
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+                // Set the PTE address to the physical page for
+ // virtual address alignment checking.
+ //
+
+ Pfn1->PteAddress = (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
+ Pfn1->u3.e1.PageColor = MI_GET_PAGE_COLOR_FROM_PTE (
+ Pfn1->PteAddress);
+
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ NextPhysicalPage);
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ default:
+ PointerPte = MiGetPteAddress(KSEG0_BASE |
+ (NextPhysicalPage << PAGE_SHIFT));
+
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+
+ //
+ // Set page as in use.
+ //
+
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_PAGE_COLOR_FROM_PTE (
+ PointerPte);
+
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ PointerPte += 1;
+ }
+
+ break;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+    // If the PFN database is allocated in virtual memory, mark the start
+    // and end of the allocation.  Otherwise, scan the PFN database for
+    // holes and insert the respective pages in the free page list.
+ //
+
+ if (PfnInKseg0 == FALSE) {
+
+ //
+ // The PFN database is allocated in virtual memory.
+ //
+ // Set the start and end of allocation.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmLowestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmHighestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ } else {
+
+ //
+ // The PFN database is allocated in KSEG0.
+ //
+ // Mark all pfn entries for the pfn pages in use.
+ //
+
+ PageNumber = ((ULONG)MmPfnDatabase - KSEG0_BASE) >> PAGE_SHIFT;
+ Pfn1 = MI_PFN_ELEMENT(PageNumber);
+ do {
+ Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT);
+ Pfn1->u3.e1.PageColor = MI_GET_PAGE_COLOR_FROM_PTE(Pfn1->PteAddress);
+ Pfn1 += 1;
+ PfnAllocation -= 1;
+ } while (PfnAllocation != 0);
+
+        //
+        // Scan the PFN database backward for pages that are completely zero.
+        // These pages are unused and can be added to the free list.
+        //
+
+ BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
+ do {
+
+ //
+ // Compute the address of the start of the page that is next
+ // lower in memory and scan backwards until that page address
+ // is reached or just crossed.
+ //
+
+ if (((ULONG)BottomPfn & (PAGE_SIZE - 1)) != 0) {
+ BasePfn = (PMMPFN)((ULONG)BottomPfn & ~(PAGE_SIZE - 1));
+ TopPfn = BottomPfn + 1;
+
+ } else {
+ BasePfn = (PMMPFN)((ULONG)BottomPfn - PAGE_SIZE);
+ TopPfn = BottomPfn;
+ }
+
+ while (BottomPfn > BasePfn) {
+ BottomPfn -= 1;
+ }
+
+ //
+            // If the entire range spanned by the PFN entries is
+ // completely zero and the PFN entry that maps the page is
+ // not in the range, then add the page to the appropriate
+ // free list.
+ //
+
+ Range = (ULONG)TopPfn - (ULONG)BottomPfn;
+ if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
+
+ //
+ // Set the PTE address to the physical page for virtual
+ // address alignment checking.
+ //
+
+ PageNumber = ((ULONG)BasePfn - KSEG0_BASE) >> PAGE_SHIFT;
+ Pfn1 = MI_PFN_ELEMENT(PageNumber);
+
+ ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
+
+ PfnAllocation += 1;
+
+ Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT);
+ Pfn1->u3.e1.PageColor = MI_GET_PAGE_COLOR_FROM_PTE(Pfn1->PteAddress);
+ MiInsertPageInList(MmPageLocationList[FreePageList],
+ PageNumber);
+ }
+
+ } while (BottomPfn > MmPfnDatabase);
+ }
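+
+    //
+    // Editorial note (not part of the original source): assuming a 24-byte
+    // MMPFN and 4k pages, each page of the PFN database describes about
+    // 170 physical pages, so the backward scan above reclaims every
+    // database page whose entries are all zero, i.e. whose pages lie
+    // entirely within a hole in physical memory.
+    //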
+
+ //
+    // Indicate that the nonpaged must-succeed pool is allocated in
+    // nonpaged pool.
+ //
+
+ i = MmSizeOfNonPagedMustSucceed;
+ Pfn1 = MI_PFN_ELEMENT(MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedMustSucceed));
+ while ((LONG)i > 0) {
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ i -= PAGE_SIZE;
+ Pfn1 += 1;
+ }
+
+ KeInitializeSpinLock (&MmSystemSpaceLock);
+ KeInitializeSpinLock (&MmPfnLock);
+
+ //
+ // Initialize the nonpaged available PTEs for mapping I/O space
+ // and kernel stacks.
+ //
+
+ PointerPte = MiGetPteAddress ((ULONG)NonPagedPoolStartVirtual -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE));
+
+ PointerPte = (PMMPTE)PAGE_ALIGN (PointerPte);
+ if (PfnInKseg0) {
+ MmNumberOfSystemPtes = MiGetPteAddress(MmNonPagedPoolExpansionStart) - PointerPte - 1;
+ } else {
+ MmNumberOfSystemPtes = MiGetPteAddress(NonPagedPoolStartVirtual) - PointerPte - 1;
+ }
+
+ MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
+
+ //
+ // Initialize the nonpaged pool.
+ //
+
+ InitializePool(NonPagedPool,0);
+
+ //
+ // Initialize memory management structures for this process.
+ //
+
+ //
+ // Build working set list. System initialization has created
+ // a PTE for hyperspace.
+ //
+    // Note that we can't remove a zeroed page, as hyperspace does not
+    // exist yet and we map non-zeroed pages into hyperspace to zero them.
+ //
+
+ PointerPte = MiGetPdeAddress(HYPER_SPACE);
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PointerPte->u.Hard.Global = 0;
+ PointerPte->u.Hard.Write = 1;
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Point to the page table page we just created and zero it.
+ //
+
+
+// KeFillEntryTb ((PHARDWARE_PTE)PointerPte,
+// MiGetPteAddress(HYPER_SPACE),
+// TRUE);
+
+ PointerPte = MiGetPteAddress(HYPER_SPACE);
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ //
+ // Hyper space now exists, set the necessary variables.
+ //
+
+ MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
+ MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
+
+ MmWorkingSetList = WORKING_SET_LIST;
+ MmWsle = (PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL));
+
+ //
+ // Initialize this process's memory management structures including
+ // the working set list.
+ //
+
+ //
+ // The pfn element for the page directory has already been initialized,
+ // zero the reference count and the share count so they won't be
+ // wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ Pfn1->u3.e1.PageColor = 0;
+
+ //
+ // The pfn element for the PDE which maps hyperspace has already
+ // been initialized, zero the reference count and the share count
+ // so they won't be wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ Pfn1->u3.e1.PageColor = 1;
+
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+ //
+ // Get a page for the working set list and map it into the Page
+ // directory at the page after hyperspace.
+ //
+
+ PointerPte = MiGetPteAddress (HYPER_SPACE);
+ PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE(PointerPte));
+ CurrentProcess->WorkingSetPage = PageFrameIndex;
+
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ PointerPde = MiGetPdeAddress (HYPER_SPACE) + 1;
+
+ //
+ // Assert that the double mapped pages have the same alignment.
+ //
+
+ ASSERT ((PointerPte->u.Long & (0xF << PTE_SHIFT)) ==
+ (PointerPde->u.Long & (0xF << PTE_SHIFT)));
+
+ *PointerPde = TempPte;
+ PointerPde->u.Hard.Global = 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPde,
+ PointerPte,
+ TRUE);
+
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ TempPte = *PointerPde;
+ TempPte.u.Hard.Valid = 0;
+ TempPte.u.Hard.Global = 0;
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeFlushSingleTb (PointerPte,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPde,
+ TempPte.u.Hard);
+
+ KeLowerIrql(OldIrql);
+
+#ifdef R4000
+
+ //
+ // Initialize hyperspace for this process.
+ //
+
+ i = NUMBER_OF_MAPPING_PTES - MM_COLOR_MASK;
+ PointerPte = MmFirstReservedMappingPte;
+ while (PointerPte <= (MmFirstReservedMappingPte + MM_COLOR_MASK)) {
+ PointerPte->u.Hard.PageFrameNumber = i;
+ PointerPte += 1;
+ }
+
+#endif
+
+ CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
+ CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
+
+ MmInitializeProcessAddressSpace (CurrentProcess,
+ (PEPROCESS)NULL,
+ (PVOID)NULL);
+
+ *PointerPde = ZeroKernelPte;
+
+ //
+ // Check to see if moving the secondary page structures to the end
+    // of the PFN database is a waste of memory.  If so, copy them
+    // to nonpaged pool instead.
+    //
+    // If the PFN database ends on a page aligned boundary and the
+    // size of the two arrays is less than a page, free the page
+    // and allocate nonpaged pool for this.
+ //
+
+ if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
+ ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
+
+ PMMCOLOR_TABLES c;
+
+ c = MmFreePagesByColor[0];
+
+ MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
+ ' mM');
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ RtlMoveMemory (MmFreePagesByColor[0],
+ c,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
+
+ //
+ // Free the page.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(c)) {
+ PointerPte = MiGetPteAddress(c);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ *PointerPte = ZeroKernelPte;
+ } else {
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c);
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT ((Pfn1->u3.e2.ReferenceCount <= 1) && (Pfn1->u2.ShareCount <= 1));
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ MI_SET_PFN_DELETED (Pfn1);
+#if DBG
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+#endif //DBG
+ MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex);
+ }
+ return;
+}
diff --git a/private/ntos/mm/mips/mir3000.h b/private/ntos/mm/mips/mir3000.h
new file mode 100644
index 000000000..4cd742a14
--- /dev/null
+++ b/private/ntos/mm/mips/mir3000.h
@@ -0,0 +1,1021 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ mir3000.h
+
+Abstract:
+
+ This module contains the private data structures and procedure
+ prototypes for the hardware dependent portion of the
+ memory management system.
+
+ It is specifically tailored for the MIPS R3000 machine.
+
+Author:
+
+ Lou Perazzoli (loup) 12-Mar-1990
+
+Revision History:
+
+--*/
+
+/*++
+
+ Virtual Memory Layout on the R3000 is:
+
+ +------------------------------------+
+ 00000000 | |
+ | |
+ | |
+ | User Mode Addresses |
+ | |
+ | All pages within this range |
+                             |  are potentially accessible while  |
+ | the CPU is in USER mode. |
+ | |
+ | |
+ +------------------------------------+
+ 7ffff000 | 64k No Access Area |
+ +------------------------------------+
+ 80000000 | | KSEG_0
+ | HAL loads kernel and initial |
+ | boot drivers in first 16mb |
+ | of this region. |
+ | Kernel mode access only. |
+ | |
+ | Initial NonPaged Pool is within |
+                             |  KSEG_0                            |
+ | |
+ +------------------------------------+
+ A0000000 | | KSEG_1
+ | |
+ | |
+ | |
+ | |
+ +------------------------------------+
+ C0000000 | Page Table Pages mapped through |
+ | this 4mb region |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ C0400000 | HyperSpace - working set lists |
+                             |  and per process memory management |
+ | structures mapped in this 4mb |
+ | region. |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C0800000 | NO ACCESS AREA (4MB) |
+ | |
+ +------------------------------------+
+ C0C00000 | System Cache Structures |
+ | reside in this 4mb region |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C1000000 | System cache resides here. |
+ | Kernel mode access only. |
+ | |
+ | |
+ +------------------------------------+
+ E1000000 | Start of paged system area |
+ | Kernel mode access only. |
+ | |
+ | |
+ | |
+ +------------------------------------+
+ | |
+ | Kernel mode access only. |
+ | |
+ | |
+ FFBFFFFF | NonPaged System area |
+ +------------------------------------+
+ FFC00000 | Last 4mb reserved for HAL usage |
+ +------------------------------------+
+
+--*/
+
+
+//
+// PAGE_SIZE for the MIPS R3000 is 4k; a virtual address is a 20-bit
+// virtual page number with a PAGE_SHIFT-bit byte offset.
+//
+
+#define MM_VIRTUAL_PAGE_SHIFT 20
+
+//
+// Address space layout definitions.
+//
+
+//#define PDE_BASE ((ULONG)0xC0300000)
+
+#define MM_SYSTEM_RANGE_START (0x80000000)
+
+#define MM_SYSTEM_SPACE_START (0xC0C00000)
+
+#define MM_SYSTEM_SPACE_END (0xFFFFFFFF)
+
+#define MM_NONPAGED_SYSTEM_SPACE_START (0xF0000000)
+
+#define PDE_TOP 0xC03FFFFF
+
+//#define PTE_BASE ((ULONG)0xC0000000)
+
+#define HYPER_SPACE ((PVOID)0xC0400000)
+
+#define HYPER_SPACE_END (0xC07fffff)
+
+//
+// Define the start and maximum size for the system cache.
+// Maximum size 512MB.
+//
+
+#define MM_SYSTEM_CACHE_START (0xC1000000)
+
+#define MM_MAXIMUM_SYSTEM_CACHE_SIZE ((512*1024*1024) >> PAGE_SHIFT)
+
+#define MM_SYSTEM_CACHE_WORKING_SET (0xC0C00000)
+
+#define MM_SYSTEM_CACHE_END (0xE1000000)
+
+#define MM_PAGED_POOL_START ((PVOID)(0xE1000000))
+
+#define MM_LOWEST_NONPAGED_SYSTEM_START ((PVOID)(0xEB000000))
+
+#define MmProtopte_Base ((ULONG)0xE1000000)
+
+#define MM_NONPAGED_POOL_END ((PVOID)(0xFFC00000))
+
+#define NON_PAGED_SYSTEM_END ((ULONG)0xFFFFFFF0) //quadword aligned.
+
+//
+// Number of PTEs to flush singularly before flushing the entire TB.
+//
+
+#define MM_MAXIMUM_FLUSH_COUNT 7
+
+//
+// Pool limits
+//
+
+//
+// The maximum amount of nonpaged pool that can be initially created.
+//
+
+#define MM_MAX_INITIAL_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The total amount of nonpaged pool (initial pool + expansion + system PTEs).
+//
+
+#define MM_MAX_ADDITIONAL_NONPAGED_POOL ((ULONG)(192*1024*1024))
+
+//
+// The maximum amount of paged pool that can be created.
+//
+
+#define MM_MAX_PAGED_POOL ((ULONG)(192*1024*1024))
+
+#define MM_MAX_TOTAL_POOL (((ULONG)MM_NONPAGED_POOL_END) - ((ULONG)(MM_PAGED_POOL_START)))
+
+#define MM_PROTO_PTE_ALIGNMENT (PAGE_SIZE)
+
+#define PAGE_DIRECTORY_MASK ((ULONG)0x003FFFFF)
+
+#define MM_VA_MAPPED_BY_PDE (0x400000)
+
+#if defined(JAZZ)
+
+#define LOWEST_IO_ADDRESS (0x40000000)
+
+#endif
+
+#if defined(DECSTATION)
+
+#define LOWEST_IO_ADDRESS (0x1e000000)
+
+#endif
+
+#define PTE_SHIFT (2)
+
+//
+// The number of bits in a physical address.
+//
+
+#define PHYSICAL_ADDRESS_BITS (32)
+
+//
+// Maximum number of paging files.
+//
+
+#define MAX_PAGE_FILES (16)
+
+#define MM_MAXIMUM_NUMBER_OF_COLORS 1
+
+//
+// R3000 does not require support for colored pages.
+//
+
+#define MM_NUMBER_OF_COLORS 1
+
+//
+// Mask for obtaining color from a physical page number.
+//
+
+#define MM_COLOR_MASK 0
+
+//
+// Boundary upon which pages of like color are aligned.
+//
+
+#define MM_COLOR_ALIGNMENT 0
+
+//
+// Mask for isolating color from virtual address.
+//
+
+#define MM_COLOR_MASK_VIRTUAL 0
+
+
+//
+// Hyper space definitions.
+//
+
+#define FIRST_MAPPING_PTE ((ULONG)0xC0400000)
+#define NUMBER_OF_MAPPING_PTES 127L
+#define LAST_MAPPING_PTE \
+ ((ULONG)((ULONG)FIRST_MAPPING_PTE + (NUMBER_OF_MAPPING_PTES * PAGE_SIZE)))
+
+#define IMAGE_MAPPING_PTE ((PMMPTE)((ULONG)LAST_MAPPING_PTE + PAGE_SIZE))
+
+#define ZEROING_PAGE_PTE ((PMMPTE)((ULONG)IMAGE_MAPPING_PTE + PAGE_SIZE))
+#define WORKING_SET_LIST ((PVOID)((ULONG)ZEROING_PAGE_PTE + PAGE_SIZE))
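+
+//
+// Editorial note (not part of the original source): with 4k pages the
+// definitions above work out to
+//
+//     LAST_MAPPING_PTE  == 0xC0400000 + (127 * 0x1000) == 0xC047F000
+//     IMAGE_MAPPING_PTE == 0xC0480000
+//     ZEROING_PAGE_PTE  == 0xC0481000
+//     WORKING_SET_LIST  == 0xC0482000
+//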
+
+#define MM_PTE_PROTOTYPE_MASK 0x1
+#define MM_PTE_TRANSITION_MASK 0x2
+#define MM_PTE_WRITE_MASK 0x40
+#define MM_PTE_COPY_ON_WRITE_MASK 0x80
+#define MM_PTE_GLOBAL_MASK 0x100
+#define MM_PTE_VALID_MASK 0x200
+#define MM_PTE_DIRTY_MASK 0x400
+#define MM_PTE_CACHE_DISABLE_MASK 0x800
+#define MM_PTE_CACHE_ENABLE_MASK 0x0
+
+//
+// Bit fields to or into PTE to make a PTE valid based on the
+// protection field of the invalid PTE.
+//
+
+#define MM_PTE_NOACCESS          0x0   // not expressible on R3000
+#define MM_PTE_READONLY 0x0
+#define MM_PTE_READWRITE 0x40
+#define MM_PTE_WRITECOPY 0xC0
+#define MM_PTE_EXECUTE 0x0 // read-only on R3000
+#define MM_PTE_EXECUTE_READ 0x0
+#define MM_PTE_EXECUTE_READWRITE 0x40
+#define MM_PTE_EXECUTE_WRITECOPY 0xC0
+#define MM_PTE_NOCACHE 0x800
+#define MM_PTE_GUARD             0x0   // not expressible on R3000
+#define MM_PTE_CACHE 0x0
+
+#define MM_STACK_ALIGNMENT 0x0
+#define MM_STACK_OFFSET 0x0
+
+//
+// System process definitions
+//
+
+#define PDE_PER_PAGE ((ULONG)1024)
+
+#define PTE_PER_PAGE ((ULONG)1024)
+
+//
+// Number of page table pages for user addresses.
+//
+
+#define MM_USER_PAGE_TABLE_PAGES (512)
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE (
+// OUT OUTPTE,
+// IN FRAME,
+// IN PMASK,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro makes a valid PTE from a page frame number, protection mask,
+// and owner.
+//
+// Arguments
+//
+//    OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+//    FRAME - Supplies the page frame number for the PTE.
+//
+//    PMASK - Supplies the protection to set in the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE which is being made valid.
+// For prototype PTEs NULL should be specified.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE(OUTPTE,FRAME,PMASK,PPTE) \
+ { \
+ (OUTPTE).u.Long = ((FRAME << 12) | \
+ (MmProtectToPteMask[PMASK]) | \
+ MM_PTE_VALID_MASK); \
+ if (((PMMPTE)PPTE) >= MiGetPteAddress(MM_SYSTEM_SPACE_START)) { \
+ (OUTPTE).u.Hard.Global = 1; \
+ } \
+ }
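+
+//
+// Editorial sketch (not part of the original source): assuming
+// MmProtectToPteMask maps a read/write protection to MM_PTE_READWRITE,
+// a hypothetical frame 0x123 yields
+//
+//     (0x123 << 12) | 0x40 | 0x200 == 0x00123240
+//
+// with the global bit also set when PPTE addresses a system space PTE.
+//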
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_TRANSITION (
+// IN OUT OUTPTE
+// IN PROTECT
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid pte and turns it into a transition PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the current valid PTE. This PTE is then
+// modified to become a transition PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
+ (OUTPTE).u.Soft.Transition = 1; \
+ (OUTPTE).u.Soft.Valid = 0; \
+ (OUTPTE).u.Soft.Prototype = 0; \
+ (OUTPTE).u.Soft.Protection = PROTECT;
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE (
+// OUT OUTPTE,
+// IN PAGE,
+// IN PROTECT,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+//    This macro builds a transition PTE from a page frame number and
+//    protection.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// PAGE - Supplies the page frame number for the PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// PPTE - Supplies a pointer to the PTE, this is used to determine
+// the owner of the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE(OUTPTE,PAGE,PROTECT,PPTE) \
+ (OUTPTE).u.Long = 0; \
+ (OUTPTE).u.Trans.PageFrameNumber = PAGE; \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Protection = PROTECT;
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE_VALID (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a transition pte and makes it a valid PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE) \
+ (OUTPTE).u.Long = (((PPTE)->u.Long & 0xFFFFF000) | \
+ (MmProtectToPteMask[(PPTE)->u.Trans.Protection]) | \
+ MM_PTE_VALID_MASK);
+
+//++
+//VOID
+//MI_ENABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// enabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_ENABLE_CACHING(PTE) ((PTE).u.Hard.CacheDisable = 0)
+
+//++
+//VOID
+//MI_DISABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// disabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_DISABLE_CACHING(PTE) ((PTE).u.Hard.CacheDisable = 1)
+
+//++
+//BOOLEAN
+//MI_IS_CACHING_DISABLED (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and returns TRUE if caching is
+// disabled.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the valid PTE.
+//
+// Return Value:
+//
+// TRUE if caching is disabled, FALSE if it is enabled.
+//
+//--
+
+#define MI_IS_CACHING_DISABLED(PPTE) \
+ ((PPTE)->u.Hard.CacheDisable == 1)
+
+
+//++
+//VOID
+//MI_SET_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and indicates that
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_SET_PFN_DELETED(PPFN) \
+ (((PPFN)->PteAddress = (PMMPTE)0xFFFFFFFF))
+
+
+//++
+//BOOLEAN
+//MI_IS_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+//    This macro takes a pointer to a PFN element and determines whether
+//    the PFN is no longer in use.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// TRUE if PFN is no longer used, FALSE if it is still being used.
+//
+//--
+
+#define MI_IS_PFN_DELETED(PPFN) \
+ ((PPFN)->PteAddress == (PMMPTE)0xFFFFFFFF)
+
+
+//++
+//VOID
+//MI_CHECK_PAGE_ALIGNMENT (
+// IN ULONG PAGE,
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a PFN element number (Page) and checks to see
+// if the virtual alignment for the previous address of the page
+//    is compatible with the new address of the page.  If they are
+//    not compatible, the D cache is flushed.
+//
+// Arguments
+//
+// PAGE - Supplies the PFN element.
+// PPTE - Supplies a pointer to the new PTE which will contain the page.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+// does nothing on r3000.
+
+#define MI_CHECK_PAGE_ALIGNMENT(PAGE,PPTE)
+
+
+//++
+//VOID
+//MI_INITIALIZE_HYPERSPACE_MAP (
+// VOID
+// );
+//
+// Routine Description:
+//
+// This macro initializes the PTEs reserved for double mapping within
+// hyperspace.
+//
+// Arguments
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+// does nothing on r3000.
+#define MI_INITIALIZE_HYPERSPACE_MAP()
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_PTE (
+// IN PMMPTE PTEADDRESS
+// );
+//
+// Routine Description:
+//
+//    This macro determines the page's color based on the PTE address
+//    that maps the page.
+//
+// Arguments
+//
+//    PTEADDRESS - Supplies the PTE address the page is (or was) mapped at.
+//
+// Return Value:
+//
+//    The page's color.
+//
+//--
+
+// returns 0 on r3000.
+
+#define MI_GET_PAGE_COLOR_FROM_PTE(PTEADDRESS) 0
+
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_VA (
+// IN PVOID ADDRESS
+// );
+//
+// Routine Description:
+//
+//    This macro determines the page's color based on the virtual address
+//    at which the page is (or was) mapped.
+//
+// Arguments
+//
+//    ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+//    The page's color.
+//
+//--
+
+// returns 0 on r3000.
+
+#define MI_GET_PAGE_COLOR_FROM_VA(ADDRESS) 0
+
+
+//
+// If the PTE is writable, set the copy on write bit and clear the
+// dirty bit.
+//
+
+#define MI_MAKE_VALID_PTE_WRITE_COPY(PPTE) \
+ if ((PPTE)->u.Hard.Write == 1) { \
+ (PPTE)->u.Hard.CopyOnWrite = 1; \
+ (PPTE)->u.Hard.Dirty = 0; \
+ }
+
+//
+// Based on the virtual address of the PTE determine the owner (user or
+// kernel).
+//
+
+#define MI_DETERMINE_OWNER(PPTE) \
+ ((((PPTE) <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) || \
+ ((PPTE) >= MiGetPdeAddress(NULL) && \
+ ((PPTE) <= MiGetPdeAddress(MM_HIGHEST_USER_ADDRESS)))) ? 1 : 0)
+
+//
+// Macro to set the ACCESSED field in the PTE.
+// Some processors do not have an accessed field, so this macro will
+// not do anything.
+//
+
+#define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED)
+
+//
+// Macro to get the ACCESSED field in the PTE.
+// Some processors do not have an accessed field, so this macro will
+// return the value 0 indicating not accessed.
+//
+
+#define MI_GET_ACCESSED_IN_PTE(PPTE) 0
+
+//
+// Macro to set the OWNER field in the PTE.
+// Some processors do not have an OWNER field, so this macro will
+// not do anything.
+//
+
+#define MI_SET_OWNER_IN_PTE(PPTE,OWNER)
+
+//
+// Macro to get the OWNER field in the PTE.
+// Some processors do not have an OWNER field, so this macro will
+// return the value 0 indicating kernel-mode.
+//
+
+#define MI_GET_OWNER_IN_PTE(PPTE) KernelMode
+
+//
+// Bit mask to clear out fields in a PTE so the prototype PTE offset can be ORed in.
+//
+
+#define CLEAR_FOR_PROTO_PTE_ADDRESS ((ULONG)0x5)
+
+
+// Bit mask to clear out fields in a PTE so the paging file location can be ORed in.
+
+#define CLEAR_FOR_PAGE_FILE 0x000001F0
+
+#define SET_PAGING_FILE_INFO(PTE,FILEINFO,OFFSET) ((((PTE).u.Long & \
+ CLEAR_FOR_PAGE_FILE) | \
+ (((FILEINFO & 3) << 10) | (FILEINFO & 0xC) | \
+ (OFFSET << 12))))
+
+//
+// MiPteToProto returns the address of the corresponding prototype
+// PTE.
+//
+
+#define MiPteToProto(lpte) ((((lpte)->u.Long) & 0x80000000) ? \
+ ((PMMPTE)((((((lpte)->u.Long) << 1) >> 11) << 8) + \
+ (((((lpte)->u.Long) << 23) >> 26) << 2) \
+ + (ULONG)MmNonPagedPoolStart)) \
+ : ((PMMPTE)(((((lpte)->u.Long) >> 10) << 8) + \
+ (((((lpte)->u.Long) << 23) >> 26) << 2) \
+ + MmProtopte_Base)))
+
+//
+// MiProtoAddressForPte returns the bit field to OR into the PTE to
+// reference a prototype PTE, and sets the prototype PTE bit.
+//
+
+#define MiProtoAddressForPte(proto_va) \
+ (((ULONG)(proto_va) < (ULONG)KSEG1_BASE) ? \
+ ((((((ULONG)(proto_va) - (ULONG)MmNonPagedPoolStart) << 1) & (ULONG)0x1F8) | \
+ (((((ULONG)(proto_va) - (ULONG)MmNonPagedPoolStart) << 2) & (ULONG)0x7FFFFC00))) | \
+ 0x80000001) \
+ : ((((((ULONG)(proto_va) - MmProtopte_Base) << 1) & (ULONG)0x1F8) | \
+ (((((ULONG)(proto_va) - MmProtopte_Base) << 2) & (ULONG)0x7FFFFC00))) | \
+ 0x1))
+
+
+//
+// MiGetSubsectionAddress converts a PTE into the address of the subsection
+// encoded within the PTE. If bit 31 is set, the allocation is from
+// pool within KSEG0.
+//
+
+#define MiGetSubsectionAddress(lpte) \
+ (((lpte)->u.Subsect.WhichPool == 1) ? \
+ ((PSUBSECTION)((ULONG)MmNonPagedPoolStart + \
+ ((((((lpte)->u.Long) << 1) >> 11) << 6) | (((lpte)->u.Long & 0xE) << 2))))\
+ : ((PSUBSECTION)(NON_PAGED_SYSTEM_END - \
+ (((((lpte)->u.Long) >> 10) << 6) | (((lpte)->u.Long & 0xE) << 2)))))
+
+
+//
+// MiGetSubsectionAddressForPte converts a QUADWORD aligned subsection
+// address to a mask that can be ored into a PTE.
+//
+
+#define MiGetSubsectionAddressForPte(VA) \
+ (((ULONG)(VA) < (ULONG)KSEG1_BASE) ? \
+ (((((ULONG)(VA) - (ULONG)MmNonPagedPoolStart) >> 2) & (ULONG)0x0E) | \
+ ((((((ULONG)(VA) - (ULONG)MmNonPagedPoolStart) << 4) & (ULONG)0x7ffffc00))) | 0x80000000) \
+ : (((((ULONG)NON_PAGED_SYSTEM_END - (ULONG)VA) >> 2) & (ULONG)0x0E) | \
+ ((((((ULONG)NON_PAGED_SYSTEM_END - (ULONG)VA) << 4) & (ULONG)0x7ffffc00)))))
+
+
+//
+// MiGetPdeAddress returns the address of the PDE which maps the
+// given virtual address.
+//
+
+#define MiGetPdeAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 22) << 2) + PDE_BASE))
+
+//
+// MiGetPteAddress returns the address of the PTE which maps the
+// given virtual address.
+//
+
+#define MiGetPteAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 12) << 2) + PTE_BASE))
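+
+//
+// Editorial sketch (not part of the original source): for a hypothetical
+// va of 0x12345678, and the conventional PTE_BASE/PDE_BASE values,
+//
+//     MiGetPteAddress(va) == 0xC0000000 + ((0x12345678 >> 12) << 2)
+//                         == 0xC0048D14
+//     MiGetPdeAddress(va) == 0xC0300000 + ((0x12345678 >> 22) << 2)
+//                         == 0xC0300120
+//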
+
+//
+// MiGetPdeOffset returns the offset into a page directory
+// for a given virtual address.
+//
+
+#define MiGetPdeOffset(va) (((ULONG)(va)) >> 22)
+
+//
+// MiGetPteOffset returns the offset into a page table page for
+// a given virtual address.
+//
+
+#define MiGetPteOffset(va) ((((ULONG)(va)) << 10) >> 22)
+
+//
+// MiGetProtoPteAddress returns a pointer to the prototype PTE which
+// is mapped by the given virtual address descriptor and address within
+// the virtual address descriptor.
+//
+
+#define MiGetProtoPteAddress(VAD,VA) \
+ (((((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte) <= (ULONG)(VAD)->LastContiguousPte) ? \
+ ((PMMPTE)(((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte)) : \
+ MiGetProtoPteAddressExtended ((VAD),(VA)))
+
+//
+// MiGetVirtualAddressMappedByPte returns the virtual address
+// which is mapped by a given PTE address.
+//
+
+#define MiGetVirtualAddressMappedByPte(va) ((PVOID)((ULONG)(va) << 10))
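+
+//
+// Editorial sketch (not part of the original source): this is the inverse
+// of MiGetPteAddress; for the hypothetical PTE address 0xC0048D14,
+//
+//     MiGetVirtualAddressMappedByPte(0xC0048D14) == 0xC0048D14 << 10
+//                                                == 0x12345000
+//
+// i.e. the page base of the va in the earlier example.
+//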
+
+
+#define GET_PAGING_FILE_NUMBER(PTE) (((((PTE).u.Long) << 1) & 0XC) | \
+ (((PTE).u.Long) >> 10) & 3)
+
+#define GET_PAGING_FILE_OFFSET(PTE) ((((PTE).u.Long) >> 12) & 0x000FFFFF)
+
+#define MM_DEMAND_ZERO_WRITE_PTE (MM_READWRITE << 4)
+
+//
+// Check to see if a given PTE is NOT a demand zero PTE.
+//
+
+#define IS_PTE_NOT_DEMAND_ZERO(PTE) ((PTE).u.Long & (ULONG)0xFFFFFE0C)
+
+//
+// Prepare to make a valid PTE invalid (clear the present bit on the r3000).
+// No action is required.
+//
+
+#define MI_MAKING_VALID_PTE_INVALID(SYSTEM_WIDE)
+
+//
+// Prepare to make multiple valid PTEs invalid (clear the present bit on the
+// R3000). No action is required.
+//
+
+#define MI_MAKING_MULTIPLE_PTES_INVALID(SYSTEM_WIDE)
+
+//
+// Make a writable PTE a writable-copy PTE.  This takes advantage of
+// the fact that the protection field in the PTE (5 bit protection) is
+// set up such that write is a single bit.
+//
+
+#define MI_MAKE_PROTECT_WRITE_COPY(PTE) \
+ if ((PTE).u.Long & 0x40) { \
+ ((PTE).u.Long |= 0x10); \
+ }
+
+//
+// Handle the case when a page fault is taken and no PTE with the
+// valid bit clear is found. No action is required.
+//
+
+#define MI_NO_FAULT_FOUND(TEMP,PPTE,VA,PFNHELD) \
+ if (StoreInstruction && ((PPTE)->u.Hard.Dirty == 0)) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ } else { \
+ KeFillEntryTb ((PHARDWARE_PTE)PPTE, VA, FALSE); \
+ }
+ //
+ // If the PTE was already valid, assume that the PTE
+ // in the TB is stale and just reload the PTE.
+ //
+
+//
+// Capture the state of the dirty bit to the PFN element.
+//
+
+
+#define MI_CAPTURE_DIRTY_BIT_TO_PFN(PPTE,PPFN) \
+ if (((PPFN)->u3.e1.Modified == 0) && ((PPTE)->u.Hard.Dirty == 1)) { \
+ (PPFN)->u3.e1.Modified = 1; \
+ if (((PPFN)->OriginalPte.u.Soft.Prototype == 0) && \
+ ((PPFN)->u3.e1.WriteInProgress == 0)) { \
+ MiReleasePageFileSpace ((PPFN)->OriginalPte); \
+ (PPFN)->OriginalPte.u.Soft.PageFileHigh = 0; \
+ } \
+ }
+
+//
+// Determine if a virtual address is really a physical address.
+//
+
+#define MI_IS_PHYSICAL_ADDRESS(Va) \
+ (((ULONG)Va >= KSEG0_BASE) && ((ULONG)Va < KSEG2_BASE))
+
+//
+// Convert a "physical address" within kseg0 or 1 to a page frame number.
+// Not valid on 386.
+//
+
+#define MI_CONVERT_PHYSICAL_TO_PFN(Va) \
+ (((ULONG)Va << 2) >> 14)
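+
+//
+// Editorial sketch (not part of the original source): the left shift
+// strips the kseg bits and the right shift divides by PAGE_SIZE; for a
+// hypothetical kseg0 address of 0x80123000,
+//
+//     (0x80123000 << 2) >> 14 == 0x0048C000 >> 14 == 0x123
+//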
+
+
+
+//
+// The hardware PTE is defined in a MIPS specified header file.
+//
+
+//
+// Invalid PTEs have the following definition.
+//
+
+typedef struct _MMPTE_SOFTWARE {
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG PageFileLow0 : 2;
+ ULONG Protection : 5;
+ ULONG Valid : 1;
+ ULONG PageFileLow1 : 2;
+ ULONG PageFileHigh : 20;
+} MMPTE_SOFTWARE;
+
+
+typedef struct _MMPTE_TRANSITION {
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG filler2 : 2;
+ ULONG Protection : 5;
+ ULONG Valid : 1;
+ ULONG Dirty : 1;
+ ULONG CacheDisable : 1;
+ ULONG PageFrameNumber : 20;
+} MMPTE_TRANSITION;
+
+
+typedef struct _MMPTE_PROTOTYPE {
+ ULONG Prototype : 1;
+ ULONG filler3 : 1;
+ ULONG ReadOnly : 1;
+ ULONG ProtoAddressLow : 6;
+ ULONG Valid : 1;
+ ULONG ProtoAddressHigh : 21;
+ ULONG WhichPool : 1;
+} MMPTE_PROTOTYPE;
+
+
+typedef struct _MMPTE_LIST {
+ ULONG filler002 : 2;
+ ULONG OneEntry : 1;
+ ULONG filler06 : 6;
+ ULONG Valid : 1;
+ ULONG filler02 : 2;
+ ULONG NextEntry : 20;
+} MMPTE_LIST;
+
+
+typedef struct _MMPTE_SUBSECTION {
+ ULONG Prototype : 1;
+ ULONG SubsectionAddressLow : 3;
+ ULONG Protection : 5;
+ ULONG Valid : 1;
+ ULONG SubsectionAddressHigh : 21;
+ ULONG WhichPool : 1;
+} MMPTE_SUBSECTION;
+
+
+//
+// A Valid Page Table Entry on a MIPS R3000 has the following definition.
+//
+
+//
+// typedef struct _HARDWARE_PTE {
+// ULONG filler1 : 6;
+// ULONG Write : 1;
+// ULONG CopyOnWrite : 1;
+// ULONG Global : 1;
+// ULONG Valid : 1;
+// ULONG Dirty : 1;
+// ULONG CacheDisable : 1;
+// ULONG PageFrameNumber : 20;
+// } HARDWARE_PTE, *PHARDWARE_PTE;
+//
+
+
+//
+// A Page Table Entry on a MIPS R3000 has the following definition.
+//
+
+typedef struct _MMPTE {
+ union {
+ ULONG Long;
+ HARDWARE_PTE Hard;
+ MMPTE_PROTOTYPE Proto;
+ MMPTE_SOFTWARE Soft;
+ MMPTE_TRANSITION Trans;
+ MMPTE_LIST List;
+ MMPTE_SUBSECTION Subsect;
+ } u;
+} MMPTE;
+
+typedef MMPTE *PMMPTE;
+
diff --git a/private/ntos/mm/mips/mir4000.h b/private/ntos/mm/mips/mir4000.h
new file mode 100644
index 000000000..6a4682b55
--- /dev/null
+++ b/private/ntos/mm/mips/mir4000.h
@@ -0,0 +1,2075 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ mir4000.h
+
+Abstract:
+
+ This module contains the private data structures and procedure
+ prototypes for the hardware dependent portion of the
+ memory management system.
+
+ It is specifically tailored for the MIPS R4000 machine.
+
+Author:
+
+ Lou Perazzoli (loup) 9-Jan-1991
+
+Revision History:
+
+--*/
+
+#define HEADER_FILE
+#include <kxmips.h>
+
+//
+// The R4000 requires colored page support.
+//
+
+//
+// The R4000 supports large pages.
+//
+
+#define LARGE_PAGES 1
+
+
+/*++
+
+ Virtual Memory Layout on the R4000 is:
+
+ +------------------------------------+
+ 00000000 | |
+ | |
+ | |
+ | User Mode Addresses |
+ | |
+ | All pages within this range |
+                             |  are potentially accessible while  |
+ | the CPU is in USER mode. |
+ | |
+ | |
+ +------------------------------------+
+ 7ffff000 | 64k No Access Area |
+ +------------------------------------+
+ 80000000 | | KSEG_0
+ | HAL loads kernel and initial |
+ | boot drivers in first 16mb |
+ | of this region. |
+ | Kernel mode access only. |
+ | |
+ | Initial NonPaged Pool is within |
+                             |  KSEG_0                            |
+ | |
+ +------------------------------------+
+ A0000000 | | KSEG_1
+ | |
+ | |
+ | |
+ +------------------------------------+
+ C0000000 | Page Table Pages mapped through |
+ | this 4mb region |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ C0400000 | HyperSpace - working set lists |
+                             |  and per process memory management |
+ | structures mapped in this 4mb |
+ | region. |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C0800000 | NO ACCESS AREA (4MB) |
+ | |
+ +------------------------------------+
+ C0C00000 | System Cache Structures |
+ | reside in this 4mb region |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C1000000 | System cache resides here. |
+ | Kernel mode access only. |
+ | |
+ | |
+ +------------------------------------+
+ DE000000 | System mapped views |
+ | |
+ | |
+ +------------------------------------+
+ E1000000 | Start of paged system area |
+ | Kernel mode access only. |
+ | |
+ | |
+ +------------------------------------+
+ | |
+ | Kernel mode access only. |
+ | |
+ | |
+ FFBFFFFF | NonPaged System area |
+ +------------------------------------+
+ FFC00000 | Last 4mb reserved for HAL usage |
+ +------------------------------------+
+
+--*/
+
+//
+// PAGE_SIZE for the MIPS R4000 is 4k; a virtual address is a 20-bit
+// virtual page number with a PAGE_SHIFT-bit byte offset.
+//
+
+#define MM_VIRTUAL_PAGE_SHIFT 20
+
+//
+// Address space layout definitions.
+//
+
+//#define PDE_BASE ((ULONG)0xC0300000)
+
+//#define PTE_BASE ((ULONG)0xC0000000)
+
+#define MM_SYSTEM_RANGE_START (0x80000000)
+
+#define MM_SYSTEM_SPACE_START (0xC0800000)
+
+#define MM_SYSTEM_SPACE_END (0xFFFFFFFF)
+
+#define MM_NONPAGED_SYSTEM_SPACE_START (0xF0000000)
+
+#define PDE_TOP 0xC03FFFFF
+
+#define MM_PAGES_IN_KSEG0 (((ULONG)KSEG1_BASE - (ULONG)KSEG0_BASE) >> PAGE_SHIFT)
+
+#define HYPER_SPACE ((PVOID)0xC0400000)
+
+#define HYPER_SPACE_END 0xC07fffff
+
+//
+// Define the start and maximum size for the system cache.
+// Maximum size 464MB.
+//
+
+#define MM_SYSTEM_CACHE_START (0xC1000000)
+
+#define MM_SYSTEM_CACHE_WORKING_SET (0xC0C00000)
+
+#define MM_SYSTEM_CACHE_END (0xDE000000)
+
+#define MM_MAXIMUM_SYSTEM_CACHE_SIZE \
+ (((ULONG)MM_SYSTEM_CACHE_END - (ULONG)MM_SYSTEM_CACHE_START) >> PAGE_SHIFT)
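+
+//
+// Editorial note (not part of the original source):
+// 0xDE000000 - 0xC1000000 == 0x1D000000 bytes, i.e. exactly the 464mb
+// named in the comment above.
+//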
+
+//
+// Define area for mapping views into system space.
+//
+
+#define MM_SYSTEM_VIEW_START (0xDE000000)
+
+#define MM_SYSTEM_VIEW_SIZE (48*1024*1024)
+
+#define MM_PAGED_POOL_START ((PVOID)(0xE1000000))
+
+#define MM_LOWEST_NONPAGED_SYSTEM_START ((PVOID)(0xEB000000))
+
+#define MmProtopte_Base ((ULONG)0xE1000000)
+
+#define MM_NONPAGED_POOL_END ((PVOID)(0xFFBE0000))
+
+#define NON_PAGED_SYSTEM_END ((ULONG)0xFFFFFFF0) //quadword aligned.
+
+//
+// Define the absolute minimum and maximum counts for system PTEs.
+//
+
+#define MM_MINIMUM_SYSTEM_PTES 9000
+
+#define MM_MAXIMUM_SYSTEM_PTES 50000
+
+#define MM_DEFAULT_SYSTEM_PTES 15000
+
+//
+// Pool limits
+//
+
+//
+// The maximum amount of nonpaged pool that can be initially created.
+//
+
+#define MM_MAX_INITIAL_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The total amount of nonpaged pool (initial pool + expansion).
+//
+
+#define MM_MAX_ADDITIONAL_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The maximum amount of paged pool that can be created.
+//
+
+#define MM_MAX_PAGED_POOL ((ULONG)(192*1024*1024))
+
+#define MM_MAX_TOTAL_POOL (((ULONG)MM_NONPAGED_POOL_END) - ((ULONG)(MM_PAGED_POOL_START)))
+
+
+//
+// Structure layout definitions.
+//
+
+#define PAGE_DIRECTORY_MASK ((ULONG)0x003FFFFF)
+
+#define MM_VA_MAPPED_BY_PDE (0x400000)
+
+#define LOWEST_IO_ADDRESS (0x40000000)
+
+#define PTE_SHIFT (2)
+
+//
+// The number of bits in a physical address.
+//
+
+#define PHYSICAL_ADDRESS_BITS (36)
+
+#define MM_MAXIMUM_NUMBER_OF_COLORS (8)
+
+#define MM_PROTO_PTE_ALIGNMENT ((ULONG)MM_MAXIMUM_NUMBER_OF_COLORS * (ULONG)PAGE_SIZE)
+
+//
+// Maximum number of paging files.
+//
+
+#define MAX_PAGE_FILES 8
+
+//
+// Hyper space definitions.
+//
+
+#define HYPER_SPACE ((PVOID)0xC0400000)
+#define FIRST_MAPPING_PTE ((ULONG)0xC0400000)
+
+//
+// On the R4000 the number of mapping PTEs must be a multiple of 16 for alignment.
+//
+
+#define NUMBER_OF_MAPPING_PTES 255
+#define LAST_MAPPING_PTE \
+ ((ULONG)((ULONG)FIRST_MAPPING_PTE + (NUMBER_OF_MAPPING_PTES * PAGE_SIZE)))
+
+//
+// On R4000 this must be on a 64k virtual address boundary.
+//
+
+#define IMAGE_MAPPING_PTE ((PMMPTE)((ULONG)LAST_MAPPING_PTE + PAGE_SIZE))
+
+#define ZEROING_PAGE_PTE ((PMMPTE)((ULONG)IMAGE_MAPPING_PTE + PAGE_SIZE))
+
+#define WORKING_SET_LIST ((PVOID)((ULONG)ZEROING_PAGE_PTE + PAGE_SIZE))
+
+#define MM_MAXIMUM_WORKING_SET \
+ ((ULONG)((ULONG)2*1024*1024*1024 - 64*1024*1024) >> PAGE_SHIFT) //2Gb-64Mb
+
+#define MM_WORKING_SET_END ((ULONG)0xC07FF000)
+
+#define MM_PTE_GLOBAL_MASK 0x1
+#define MM_PTE_PROTOTYPE_MASK 0x4
+#define MM_PTE_VALID_MASK 0x2
+#define MM_PTE_DIRTY_MASK 0x4
+#define MM_PTE_CACHE_DISABLE_MASK 0x10
+#define MM_PTE_TRANSITION_MASK 0x100
+#define MM_PTE_WRITE_MASK 0x40000000
+#define MM_PTE_COPY_ON_WRITE_MASK 0x80000000
+#define MM_PTE_CACHE_ENABLE_MASK 0x0 // (PCR->AlignedCachePolicy)
+
+//
+// Bit fields to or into PTE to make a PTE valid based on the
+// protection field of the invalid PTE.
+//
+
+#define MM_PTE_NOACCESS          0x0   // not expressible on R4000
+#define MM_PTE_READONLY 0x0
+#define MM_PTE_READWRITE MM_PTE_WRITE_MASK
+#define MM_PTE_WRITECOPY (MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_EXECUTE 0x0 // read-only on R4000
+#define MM_PTE_EXECUTE_READ 0x0
+#define MM_PTE_EXECUTE_READWRITE MM_PTE_WRITE_MASK
+#define MM_PTE_EXECUTE_WRITECOPY (MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_NOCACHE (MM_PTE_CACHE_DISABLE_MASK)
+#define MM_PTE_GUARD            0x0   // not expressible on R4000
+#define MM_PTE_CACHE MM_PTE_CACHE_ENABLE_MASK
+
+#define MM_PROTECT_FIELD_SHIFT 3
+
+//
+// Zero PTE
+//
+
+#define MM_ZERO_PTE 0
+
+//
+// Zero Kernel PTE
+//
+
+#define MM_ZERO_KERNEL_PTE MM_PTE_GLOBAL_MASK
+
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE.
+//
+
+#define MM_DEMAND_ZERO_WRITE_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE for system space.
+//
+
+#define MM_KERNEL_DEMAND_ZERO_PTE ((MM_READWRITE << MM_PROTECT_FIELD_SHIFT) | MM_PTE_GLOBAL_MASK)
+
+//
+// A no access PTE for system space.
+//
+
+#define MM_KERNEL_NOACCESS_PTE ((MM_NOACCESS << MM_PROTECT_FIELD_SHIFT) | MM_PTE_GLOBAL_MASK)
+
+//
+// Dirty bit definitions for clean and dirty.
+//
+
+#define MM_PTE_CLEAN 0
+
+#define MM_PTE_DIRTY 1
+
+
+
+#define MM_STACK_ALIGNMENT 0x2000 //8k
+#define MM_STACK_OFFSET 0x1000 //align guard page on 4k offset
+
+//
+// System process definitions
+//
+
+#define PDE_PER_PAGE ((ULONG)1024)
+
+#define PTE_PER_PAGE ((ULONG)1024)
+
+//
+// Number of page table pages for user addresses.
+//
+
+#define MM_USER_PAGE_TABLE_PAGES (512)
+
+//
+// R4000 has 8 colors.
+//
+
+#define MM_NUMBER_OF_COLORS 8
+
+//
+// Mask for obtaining color from a physical page number.
+//
+
+#define MM_COLOR_MASK 7
+
+//
+// Define secondary color stride.
+//
+
+#define MM_COLOR_STRIDE 11
+
+//
+// Boundary upon which pages of like color are aligned.
+//
+
+#define MM_COLOR_ALIGNMENT 0x8000
+
+//
+// Mask for isolating color from virtual address.
+//
+
+#define MM_COLOR_MASK_VIRTUAL 0x7000
+
+//
+// Define 1mb worth of secondary colors
+//
+
+#define MM_SECONDARY_COLORS_DEFAULT (256)
+
+#define MM_SECONDARY_COLORS_MIN (2)
+
+#define MM_SECONDARY_COLORS_MAX (2048)
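+
+//
+// Editorial note (not part of the original source): with 4k pages the
+// default of 256 colors spans 0x100000 >> PAGE_SHIFT == 256 pages, i.e.
+// the "1mb worth" named above.
+//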
+
+//
+// Mask for isolating the secondary color from a physical page number.
+//
+
+extern ULONG MmSecondaryColorMask;
+
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE (
+// OUT OUTPTE,
+// IN FRAME,
+// IN PMASK,
+//    IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro makes a valid PTE from a page frame number, protection mask,
+// and owner.
+//
+// Arguments
+//
+//    OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+//    FRAME - Supplies the page frame number for the PTE.
+//
+//    PMASK - Supplies the protection to set in the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE which is being made valid.
+// For prototype PTEs NULL should be specified.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE(OUTPTE,FRAME,PMASK,PPTE) \
+ { \
+ (OUTPTE).u.Long = ((FRAME << 6) | \
+ (MmProtectToPteMask[PMASK]) | \
+ MM_PTE_VALID_MASK); \
+ if (((PMMPTE)PPTE) >= MiGetPteAddress(MM_SYSTEM_SPACE_START)) { \
+ (OUTPTE).u.Hard.Global = 1; \
+ } \
+ }
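+
+//
+// Editorial note (not part of the original source): the frame is shifted
+// by 6 here (versus 12 on the R3000) because the R4000 EntryLo format
+// keeps the PFN at bit 6, above the G, V, D and C control bits.
+//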
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_TRANSITION (
+// IN OUT OUTPTE
+// IN PROTECT
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid pte and turns it into a transition PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the current valid PTE. This PTE is then
+// modified to become a transition PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
+ (OUTPTE).u.Long = ((((OUTPTE).u.Long & 0xffffffc0) << 3) | \
+ (((PROTECT) << MM_PROTECT_FIELD_SHIFT)) | \
+ ((OUTPTE).u.Long & MM_PTE_GLOBAL_MASK) | \
+ MM_PTE_TRANSITION_MASK);
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE (
+// OUT OUTPTE,
+// IN PAGE,
+// IN PROTECT,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+//    This macro builds a transition PTE from a page frame number and
+//    protection.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// PAGE - Supplies the page frame number for the PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// PPTE - Supplies a pointer to the PTE, this is used to determine
+// the owner of the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE(OUTPTE,PAGE,PROTECT,PPTE) \
+ (OUTPTE).u.Long = 0; \
+ (OUTPTE).u.Trans.PageFrameNumber = PAGE; \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Protection = PROTECT; \
+ if (((PMMPTE)PPTE) >= MiGetPteAddress(MM_SYSTEM_SPACE_START)) {\
+ (OUTPTE).u.Hard.Global = 1; \
+ }
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE_VALID (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a transition pte and makes it a valid PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE) \
+ (OUTPTE).u.Long = ((((PPTE)->u.Long >> 3) & 0xffffffc0) | \
+ (MmProtectToPteMask[(PPTE)->u.Trans.Protection]) | \
+ MM_PTE_VALID_MASK); \
+ if (((PMMPTE)PPTE) >= MiGetPteAddress(MM_SYSTEM_SPACE_START)) { \
+ (OUTPTE).u.Hard.Global = 1; \
+ }
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_BIT_IF_SYSTEM (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global bit if the pointer PTE is within
+// system space.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE becoming valid.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_BIT_IF_SYSTEM(OUTPTE,PPTE) \
+ if (((PMMPTE)PPTE) >= MiGetPteAddress(MM_SYSTEM_SPACE_START)) { \
+ (OUTPTE).u.Hard.Global = 1; \
+ }
+
+
+//++
+//VOID
+//MI_SET_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set dirty.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_DIRTY(PTE) (PTE).u.Long |= HARDWARE_PTE_DIRTY_MASK
+
+
+//++
+//VOID
+//MI_SET_PTE_CLEAN (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro clears the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set clear.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_CLEAN(PTE) (PTE).u.Long &= ~HARDWARE_PTE_DIRTY_MASK
+
+
+
+//++
+//BOOLEAN
+//MI_IS_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to check.
+//
+// Return Value:
+//
+// TRUE if the page is dirty (modified), FALSE otherwise.
+//
+//--
+
+#define MI_IS_PTE_DIRTY(PTE) ((PTE).u.Hard.Dirty != 0)
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_STATE (
+// IN MMPTE PTE,
+// IN ULONG STATE
+// );
+//
+// Routine Description:
+//
+//    This macro sets the global bit in the PTE to the specified state.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set global state into.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_STATE(PTE,STATE) \
+ (PTE).u.Hard.Global = STATE;
+
+
+
+//++
+//VOID
+//MI_ENABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// enabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_ENABLE_CACHING(PTE) ((PTE).u.Hard.CachePolicy = PCR->CachePolicy)
+
+
+
+//++
+//VOID
+//MI_DISABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// disabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_DISABLE_CACHING(PTE) ((PTE).u.Hard.CachePolicy = UNCACHED_POLICY)
+
+//++
+//BOOLEAN
+//MI_IS_CACHING_DISABLED (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and returns TRUE if caching is
+// disabled.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the valid PTE.
+//
+// Return Value:
+//
+// TRUE if caching is disabled, FALSE if it is enabled.
+//
+//--
+
+#define MI_IS_CACHING_DISABLED(PPTE) \
+ ((PPTE)->u.Hard.CachePolicy == UNCACHED_POLICY)
+
+
+//++
+//VOID
+//MI_SET_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and indicates that
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_SET_PFN_DELETED(PPFN) (((ULONG)(PPFN)->PteAddress &= 0x7FFFFFFF ))
+
+
+//++
+//BOOLEAN
+//MI_IS_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+//    This macro takes a pointer to a PFN element and determines whether
+//    the PFN is no longer in use.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// TRUE if PFN is no longer used, FALSE if it is still being used.
+//
+//--
+
+#define MI_IS_PFN_DELETED(PPFN) \
+ (((ULONG)(PPFN)->PteAddress & 0x80000000) == 0)
+
+
+//++
+//VOID
+//MI_CHECK_PAGE_ALIGNMENT (
+// IN ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro takes a PFN element number (Page) and checks to see
+// if the virtual alignment for the previous address of the page
+//    is compatible with the new address of the page.  If they are
+//    not compatible, the D cache is flushed.
+//
+// Arguments
+//
+// PAGE - Supplies the PFN element.
+// PPTE - Supplies a pointer to the new PTE which will contain the page.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_CHECK_PAGE_ALIGNMENT(PAGE,COLOR) \
+{ \
+ PMMPFN PPFN; \
+ ULONG OldColor; \
+ PPFN = MI_PFN_ELEMENT(PAGE); \
+ OldColor = PPFN->u3.e1.PageColor; \
+ if ((COLOR) != OldColor) { \
+ KeChangeColorPage((PVOID)((ULONG)(COLOR) << PAGE_SHIFT), \
+ (PVOID)((ULONG)(OldColor << PAGE_SHIFT)), \
+                          (PAGE));                                  \
+ PPFN->u3.e1.PageColor = COLOR; \
+ } \
+}
+
+
+//++
+//VOID
+//MI_INITIALIZE_HYPERSPACE_MAP (
+// HYPER_PAGE
+// );
+//
+// Routine Description:
+//
+// This macro initializes the PTEs reserved for double mapping within
+// hyperspace.
+//
+// Arguments
+//
+//    HYPER_PAGE - Physical page number for the page to become hyperspace.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_INITIALIZE_HYPERSPACE_MAP(HYPER_PAGE) \
+ { \
+ PMMPTE NextPte; \
+ ULONG LastEntry; \
+ PMMPTE Base; \
+ ULONG i; \
+ KIRQL OldIrql; \
+ Base = MiMapPageInHyperSpace (HYPER_PAGE, &OldIrql); \
+ LastEntry = NUMBER_OF_MAPPING_PTES - MM_COLOR_MASK; \
+ NextPte = (PMMPTE)((PCHAR)Base + BYTE_OFFSET(MmFirstReservedMappingPte));\
+ for (i = 0; i < MM_NUMBER_OF_COLORS; i++ ) { \
+ NextPte->u.Hard.PageFrameNumber = LastEntry; \
+ NextPte += 1; \
+ } \
+ MiUnmapPageInHyperSpace (OldIrql); \
+ }
+
+
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_PTE (
+// IN PMMPTE PTEADDRESS
+// );
+//
+// Routine Description:
+//
+//    This macro determines the page's color based on the PTE address
+//    that maps the page.
+//
+// Arguments
+//
+//    PTEADDRESS - Supplies the PTE address the page is (or was) mapped at.
+//
+// Return Value:
+//
+//    The page's color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_PTE(PTEADDRESS) \
+ ((ULONG)((MmSystemPageColor += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(PTEADDRESS)) >> 2) & MM_COLOR_MASK))
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_VA (
+// IN PVOID ADDRESS
+// );
+//
+// Routine Description:
+//
+//    This macro determines the page's color based on the virtual address
+//    at which the page is (or was) mapped.
+//
+// Arguments
+//
+//    ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+//    The page's color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_VA(ADDRESS) \
+ ((ULONG)((MmSystemPageColor += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(ADDRESS)) >> PAGE_SHIFT) & MM_COLOR_MASK))
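+
+//
+// Editorial sketch (not part of the original source): the low MM_COLOR_MASK
+// bits are the primary color from the va, the rest a striding secondary
+// color.  Assuming MmSystemPageColor was 0 and MmSecondaryColorMask is
+// 0xF8 (256 secondary colors):
+//
+//     MI_GET_PAGE_COLOR_FROM_VA((PVOID)0x12345000)
+//         == ((0 + 11) & 0xF8) | ((0x12345000 >> 12) & 7)
+//         == 0x08 | 0x05 == 0x0D
+//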
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_PTE_PROCESS (
+// IN PCHAR COLOR,
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+//    This macro determines the page's color based on the PTE address
+//    that maps the page.
+//
+// Arguments
+//
+//    PTE - Supplies the PTE address the page is (or was) mapped at.
+//
+//    COLOR - Supplies a pointer to the color seed to advance.
+//
+// Return Value:
+//
+//    The page's color.
+//
+//--
+
+
+#define MI_PAGE_COLOR_PTE_PROCESS(PTE,COLOR) \
+ ((ULONG)(((*(COLOR)) += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(PTE)) >> 2) & MM_COLOR_MASK))
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_VA_PROCESS (
+// IN PVOID ADDRESS,
+// IN PEPROCESS COLOR
+// );
+//
+// Routine Description:
+//
+//    This macro determines the page's color based on the virtual address
+//    at which the page is (or was) mapped.
+//
+// Arguments
+//
+//    ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+//    COLOR - Supplies a pointer to the color seed to advance.
+//
+// Return Value:
+//
+//    The page's color.
+//
+//--
+
+#define MI_PAGE_COLOR_VA_PROCESS(ADDRESS,COLOR) \
+ ((ULONG)(((*(COLOR)) += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(ADDRESS)) >> PAGE_SHIFT) & MM_COLOR_MASK))
+
+
+//++
+//ULONG
+//MI_GET_NEXT_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the next color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the next of.
+//
+// Return Value:
+//
+// Next color in sequence.
+//
+//--
+
+#define MI_GET_NEXT_COLOR(COLOR) ((COLOR + 1) & MM_COLOR_MASK)
+
+
+//++
+//ULONG
+//MI_GET_PREVIOUS_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the previous color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the previous of.
+//
+// Return Value:
+//
+// Previous color in sequence.
+//
+//--
+
+#define MI_GET_PREVIOUS_COLOR(COLOR) ((COLOR - 1) & MM_COLOR_MASK)
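+
+//
+// The mask makes both sequences wrap; for illustration, with
+// MM_COLOR_MASK == 3 (four primary colors), MI_GET_NEXT_COLOR(3) == 0
+// and MI_GET_PREVIOUS_COLOR(0) == 3.
+//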
+
+#define MI_GET_COLOR_FROM_SECONDARY(COLOR) ((COLOR) & MM_COLOR_MASK)
+
+//
+// The top bits of the prototype PTE track the secondary color;
+// the primary color may NOT match the lower bits of the prototype PTE
+// in the case of fork.
+//
+
+#define MI_GET_SECONDARY_COLOR(PAGE,PFN) \
+ ((((ULONG)(PAGE) & MmSecondaryColorMask)) | (PFN)->u3.e1.PageColor)
+
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_BY_COLOR (
+// OUT ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. It does NOT remove the page
+// from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_BY_COLOR(PAGE,COLOR) \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_ANY_COLOR (
+// OUT ULONG PAGE,
+// IN OUT ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. If no page of the desired
+// color exists, all colored lists are searched for a page.
+// It does NOT remove the page from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate and returns the
+// color of the page located.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_ANY_COLOR(PAGE,COLOR) \
+ { \
+ if (MmTotalPagesForPagingFile == 0) { \
+ PAGE = MM_EMPTY_LIST; \
+ } else { \
+ while (MmModifiedPageListByColor[COLOR].Flink == \
+ MM_EMPTY_LIST) { \
+ COLOR = MI_GET_NEXT_COLOR(COLOR); \
+ } \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink; \
+ } \
+ }
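+
+//
+// Illustrative use in a modified page writer style scan (hypothetical
+// names); COLOR is updated to the list the page was actually found on:
+//
+//     Color = MI_GET_NEXT_COLOR (LastColor);
+//     MI_GET_MODIFIED_PAGE_ANY_COLOR (Page, Color);
+//     if (Page != MM_EMPTY_LIST) {
+//         ... write the page, then unlink it from its list ...
+//     }
+//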
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_WRITE_COPY (
+// IN OUT PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if the PTE indicates that the
+// page is writable and if so it clears the write bit and
+// sets the copy-on-write bit.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_WRITE_COPY(PPTE) \
+ if ((PPTE)->u.Hard.Write == 1) { \
+ (PPTE)->u.Hard.CopyOnWrite = 1; \
+ (PPTE)->u.Hard.Dirty = MM_PTE_CLEAN; \
+ }
+
+
+//++
+//ULONG
+//MI_DETERMINE_OWNER (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro examines the virtual address of the PTE and determines
+// if the PTE resides in system space or user space.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// 1 if the owner is USER_MODE, 0 if the owner is KERNEL_MODE.
+//
+//--
+
+#define MI_DETERMINE_OWNER(PPTE) \
+ ((((PPTE) <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) || \
+ ((PPTE) >= MiGetPdeAddress(NULL) && \
+ ((PPTE) <= MiGetPdeAddress(MM_HIGHEST_USER_ADDRESS)))) ? 1 : 0)
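+
+//
+// For example, a PTE that maps a user address, or a PDE that maps user
+// space page tables, is user owned:
+//
+//     MI_DETERMINE_OWNER (MiGetPteAddress (MM_HIGHEST_USER_ADDRESS)) == 1
+//     MI_DETERMINE_OWNER (MiGetPteAddress (MM_SYSTEM_RANGE_START)) == 0
+//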
+
+
+//++
+//VOID
+//MI_SET_ACCESSED_IN_PTE (
+// IN OUT PMMPTE PPTE,
+// IN ULONG ACCESSED
+// );
+//
+// Routine Description:
+//
+// This macro sets the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+// not implemented on mips r4000.
+#define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED)
+
+
+//++
+//ULONG
+//MI_GET_ACCESSED_IN_PTE (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro returns the state of the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the ACCESSED field.
+//
+//--
+
+#define MI_GET_ACCESSED_IN_PTE(PPTE) 0
+
+
+
+//++
+//VOID
+//MI_SET_OWNER_IN_PTE (
+// IN PMMPTE PPTE,
+// IN ULONG OWNER
+// );
+//
+// Routine Description:
+//
+// This macro sets the owner field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+// not implemented on r4000.
+#define MI_SET_OWNER_IN_PTE(PPTE,OWNER)
+
+
+
+//++
+//ULONG
+//MI_GET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro gets the owner field from the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the OWNER field.
+//
+//--
+
+// always kernel mode on r4000.
+#define MI_GET_OWNER_IN_PTE(PPTE) KernelMode
+
+//
+// Bit mask to clear fields in a PTE before OR'ing in the prototype PTE offset.
+//
+
+#define CLEAR_FOR_PROTO_PTE_ADDRESS ((ULONG)0xf)
+
+
+// Bit mask to clear fields in a PTE before OR'ing in the paging file location.
+
+#define CLEAR_FOR_PAGE_FILE ((ULONG)(0x0F9))
+
+//++
+//VOID
+//SET_PAGING_FILE_INFO (
+// IN OUT MMPTE PTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// FILEINFO - Supplies the number of the paging file.
+//
+// OFFSET - Supplies the offset into the paging file.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define SET_PAGING_FILE_INFO(PTE,FILEINFO,OFFSET) \
+ ((((PTE).u.Long & CLEAR_FOR_PAGE_FILE) | \
+ (((FILEINFO) << 9) | \
+ (OFFSET << 12))))
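+
+//
+// The encoding places the paging file number in bits <11:9> and the page
+// file offset in bits <31:12> (see GET_PAGING_FILE_NUMBER and
+// GET_PAGING_FILE_OFFSET below). For example, file 2 at offset 0x45
+// contributes (2 << 9) | (0x45 << 12) == 0x45400, OR'ed over the bits
+// preserved by CLEAR_FOR_PAGE_FILE.
+//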
+
+//++
+//PMMPTE
+//MiPteToProto (
+// IN PMMPTE lpte
+// );
+//
+// Routine Description:
+//
+// This macro returns the address of the corresponding prototype PTE which
+// was encoded earlier into the supplied PTE.
+//
+// NOTE THAT A PROTOPTE CAN ONLY RESIDE IN PAGED POOL!
+//
+// MAX SIZE = 2^(2+7+21) = 2^30 = 1GB.
+//
+// NOTE that the valid bit must be zero!
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Pointer to the prototype PTE that backs this PTE.
+//
+//--
+
+#define MiPteToProto(lpte) \
+ ((PMMPTE)((((lpte)->u.Long >> 1) & 0x3FFFFFFC) + \
+ MmProtopte_Base))
+
+
+//++
+//ULONG
+//MiProtoAddressForPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// MiProtoAddressForPte returns the bit field to OR into a PTE to
+// make it reference the supplied prototype PTE; the field includes
+// the prototype bit, MM_PTE_PROTOTYPE_MASK.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForPte(proto_va) \
+ ((ULONG)((((ULONG)proto_va - MmProtopte_Base) << 1) | MM_PTE_PROTOTYPE_MASK))
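+
+//
+// MiProtoAddressForPte and MiPteToProto are inverses for prototype PTEs
+// in paged pool; the same round trip is exercised by the sanity checks
+// at system initialization (illustrative):
+//
+//     MMPTE Temp;
+//     Temp.u.Long = MiProtoAddressForPte (ProtoPte);
+//     ASSERT (MiPteToProto (&Temp) == ProtoPte);
+//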
+
+
+
+//++
+//ULONG
+//MiProtoAddressForKernelPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// MiProtoAddressForKernelPte returns the bit field to OR into a PTE
+// to make it reference the supplied prototype PTE; the field includes
+// the prototype bit, MM_PTE_PROTOTYPE_MASK.
+//
+// This macro also sets any other information (such as global bits)
+// required for kernel mode PTEs.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForKernelPte(proto_va) \
+ (((ULONG)(proto_va) < (ULONG)KSEG1_BASE) ? \
+ ((ULONG)((((ULONG)proto_va - (ULONG)MmNonPagedPoolStart) << 1) | MM_PTE_PROTOTYPE_MASK | \
+ 0x40000000 | MM_PTE_GLOBAL_MASK)) \
+ : ((ULONG)((((ULONG)proto_va - MmProtopte_Base) << 1) | MM_PTE_PROTOTYPE_MASK | \
+ MM_PTE_GLOBAL_MASK)))
+
+
+
+#define MM_SUBSECTION_MAP (128*1024*1024)
+
+
+//++
+//PSUBSECTION
+//MiGetSubsectionAddress (
+// IN PMMPTE lpte
+// );
+//
+// Routine Description:
+//
+// This macro takes a PTE and returns the address of the subsection that
+// the PTE refers to. Subsections are quadword aligned structures
+// allocated from nonpaged pool.
+//
+// NOTE THIS MACRO LIMITS THE SIZE OF NONPAGED POOL!
+// MAXIMUM NONPAGED POOL = 2^(24+3) = 2^27 = 128mb in KSEG_0 POOL AND
+// 128 MB IN EXPANDED POOL.
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// A pointer to the subsection referred to by the supplied PTE.
+//
+//--
+
+#define MiGetSubsectionAddress(lpte) \
+ (((lpte)->u.Long & 0x1) ? \
+ ((PSUBSECTION)(((((lpte)->u.Long >> 8) << 3) + (ULONG)MmSubsectionBase))) \
+ : ((PSUBSECTION)((ULONG)MM_NONPAGED_POOL_END - ((((lpte)->u.Long) >> 8) << 3))))
+
+
+
+//++
+//ULONG
+//MiGetSubsectionAddressForPte (
+// IN PSUBSECTION VA
+// );
+//
+// Routine Description:
+//
+// This macro takes the address of a subsection and encodes it for use
+// in a PTE.
+//
+// NOTE - THE SUBSECTION ADDRESS MUST BE QUADWORD ALIGNED!
+//
+// Arguments
+//
+// VA - Supplies a pointer to the subsection to encode.
+//
+// Return Value:
+//
+// The mask to set into the PTE to make it reference the supplied
+// subsection.
+//
+//--
+
+#define MiGetSubsectionAddressForPte(VA) \
+ (((ULONG)(VA) < (ULONG)KSEG1_BASE) ? \
+ ((((ULONG)(VA) - (ULONG)MmSubsectionBase) << 5) | 0x1) \
+ : (((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA) << 5))
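+
+//
+// MiGetSubsectionAddressForPte and MiGetSubsectionAddress are likewise
+// inverses for quadword aligned subsection addresses (illustrative):
+//
+//     MMPTE Temp;
+//     Temp.u.Long = MiGetSubsectionAddressForPte (Subsection);
+//     ASSERT ((PSUBSECTION)MiGetSubsectionAddress (&Temp) == Subsection);
+//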
+
+
+
+//++
+//PMMPTE
+//MiGetPdeAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeAddress returns the address of the PDE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PDE for.
+//
+// Return Value:
+//
+// The address of the PDE.
+//
+//--
+
+#define MiGetPdeAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 22) << 2) + PDE_BASE))
+
+
+
+//++
+//PMMPTE
+//MiGetPteAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteAddress returns the address of the PTE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PTE for.
+//
+// Return Value:
+//
+// The address of the PTE.
+//
+//--
+
+#define MiGetPteAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 12) << 2) + PTE_BASE))
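+
+//
+// Worked example, assuming a 4K page size: for va 0x12345678,
+// MiGetPteAddress yields PTE_BASE + ((0x12345678 >> 12) << 2) ==
+// PTE_BASE + 0x48D14, and MiGetPdeAddress yields
+// PDE_BASE + ((0x12345678 >> 22) << 2) == PDE_BASE + 0x120.
+//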
+
+
+//++
+//ULONG
+//MiGetPdeOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeOffset returns the offset into a page directory
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page directory table the corresponding PDE is at.
+//
+//--
+
+#define MiGetPdeOffset(va) (((ULONG)(va)) >> 22)
+
+
+//++
+//ULONG
+//MiGetPteOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteOffset returns the offset into a page table page
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page table page the corresponding PTE is at.
+//
+//--
+
+#define MiGetPteOffset(va) ((((ULONG)(va)) << 10) >> 22)
+
+
+//++
+//PMMPTE
+//MiGetProtoPteAddress (
+// IN PMMPTE VAD,
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// MiGetProtoPteAddress returns a pointer to the prototype PTE which
+// is mapped by the given virtual address descriptor and address within
+// the virtual address descriptor.
+//
+// Arguments
+//
+// VAD - Supplies a pointer to the virtual address descriptor that contains
+// the VA.
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// A pointer to the proto PTE which corresponds to the VA.
+//
+//--
+
+#define MiGetProtoPteAddress(VAD,VA) \
+ (((((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte) <= (ULONG)(VAD)->LastContiguousPte) ? \
+ ((PMMPTE)(((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte)) : \
+ MiGetProtoPteAddressExtended ((VAD),(VA)))
+
+
+
+//++
+//PVOID
+//MiGetVirtualAddressMappedByPte (
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// MiGetVirtualAddressMappedByPte returns the virtual address
+// which is mapped by a given PTE address.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to get the virtual address for.
+//
+// Return Value:
+//
+// Virtual address mapped by the PTE.
+//
+//--
+
+#define MiGetVirtualAddressMappedByPte(va) ((PVOID)((ULONG)(va) << 10))
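+
+//
+// This is the inverse of MiGetPteAddress: shifting the PTE address left
+// by 10 undoes the (>> 12, << 2) of the forward mapping, and PTE_BASE,
+// whose low 22 bits are zero, shifts out of the 32-bit result. Hence
+// MiGetVirtualAddressMappedByPte (MiGetPteAddress (va)) is va rounded
+// down to a page boundary.
+//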
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_NUMBER (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the paging file number from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file number.
+//
+//--
+
+#define GET_PAGING_FILE_NUMBER(PTE) ((((PTE).u.Long) >> 9) & 0x7)
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_OFFSET (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the offset into the paging file from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file offset.
+//
+//--
+
+#define GET_PAGING_FILE_OFFSET(PTE) ((((PTE).u.Long) >> 12) & 0x000FFFFF)
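+
+//
+// Applied to the SET_PAGING_FILE_INFO example above (file 2, offset
+// 0x45), GET_PAGING_FILE_NUMBER yields 2 and GET_PAGING_FILE_OFFSET
+// yields 0x45.
+//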
+
+//++
+//ULONG
+//IS_PTE_NOT_DEMAND_ZERO (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if a given PTE is NOT a demand zero PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Returns 0 if the PTE is demand zero, non-zero otherwise.
+//
+//--
+
+#define IS_PTE_NOT_DEMAND_ZERO(PTE) ((PTE).u.Long & (ULONG)0xFFFFF107)
+#define MM_DEMAND_ZERO_WRITE_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+#define MM_KERNEL_DEMAND_ZERO_PTE ((MM_READWRITE << MM_PROTECT_FIELD_SHIFT) | MM_PTE_GLOBAL_MASK)
+
+#define MM_KERNEL_NOACCESS_PTE ((MM_NOACCESS << MM_PROTECT_FIELD_SHIFT) | MM_PTE_GLOBAL_MASK)
+
+
+
+//++
+//VOID
+//MI_MAKING_VALID_PTE_INVALID(
+// IN ULONG SYSTEM_WIDE
+// );
+//
+// Routine Description:
+//
+// Prepare to make a single valid PTE invalid.
+// No action is required on the MIPS R4000.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+// not implemented on r4000.
+#define MI_MAKING_VALID_PTE_INVALID(SYSTEM_WIDE)
+
+
+
+//++
+//VOID
+//MI_MAKING_MULTIPLE_PTES_INVALID(
+// IN ULONG SYSTEM_WIDE
+// );
+//
+// Routine Description:
+//
+// Prepare to make multiple valid PTEs invalid.
+// No action is required on the MIPS R4000.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+// not implemented on r4000.
+#define MI_MAKING_MULTIPLE_PTES_INVALID(SYSTEM_WIDE)
+
+//
+// Make a writable PTE a writable-copy PTE. This takes advantage of
+// the fact that the 5-bit protection field in the PTE is laid out
+// such that write access is a single bit.
+//
+
+#define MI_MAKE_PROTECT_WRITE_COPY(PTE) \
+ if ((PTE).u.Long & 0x20) { \
+ ((PTE).u.Long |= 0x8); \
+ }
+
+
+//++
+//VOID
+//MI_SET_PAGE_DIRTY(
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit (and release page file space).
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PAGE_DIRTY(PPTE,VA,PFNHELD) \
+ if ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ }
+
+
+
+//++
+//VOID
+//MI_NO_FAULT_FOUND(
+// IN TEMP,
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro handles the case when a page fault is taken and no
+// PTE with the valid bit clear is found.
+//
+// Arguments
+//
+// TEMP - Supplies a temporary for usage.
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_NO_FAULT_FOUND(TEMP,PPTE,VA,PFNHELD) \
+ if (StoreInstruction && ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN)) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ } else { \
+ KeFillEntryTb ((PHARDWARE_PTE)PPTE, VA, FALSE); \
+ }
+// KeFillEntryTb((PHARDWARE_PTE)(MiGetPdeAddress(VA)),(PVOID)PPTE,FALSE);
+ //
+ // If the PTE was already valid, assume that the PTE
+    // in the TB is stale and just reload the PTE.
+ //
+
+
+//++
+//ULONG
+//MI_CAPTURE_DIRTY_BIT_TO_PFN (
+// IN PMMPTE PPTE,
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro captures the state of the dirty bit into the PFN
+// element and frees any associated page file space if the PTE
+// has been modified.
+//
+// NOTE - THE PFN LOCK MUST BE HELD!
+//
+// Arguments
+//
+// PPTE - Supplies the PTE to operate upon.
+//
+// PPFN - Supplies a pointer to the PFN database element that corresponds
+// to the page mapped by the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_CAPTURE_DIRTY_BIT_TO_PFN(PPTE,PPFN) \
+ if (((PPFN)->u3.e1.Modified == 0) && \
+ ((PPTE)->u.Hard.Dirty == MM_PTE_DIRTY)) { \
+ (PPFN)->u3.e1.Modified = 1; \
+ if (((PPFN)->OriginalPte.u.Soft.Prototype == 0) && \
+ ((PPFN)->u3.e1.WriteInProgress == 0)) { \
+ MiReleasePageFileSpace ((PPFN)->OriginalPte); \
+ (PPFN)->OriginalPte.u.Soft.PageFileHigh = 0; \
+ } \
+ }
+
+
+
+//++
+//BOOLEAN
+//MI_IS_PHYSICAL_ADDRESS (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro determines if a given virtual address is really a
+// physical address.
+//
+// Arguments
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// FALSE if it is not a physical address, TRUE if it is.
+//
+//--
+
+#define MI_IS_PHYSICAL_ADDRESS(Va) \
+ (((ULONG)Va >= KSEG0_BASE) && ((ULONG)Va < KSEG2_BASE))
+
+
+
+
+//++
+//ULONG
+//MI_CONVERT_PHYSICAL_TO_PFN (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro converts a physical address (see MI_IS_PHYSICAL_ADDRESS)
+// to its corresponding physical frame number.
+//
+// Arguments
+//
+// VA - Supplies a pointer to the physical address.
+//
+// Return Value:
+//
+// Returns the PFN for the page.
+//
+//--
+
+#define MI_CONVERT_PHYSICAL_TO_PFN(Va) (((ULONG)Va << 3) >> 15)
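+
+//
+// The double shift strips the KSEG0/KSEG1 segment bits and divides by
+// the 4K page size in a single expression: for Va == 0x80123000 (KSEG0),
+// ((0x80123000 << 3) >> 15) == (0x00918000 >> 15) == 0x123, the PFN of
+// physical address 0x123000.
+//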
+
+
+
+
+typedef struct _MMCOLOR_TABLES {
+ ULONG Flink;
+ PVOID Blink;
+} MMCOLOR_TABLES, *PMMCOLOR_TABLES;
+
+typedef struct _MMPRIMARY_COLOR_TABLES {
+ LIST_ENTRY ListHead;
+} MMPRIMARY_COLOR_TABLES, *PMMPRIMARY_COLOR_TABLES;
+
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+extern MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+#endif
+
+extern PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+extern ULONG MmTotalPagesForPagingFile;
+
+//
+// The hardware PTE is defined in ../inc/mips.h
+//
+
+//
+// Invalid PTEs have the following definition.
+//
+
+typedef struct _MMPTE_SOFTWARE {
+ ULONG Global : 1;
+ ULONG Valid : 1;
+ ULONG Prototype : 1;
+ ULONG Protection : 5;
+ ULONG Transition : 1;
+ ULONG PageFileLow : 3;
+ ULONG PageFileHigh : 20;
+} MMPTE_SOFTWARE;
+
+
+typedef struct _MMPTE_TRANSITION {
+ ULONG Global : 1;
+ ULONG Valid : 1;
+ ULONG Prototype : 1;
+ ULONG Protection : 5;
+ ULONG Transition : 1;
+ ULONG PageFrameNumber : 23;
+} MMPTE_TRANSITION;
+
+
+typedef struct _MMPTE_PROTOTYPE {
+ ULONG Global : 1;
+ ULONG Valid : 1;
+ ULONG Prototype : 1;
+ ULONG ProtoAddressLow : 6;
+ ULONG ProtoAddressHigh : 22;
+ ULONG ReadOnly : 1;
+} MMPTE_PROTOTYPE;
+
+typedef struct _MMPTE_SUBSECTION {
+ ULONG WhichPool : 1;
+ ULONG Valid : 1;
+ ULONG Prototype : 1;
+ ULONG Protection : 5;
+ ULONG SubsectionAddressLow : 4;
+ ULONG SubsectionAddressHigh : 20;
+} MMPTE_SUBSECTION;
+
+typedef struct _MMPTE_LIST {
+ ULONG filler01 : 1;
+ ULONG Valid : 1;
+ ULONG filler0 : 9;
+ ULONG OneEntry : 1;
+ ULONG NextEntry : 20;
+} MMPTE_LIST;
+
+
+// typedef struct _HARDWARE_PTE {
+// ULONG Global : 1;
+// ULONG Valid : 1;
+// ULONG Dirty : 1;
+// ULONG CachePolicy : 3;
+// ULONG PageFrameNumber : 24;
+// ULONG Write : 1;
+// ULONG CopyOnWrite : 1;
+// } HARDWARE_PTE, *PHARDWARE_PTE;
+
+
+//
+// A Page Table Entry on a MIPS R4000 has the following definition.
+//
+
+typedef struct _MMPTE {
+ union {
+ ULONG Long;
+ HARDWARE_PTE Hard;
+ HARDWARE_PTE Flush;
+ MMPTE_PROTOTYPE Proto;
+ MMPTE_SOFTWARE Soft;
+ MMPTE_TRANSITION Trans;
+ MMPTE_SUBSECTION Subsect;
+ MMPTE_LIST List;
+ } u;
+} MMPTE;
+
+typedef MMPTE *PMMPTE;
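+
+//
+// The union allows a single 32-bit PTE to be decoded according to the
+// state bits it contains, e.g. (illustrative, hypothetical names):
+//
+//     if (Pte.u.Hard.Valid) {
+//         Frame = Pte.u.Hard.PageFrameNumber;
+//     } else if (Pte.u.Soft.Prototype) {
+//         Proto = MiPteToProto (&Pte);
+//     } else if (Pte.u.Soft.Transition) {
+//         Frame = Pte.u.Trans.PageFrameNumber;
+//     } else {
+//         ... page file or demand zero format ...
+//     }
+//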
diff --git a/private/ntos/mm/mips/setdirty.c b/private/ntos/mm/mips/setdirty.c
new file mode 100644
index 000000000..217311313
--- /dev/null
+++ b/private/ntos/mm/mips/setdirty.c
@@ -0,0 +1,125 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ setdirty.c
+
+Abstract:
+
+ This module contains the setting dirty bit routine for memory management.
+
+ MIPS R3000 specific.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1990.
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+ULONG MmSetDirtyCount; //fixfix - remove, temporary performance measurement
+
+VOID
+MiSetDirtyBit (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN ULONG PfnHeld
+ )
+
+/*++
+
+Routine Description:
+
+    This routine sets the dirty bit in the specified PTE and the modified
+    bit in the corresponding PFN element. If any page file space is
+    allocated, it is deallocated.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies a pointer to the corresponding valid PTE.
+
+ PfnHeld - Supplies TRUE if the PFN mutex is already held.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working set mutex held.
+
+--*/
+
+{
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ //
+ // The page is NOT copy on write, update the PTE setting both the
+    // dirty bit and the accessed bit. Note that as this PTE is in
+ // the TB, the TB must be flushed.
+ //
+
+ MmSetDirtyCount += 1; //fixfix - remove
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ TempPte = *PointerPte;
+ TempPte.u.Hard.Dirty = 1;
+ MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
+ *PointerPte = TempPte;
+
+ //
+ // Check state of PFN mutex and if not held, don't update PFN database.
+ //
+
+
+ if (PfnHeld) {
+
+ //
+        // Set the modified field in the PFN database, also, if the physical
+ // page is currently in a paging file, free up the page file space
+ // as the contents are now worthless.
+ //
+
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+
+ //
+ // This page is in page file format, deallocate the page file space.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+
+ //
+ // Change original PTE to indicate no page file space is reserved,
+ // otherwise the space will be deallocated when the PTE is
+ // deleted.
+ //
+
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+ Pfn1->u3.e1.Modified = 1;
+ }
+
+ //
+ // The TB entry must be flushed as the valid PTE with the dirty bit clear
+ // has been fetched into the TB. If it isn't flushed, another fault
+ // is generated as the dirty bit is not set in the cached TB entry.
+ //
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPte, FaultingAddress, TRUE);
+
+ return;
+}
diff --git a/private/ntos/mm/mips/sources b/private/ntos/mm/mips/sources
new file mode 100644
index 000000000..5377648ce
--- /dev/null
+++ b/private/ntos/mm/mips/sources
@@ -0,0 +1,5 @@
+MIPS_SOURCES=..\mips\initmips.c \
+ ..\mips\datamips.c \
+ ..\mips\debugsup.c \
+ ..\mips\hypermap.c \
+ ..\mips\setdirty.c
diff --git a/private/ntos/mm/mmfault.c b/private/ntos/mm/mmfault.c
new file mode 100644
index 000000000..4a9fc75d6
--- /dev/null
+++ b/private/ntos/mm/mmfault.c
@@ -0,0 +1,938 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ mmfault.c
+
+Abstract:
+
+ This module contains the handlers for access check, page faults
+ and write faults.
+
+Author:
+
+ Lou Perazzoli (loup) 6-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#define PROCESS_FOREGROUND_PRIORITY (9)
+
+ULONG MmDelayPageFaults;
+
+#if DBG
+ULONG MmProtoPteVadLookups = 0;
+ULONG MmProtoPteDirect = 0;
+ULONG MmAutoEvaluate = 0;
+#endif //DBG
+
+#if DBG
+PMMPTE MmPteHit = NULL;
+#endif
+
+#if DBG
+ULONG MmLargePageFaultError;
+#endif
+
+#if DBG
+extern ULONG MmPagingFileDebug[8192];
+#endif
+
+
+NTSTATUS
+MmAccessFault (
+ IN BOOLEAN StoreInstruction,
+ IN PVOID VirtualAddress,
+ IN KPROCESSOR_MODE PreviousMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called by the kernel on data or instruction
+ access faults. The access fault was detected due to either
+ an access violation, a PTE with the present bit clear, or a
+ valid PTE with the dirty bit clear and a write operation.
+
+ Also note that the access violation and the page fault could
+ occur because of the Page Directory Entry contents as well.
+
+ This routine determines what type of fault it is and calls
+ the appropriate routine to handle the page fault or the write
+ fault.
+
+Arguments:
+
+ StoreInstruction - Supplies TRUE (1) if the operation causes a write into
+ memory. Note this value must be 1 or 0.
+
+ VirtualAddress - Supplies the virtual address which caused the
+ fault.
+
+ PreviousMode - Supplies the mode (kernel or user) in which the fault
+ occurred.
+
+Return Value:
+
+ Returns the status of the fault handling operation. Can be one of:
+ - Success.
+ - Access Violation.
+ - Guard Page Violation.
+ - In-page Error.
+
+Environment:
+
+ Kernel mode, APC's disabled.
+
+--*/
+
+{
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE PointerProtoPte = NULL;
+ ULONG ProtectionCode;
+ MMPTE TempPte;
+ PEPROCESS CurrentProcess;
+ KIRQL PreviousIrql;
+ NTSTATUS status;
+ ULONG ProtectCode;
+ ULONG PageFrameIndex;
+ ULONG WorkingSetIndex;
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+
+
+ //
+ // Block APC's and acquire the working set mutex. This prevents any
+ // changes to the address space and it prevents valid PTEs from becoming
+ // invalid.
+ //
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_FAULTS) {
+
+ PETHREAD CurThread;
+
+ CurThread = PsGetCurrentThread();
+ DbgPrint("MM:**access fault - va %lx process %lx thread %lx\n",
+ VirtualAddress, CurrentProcess, CurThread);
+ }
+#endif //DBG
+
+ PreviousIrql = KeGetCurrentIrql ();
+
+ //
+ // Get the pointer to the PDE and the PTE for this page.
+ //
+
+ PointerPte = MiGetPteAddress (VirtualAddress);
+ PointerPde = MiGetPdeAddress (VirtualAddress);
+
+#if DBG
+ if (PointerPte == MmPteHit) {
+ DbgPrint("MM:pte hit at %lx\n",MmPteHit);
+ DbgBreakPoint();
+ }
+#endif
+
+ if ( PreviousIrql > APC_LEVEL ) {
+
+ //
+        // The PFN database lock is an executive spin-lock. The pager
+        // could get dirty faults or lock faults while it owns the
+        // PFN database lock.
+ //
+
+ MiCheckPdeForPagedPool (VirtualAddress);
+
+#ifdef _X86_
+ if (PointerPde->u.Hard.Valid == 1) {
+ if (PointerPde->u.Hard.LargePage == 1) {
+#if DBG
+ if (MmLargePageFaultError < 10) {
+ DbgPrint ("MM - fault on Large page %lx\n",VirtualAddress);
+ }
+ MmLargePageFaultError += 1;
+#endif //DBG
+ return STATUS_SUCCESS;
+ }
+ }
+#endif //X86
+
+ if ((PointerPde->u.Hard.Valid == 0) || (PointerPte->u.Hard.Valid == 0)) {
+ KdPrint(("MM:***PAGE FAULT AT IRQL > 1 Va %lx, IRQL %lx\n",VirtualAddress,
+ PreviousIrql));
+
+ //
+ // use reserved bit to signal fatal error to trap handlers
+ //
+
+ return STATUS_IN_PAGE_ERROR | 0x10000000;
+
+ }
+
+ if (StoreInstruction && (PointerPte->u.Hard.CopyOnWrite != 0)) {
+ KdPrint(("MM:***PAGE FAULT AT IRQL > 1 Va %lx, IRQL %lx\n",VirtualAddress,
+ PreviousIrql));
+
+ //
+ // use reserved bit to signal fatal error to trap handlers
+ //
+
+ return STATUS_IN_PAGE_ERROR | 0x10000000;
+
+ } else {
+
+ //
+            // The PTE is valid and accessible, another thread must
+            // have faulted the PTE in already, or the access bit
+            // is clear and this is an access fault; blindly set the
+            // access bit and dismiss the fault.
+ //
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_FAULTS) {
+ DbgPrint("MM:no fault found - pte is %lx\n", PointerPte->u.Long);
+ }
+#endif //DBG
+
+ MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, FALSE);
+ return STATUS_SUCCESS;
+ }
+ }
+
+ if (VirtualAddress >= (PVOID)MM_SYSTEM_RANGE_START) {
+
+ //
+ // This is a fault in the system address space. User
+ // mode access is not allowed.
+ //
+
+#if defined(_X86_) || defined(_ALPHA_) || defined(_PPC_)
+ if (PreviousMode == UserMode) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+#endif // _X86_ || _ALPHA_ || _PPC_
+
+RecheckPde:
+
+ if (PointerPde->u.Hard.Valid == 1) {
+#ifdef _X86_
+ if (PointerPde->u.Hard.LargePage == 1) {
+#if DBG
+ if (MmLargePageFaultError < 10) {
+ DbgPrint ("MM - fault on Large page %lx\n",VirtualAddress);
+ }
+ MmLargePageFaultError += 1;
+#endif //DBG
+ return STATUS_SUCCESS;
+ }
+#endif //X86
+
+ if (PointerPte->u.Hard.Valid == 1) {
+
+            //
+            // Acquire the PFN lock, check to see if the address is still
+            // valid and, if writable, update the dirty bit.
+            //
+
+ LOCK_PFN (OldIrql);
+ TempPte = *(volatile MMPTE *)PointerPte;
+ if (TempPte.u.Hard.Valid == 1) {
+ MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, TRUE);
+ }
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+ }
+ } else {
+
+ //
+ // Due to G-bits in kernel mode code, accesses to paged pool
+ // PDEs may not fault even though the PDE is not valid. Make
+ // sure the PDE is valid so PteFrames in the PFN database are
+ // tracked properly.
+ //
+
+ MiCheckPdeForPagedPool (VirtualAddress);
+
+ if (PointerPde->u.Hard.Valid == 0) {
+ KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
+ (ULONG)VirtualAddress,
+ StoreInstruction,
+ PreviousMode,
+ 2);
+ return STATUS_SUCCESS;
+ }
+
+ //
+ // Now that the PDE is valid, go look at the PTE again.
+ //
+
+ goto RecheckPde;
+ }
+
+ if ((VirtualAddress < (PVOID)PTE_BASE) ||
+ (VirtualAddress > (PVOID)HYPER_SPACE_END)) {
+
+ //
+ // Acquire system working set lock. While this lock
+ // is held, no pages may go from valid to invalid.
+ //
+ // HOWEVER - transition pages may go to valid, but
+ // may not be added to the working set list. This
+ // is done in the cache manager support routines to
+ // shortcut faults on transition prototype PTEs.
+ //
+
+ if (PsGetCurrentThread() == MmSystemLockOwner) {
+
+ //
+ // Recursively trying to acquire the system working set
+ // fast mutex - cause an IRQL > 1 bug check.
+ //
+
+ return STATUS_IN_PAGE_ERROR | 0x10000000;
+ }
+
+ LOCK_SYSTEM_WS (PreviousIrql);
+
+ TempPte = *PointerPte;
+
+#ifdef MIPS
+ ASSERT ((TempPte.u.Hard.Global == 1) &&
+ (PointerPde->u.Hard.Global == 1));
+#endif //MIPS
+
+ if (TempPte.u.Hard.Valid != 0) {
+
+ //
+ // PTE is already valid, return.
+ //
+
+ LOCK_PFN (OldIrql);
+ TempPte = *(volatile MMPTE *)PointerPte;
+ if (TempPte.u.Hard.Valid == 1) {
+ MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, TRUE);
+ }
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_SYSTEM_WS (PreviousIrql);
+ return STATUS_SUCCESS;
+
+ } else if (TempPte.u.Soft.Prototype != 0) {
+
+ //
+            // This is a PTE in prototype format, locate the corresponding
+ // prototype PTE.
+ //
+
+ PointerProtoPte = MiPteToProto (&TempPte);
+ } else if ((TempPte.u.Soft.Transition == 0) &&
+ (TempPte.u.Soft.Protection == 0)) {
+
+ //
+ // Page file format. If the protection is ZERO, this
+ // is a page of free system PTEs - bugcheck!
+ //
+
+ KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
+ (ULONG)VirtualAddress,
+ StoreInstruction,
+ PreviousMode,
+ 0);
+ return STATUS_SUCCESS;
+ }
+//fixfix remove this - also see procsup.c / mminpagekernelstack.
+ else {
+ if (TempPte.u.Soft.Protection == 31) {
+ KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA,
+ (ULONG)VirtualAddress,
+ StoreInstruction,
+ PreviousMode,
+ 0);
+
+ }
+ }
+//end of fixfix
+ status = MiDispatchFault (StoreInstruction,
+ VirtualAddress,
+ PointerPte,
+ PointerProtoPte,
+ NULL);
+
+ ASSERT (KeGetCurrentIrql() == APC_LEVEL);
+ PageFrameIndex = MmSystemCacheWs.PageFaultCount;
+
+ if (MmSystemCacheWs.AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) {
+ MiGrowWsleHash (&MmSystemCacheWs, TRUE);
+ LOCK_EXPANSION_IF_ALPHA (OldIrql);
+ MmSystemCacheWs.AllowWorkingSetAdjustment = TRUE;
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql);
+ }
+ UNLOCK_SYSTEM_WS (PreviousIrql);
+
+ if ((PageFrameIndex & 0x3FFFF) == 0x30000) {
+
+ //
+ // The system cache is taking too many faults, delay
+ // execution so modified page writer gets a quick shot and
+ // increase the working set size.
+ //
+
+ KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime);
+ }
+ return status;
+ } else {
+ if (MiCheckPdeForPagedPool (VirtualAddress) == STATUS_WAIT_1) {
+ return STATUS_SUCCESS;
+ }
+ }
+ }
+
+ if (MmDelayPageFaults ||
+ ((MmModifiedPageListHead.Total >= (MmModifiedPageMaximum + 100)) &&
+ (MmAvailablePages < (1024*1024 / PAGE_SIZE)) &&
+ (CurrentProcess->ModifiedPageCount > ((64*1024)/PAGE_SIZE)))) {
+
+ //
+ // This process has placed more than 64k worth of pages on the modified
+ // list. Delay for a short period and set the count to zero.
+ //
+
+ KeDelayExecutionThread (KernelMode,
+ FALSE,
+ (CurrentProcess->Pcb.BasePriority < PROCESS_FOREGROUND_PRIORITY) ?
+ &MmHalfSecond : &Mm30Milliseconds);
+ CurrentProcess->ModifiedPageCount = 0;
+ }
+
+ //
+ // FAULT IN USER SPACE OR PAGE TABLE PAGES.
+ //
+
+ //
+ // Block APC's and acquire the working set lock.
+ //
+
+ KeRaiseIrql (APC_LEVEL, &PreviousIrql);
+
+
+ LOCK_WS (CurrentProcess);
+
+ //
+ // Locate the Page Directory Entry which maps this virtual
+    // address and check for accessibility and validity.
+ //
+
+ //
+ // Check to see if the page table page (PDE entry) is valid.
+ // If not, the page table page must be made valid first.
+ //
+
+ if (PointerPde->u.Hard.Valid == 0) {
+
+ //
+        // If the PDE is zero, check to see if there is a virtual address
+ // mapped at this location, and if so create the necessary
+ // structures to map it.
+ //
+
+ if ((PointerPde->u.Long == MM_ZERO_PTE) ||
+ (PointerPde->u.Long == MM_ZERO_KERNEL_PTE)) {
+ PointerProtoPte = MiCheckVirtualAddress (VirtualAddress,
+ &ProtectCode);
+
+#ifdef LARGE_PAGES
+ if (ProtectCode == MM_LARGE_PAGES) {
+ status = STATUS_SUCCESS;
+ goto ReturnStatus2;
+ }
+#endif //LARGE_PAGES
+
+ if (ProtectCode == MM_NOACCESS) {
+ status = STATUS_ACCESS_VIOLATION;
+ MiCheckPdeForPagedPool (VirtualAddress);
+ if (PointerPde->u.Hard.Valid == 1) {
+ status = STATUS_SUCCESS;
+ }
+
+#if DBG
+ if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) &&
+ (status == STATUS_ACCESS_VIOLATION)) {
+ DbgPrint("MM:access violation - %lx\n",VirtualAddress);
+ MiFormatPte(PointerPte);
+ DbgBreakPoint();
+ }
+#endif //DEBUG
+
+ goto ReturnStatus2;
+
+ } else {
+
+ //
+ // Build a demand zero PDE and operate on it.
+ //
+
+ *PointerPde = DemandZeroPde;
+ }
+ }
+
+ //
+ // The PDE is not valid, call the page fault routine passing
+ // in the address of the PDE. If the PDE is valid, determine
+ // the status of the corresponding PTE.
+ //
+
+ status = MiDispatchFault (TRUE, //page table page always written
+ PointerPte, //Virtual address
+ PointerPde, // PTE (PDE in this case)
+ NULL,
+ CurrentProcess);
+
+ ASSERT (KeGetCurrentIrql() == APC_LEVEL);
+ if (PointerPde->u.Hard.Valid == 0) {
+
+ //
+ // The PDE is not valid, return the status.
+ //
+ goto ReturnStatus1;
+ }
+
+ //KeFillEntryTb ((PHARDWARE_PTE)PointerPde, (PVOID)PointerPte, TRUE);
+
+ MI_SET_PAGE_DIRTY (PointerPde, PointerPte, FALSE);
+
+ //
+        // Now that the PDE is accessible, get the PTE - let this fall
+ // through.
+ //
+ }
+
+ //
+    // The PDE is valid and accessible, get the PTE contents.
+ //
+
+ TempPte = *PointerPte;
+ if (TempPte.u.Hard.Valid != 0) {
+
+ //
+        // The PTE is valid and accessible, is this a write fault,
+        // copy on write, or setting of some dirty bit?
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ MiFormatPte(PointerPte);
+ }
+#endif //DBG
+
+ status = STATUS_SUCCESS;
+
+ if (StoreInstruction) {
+
+ //
+ // This was a write operation. If the copy on write
+ // bit is set in the PTE perform the copy on write,
+ // else check to ensure write access to the PTE.
+ //
+
+ if (TempPte.u.Hard.CopyOnWrite != 0) {
+ MiCopyOnWrite (VirtualAddress, PointerPte);
+ status = STATUS_PAGE_FAULT_COPY_ON_WRITE;
+ goto ReturnStatus2;
+
+ } else {
+ if (TempPte.u.Hard.Write == 0) {
+ status = STATUS_ACCESS_VIOLATION;
+ }
+ }
+#if DBG
+ } else {
+
+ //
+        // The PTE is valid and accessible, another thread must
+        // have faulted the PTE in already, or the access bit
+        // is clear and this is an access fault; blindly set the
+        // access bit and dismiss the fault.
+ //
+
+ if (MmDebug & MM_DBG_SHOW_FAULTS) {
+ DbgPrint("MM:no fault found - pte is %lx\n", PointerPte->u.Long);
+ }
+#endif //DBG
+ }
+
+ if (status == STATUS_SUCCESS) {
+ LOCK_PFN (OldIrql);
+ if (PointerPte->u.Hard.Valid != 0) {
+ MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, TRUE);
+ }
+ UNLOCK_PFN (OldIrql);
+ }
+
+ goto ReturnStatus2;
+ }
+
+ //
+    // If the PTE is zero, check to see if there is a virtual address
+ // mapped at this location, and if so create the necessary
+ // structures to map it.
+ //
+
+ //
+ // Check explicitly for demand zero pages.
+ //
+
+ if (TempPte.u.Long == MM_DEMAND_ZERO_WRITE_PTE) {
+ MiResolveDemandZeroFault (VirtualAddress,
+ PointerPte,
+ CurrentProcess,
+ 0);
+
+ status = STATUS_PAGE_FAULT_DEMAND_ZERO;
+ goto ReturnStatus1;
+ }
+
+ if ((TempPte.u.Long == MM_ZERO_PTE) ||
+ (TempPte.u.Long == MM_ZERO_KERNEL_PTE)) {
+
+ //
+        // The PTE needs to be evaluated with respect to its virtual
+        // address descriptor (VAD). At this point there are 3
+        // possibilities: a bogus address, demand zero, or a reference
+        // to a prototype PTE.
+ //
+
+ PointerProtoPte = MiCheckVirtualAddress (VirtualAddress,
+ &ProtectionCode);
+ if (ProtectionCode == MM_NOACCESS) {
+ status = STATUS_ACCESS_VIOLATION;
+
+ //
+ // Check to make sure this is not a page table page for
+ // paged pool which needs extending.
+ //
+
+ MiCheckPdeForPagedPool (VirtualAddress);
+ if (PointerPte->u.Hard.Valid == 1) {
+ status = STATUS_SUCCESS;
+ }
+
+#if DBG
+ if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) &&
+ (status == STATUS_ACCESS_VIOLATION)) {
+ DbgPrint("MM:access vio - %lx\n",VirtualAddress);
+ MiFormatPte(PointerPte);
+ DbgBreakPoint();
+ }
+#endif //DEBUG
+ goto ReturnStatus2;
+ }
+
+ //
+        // Increment the count of non-zero page table entries for this
+ // page table.
+ //
+
+ if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) {
+ MmWorkingSetList->UsedPageTableEntries
+ [MiGetPdeOffset(VirtualAddress)] += 1;
+ }
+
+ //
+ // Is this page a guard page?
+ //
+
+ if (ProtectionCode & MM_GUARD_PAGE) {
+
+ //
+ // This is a guard page exception.
+ //
+
+ PointerPte->u.Soft.Protection = ProtectionCode & ~MM_GUARD_PAGE;
+
+ if (PointerProtoPte != NULL) {
+
+ //
+ // This is a prototype PTE, build the PTE to not
+ // be a guard page.
+ //
+
+ PointerPte->u.Soft.PageFileHigh = 0xFFFFF;
+ PointerPte->u.Soft.Prototype = 1;
+ }
+
+ UNLOCK_WS (CurrentProcess);
+ KeLowerIrql (PreviousIrql);
+ return MiCheckForUserStackOverflow (VirtualAddress);
+ }
+
+ if (PointerProtoPte == NULL) {
+
+ //ASSERT (KeReadStateMutant (&CurrentProcess->WorkingSetLock) == 0);
+
+ //
+ // Assert that this is not for a PDE.
+ //
+
+ if (PointerPde == MiGetPdeAddress(PTE_BASE)) {
+
+ //
+ // This PTE is really a PDE, set contents as such.
+ //
+
+ *PointerPte = DemandZeroPde;
+ } else {
+ PointerPte->u.Soft.Protection = ProtectionCode;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // If a fork operation is in progress and the faulting thread
+            // is not the thread performing the fork operation, block until
+ // the fork is completed.
+ //
+
+ if ((CurrentProcess->ForkInProgress != NULL) &&
+ (CurrentProcess->ForkInProgress != PsGetCurrentThread())) {
+ MiWaitForForkToComplete (CurrentProcess);
+ status = STATUS_SUCCESS;
+ UNLOCK_PFN (OldIrql);
+ goto ReturnStatus1;
+ }
+
+ if (!MiEnsureAvailablePageOrWait (CurrentProcess,
+ VirtualAddress)) {
+
+ ULONG Color;
+ Color = MI_PAGE_COLOR_VA_PROCESS (VirtualAddress,
+ &CurrentProcess->NextPageColor);
+ PageFrameIndex = MiRemoveZeroPageIfAny (Color);
+ if (PageFrameIndex == 0) {
+ PageFrameIndex = MiRemoveAnyPage (Color);
+ UNLOCK_PFN (OldIrql);
+ MiZeroPhysicalPage (PageFrameIndex, Color);
+ LOCK_PFN (OldIrql);
+ }
+
+ CurrentProcess->NumberOfPrivatePages += 1;
+ MmInfoCounters.DemandZeroCount += 1;
+ MiInitializePfn (PageFrameIndex, PointerPte, 1);
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // As this page is demand zero, set the modified bit in the
+ // PFN database element and set the dirty bit in the PTE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ PointerPte->u.Soft.Protection,
+ PointerPte);
+
+ if (TempPte.u.Hard.Write != 0) {
+ MI_SET_PTE_DIRTY (TempPte);
+ }
+
+ *PointerPte = TempPte;
+
+ ASSERT (Pfn1->u1.WsIndex == 0);
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ WorkingSetIndex = MiLocateAndReserveWsle (&CurrentProcess->Vm);
+ MiUpdateWsle (&WorkingSetIndex,
+ VirtualAddress,
+ MmWorkingSetList,
+ Pfn1);
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPte,
+ VirtualAddress,
+ FALSE);
+ } else {
+ UNLOCK_PFN (OldIrql);
+ }
+
+ status = STATUS_PAGE_FAULT_DEMAND_ZERO;
+ goto ReturnStatus1;
+
+ } else {
+
+ //
+ // This is a prototype PTE.
+ //
+
+ if (ProtectionCode == MM_UNKNOWN_PROTECTION) {
+
+ //
+ // The protection field is stored in the prototype PTE.
+ //
+
+ PointerPte->u.Long = MiProtoAddressForPte (PointerProtoPte);
+
+ } else {
+
+ *PointerPte = PrototypePte;
+ PointerPte->u.Soft.Protection = ProtectionCode;
+ }
+ TempPte = *PointerPte;
+ }
+
+ } else {
+
+ //
+ // The PTE is non-zero and not valid, see if it is a prototype PTE.
+ //
+
+ ProtectionCode = TempPte.u.Soft.Protection;
+
+ if (TempPte.u.Soft.Prototype != 0) {
+ if (TempPte.u.Soft.PageFileHigh == 0xFFFFF) {
+#if DBG
+ MmProtoPteVadLookups += 1;
+#endif //DBG
+ PointerProtoPte = MiCheckVirtualAddress (VirtualAddress,
+ &ProtectCode);
+
+ } else {
+#if DBG
+ MmProtoPteDirect += 1;
+#endif //DBG
+
+ //
+ // Protection is in the prototype PTE, indicate an
+ // access check should not be performed on the current PTE.
+ //
+
+ PointerProtoPte = MiPteToProto (&TempPte);
+ ProtectionCode = MM_UNKNOWN_PROTECTION;
+
+ //
+                // Check to see if the proto protection has been overridden.
+ //
+
+ if (TempPte.u.Proto.ReadOnly != 0) {
+ ProtectionCode = MM_READONLY;
+ }
+ }
+ }
+ }
+
+ if (ProtectionCode != MM_UNKNOWN_PROTECTION) {
+ status = MiAccessCheck (PointerPte,
+ StoreInstruction,
+ PreviousMode,
+ ProtectionCode );
+
+ if (status != STATUS_SUCCESS) {
+#if DBG
+ if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) && (status == STATUS_ACCESS_VIOLATION)) {
+ DbgPrint("MM:access violate - %lx\n",VirtualAddress);
+ MiFormatPte(PointerPte);
+ DbgBreakPoint();
+ }
+#endif //DEBUG
+
+ UNLOCK_WS (CurrentProcess);
+ KeLowerIrql (PreviousIrql);
+
+ //
+ // Check to see if this is a guard page violation
+ // and if so, should the user's stack be extended.
+        // and if so, whether the user's stack should be extended.
+
+ if (status == STATUS_GUARD_PAGE_VIOLATION) {
+ return MiCheckForUserStackOverflow (VirtualAddress);
+ }
+
+ return status;
+ }
+ }
+
+ //
+ // This is a page fault, invoke the page fault handler.
+ //
+
+ if (PointerProtoPte != NULL) {
+
+ //
+ // Lock page containing prototype PTEs in memory by
+ // incrementing the reference count for the page.
+ //
+
+
+ if (!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte)) {
+ PointerPde = MiGetPteAddress (PointerProtoPte);
+ LOCK_PFN (OldIrql);
+ if (PointerPde->u.Hard.Valid == 0) {
+ MiMakeSystemAddressValidPfn (PointerProtoPte);
+ }
+ Pfn1 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
+ Pfn1->u3.e2.ReferenceCount += 1;
+ ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
+ UNLOCK_PFN (OldIrql);
+ }
+ }
+ status = MiDispatchFault (StoreInstruction,
+ VirtualAddress,
+ PointerPte,
+ PointerProtoPte,
+ CurrentProcess);
+
+ if (PointerProtoPte != NULL) {
+
+ //
+ // Unlock page containing prototype PTEs.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte)) {
+ LOCK_PFN (OldIrql);
+ ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
+ Pfn1->u3.e2.ReferenceCount -= 1;
+ UNLOCK_PFN (OldIrql);
+ }
+ }
+
+ReturnStatus1:
+
+ ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
+ if (CurrentProcess->Vm.AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) {
+ MiGrowWsleHash (&CurrentProcess->Vm, FALSE);
+ LOCK_EXPANSION_IF_ALPHA (OldIrql);
+ CurrentProcess->Vm.AllowWorkingSetAdjustment = TRUE;
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql);
+ }
+
+ReturnStatus2:
+
+ PageFrameIndex = CurrentProcess->Vm.PageFaultCount;
+
+ UNLOCK_WS (CurrentProcess);
+ KeLowerIrql (PreviousIrql);
+
+ if ((PageFrameIndex & 0x3FFFF) == 0x30000) {
+ if (PsGetCurrentThread()->Tcb.Priority >= LOW_REALTIME_PRIORITY) {
+
+ //
+ // This thread is realtime and taking many faults, delay
+ // execution so modified page writer gets a quick shot and
+ // increase the working set size.
+ //
+
+ KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime);
+ MmAdjustWorkingSetSize (
+ (CurrentProcess->Vm.MinimumWorkingSetSize + 10) << PAGE_SHIFT,
+ (CurrentProcess->Vm.MaximumWorkingSetSize + 10) << PAGE_SHIFT,
+ FALSE);
+ }
+ }
+
+ return status;
+}
diff --git a/private/ntos/mm/mminit.c b/private/ntos/mm/mminit.c
new file mode 100644
index 000000000..3702ec37a
--- /dev/null
+++ b/private/ntos/mm/mminit.c
@@ -0,0 +1,1980 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ mminit.c
+
+Abstract:
+
+ This module contains the initialization for the memory management
+ system.
+
+Author:
+
+ Lou Perazzoli (loup) 20-Mar-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+MMPTE MmSharedUserDataPte;
+
+extern ULONG MmPagedPoolCommit;
+
+extern ULONG MmHeapSegmentReserve;
+extern ULONG MmHeapSegmentCommit;
+extern ULONG MmHeapDeCommitTotalFreeThreshold;
+extern ULONG MmHeapDeCommitFreeBlockThreshold;
+extern MMINPAGE_SUPPORT_LIST MmInPageSupportList;
+extern MMEVENT_COUNT_LIST MmEventCountList;
+extern KMUTANT MmSystemLoadLock;
+extern ULONG MmSystemPtesStart[MaximumPtePoolTypes];
+
+ULONG MmSubsectionBase;
+ULONG MmSubsectionTopPage;
+ULONG MmDataClusterSize;
+ULONG MmCodeClusterSize;
+ULONG MmResidentAvailableAtInit;
+KEVENT MmImageMappingPteEvent;
+PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock;
+
+#if DBG
+
+PRTL_EVENT_ID_INFO MiAllocVmEventId;
+PRTL_EVENT_ID_INFO MiFreeVmEventId;
+
+#endif // DBG
+
+VOID
+MiEnablePagingTheExecutive(
+ VOID
+ );
+
+VOID
+MiEnablePagingOfDriverAtInit (
+ IN PMMPTE PointerPte,
+ IN PMMPTE LastPte
+ );
+
+VOID
+MiBuildPagedPool (
+ );
+
+VOID
+MiMergeMemoryLimit (
+ IN OUT PPHYSICAL_MEMORY_DESCRIPTOR Memory,
+ IN ULONG StartPage,
+ IN ULONG NoPages
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MmInitSystem)
+#pragma alloc_text(INIT,MmInitializeMemoryLimits)
+#pragma alloc_text(INIT,MiMergeMemoryLimit)
+#pragma alloc_text(INIT,MmFreeLoaderBlock)
+#pragma alloc_text(INIT,MiBuildPagedPool)
+#pragma alloc_text(INIT,MiFindInitializationCode)
+#pragma alloc_text(INIT,MiEnablePagingTheExecutive)
+#pragma alloc_text(INIT,MiEnablePagingOfDriverAtInit)
+#pragma alloc_text(PAGELK,MiFreeInitializationCode)
+#endif
+
+#define MM_MAX_LOADER_BLOCKS 20
+
+//
+// The following constants are based on the number of PAGES, not the
+// memory size. For convenience the number of pages is calculated
+// based on a 4K page size. Hence 12MB with 4K pages is 3072 pages.
+//
+
+#define MM_SMALL_SYSTEM ((13*1024*1024) / 4096)
+
+#define MM_MEDIUM_SYSTEM ((19*1024*1024) / 4096)
+
+#define MM_MIN_INITIAL_PAGED_POOL ((32*1024*1024) >> PAGE_SHIFT)
+
+#define MM_DEFAULT_IO_LOCK_LIMIT (512 * 1024)
+
+extern ULONG MmMaximumWorkingSetSize;
+
+ULONG MmSystemPageDirectory;
+
+PMMPTE MmSystemPagePtes;
+
+ULONG MmTotalSystemCodePages;
+
+MM_SYSTEMSIZE MmSystemSize;
+
+ULONG MmLargeSystemCache;
+
+ULONG MmProductType;
+
+LIST_ENTRY MmLoadedUserImageList;
+
+BOOLEAN
+MmInitSystem (
+ IN ULONG Phase,
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock,
+ IN PPHYSICAL_MEMORY_DESCRIPTOR PhysicalMemoryBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called during Phase 0, phase 1 and at the end
+ of phase 1 ("phase 2") initialization.
+
+ Phase 0 initializes the memory management paging functions,
+ nonpaged and paged pool, the PFN database, etc.
+
+ Phase 1 initializes the section objects, the physical memory
+ object, and starts the memory management system threads.
+
+ Phase 2 frees memory used by the OsLoader.
+
+Arguments:
+
+ Phase - System initialization phase.
+
+    LoaderBlock - Supplies a pointer to the system loader block.
+
+    PhysicalMemoryBlock - Supplies a pointer to the physical memory
+        descriptor block built from the loader block.
+
+Return Value:
+
+ Returns TRUE if the initialization was successful.
+
+Environment:
+
+ Kernel Mode Only. System initialization.
+
+--*/
+
+{
+ HANDLE ThreadHandle;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE StartPde;
+ PMMPTE StartingPte;
+ PMMPTE EndPde;
+ PMMPFN Pfn1;
+ ULONG i, j;
+ ULONG PageFrameIndex;
+ MMPTE TempPte;
+ KIRQL OldIrql;
+
+ BOOLEAN IncludeType[LoaderMaximum];
+ ULONG MemoryAlloc[(sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
+ sizeof(PHYSICAL_MEMORY_RUN)*MAX_PHYSICAL_MEMORY_FRAGMENTS) /
+ sizeof(ULONG)];
+ PPHYSICAL_MEMORY_DESCRIPTOR Memory;
+
+ //
+ // Make sure structure alignment is okay.
+ //
+
+ if (Phase == 0) {
+ MmThrottleTop = 450;
+ MmThrottleBottom = 127;
+
+#if DBG
+
+ //
+ // A few sanity checks to ensure things are as they should be.
+ //
+
+ if (sizeof(MMPFN) != 24) {
+ DbgPrint("pfn element size is not 24\n");
+ }
+
+ if ((sizeof(MMWSL) % 8) != 0) {
+ DbgPrint("working set list is not a quadword sized structure\n");
+ }
+
+ if ((sizeof(CONTROL_AREA) % 8) != 0) {
+ DbgPrint("control area list is not a quadword sized structure\n");
+ }
+
+ if ((sizeof(SUBSECTION) % 8) != 0) {
+ DbgPrint("subsection list is not a quadword sized structure\n");
+ }
+
+ //
+        // Some checks to make sure prototype PTEs in either paged or
+        // nonpaged pool (prototype PTEs for paged pool are here) can
+        // be encoded in PTE format.
+
+ PointerPte = (PMMPTE)MmPagedPoolStart;
+ i = MiProtoAddressForPte (PointerPte);
+ TempPte.u.Long = i;
+ PointerPde = MiPteToProto(&TempPte);
+ if (PointerPte != PointerPde) {
+ DbgPrint("unable to map start of paged pool as prototype pte %lx %lx\n",
+ PointerPde, PointerPte);
+ }
+
+ PointerPte =
+ (PMMPTE)((ULONG)MM_NONPAGED_POOL_END & ~((1 << PTE_SHIFT) - 1));
+ i = MiProtoAddressForPte (PointerPte);
+ TempPte.u.Long = i;
+ PointerPde = MiPteToProto(&TempPte);
+ if (PointerPte != PointerPde) {
+ DbgPrint("unable to map end of nonpaged pool as prototype pte %lx %lx\n",
+ PointerPde, PointerPte);
+ }
+
+ PointerPte = (PMMPTE)(((ULONG)NON_PAGED_SYSTEM_END - 0x37000 + PAGE_SIZE-1) & ~(PAGE_SIZE-1));
+
+ for (j = 0; j < 20; j++) {
+ i = MiProtoAddressForPte (PointerPte);
+ TempPte.u.Long = i;
+ PointerPde = MiPteToProto(&TempPte);
+ if (PointerPte != PointerPde) {
+ DbgPrint("unable to map end of nonpaged pool as prototype pte %lx %lx\n",
+ PointerPde, PointerPte);
+ }
+ PointerPte++;
+
+ }
+
+ PointerPte = (PMMPTE)(((ULONG)MM_NONPAGED_POOL_END - 0x133448) & ~7);
+ i = MiGetSubsectionAddressForPte (PointerPte);
+ TempPte.u.Long = i;
+ PointerPde = (PMMPTE)MiGetSubsectionAddress(&TempPte);
+ if (PointerPte != PointerPde) {
+ DbgPrint("unable to map end of nonpaged pool as section pte %lx %lx\n",
+ PointerPde, PointerPte);
+
+ MiFormatPte(&TempPte);
+ }
+
+ //
+ // End of sanity checks.
+ //
+#endif //dbg
+
+ InitializeListHead( &MmLoadedUserImageList );
+
+ MmCriticalSectionTimeout.QuadPart = Int32x32To64(
+ MmCritsectTimeoutSeconds,
+ -10000000);
+
+
+ //
+ // Initialize PFN database mutex and System Address Space creation
+ // mutex.
+ //
+
+ MmNumberOfColors = MM_MAXIMUM_NUMBER_OF_COLORS;
+
+
+ ExInitializeFastMutex (&MmSectionCommitMutex);
+ ExInitializeFastMutex (&MmSectionBasedMutex);
+
+ KeInitializeMutant (&MmSystemLoadLock, FALSE);
+
+ KeInitializeEvent (&MmAvailablePagesEvent, NotificationEvent, TRUE);
+ KeInitializeEvent (&MmAvailablePagesEventHigh, NotificationEvent, TRUE);
+ KeInitializeEvent (&MmMappedFileIoComplete, NotificationEvent, FALSE);
+ KeInitializeEvent (&MmImageMappingPteEvent, NotificationEvent, FALSE);
+ KeInitializeEvent (&MmZeroingPageEvent, SynchronizationEvent, FALSE);
+ KeInitializeEvent (&MmCollidedFlushEvent, NotificationEvent, FALSE);
+ KeInitializeEvent (&MmCollidedLockEvent, NotificationEvent, FALSE);
+
+ InitializeListHead (&MmWorkingSetExpansionHead.ListHead);
+ InitializeListHead (&MmInPageSupportList.ListHead);
+ InitializeListHead (&MmEventCountList.ListHead);
+
+ MmZeroingPageThreadActive = FALSE;
+
+ //
+        // Compute the physical memory block yet again.
+ //
+
+ Memory = (PPHYSICAL_MEMORY_DESCRIPTOR)&MemoryAlloc;
+ Memory->NumberOfRuns = MAX_PHYSICAL_MEMORY_FRAGMENTS;
+
+ // include all memory types ...
+ for (i=0; i < LoaderMaximum; i++) {
+ IncludeType[i] = TRUE;
+ }
+
+        // ... except these..
+ IncludeType[LoaderBad] = FALSE;
+ IncludeType[LoaderFirmwarePermanent] = FALSE;
+ IncludeType[LoaderSpecialMemory] = FALSE;
+
+ MmInitializeMemoryLimits(LoaderBlock, IncludeType, Memory);
+
+ //
+ // Add all memory runs in PhysicalMemoryBlock to Memory
+ //
+
+ for (i=0; i < PhysicalMemoryBlock->NumberOfRuns; i++) {
+ MiMergeMemoryLimit (
+ Memory,
+ PhysicalMemoryBlock->Run[i].BasePage,
+ PhysicalMemoryBlock->Run[i].PageCount
+ );
+ }
+#ifdef MIPS
+
+ //
+ // On mips machines these first two pages of physical memory are
+ // used for important stuff.
+ //
+
+ Memory->Run[Memory->NumberOfRuns].BasePage = 0;
+ Memory->Run[Memory->NumberOfRuns].PageCount = 2;
+ Memory->NumberOfRuns += 1;
+ Memory->NumberOfPages += 2;
+#endif //MIPS
+ //
+ // Sort and merge adjacent runs
+ //
+
+ for (i=0; i < Memory->NumberOfRuns; i++) {
+ for (j=i+1; j < Memory->NumberOfRuns; j++) {
+ if (Memory->Run[j].BasePage < Memory->Run[i].BasePage) {
+ // swap runs
+ PhysicalMemoryBlock->Run[0] = Memory->Run[j];
+ Memory->Run[j] = Memory->Run[i];
+ Memory->Run[i] = PhysicalMemoryBlock->Run[0];
+ }
+
+ if (Memory->Run[i].BasePage + Memory->Run[i].PageCount ==
+ Memory->Run[j].BasePage) {
+ // merge runs
+ Memory->NumberOfRuns -= 1;
+ Memory->Run[i].PageCount += Memory->Run[j].PageCount;
+ Memory->Run[j] = Memory->Run[Memory->NumberOfRuns];
+ i -= 1;
+ break;
+ }
+ }
+ }
+
+
+ if (MmNumberOfSystemPtes == 0) {
+ if (Memory->NumberOfPages < MM_MEDIUM_SYSTEM) {
+ MmNumberOfSystemPtes = MM_MINIMUM_SYSTEM_PTES;
+ } else {
+ MmNumberOfSystemPtes = MM_DEFAULT_SYSTEM_PTES;
+ if (Memory->NumberOfPages > 8192) {
+ MmNumberOfSystemPtes += MmNumberOfSystemPtes;
+ }
+ }
+ }
+
+ if (MmNumberOfSystemPtes > MM_MAXIMUM_SYSTEM_PTES) {
+ MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES;
+ }
+
+ if (MmNumberOfSystemPtes < MM_MINIMUM_SYSTEM_PTES) {
+ MmNumberOfSystemPtes = MM_MINIMUM_SYSTEM_PTES;
+ }
+
+ if ( !MmHeapSegmentReserve ) {
+ MmHeapSegmentReserve = 1024 * 1024;
+ }
+
+ if ( !MmHeapSegmentCommit ) {
+ MmHeapSegmentCommit = PAGE_SIZE * 2;
+ }
+
+ if ( !MmHeapDeCommitTotalFreeThreshold ) {
+ MmHeapDeCommitTotalFreeThreshold = 64 * 1024;
+ }
+
+ if ( !MmHeapDeCommitFreeBlockThreshold ) {
+ MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE;
+ }
+
+#if DBG
+ if (MmSpecialPoolTag != 0) {
+ MmNumberOfSystemPtes += 25000;
+ }
+#endif //DBG
+
+ //
+ // Initialize the machine dependent portion of the hardware.
+ //
+
+ ExInitializeResource (&MmSystemWsLock);
+
+ MiInitMachineDependent (LoaderBlock);
+
+ j = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
+ (sizeof(PHYSICAL_MEMORY_RUN) *
+ (Memory->NumberOfRuns - 1)));
+
+ MmPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ j,
+ ' mM');
+
+ RtlCopyMemory (MmPhysicalMemoryBlock, Memory, j);
+
+ //
+ // Setup the system size as small, medium, or large depending
+ // on memory available.
+ //
+ // For internal MM tuning, the following applies
+ //
+ // 12Mb is small
+ // 12-19 is medium
+ // > 19 is large
+ //
+ //
+ // For all other external tuning,
+ // < 19 is small
+ // 19 - 31 is medium for workstation
+ // 19 - 63 is medium for server
+ // >= 32 is large for workstation
+ // >= 64 is large for server
+ //
+
+ MmReadClusterSize = 7;
+ if (MmNumberOfPhysicalPages <= MM_SMALL_SYSTEM ) {
+ MmSystemSize = MmSmallSystem;
+ MmMaximumDeadKernelStacks = 0;
+ MmModifiedPageMinimum = 40;
+ MmModifiedPageMaximum = 100;
+ MmDataClusterSize = 0;
+ MmCodeClusterSize = 1;
+ MmReadClusterSize = 2;
+ } else if (MmNumberOfPhysicalPages <= MM_MEDIUM_SYSTEM ) {
+ MmSystemSize = MmSmallSystem;
+ MmMaximumDeadKernelStacks = 2;
+ MmModifiedPageMinimum = 80;
+ MmModifiedPageMaximum = 150;
+ MmSystemCacheWsMinimum += 100;
+ MmSystemCacheWsMaximum += 150;
+ MmDataClusterSize = 1;
+ MmCodeClusterSize = 2;
+ MmReadClusterSize = 4;
+ } else {
+ MmSystemSize = MmMediumSystem;
+ MmMaximumDeadKernelStacks = 5;
+ MmModifiedPageMinimum = 150;
+ MmModifiedPageMaximum = 300;
+ MmSystemCacheWsMinimum += 400;
+ MmSystemCacheWsMaximum += 800;
+ MmDataClusterSize = 3;
+ MmCodeClusterSize = 7;
+ }
+
+ if (MmNumberOfPhysicalPages >= ((32*1024*1024)/PAGE_SIZE) ) {
+
+ //
+ // If we are on a workstation, 32Mb and above are considered large systems
+ //
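+ // (0x00690057 is the two UNICODE characters "Wi" and 0x0061004c,
+ // used below, is "La" - the starts of the "WinNT" and "LanmanNT"
+ // product type strings.)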
+ if ( MmProductType == 0x00690057 ) {
+ MmSystemSize = MmLargeSystem;
+ }
+ else {
+
+ //
+ // For servers, 64Mb and greater is a large system
+ //
+
+ if (MmNumberOfPhysicalPages >= ((64*1024*1024)/PAGE_SIZE) ) {
+ MmSystemSize = MmLargeSystem;
+ }
+ }
+ }
+
+ if (MmNumberOfPhysicalPages > ((33*1024*1024)/PAGE_SIZE) ) {
+ MmModifiedPageMinimum = 400;
+ MmModifiedPageMaximum = 800;
+ MmSystemCacheWsMinimum += 500;
+ MmSystemCacheWsMaximum += 900;
+ }
+
+ //
+ // Determine if this is an Advanced Server system (WinNT is not AS).
+ //
+
+ if ( MmProductType == 0x00690057 ) {
+ SharedUserData->NtProductType = NtProductWinNt;
+ MmProductType = 0;
+ MmThrottleTop = 250;
+ MmThrottleBottom = 30;
+ } else {
+ if ( MmProductType == 0x0061004c ) {
+ SharedUserData->NtProductType = NtProductLanManNt;
+ }
+ else {
+ SharedUserData->NtProductType = NtProductServer;
+ }
+ MmProductType = 1;
+ MmThrottleTop = 450;
+ MmThrottleBottom = 80;
+ MmMinimumFreePages = 81;
+ }
+
+ MiAdjustWorkingSetManagerParameters((BOOLEAN)(MmProductType == 0 ? TRUE : FALSE));
+
+ //
+ // Set the ResidentAvailablePages to the number of available
+ // pages minus the fluid value.
+ //
+
+ MmResidentAvailablePages = MmAvailablePages - MM_FLUID_PHYSICAL_PAGES;
+
+ //
+ // Subtract off the size of the system cache working set.
+ //
+
+ MmResidentAvailablePages -= MmSystemCacheWsMinimum;
+ MmResidentAvailableAtInit = MmResidentAvailablePages;
+
+
+ if ((LONG)MmResidentAvailablePages < 0) {
+#if DBG
+ DbgPrint("system cache working set too big\n");
+#endif
+ return FALSE;
+ }
+
+ //
+ // Initialize spin lock for charging and releasing page file
+ // commitment.
+ //
+
+ KeInitializeSpinLock (&MmChargeCommitmentLock);
+
+ //
+ // Initialize spin lock for allowing working set expansion.
+ //
+
+ KeInitializeSpinLock (&MmExpansionLock);
+
+ ExInitializeFastMutex (&MmPageFileCreationLock);
+
+ //
+ // Initialize resource for extending sections.
+ //
+
+ ExInitializeResource (&MmSectionExtendResource);
+ ExInitializeResource (&MmSectionExtendSetResource);
+
+ //
+ // Build the system cache structures.
+ //
+
+ StartPde = MiGetPdeAddress (MmSystemCacheWorkingSetList);
+ PointerPte = MiGetPteAddress (MmSystemCacheWorkingSetList);
+
+ ASSERT ((StartPde + 1) == MiGetPdeAddress (MmSystemCacheStart));
+
+ //
+ // Size the system cache based on the amount of physical memory.
+ //
+
+ i = (MmNumberOfPhysicalPages + 65) / 1024;
+
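+ //
+ // With 4k pages this approximates physical memory in 4mb units,
+ // biased upward by 65 pages.
+ //
+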
+ if (i >= 4) {
+
+ //
+ // System has at least 4031 pages. Make the system
+ // cache 128mb + 64mb for each additional 1024 pages.
+ //
+
+ MmSizeOfSystemCacheInPages = ((128*1024*1024) >> PAGE_SHIFT) +
+ ((i - 4) * ((64*1024*1024) >> PAGE_SHIFT));
+ if (MmSizeOfSystemCacheInPages > MM_MAXIMUM_SYSTEM_CACHE_SIZE) {
+ MmSizeOfSystemCacheInPages = MM_MAXIMUM_SYSTEM_CACHE_SIZE;
+ }
+ }
+
+ MmSystemCacheEnd = (PVOID)(((ULONG)MmSystemCacheStart +
+ MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1);
+
+ EndPde = MiGetPdeAddress(MmSystemCacheEnd);
+
+ TempPte = ValidKernelPte;
+
+ LOCK_PFN (OldIrql);
+ while (StartPde <= EndPde) {
+ ASSERT (StartPde->u.Hard.Valid == 0);
+
+ //
+ // Map in a page directory page.
+ //
+
+ PageFrameIndex = MiRemoveAnyPage(
+ MI_GET_PAGE_COLOR_FROM_PTE (StartPde));
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *StartPde = TempPte;
+
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn1->PteFrame =
+ MiGetPdeAddress(PDE_BASE)->u.Hard.PageFrameNumber;
+ Pfn1->PteAddress = StartPde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->OriginalPte.u.Long = 0;
+
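+ //
+ // Zero the PTEs mapped by the newly allocated page table page so
+ // they all start out invalid.
+ //
+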
+ RtlFillMemoryUlong (PointerPte,
+ PAGE_SIZE,
+ ZeroKernelPte.u.Long);
+
+ StartPde += 1;
+ PointerPte += PTE_PER_PAGE;
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Initialize the system cache.
+ //
+
+ if (MmLargeSystemCache != 0) {
+ if ((MmAvailablePages >
+ MmSystemCacheWsMaximum + ((6*1024*1024) >> PAGE_SHIFT))) {
+ MmSystemCacheWsMaximum =
+ MmAvailablePages - ((4*1024*1024) >> PAGE_SHIFT);
+ MmMoreThanEnoughFreePages = 256;
+ }
+ }
+
+ if (MmSystemCacheWsMaximum > (MM_MAXIMUM_WORKING_SET - 5)) {
+ MmSystemCacheWsMaximum = MM_MAXIMUM_WORKING_SET - 5;
+ }
+
+ if (MmSystemCacheWsMaximum > MmSizeOfSystemCacheInPages) {
+ MmSystemCacheWsMaximum = MmSizeOfSystemCacheInPages;
+ if ((MmSystemCacheWsMinimum + 500) > MmSystemCacheWsMaximum) {
+ MmSystemCacheWsMinimum = MmSystemCacheWsMaximum - 500;
+ }
+ }
+
+ if (!MiInitializeSystemCache (MmSizeOfSystemCacheInPages,
+ MmSystemCacheWsMinimum,
+ MmSystemCacheWsMaximum
+ )) {
+ return FALSE;
+ }
+
+ //
+ // Set the commit page limit to four times the number of available
+ // pages. This value is updated as paging files are created.
+ //
+
+ MmTotalCommitLimit = MmAvailablePages << 2;
+
+ MmAttemptForCantExtend.Segment = NULL;
+ MmAttemptForCantExtend.RequestedExpansionSize = 1;
+ MmAttemptForCantExtend.ActualExpansion = 1;
+ MmAttemptForCantExtend.InProgress = FALSE;
+
+ KeInitializeEvent (&MmAttemptForCantExtend.Event,
+ NotificationEvent,
+ FALSE);
+
+ if (MmOverCommit == 0) {
+
+ //
+ // If this value was not set via the registry, set the
+ // over commit value to the number of available pages
+ // minus 1024 pages (4mb with 4k pages).
+ //
+
+ if (MmAvailablePages > 1024) {
+ MmOverCommit = MmAvailablePages - 1024;
+ }
+ }
+
+ //
+ // Set the maximum working set size to the total available memory
+ // minus 512 pages (2mb with 4k pages).
+ //
+
+ MmMaximumWorkingSetSize = MmAvailablePages - 512;
+
+ if (MmMaximumWorkingSetSize > (MM_MAXIMUM_WORKING_SET - 5)) {
+ MmMaximumWorkingSetSize = MM_MAXIMUM_WORKING_SET - 5;
+ }
+
+ //
+ // Create the modified page writer event.
+ //
+
+ KeInitializeEvent (&MmModifiedPageWriterEvent, NotificationEvent, FALSE);
+
+ //
+ // Build paged pool.
+ //
+
+ MiBuildPagedPool ();
+
+ //
+ // Add more system PTEs if large memory system.
+ //
+
+ if (MmNumberOfPhysicalPages > ((128*1024*1024) >> PAGE_SHIFT)) {
+
+
+ PointerPde = MiGetPdeAddress ((PCHAR)MmPagedPoolEnd + 1);
+ StartingPte = MiGetPteAddress ((PCHAR)MmPagedPoolEnd + 1);
+ j = 0;
+
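+ //
+ // Map page table pages for the virtual space immediately above
+ // paged pool and donate the PTEs they map to the system PTE pool.
+ //
+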
+ TempPte = ValidKernelPde;
+ LOCK_PFN (OldIrql);
+ while (PointerPde->u.Hard.Valid == 0) {
+
+ MiChargeCommitmentCantExpand (1, TRUE);
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPde = TempPte;
+ MiInitializePfn (PageFrameIndex, PointerPde, 1);
+ PointerPde += 1;
+ StartingPte += PAGE_SIZE / sizeof(MMPTE);
+ j += PAGE_SIZE / sizeof(MMPTE);
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ if (j != 0) {
+ StartingPte = MiGetPteAddress ((PCHAR)MmPagedPoolEnd + 1);
+ MmSystemPtesStart[SystemPteSpace] = (ULONG)StartingPte;
+ MmNonPagedSystemStart = MiGetVirtualAddressMappedByPte (StartingPte);
+ MmNumberOfSystemPtes += j;
+ MiReleaseSystemPtes (StartingPte, j, SystemPteSpace);
+ }
+ }
+
+
+#if DBG
+ if (MmDebug & MM_DBG_DUMP_BOOT_PTES) {
+ MiDumpValidAddresses ();
+ MiDumpPfn ();
+ }
+#endif
+
+#if DBG
+ MiAllocVmEventId = RtlCreateEventId( NULL,
+ 0,
+ "AllocVM",
+ 5,
+ RTL_EVENT_ULONG_PARAM, "Addr", 0,
+ RTL_EVENT_ULONG_PARAM, "Size", 0,
+ RTL_EVENT_FLAGS_PARAM, "", 3,
+ MEM_RESERVE, "Reserve",
+ MEM_COMMIT, "Commit",
+ MEM_TOP_DOWN, "TopDown",
+ RTL_EVENT_ENUM_PARAM, "", 8,
+ PAGE_NOACCESS, "NoAccess",
+ PAGE_READONLY, "ReadOnly",
+ PAGE_READWRITE, "ReadWrite",
+ PAGE_WRITECOPY, "CopyOnWrite",
+ PAGE_EXECUTE, "Execute",
+ PAGE_EXECUTE_READ, "ExecuteRead",
+ PAGE_EXECUTE_READWRITE, "ExecuteReadWrite",
+ PAGE_EXECUTE_WRITECOPY, "ExecuteCopyOnWrite",
+ RTL_EVENT_FLAGS_PARAM, "", 2,
+ PAGE_GUARD, "Guard",
+ PAGE_NOCACHE, "NoCache"
+ );
+ MiFreeVmEventId = RtlCreateEventId( NULL,
+ 0,
+ "FreeVM",
+ 3,
+ RTL_EVENT_ULONG_PARAM, "Addr", 0,
+ RTL_EVENT_ULONG_PARAM, "Size", 0,
+ RTL_EVENT_FLAGS_PARAM, "", 2,
+ MEM_RELEASE, "Release",
+ MEM_DECOMMIT, "DeCommit"
+ );
+#endif // DBG
+
+ return TRUE;
+ }
+
+ if (Phase == 1) {
+
+#if DBG
+ MmDebug |= MM_DBG_CHECK_PFN_LOCK;
+#endif
+
+#ifdef _X86_
+ MiInitMachineDependent (LoaderBlock);
+#endif
+
+ if (!MiSectionInitialization ()) {
+ return FALSE;
+ }
+
+#if defined(MM_SHARED_USER_DATA_VA)
+
+ //
+ // Create double mapped page between kernel and user mode.
+ //
+
+ PointerPte = MiGetPteAddress(KI_USER_SHARED_DATA);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ MI_MAKE_VALID_PTE (MmSharedUserDataPte,
+ PageFrameIndex,
+ MM_READONLY,
+ PointerPte);
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+#endif
+
+ //
+ // Set up system wide lock pages limit.
+ //
+
+ MmLockPagesLimit = MmLockLimitInBytes >> PAGE_SHIFT;
+ if (MmLockPagesLimit < MM_DEFAULT_IO_LOCK_LIMIT) {
+ MmLockPagesLimit = MM_DEFAULT_IO_LOCK_LIMIT;
+ }
+
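+ //
+ // Clamp the limit so that roughly 7mb of physical memory can
+ // never be locked for I/O.
+ //
+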
+ if ((MmLockPagesLimit + ((7 * 1024*1024) / PAGE_SIZE)) > MmAvailablePages) {
+ MmLockPagesLimit = MmAvailablePages - ((7 * 1024*1024) / PAGE_SIZE);
+ if ((LONG)MmLockPagesLimit < (MM_DEFAULT_IO_LOCK_LIMIT / PAGE_SIZE)) {
+ MmLockPagesLimit = MM_DEFAULT_IO_LOCK_LIMIT / PAGE_SIZE;
+ }
+ }
+
+ MmPagingFileCreated = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(KEVENT),
+ 'fPmM');
+
+ if (MmPagingFileCreated == NULL) {
+
+ //
+ // Pool allocation failed, return FALSE.
+ //
+
+ return FALSE;
+ }
+
+ KeInitializeEvent (MmPagingFileCreated, NotificationEvent, FALSE);
+
+ //
+ // Start the modified page writer.
+ //
+
+ InitializeObjectAttributes( &ObjectAttributes, NULL, 0, NULL, NULL );
+
+ if ( !NT_SUCCESS(PsCreateSystemThread(
+ &ThreadHandle,
+ THREAD_ALL_ACCESS,
+ &ObjectAttributes,
+ 0L,
+ NULL,
+ MiModifiedPageWriter,
+ NULL
+ )) ) {
+ return FALSE;
+ }
+ ZwClose (ThreadHandle);
+
+ //
+ // Start the balance set manager.
+ //
+ // The balance set manager performs stack swapping and working
+ // set management and requires two threads.
+ //
+
+ KeInitializeEvent (&MmWorkingSetManagerEvent,
+ SynchronizationEvent,
+ FALSE);
+
+ InitializeObjectAttributes( &ObjectAttributes, NULL, 0, NULL, NULL );
+
+ if ( !NT_SUCCESS(PsCreateSystemThread(
+ &ThreadHandle,
+ THREAD_ALL_ACCESS,
+ &ObjectAttributes,
+ 0L,
+ NULL,
+ KeBalanceSetManager,
+ NULL
+ )) ) {
+
+ return FALSE;
+ }
+ ZwClose (ThreadHandle);
+
+ if ( !NT_SUCCESS(PsCreateSystemThread(
+ &ThreadHandle,
+ THREAD_ALL_ACCESS,
+ &ObjectAttributes,
+ 0L,
+ NULL,
+ KeSwapProcessOrStack,
+ NULL
+ )) ) {
+
+ return FALSE;
+ }
+ ZwClose (ThreadHandle);
+
+ MiEnablePagingTheExecutive();
+
+ return TRUE;
+
+ }
+
+ return FALSE;
+}
+
+VOID
+MmInitializeMemoryLimits (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock,
+ IN PBOOLEAN IncludeType,
+ OUT PPHYSICAL_MEMORY_DESCRIPTOR Memory
+ )
+
+/*++
+
+Routine Description:
+
+ This function walks through the loader block's memory
+ description list and builds a list of contiguous physical
+ memory blocks of the desired types.
+
+Arguments:
+
+ LoaderBlock - Supplies a pointer to the system loader block.
+
+ IncludeType - Supplies an array of BOOLEANs of size LoaderMaximum.
+ TRUE means include this type of memory in the result.
+
+ Memory - Returns the physical memory blocks.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode Only. System initialization.
+
+--*/
+{
+
+ PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
+ PLIST_ENTRY NextMd;
+ ULONG i;
+ ULONG LowestFound;
+ ULONG Found;
+ ULONG Merged;
+ ULONG NextPage;
+ ULONG TotalPages = 0;
+
+ //
+ // Walk through the memory descriptors and build physical memory list.
+ //
+
+ LowestFound = 0;
+ Memory->Run[0].BasePage = 0xffffffff;
+ NextPage = 0xffffffff;
+ Memory->Run[0].PageCount = 0;
+ i = 0;
+
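+ //
+ // Each pass either extends the previous run with a descriptor that
+ // starts exactly at NextPage, or selects the lowest-based qualifying
+ // descriptor at or above LowestFound, so the runs are produced in
+ // ascending base page order without a separate sort.
+ //
+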
+ do {
+ Merged = FALSE;
+ Found = FALSE;
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ if (MemoryDescriptor->MemoryType < LoaderMaximum &&
+ IncludeType [MemoryDescriptor->MemoryType] ) {
+
+ //
+ // Try to merge runs.
+ //
+
+ if (MemoryDescriptor->BasePage == NextPage) {
+ ASSERT (MemoryDescriptor->PageCount != 0);
+ Memory->Run[i - 1].PageCount += MemoryDescriptor->PageCount;
+ NextPage += MemoryDescriptor->PageCount;
+ TotalPages += MemoryDescriptor->PageCount;
+ Merged = TRUE;
+ Found = TRUE;
+ break;
+ }
+
+ if (MemoryDescriptor->BasePage >= LowestFound) {
+ if (Memory->Run[i].BasePage > MemoryDescriptor->BasePage) {
+ Memory->Run[i].BasePage = MemoryDescriptor->BasePage;
+ Memory->Run[i].PageCount = MemoryDescriptor->PageCount;
+ }
+ Found = TRUE;
+ }
+ }
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ if (!Merged && Found) {
+ NextPage = Memory->Run[i].BasePage + Memory->Run[i].PageCount;
+ TotalPages += Memory->Run[i].PageCount;
+ i += 1;
+ }
+ Memory->Run[i].BasePage = 0xffffffff;
+ LowestFound = NextPage;
+
+ } while (Found);
+ ASSERT (i <= Memory->NumberOfRuns);
+ Memory->NumberOfRuns = i;
+ Memory->NumberOfPages = TotalPages;
+ return;
+}
+
+VOID
+MiMergeMemoryLimit (
+ IN OUT PPHYSICAL_MEMORY_DESCRIPTOR Memory,
+ IN ULONG StartPage,
+ IN ULONG NoPages
+ )
+/*++
+
+Routine Description:
+
+ This function ensures the passed range is described by the passed
+ Memory block, adding any missing runs as needed.
+
+ The passed memory block is assumed to have room for at least
+ MAX_PHYSICAL_MEMORY_FRAGMENTS runs.
+
+Arguments:
+
+ Memory - Supplies the memory block in which the run must be described.
+
+ StartPage - First page of run
+
+ NoPages - Number of pages in run
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode Only. System initialization.
+
+--*/
+{
+ ULONG EndPage, sp, ep, i;
+
+
+ EndPage = StartPage + NoPages;
+
+ //
+ // Clip range to area which is not already described
+ //
+
+ for (i=0; i < Memory->NumberOfRuns; i++) {
+ sp = Memory->Run[i].BasePage;
+ ep = sp + Memory->Run[i].PageCount;
+
+ if (sp < StartPage) {
+ if (ep > StartPage && ep < EndPage) {
+ // Bump the beginning page of the target area.
+ StartPage = ep;
+ }
+
+ if (ep > EndPage) {
+ //
+ // Target area is contained totally within this
+ // descriptor. This range is fully accounted for.
+ //
+
+ StartPage = EndPage;
+ }
+
+ } else {
+ // sp >= StartPage
+
+ if (sp < EndPage) {
+ if (ep < EndPage) {
+ //
+ // This descriptor is totally within the target area -
+ // check the area on either side of this descriptor.
+ //
+
+ MiMergeMemoryLimit (Memory, StartPage, sp - StartPage);
+ StartPage = ep;
+
+ } else {
+ // clip the ending page of the target area
+ EndPage = sp;
+ }
+ }
+ }
+
+ //
+ // Anything left of target area?
+ //
+
+ if (StartPage == EndPage) {
+ return ;
+ }
+ } // next descriptor
+
+ //
+ // The range StartPage through EndPage is missing. Add it.
+ //
+
+ if (Memory->NumberOfRuns == MAX_PHYSICAL_MEMORY_FRAGMENTS) {
+ return ;
+ }
+
+ Memory->Run[Memory->NumberOfRuns].BasePage = StartPage;
+ Memory->Run[Memory->NumberOfRuns].PageCount = EndPage - StartPage;
+ Memory->NumberOfPages += EndPage - StartPage;
+ Memory->NumberOfRuns += 1;
+}
+
+
+
+VOID
+MmFreeLoaderBlock (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called as the last routine in phase 1 initialization.
+ It frees memory used by the OsLoader.
+
+Arguments:
+
+ LoaderBlock - Supplies a pointer to the system loader block.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode Only. System initialization.
+
+--*/
+
+{
+
+ PLIST_ENTRY NextMd;
+ PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
+ MEMORY_ALLOCATION_DESCRIPTOR SavedDescriptor[MM_MAX_LOADER_BLOCKS];
+ ULONG i;
+ ULONG NextPhysicalPage;
+ PMMPFN Pfn1;
+ LONG BlockNumber = -1;
+ KIRQL OldIrql;
+
+ //
+ //
+ // Walk through the memory descriptors and add pages to the
+ // free list in the PFN database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+
+ switch (MemoryDescriptor->MemoryType) {
+ case LoaderOsloaderHeap:
+ case LoaderRegistryData:
+ case LoaderNlsData:
+ //case LoaderMemoryData: //this has page table and other stuff.
+
+ //
+ // Capture the data to temporary storage so we won't
+ // free memory we are referencing.
+ //
+
+ BlockNumber += 1;
+ if (BlockNumber >= MM_MAX_LOADER_BLOCKS) {
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+
+ SavedDescriptor[BlockNumber] = *MemoryDescriptor;
+
+ break;
+
+ default:
+
+ break;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ while (BlockNumber >= 0) {
+
+ i = SavedDescriptor[BlockNumber].PageCount;
+ NextPhysicalPage = SavedDescriptor[BlockNumber].BasePage;
+
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ if (Pfn1->u1.Flink == 0) {
+
+ //
+ // Set the PTE address to the physical page for
+ // virtual address alignment checking.
+ //
+
+ Pfn1->PteAddress =
+ (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ NextPhysicalPage);
+ }
+ } else {
+
+ if (NextPhysicalPage != 0) {
+ //
+ // Remove PTE and insert into the free list. If it is
+ // a physical address within the PFN database, the PTE
+ // element does not exist and therefore cannot be updated.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS (
+ MiGetVirtualAddressMappedByPte (Pfn1->PteAddress))) {
+
+ //
+ // Not a physical address.
+ //
+
+ *(Pfn1->PteAddress) = ZeroPte;
+ }
+
+ MI_SET_PFN_DELETED (Pfn1);
+ MiDecrementShareCountOnly (NextPhysicalPage);
+ }
+ }
+
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ BlockNumber -= 1;
+ }
+
+ KeFlushEntireTb (TRUE, TRUE);
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+VOID
+MiBuildPagedPool (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to build the structures required for paged
+ pool and initialize the pool. Once this routine is called, paged
+ pool may be allocated.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode Only. System initialization.
+
+--*/
+
+{
+ ULONG Size;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ MMPTE TempPte;
+ PMMPFN Pfn1;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+
+ //
+ // Double map system page directory page.
+ //
+
+ MmSystemPageDirectory = MiGetPdeAddress(PDE_BASE)->u.Hard.PageFrameNumber;
+
+ MmSystemPagePtes = (PMMPTE)MiMapPageInHyperSpace (MmSystemPageDirectory,
+ &OldIrql);
+ MiUnmapPageInHyperSpace (OldIrql);
+
+ if (!MI_IS_PHYSICAL_ADDRESS(MmSystemPagePtes)) {
+
+ //
+ // Was not mapped physically, map it virtually in system space.
+ //
+
+ PointerPte = MiReserveSystemPtes (
+ 1,
+ SystemPteSpace,
+ MM_COLOR_ALIGNMENT,
+ ((ULONG)PDE_BASE & MM_COLOR_MASK_VIRTUAL),
+ TRUE);
+ *PointerPte = ValidKernelPte;
+ PointerPte->u.Hard.PageFrameNumber = MmSystemPageDirectory;
+ MmSystemPagePtes = (PMMPTE)MiGetVirtualAddressMappedByPte (PointerPte);
+ }
+
+ //
+ // Allocate the prototype PTEs for paged pool.
+ //
+
+ //
+ // A size of 0 means size the pool based on physical memory.
+ //
+
+ if (MmSizeOfPagedPoolInBytes == 0) {
+ MmSizeOfPagedPoolInBytes = 2 * MmMaximumNonPagedPoolInBytes;
+ }
+
+ if (MmIsThisAnNtAsSystem()) {
+ if (MmSizeOfPagedPoolInBytes < MM_MINIMUM_PAGED_POOL_NTAS) {
+ MmSizeOfPagedPoolInBytes = MM_MINIMUM_PAGED_POOL_NTAS;
+ }
+ }
+
+ if (MmSizeOfPagedPoolInBytes >
+ (ULONG)((PCHAR)MmNonPagedSystemStart - (PCHAR)MmPagedPoolStart)) {
+ MmSizeOfPagedPoolInBytes =
+ ((PCHAR)MmNonPagedSystemStart - (PCHAR)MmPagedPoolStart);
+ }
+
+ Size = BYTES_TO_PAGES(MmSizeOfPagedPoolInBytes);
+
+ if (Size < MM_MIN_INITIAL_PAGED_POOL) {
+ Size = MM_MIN_INITIAL_PAGED_POOL;
+ }
+
+ if (Size > (MM_MAX_PAGED_POOL >> PAGE_SHIFT)) {
+ Size = MM_MAX_PAGED_POOL >> PAGE_SHIFT;
+ }
+
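+ //
+ // Round the size up to whole page table pages so paged pool always
+ // spans complete page tables.
+ //
+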
+ Size = (Size + (PTE_PER_PAGE - 1)) / PTE_PER_PAGE;
+ MmSizeOfPagedPoolInBytes = Size * PAGE_SIZE * PTE_PER_PAGE;
+
+ ASSERT ((MmSizeOfPagedPoolInBytes + (PCHAR)MmPagedPoolStart) <=
+ (PCHAR)MmNonPagedSystemStart);
+
+ //
+ // Set size to the number of pages in the pool.
+ //
+
+ Size = Size * PTE_PER_PAGE;
+
+ MmPagedPoolEnd = (PVOID)(((PUCHAR)MmPagedPoolStart +
+ MmSizeOfPagedPoolInBytes) - 1);
+
+ MmPageAlignedPoolBase[PagedPool] = MmPagedPoolStart;
+
+ //
+ // Build page table page for paged pool.
+ //
+
+ PointerPde = MiGetPdeAddress (MmPagedPoolStart);
+ MmPagedPoolBasePde = PointerPde;
+
+ PointerPte = MiGetPteAddress (MmPagedPoolStart);
+ MmFirstPteForPagedPool = PointerPte;
+ MmLastPteForPagedPool = MiGetPteAddress (MmPagedPoolEnd);
+
+ RtlFillMemoryUlong (PointerPde,
+ sizeof(MMPTE) *
+ (1 + MiGetPdeAddress (MmPagedPoolEnd) - PointerPde),
+ MM_KERNEL_NOACCESS_PTE);
+
+ TempPte = ValidKernelPde;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Map in a page table page.
+ //
+
+ PageFrameIndex = MiRemoveAnyPage(
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPde));
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPde = TempPte;
+
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn1->PteFrame = MmSystemPageDirectory;
+ Pfn1->PteAddress = PointerPde;
+ Pfn1->u2.ShareCount = 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->OriginalPte.u.Long = 0;
+ RtlFillMemoryUlong (PointerPte, PAGE_SIZE, MM_KERNEL_DEMAND_ZERO_PTE);
+
+ UNLOCK_PFN (OldIrql);
+
+ MmNextPteForPagedPoolExpansion = PointerPde + 1;
+
+ //
+ // Build bitmaps for paged pool.
+ //
+
+ MiCreateBitMap (&MmPagedPoolAllocationMap, Size, NonPagedPool);
+ RtlSetAllBits (MmPagedPoolAllocationMap);
+
+ //
+ // Indicate first page worth of PTEs are available.
+ //
+
+ RtlClearBits (MmPagedPoolAllocationMap, 0, PTE_PER_PAGE);
+
+ MiCreateBitMap (&MmEndOfPagedPoolBitmap, Size, NonPagedPool);
+ RtlClearAllBits (MmEndOfPagedPoolBitmap);
+
+ //
+ // Initialize paged pool.
+ //
+
+ InitializePool (PagedPool, 0L);
+
+ //
+ // Allow mapping of views into system space.
+ //
+
+ MiInitializeSystemSpaceMap ();
+
+
+ return;
+}
+
+VOID
+MiFindInitializationCode (
+ OUT PVOID *StartVa,
+ OUT PVOID *EndVa
+ )
+
+/*++
+
+Routine Description:
+
+ This function locates the start and end of the kernel initialization
+ code. This code resides in the "init" section of the kernel image.
+
+Arguments:
+
+ StartVa - Returns the starting address of the init section.
+
+ EndVa - Returns the ending address of the init section.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode Only. End of system initialization.
+
+--*/
+
+{
+ PLDR_DATA_TABLE_ENTRY LdrDataTableEntry;
+ PVOID CurrentBase;
+ PVOID InitStart;
+ PVOID InitEnd;
+ PLIST_ENTRY Next;
+ PIMAGE_NT_HEADERS NtHeader;
+ PIMAGE_SECTION_HEADER SectionTableEntry;
+ LONG i;
+ ULONG ValidPages;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+ PVOID MiFindInitializationCodeAddress = MmGetProcedureAddress((PVOID)&MiFindInitializationCode);
+
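+ //
+ // Resolve this routine's actual code address (a function pointer
+ // may refer to a descriptor rather than code on some platforms) so
+ // the loop below can recognize the init section that contains the
+ // currently executing code.
+ //
+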
+ *StartVa = NULL;
+
+ //
+ // Walk through the loader blocks looking for the base which
+ // contains this routine.
+ //
+
+ KeEnterCriticalRegion();
+ ExAcquireResourceExclusive (&PsLoadedModuleResource, TRUE);
+ Next = PsLoadedModuleList.Flink;
+
+ while ( Next != &PsLoadedModuleList ) {
+ LdrDataTableEntry = CONTAINING_RECORD( Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks
+ );
+ if (LdrDataTableEntry->SectionPointer != NULL) {
+
+ //
+ // This entry was loaded by MmLoadSystemImage so it's already
+ // had its init section removed.
+ //
+
+ Next = Next->Flink;
+ continue;
+ }
+
+ CurrentBase = (PVOID)LdrDataTableEntry->DllBase;
+ NtHeader = RtlImageNtHeader(CurrentBase);
+
+ SectionTableEntry = (PIMAGE_SECTION_HEADER)((ULONG)NtHeader +
+ sizeof(ULONG) +
+ sizeof(IMAGE_FILE_HEADER) +
+ NtHeader->FileHeader.SizeOfOptionalHeader);
+
+ //
+ // From the image header, locate the section named 'INIT'.
+ //
+
+ i = NtHeader->FileHeader.NumberOfSections;
+
+ InitStart = NULL;
+ while (i > 0) {
+
+#if DBG
+ if ((*(PULONG)SectionTableEntry->Name == 'tini') ||
+ (*(PULONG)SectionTableEntry->Name == 'egap')) {
+ DbgPrint("driver %wZ has lower case sections (init or pagexxx)\n",
+ &LdrDataTableEntry->FullDllName);
+ }
+#endif //DBG
+
+ if (*(PULONG)SectionTableEntry->Name == 'TINI') {
+ InitStart = (PVOID)((PCHAR)CurrentBase + SectionTableEntry->VirtualAddress);
+ InitEnd = (PVOID)((PCHAR)InitStart + SectionTableEntry->SizeOfRawData - 1);
+
+ InitEnd = (PVOID)((PCHAR)PAGE_ALIGN ((ULONG)InitEnd +
+ (NtHeader->OptionalHeader.SectionAlignment - 1)) - 1);
+ InitStart = (PVOID)ROUND_TO_PAGES (InitStart);
+
+ if (InitStart <= InitEnd) {
+ if ((MiFindInitializationCodeAddress >= InitStart) &&
+ (MiFindInitializationCodeAddress <= InitEnd)) {
+
+ //
+ // This init section is in the kernel, don't free it now as
+ // it would free this code!
+ //
+
+ *StartVa = InitStart;
+ *EndVa = InitEnd;
+ break;
+ } else {
+
+ //
+ // See if any more sections are discardable after this
+ // one.
+ //
+
+ while (i > 1) {
+ SectionTableEntry += 1;
+ i -= 1;
+ if ((SectionTableEntry->Characteristics &
+ IMAGE_SCN_MEM_DISCARDABLE) != 0) {
+ //
+ // Discard this too.
+ //
+
+ InitEnd = (PVOID)(((PCHAR)CurrentBase +
+ SectionTableEntry->VirtualAddress) +
+ (SectionTableEntry->SizeOfRawData - 1));
+
+ InitEnd = (PVOID)((PCHAR)PAGE_ALIGN ((ULONG)InitEnd +
+ (NtHeader->OptionalHeader.SectionAlignment - 1)) - 1);
+
+ } else {
+ break;
+ }
+ }
+
+ if (InitEnd > (PVOID)((PCHAR)CurrentBase +
+ LdrDataTableEntry->SizeOfImage)) {
+ InitEnd = (PVOID)(((ULONG)CurrentBase +
+ (LdrDataTableEntry->SizeOfImage - 1)) |
+ (PAGE_SIZE - 1));
+ }
+ MiFreeInitializationCode (InitStart, InitEnd);
+ }
+ }
+ }
+ i -= 1;
+ SectionTableEntry += 1;
+ }
+ Next = Next->Flink;
+ }
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+ return;
+}
+
+VOID
+MiFreeInitializationCode (
+ IN PVOID StartVa,
+ IN PVOID EndVa
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to delete the initialization code.
+
+Arguments:
+
+ StartVa - Supplies the starting address of the range to delete.
+
+ EndVa - Supplies the ending address of the range to delete.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode Only. Runs after system initialization.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPTE PointerPte;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+ PVOID UnlockHandle;
+ ULONG ValidPages;
+
+ UnlockHandle = MmLockPagableCodeSection((PVOID)MiFreeInitializationCode);
+ ASSERT(UnlockHandle);
+ PointerPte = MiGetPteAddress (StartVa);
+
+ if (MI_IS_PHYSICAL_ADDRESS(StartVa)) {
+ LOCK_PFN (OldIrql);
+ while (StartVa < EndVa) {
+
+ //
+ // On certain architectures (e.g., MIPS) virtual addresses
+ // may be physical and hence have no corresponding PTE.
+ //
+
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (StartVa);
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ MI_SET_PFN_DELETED (Pfn1);
+ MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex);
+ StartVa = (PVOID)((PUCHAR)StartVa + PAGE_SIZE);
+ }
+ UNLOCK_PFN (OldIrql);
+ } else {
+ MiDeleteSystemPagableVm (PointerPte,
+ 1 + MiGetPteAddress (EndVa) -
+ PointerPte,
+ MM_ZERO_KERNEL_PTE,
+ &ValidPages);
+ }
+ MmUnlockPagableImageSection(UnlockHandle);
+ return;
+}
+
+
+VOID
+MiEnablePagingTheExecutive (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function walks the loaded module list and makes the pagable
+ sections (e.g., "PAGE", ".edata") of the kernel and boot-loaded
+ drivers actually pagable.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel Mode Only. End of system initialization.
+
+--*/
+
+{
+
+#if defined(_X86_) || defined(_PPC_)
+
+ PLDR_DATA_TABLE_ENTRY LdrDataTableEntry;
+ PVOID CurrentBase;
+ PLIST_ENTRY Next;
+ PIMAGE_NT_HEADERS NtHeader;
+ PIMAGE_SECTION_HEADER SectionTableEntry;
+ LONG i;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ BOOLEAN PageSection;
+
+ //
+ // Don't page kernel mode code if the customer does not want it paged.
+ //
+
+ if (MmDisablePagingExecutive) {
+ return;
+ }
+
+ //
+ // Walk through the loader blocks looking for the base which
+ // contains this routine.
+ //
+
+ KeEnterCriticalRegion();
+ ExAcquireResourceExclusive (&PsLoadedModuleResource, TRUE);
+ Next = PsLoadedModuleList.Flink;
+ while ( Next != &PsLoadedModuleList ) {
+ LdrDataTableEntry = CONTAINING_RECORD( Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks
+ );
+ if (LdrDataTableEntry->SectionPointer != NULL) {
+
+ //
+ // This entry was loaded by MmLoadSystemImage so it's already paged.
+ //
+
+ Next = Next->Flink;
+ continue;
+ }
+
+ CurrentBase = (PVOID)LdrDataTableEntry->DllBase;
+ NtHeader = RtlImageNtHeader(CurrentBase);
+
+ SectionTableEntry = (PIMAGE_SECTION_HEADER)((ULONG)NtHeader +
+ sizeof(ULONG) +
+ sizeof(IMAGE_FILE_HEADER) +
+ NtHeader->FileHeader.SizeOfOptionalHeader);
+
+ //
+ // From the image header, locate the section named 'PAGE' or
+ // '.edata'.
+ //
+
+ i = NtHeader->FileHeader.NumberOfSections;
+
+ PointerPte = NULL;
+
+ while (i > 0) {
+
+ if (MI_IS_PHYSICAL_ADDRESS (CurrentBase)) {
+
+ //
+ // Mapped physically, can't be paged.
+ //
+
+ break;
+ }
+
+ PageSection = (*(PULONG)SectionTableEntry->Name == 'EGAP') ||
+ (*(PULONG)SectionTableEntry->Name == 'ade.');
+
+ if (*(PULONG)SectionTableEntry->Name == 'EGAP' &&
+ SectionTableEntry->Name[4] == 'K' &&
+ SectionTableEntry->Name[5] == 'D') {
+
+ //
+ // Only page out PAGEKD if KdPitchDebugger is TRUE.
+ //
+
+ PageSection = KdPitchDebugger;
+ }
+
+ if (PageSection) {
+ //
+ // This section is pagable, save away the start and end.
+ //
+
+ if (PointerPte == NULL) {
+
+ //
+ // Previous section was NOT pagable, get the start address.
+ //
+
+ PointerPte = MiGetPteAddress (ROUND_TO_PAGES (
+ (ULONG)CurrentBase +
+ SectionTableEntry->VirtualAddress));
+ }
+ LastPte = MiGetPteAddress ((ULONG)CurrentBase +
+ SectionTableEntry->VirtualAddress +
+ (NtHeader->OptionalHeader.SectionAlignment - 1) +
+ (SectionTableEntry->SizeOfRawData - PAGE_SIZE));
+
+ } else {
+
+ //
+ // This section is not pagable, if the previous section was
+ // pagable, enable it.
+ //
+
+ if (PointerPte != NULL) {
+ MiEnablePagingOfDriverAtInit (PointerPte, LastPte);
+ PointerPte = NULL;
+ }
+ }
+ i -= 1;
+ SectionTableEntry += 1;
+ } //end while
+
+ if (PointerPte != NULL) {
+ MiEnablePagingOfDriverAtInit (PointerPte, LastPte);
+ }
+
+ Next = Next->Flink;
+ } //end while
+
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+
+#endif
+
+ return;
+}
+
+
+VOID
+MiEnablePagingOfDriverAtInit (
+ IN PMMPTE PointerPte,
+ IN PMMPTE LastPte
+ )
+
+/*++
+
+Routine Description:
+
+ This routine marks the specified range of PTEs as pagable.
+
+Arguments:
+
+ PointerPte - Supplies the starting PTE.
+
+ LastPte - Supplies the ending PTE.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PVOID Base;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn;
+ MMPTE TempPte;
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+
+ Base = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ while (PointerPte <= LastPte) {
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn->u2.ShareCount == 1);
+
+ //
+ // Set the working set index to zero. This allows page table
+ // pages to be brought back in with the proper WSINDEX.
+ //
+
+ Pfn->u1.WsIndex = 0;
+ Pfn->OriginalPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+ Pfn->u3.e1.Modified = 1;
+ TempPte = *PointerPte;
+
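+ //
+ // Convert the valid PTE to a transition PTE so the page becomes
+ // eligible for the modified page writer and can be paged out.
+ //
+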
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn->OriginalPte.u.Soft.Protection);
+
+
+ KeFlushSingleTb (Base,
+ TRUE,
+ TRUE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Flush);
+
+ //
+ // Flush the translation buffer and decrement the number of valid
+ // PTEs within the containing page table page. Note that for a
+ // private page, the page table page is still needed because the
+ // page is in transition.
+ //
+
+ MiDecrementShareCount (PageFrameIndex);
+ Base = (PVOID)((PCHAR)Base + PAGE_SIZE);
+ PointerPte += 1;
+ MmResidentAvailablePages += 1;
+ MiChargeCommitmentCantExpand (1, TRUE);
+ MmTotalSystemCodePages += 1;
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
+
+
+MM_SYSTEMSIZE
+MmQuerySystemSize(
+ VOID
+ )
+{
+ //
+ // 12Mb is small
+ // 12-19 is medium
+ // > 19 is large
+ //
+ return MmSystemSize;
+}
+
+NTKERNELAPI
+BOOLEAN
+MmIsThisAnNtAsSystem(
+ VOID
+ )
+{
+ return (BOOLEAN)MmProductType;
+}
diff --git a/private/ntos/mm/mmquota.c b/private/ntos/mm/mmquota.c
new file mode 100644
index 000000000..614b57a4d
--- /dev/null
+++ b/private/ntos/mm/mmquota.c
@@ -0,0 +1,1050 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ mmquota.c
+
+Abstract:
+
+ This module contains the routines which implement the quota and
+ commitment charging for memory management.
+
+Author:
+
+ Lou Perazzoli (loup) 12-December-89
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#define MM_MAXIMUM_QUOTA_OVERCHARGE 9
+
+#define MM_DONT_EXTEND_SIZE 512
+
+#define MM_COMMIT_POPUP_MAX ((512*1024)/PAGE_SIZE)
+
+#define MM_EXTEND_COMMIT ((1024*1024)/PAGE_SIZE)
+
+ULONG MmPeakCommitment;
+
+ULONG MmExtendedCommit;
+
+extern ULONG MmAllocatedPagedPool;
+
+extern ULONG MmAllocatedNonPagedPool;
+
+
+ULONG MiOverCommitCallCount;
+extern EPROCESS_QUOTA_BLOCK PspDefaultQuotaBlock;
+
+
+VOID
+MiCauseOverCommitPopup(
+ ULONG NumberOfPages,
+ ULONG Extension
+ );
+
+
+ULONG
+FASTCALL
+MiChargePageFileQuota (
+ IN ULONG QuotaCharge,
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to ensure the user has sufficient page file
+ quota remaining and, if so, charges the quota. If not, an exception
+ is raised.
+
+Arguments:
+
+ QuotaCharge - Supplies the quota amount to charge.
+
+ CurrentProcess - Supplies a pointer to the current process.
+
+Return Value:
+
+ TRUE if the quota was successfully charged, raises an exception
+ otherwise.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ ULONG NewPagefileValue;
+ PEPROCESS_QUOTA_BLOCK QuotaBlock;
+ KIRQL OldIrql;
+
+ QuotaBlock = CurrentProcess->QuotaBlock;
+
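+ //
+ // The process may be switched from the default quota block to a
+ // dedicated one at any time, so when charging against the default
+ // block the quota block pointer is rechecked after the default
+ // block's lock is acquired.
+ //
+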
+retry_charge:
+ if ( QuotaBlock != &PspDefaultQuotaBlock) {
+ ExAcquireFastLock (&QuotaBlock->QuotaLock,&OldIrql);
+do_charge:
+ NewPagefileValue = QuotaBlock->PagefileUsage + QuotaCharge;
+
+ if (NewPagefileValue > QuotaBlock->PagefileLimit) {
+ ExRaiseStatus (STATUS_PAGEFILE_QUOTA_EXCEEDED);
+ }
+
+ QuotaBlock->PagefileUsage = NewPagefileValue;
+
+ if (NewPagefileValue > QuotaBlock->PeakPagefileUsage) {
+ QuotaBlock->PeakPagefileUsage = NewPagefileValue;
+ }
+
+ NewPagefileValue = CurrentProcess->PagefileUsage + QuotaCharge;
+ CurrentProcess->PagefileUsage = NewPagefileValue;
+
+ if (NewPagefileValue > CurrentProcess->PeakPagefileUsage) {
+ CurrentProcess->PeakPagefileUsage = NewPagefileValue;
+ }
+ ExReleaseFastLock (&QuotaBlock->QuotaLock,OldIrql);
+ } else {
+ ExAcquireFastLock (&PspDefaultQuotaBlock.QuotaLock,&OldIrql);
+
+ if ( (QuotaBlock = CurrentProcess->QuotaBlock) != &PspDefaultQuotaBlock) {
+ ExReleaseFastLock(&PspDefaultQuotaBlock.QuotaLock,OldIrql);
+ goto retry_charge;
+ }
+ goto do_charge;
+ }
+ return TRUE;
+}
+
+VOID
+MiReturnPageFileQuota (
+ IN ULONG QuotaCharge,
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ This routine releases page file quota.
+
+Arguments:
+
+ QuotaCharge - Supplies the quota amount to charge.
+
+ CurrentProcess - Supplies a pointer to the current process.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+
+ PEPROCESS_QUOTA_BLOCK QuotaBlock;
+ KIRQL OldIrql;
+
+ QuotaBlock = CurrentProcess->QuotaBlock;
+
+retry_return:
+ if ( QuotaBlock != &PspDefaultQuotaBlock) {
+ ExAcquireFastLock (&QuotaBlock->QuotaLock, &OldIrql);
+do_return:
+ ASSERT (CurrentProcess->PagefileUsage >= QuotaCharge);
+ CurrentProcess->PagefileUsage -= QuotaCharge;
+
+ ASSERT (QuotaBlock->PagefileUsage >= QuotaCharge);
+ QuotaBlock->PagefileUsage -= QuotaCharge;
+ ExReleaseFastLock(&QuotaBlock->QuotaLock,OldIrql);
+ } else {
+ ExAcquireFastLock (&PspDefaultQuotaBlock.QuotaLock, &OldIrql);
+ if ( (QuotaBlock = CurrentProcess->QuotaBlock) != &PspDefaultQuotaBlock ) {
+ ExReleaseFastLock(&PspDefaultQuotaBlock.QuotaLock,OldIrql);
+ goto retry_return;
+ }
+ goto do_return;
+ }
+ return;
+}
+
+VOID
+FASTCALL
+MiChargeCommitment (
+ IN ULONG QuotaCharge,
+ IN PEPROCESS Process OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to ensure the system has sufficient page file
+ space remaining. If not, an exception is raised.
+
+Arguments:
+
+ QuotaCharge - Supplies the quota amount to charge.
+
+ Process - Optionally supplies the current process IF AND ONLY IF
+ the working set mutex is held. If the paging file
+ is being extended, the working set mutex is released if
+ this is non-null.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ ULONG NewCommitValue;
+ MMPAGE_FILE_EXPANSION PageExtend;
+ NTSTATUS status;
+ PLIST_ENTRY NextEntry;
+
+ ASSERT (QuotaCharge < 0x100000);
+
+ ExAcquireFastLock (&MmChargeCommitmentLock, &OldIrql);
+
+ NewCommitValue = MmTotalCommittedPages + QuotaCharge;
+
+ while (NewCommitValue > MmTotalCommitLimit) {
+
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+
+ if (Process != NULL) {
+ UNLOCK_WS (Process);
+ }
+ //
+ // Queue a message to the segment dereferencing / pagefile extending
+ // thread to see if the page file can be extended. This is done
+ // in the context of a system thread due to mutexes which may
+ // currently be held.
+ //
+
+ PageExtend.RequestedExpansionSize = QuotaCharge;
+ PageExtend.Segment = NULL;
+ KeInitializeEvent (&PageExtend.Event, NotificationEvent, FALSE);
+
+ ExAcquireFastLock (&MmDereferenceSegmentHeader.Lock, &OldIrql);
+ InsertTailList ( &MmDereferenceSegmentHeader.ListHead,
+ &PageExtend.DereferenceList);
+ ExReleaseFastLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+
+ KeReleaseSemaphore (&MmDereferenceSegmentHeader.Semaphore, 0L, 1L, TRUE);
+
+ //
+ // Wait for the thread to extend the paging file, with a one second
+ // timeout for small requests and twenty seconds otherwise.
+ //
+
+ status = KeWaitForSingleObject (&PageExtend.Event,
+ Executive,
+ KernelMode,
+ FALSE,
+ (QuotaCharge < 10) ?
+ &MmOneSecond : &MmTwentySeconds);
+
+ if (status == STATUS_TIMEOUT) {
+
+ //
+ // The wait has timed out, if this request has not
+ // been processed, remove it from the list and check
+ // to see if we should allow this request to succeed.
+ // This prevents a deadlock between the file system
+ // trying to allocate memory in the FSP and the
+ // segment dereferencing thread trying to close a
+ // file object, and waiting in the file system.
+ //
+
+ //
+ // Check to see if this request is still in the list,
+ // and if so, remove it.
+ //
+
+ KdPrint(("MMQUOTA: wait timed out, page-extend= %lx, quota = %lx\n",
+ &PageExtend, QuotaCharge));
+
+ ExAcquireFastLock (&MmDereferenceSegmentHeader.Lock, &OldIrql);
+
+ NextEntry = MmDereferenceSegmentHeader.ListHead.Flink;
+
+ while (NextEntry != &MmDereferenceSegmentHeader.ListHead) {
+
+ //
+ // Check to see if this is the entry we are waiting for.
+ //
+
+ if (NextEntry == &PageExtend.DereferenceList) {
+
+ //
+ // Remove this entry.
+ //
+
+ RemoveEntryList (&PageExtend.DereferenceList);
+ ExReleaseFastLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+
+ if (Process != NULL) {
+ LOCK_WS (Process);
+ }
+
+ //
+ // If the quota is small enough, commit it; otherwise
+ // return an error.
+ //
+
+ if (QuotaCharge < MM_MAXIMUM_QUOTA_OVERCHARGE) {
+
+ //
+ // Try the can't expand routine, note that
+ // this could raise an exception.
+ //
+
+ MiChargeCommitmentCantExpand (QuotaCharge, FALSE);
+ } else {
+
+ //
+ // Put up a popup and grant an extension if
+ // possible.
+ //
+
+ MiCauseOverCommitPopup (QuotaCharge, MM_EXTEND_COMMIT);
+ }
+ return;
+ }
+ NextEntry = NextEntry->Flink;
+ }
+
+ ExReleaseFastLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+
+ //
+ // Entry is being processed, wait for completion.
+ //
+
+ KdPrint (("MMQUOTA: rewaiting...\n"));
+
+ KeWaitForSingleObject (&PageExtend.Event,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
+ }
+
+ if (Process != NULL) {
+ LOCK_WS (Process);
+ }
+
+ if (PageExtend.ActualExpansion == 0) {
+ MiCauseOverCommitPopup (QuotaCharge, MM_EXTEND_COMMIT);
+ return;
+ }
+
+ ExAcquireFastLock (&MmChargeCommitmentLock, &OldIrql);
+ NewCommitValue = MmTotalCommittedPages + QuotaCharge;
+ }
+
+ MmTotalCommittedPages = NewCommitValue;
+ if (MmTotalCommittedPages > MmPeakCommitment) {
+ MmPeakCommitment = MmTotalCommittedPages;
+ }
+
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+ return;
+}
+
+VOID
+FASTCALL
+MiChargeCommitmentCantExpand (
+ IN ULONG QuotaCharge,
+ IN ULONG MustSucceed
+ )
+
+/*++
+
+Routine Description:
+
+ This routine charges the specified commitment without expanding
+ the paging file and waiting for the expansion. The routine
+ determines if the paging file space is exhausted, and if so,
+ attempts to ascertain whether the paging file space could be
+ expanded.
+
+ If it appears the paging file space can't be expanded, it raises
+ an exception.
+
+Arguments:
+
+ QuotaCharge - Supplies the quota amount to charge.
+
+ MustSucceed - Supplies TRUE if the charge must succeed even though
+ the commit limit may be exceeded.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode, APCs disabled.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ ULONG NewCommitValue;
+ ULONG ExtendAmount;
+
+ ExAcquireFastLock (&MmChargeCommitmentLock, &OldIrql);
+
+ //
+ // If the overcommitment is bigger than 512 pages, don't extend.
+ //
+
+ NewCommitValue = MmTotalCommittedPages + QuotaCharge;
+
+ if (!MustSucceed) {
+ if (((LONG)((LONG)NewCommitValue - (LONG)MmTotalCommitLimit)) >
+ MM_DONT_EXTEND_SIZE) {
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+ ExRaiseStatus (STATUS_COMMITMENT_LIMIT);
+ }
+ }
+
+ ExtendAmount = NewCommitValue - MmTotalCommitLimit;
+ MmTotalCommittedPages = NewCommitValue;
+
+ if (NewCommitValue > (MmTotalCommitLimit + 20)) {
+
+ //
+ // Attempt to expand the paging file, but don't wait
+ // to see if it succeeds.
+ //
+
+ if (MmAttemptForCantExtend.InProgress != FALSE) {
+
+ //
+ // An expansion request is already in progress, assume
+ // this will succeed.
+ //
+
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+ return;
+ }
+
+ MmAttemptForCantExtend.InProgress = TRUE;
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+
+ //
+ // Queue a message to the segment dereferencing / pagefile extending
+ // thread to see if the page file can be extended. This is done
+ // in the context of a system thread due to mutexes which may
+ // currently be held.
+ //
+
+ if (QuotaCharge > ExtendAmount) {
+ ExtendAmount = QuotaCharge;
+ }
+
+ MmAttemptForCantExtend.RequestedExpansionSize = ExtendAmount;
+ ExAcquireFastLock (&MmDereferenceSegmentHeader.Lock, &OldIrql);
+ InsertTailList ( &MmDereferenceSegmentHeader.ListHead,
+ &MmAttemptForCantExtend.DereferenceList);
+ ExReleaseFastLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+
+ KeReleaseSemaphore (&MmDereferenceSegmentHeader.Semaphore, 0L, 1L, FALSE);
+
+ return;
+ }
+
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+ return;
+}
+
+VOID
+FASTCALL
+MiReturnCommitment (
+ IN ULONG QuotaCharge
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns previously charged commitment.
+
+Arguments:
+
+ QuotaCharge - Supplies the commitment amount to return.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ KIRQL OldIrql;
+
+ ExAcquireFastLock (&MmChargeCommitmentLock, &OldIrql);
+
+ ASSERT (MmTotalCommittedPages >= QuotaCharge);
+
+ MmTotalCommittedPages -= QuotaCharge;
+
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+ return;
+}
+
+ULONG
+MiCalculatePageCommitment (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PMMVAD Vad,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the range of pages from the starting address
+ up to and including the ending address and returns the commit charge
+ for the pages within the range.
+
+Arguments:
+
+ StartingAddress - Supplies the starting address of the range.
+
+ EndingAddress - Supplies the ending address of the range.
+
+ Vad - Supplies the virtual address descriptor which describes the range.
+
+ Process - Supplies the current process.
+
+Return Value:
+
+ Commitment charge for the range.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE PointerPde;
+ PMMPTE TempEnd;
+ ULONG NumberOfCommittedPages = 0;
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+
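+ //
+ // If the VAD commits its entire range, start from the full page
+ // count and subtract the explicitly decommitted pages; otherwise
+ // count the committed PTEs directly in the second loop below.
+ //
+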
+ if (Vad->u.VadFlags.MemCommit == 1) {
+
+ TempEnd = EndingAddress;
+
+ //
+ // All the pages are committed within this range.
+ //
+
+ NumberOfCommittedPages = BYTES_TO_PAGES ((ULONG)TempEnd -
+ (ULONG)StartingAddress);
+
+
+ //
+ // Examine the PTEs to determine how many pages are committed.
+ //
+
+ LastPte = MiGetPteAddress (TempEnd);
+
+ while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No PDE exists for the starting address, therefore the page
+ // is not committed.
+ //
+
+ PointerPde += 1;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ if (PointerPte > LastPte) {
+ goto DoneCommit;
+ }
+ }
+
+ while (PointerPte <= LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ //
+ // This is a PDE boundary, check to see if the entire
+ // PDE page exists.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ if (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No PDE exists for the starting address, check the VAD
+ // to see if the pages are not committed.
+ //
+
+ PointerPde += 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ //
+ // Check next page.
+ //
+
+ continue;
+ }
+ }
+
+ //
+ // The PDE exists, examine the PTE.
+ //
+
+ if (PointerPte->u.Long != 0) {
+
+ //
+ // Has this page been explicitly decommitted?
+ //
+
+ if (MiIsPteDecommittedPage (PointerPte)) {
+
+ //
+ // This page is decommitted, remove it from the count.
+ //
+
+ NumberOfCommittedPages -= 1;
+
+ }
+ }
+
+ PointerPte += 1;
+ }
+
+DoneCommit:
+
+ if (TempEnd == EndingAddress) {
+ return NumberOfCommittedPages;
+ }
+
+ }
+
+ //
+ // Examine non committed range.
+ //
+
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No PDE exists for the starting address, therefore the page
+ // is not committed.
+ //
+
+ PointerPde += 1;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ if (PointerPte > LastPte) {
+ return NumberOfCommittedPages;
+ }
+ }
+
+ while (PointerPte <= LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ //
+ // This is a PDE boundary, check to see if the entire
+ // PDE page exists.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ if (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No PDE exists for the starting address, check the VAD
+ // to see if the pages are not committed.
+ //
+
+ PointerPde += 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ //
+ // Check next page.
+ //
+
+ continue;
+ }
+ }
+
+ //
+ // The PDE exists, examine the PTE.
+ //
+
+ if ((PointerPte->u.Long != 0) &&
+ (!MiIsPteDecommittedPage (PointerPte))) {
+
+ //
+ // This page is committed, count it.
+ //
+
+ NumberOfCommittedPages += 1;
+ }
+
+ PointerPte += 1;
+ }
+
+ return NumberOfCommittedPages;
+}
+
+VOID
+MiReturnPageTablePageCommitment (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN PEPROCESS CurrentProcess,
+ IN PMMVAD PreviousVad,
+ IN PMMVAD NextVad
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns commitment for COMPLETE page table pages which
+ span the virtual address range. For example (assuming 4k pages),
+ if the StartingAddress = 64k and the EndingAddress = 5mb, no
+ page table charges would be freed as a complete page table page is
+ not covered by the range. However, if the StartingAddress was 4mb
+ and the EndingAddress was 9mb, 1 page table page would be freed.
+
+Arguments:
+
+ StartingAddress - Supplies the starting address of the range.
+
+ EndingAddress - Supplies the ending address of the range.
+
+ CurrentProcess - Supplies a pointer to the current process.
+
+ PreviousVad - Supplies a pointer to the previous VAD, NULL if none.
+
+ NextVad - Supplies a pointer to the next VAD, NULL if none.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock and AddressCreation mutexes
+ held.
+
+--*/
+
+{
+ ULONG NumberToClear;
+ LONG FirstPage;
+ LONG LastPage;
+ LONG PreviousPage;
+ LONG NextPage;
+
+ //
+ // Check to see if any page table pages would be freed.
+ //
+
+ ASSERT (StartingAddress != EndingAddress);
+
+ if (PreviousVad == NULL) {
+ PreviousPage = -1;
+ } else {
+ PreviousPage = MiGetPdeOffset (PreviousVad->EndingVa);
+ }
+
+ if (NextVad == NULL) {
+ NextPage = MiGetPdeOffset (MM_HIGHEST_USER_ADDRESS) + 1;
+ } else {
+ NextPage = MiGetPdeOffset (NextVad->StartingVa);
+ }
+
+ ASSERT (PreviousPage <= NextPage);
+
+ FirstPage = MiGetPdeOffset (StartingAddress);
+
+ LastPage = MiGetPdeOffset(EndingAddress);
+
+ if (PreviousPage == FirstPage) {
+
+ //
+ // A VAD is within the starting page table page.
+ //
+
+ FirstPage += 1;
+ }
+
+ if (NextPage == LastPage) {
+
+ //
+ // A VAD is within the ending page table page.
+ //
+
+ LastPage -= 1;
+ }
+
+ //
+ // Indicate that the page table page is not in use.
+ //
+
+ if (FirstPage > LastPage) {
+ return;
+ }
+
+ NumberToClear = 1 + LastPage - FirstPage;
+
+ while (FirstPage <= LastPage) {
+ ASSERT (MI_CHECK_BIT (MmWorkingSetList->CommittedPageTables,
+ FirstPage));
+ MI_CLEAR_BIT (MmWorkingSetList->CommittedPageTables, FirstPage);
+ FirstPage += 1;
+ }
+
+ MmWorkingSetList->NumberOfCommittedPageTables -= NumberToClear;
+ MiReturnCommitment (NumberToClear);
+ MiReturnPageFileQuota (NumberToClear, CurrentProcess);
+ CurrentProcess->CommitCharge -= NumberToClear;
+
+ return;
+}
+
+
+VOID
+MiCauseOverCommitPopup(
+ ULONG NumberOfPages,
+ IN ULONG Extension
+ )
+
+/*++
+
+Routine Description:
+
+ This function attempts to put up an over commit popup and extends
+ the commit limit. If the request is too large, or a popup is
+ already outstanding and none of the quota has been returned, it
+ raises STATUS_COMMITMENT_LIMIT instead.
+
+Arguments:
+
+ NumberOfPages - Supplies the number of pages being charged.
+
+ Extension - Supplies the number of pages to extend the commit limit by.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ ULONG MiOverCommitPending;
+
+ if (NumberOfPages > MM_COMMIT_POPUP_MAX) {
+ ExRaiseStatus (STATUS_COMMITMENT_LIMIT);
+ return;
+ }
+
+ MiOverCommitPending =
+ !IoRaiseInformationalHardError(STATUS_COMMITMENT_LIMIT, NULL, NULL);
+
+ ExAcquireFastLock (&MmChargeCommitmentLock, &OldIrql);
+
+ if (( MiOverCommitPending ) && (MiOverCommitCallCount > 0)) {
+
+ //
+ // There is already a popup outstanding and we have not
+ // returned any of the quota.
+ //
+
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+ ExRaiseStatus (STATUS_COMMITMENT_LIMIT);
+ return;
+ }
+
+ MiOverCommitCallCount += 1;
+
+ MmTotalCommitLimit += Extension;
+ MmExtendedCommit += Extension;
+ MmTotalCommittedPages += NumberOfPages;
+
+ if (MmTotalCommittedPages > MmPeakCommitment) {
+ MmPeakCommitment = MmTotalCommittedPages;
+ }
+
+ ExReleaseFastLock (&MmChargeCommitmentLock, OldIrql);
+
+ return;
+}
+
+
+ULONG MmTotalPagedPoolQuota;
+ULONG MmTotalNonPagedPoolQuota;
+
+BOOLEAN
+MmRaisePoolQuota(
+ IN POOL_TYPE PoolType,
+ IN ULONG OldQuotaLimit,
+ OUT PULONG NewQuotaLimit
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called (with a spinlock) whenever PS detects a quota
+ limit has been exceeded. The purpose of this function is to attempt to
+ increase the specified quota.
+
+Arguments:
+
+ PoolType - Supplies the pool type of the quota to be raised
+
+ OldQuotaLimit - Supplies the current quota limit for this pool type
+
+ NewQuotaLimit - Returns the new limit
+
+Return Value:
+
+ TRUE - The API succeeded and the quota limit was raised.
+
+ FALSE - We were unable to raise the quota limit.
+
+Environment:
+
+ Kernel mode, QUOTA SPIN LOCK HELD!!
+
+--*/
+
+{
+ ULONG Limit;
+
+ if (PoolType == PagedPool) {
+
+ //
+ // Check commit limit and make sure at least 1mb is available.
+ // Check to make sure 4mb of paged pool still exists.
+ //
+
+ if ((MmSizeOfPagedPoolInBytes >> PAGE_SHIFT) <
+ (MmAllocatedPagedPool + ((MMPAGED_QUOTA_CHECK) >> PAGE_SHIFT))) {
+
+ return FALSE;
+ }
+
+ MmTotalPagedPoolQuota += (MMPAGED_QUOTA_INCREASE);
+ *NewQuotaLimit = OldQuotaLimit + (MMPAGED_QUOTA_INCREASE);
+ return TRUE;
+
+ } else {
+
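+ //
+ // If at least 1mb of nonpaged pool remains unallocated, grant
+ // the increase without further checks.
+ //
+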
+ if ( MmAllocatedNonPagedPool + ((1*1024*1024) >> PAGE_SHIFT) < (MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT)) {
+ goto aok;
+ }
+
+ //
+ // Make sure 200 pages and 5mb of nonpaged pool expansion
+ // available. Raise quota by 64k.
+ //
+
+ if ((MmAvailablePages < 200) ||
+ (MmResidentAvailablePages < ((MMNONPAGED_QUOTA_CHECK) >> PAGE_SHIFT))) {
+
+ return FALSE;
+ }
+
+ if (MmAvailablePages > ((4*1024*1024) >> PAGE_SHIFT)) {
+ Limit = (1*1024*1024) >> PAGE_SHIFT;
+ } else {
+ Limit = (4*1024*1024) >> PAGE_SHIFT;
+ }
+
+ if ((MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT) <
+ (MmAllocatedNonPagedPool + Limit)) {
+
+ return FALSE;
+ }
+aok:
+ MmTotalNonPagedPoolQuota += (MMNONPAGED_QUOTA_INCREASE);
+ *NewQuotaLimit = OldQuotaLimit + (MMNONPAGED_QUOTA_INCREASE);
+ return TRUE;
+ }
+}
+
+
+VOID
+MmReturnPoolQuota(
+ IN POOL_TYPE PoolType,
+ IN ULONG ReturnedQuota
+ )
+
+/*++
+
+Routine Description:
+
+ Returns pool quota.
+
+Arguments:
+
+ PoolType - Supplies the pool type of the quota to be returned.
+
+ ReturnedQuota - Number of bytes returned.
+
+Return Value:
+
+ NONE.
+
+Environment:
+
+ Kernel mode, QUOTA SPIN LOCK HELD!!
+
+--*/
+
+{
+
+ if (PoolType == PagedPool) {
+ MmTotalPagedPoolQuota -= ReturnedQuota;
+ } else {
+ MmTotalNonPagedPoolQuota -= ReturnedQuota;
+ }
+
+ return;
+}
diff --git a/private/ntos/mm/mmsup.c b/private/ntos/mm/mmsup.c
new file mode 100644
index 000000000..b7e8cdecf
--- /dev/null
+++ b/private/ntos/mm/mmsup.c
@@ -0,0 +1,1160 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ mmsup.c
+
+Abstract:
+
+ This module contains various routines for miscellaneous support
+ operations for memory management.
+
+Author:
+
+ Lou Perazzoli (loup) 31-Aug-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+ULONG
+FASTCALL
+MiIsPteDecommittedPage (
+ IN PMMPTE PointerPte
+ )
+
+/*++
+
+Routine Description:
+
+ This function checks the contents of a PTE to determine if the
+ PTE is explicitly decommitted.
+
+ If the PTE is a prototype PTE and the protection is not in the
+ prototype PTE, the value FALSE is returned.
+
+Arguments:
+
+ PointerPte - Supplies a pointer to the PTE to examine.
+
+Return Value:
+
+ TRUE if the PTE is in the explicitly decommitted state.
+ FALSE if the PTE is not in the explicitly decommitted state.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock held.
+
+--*/
+
+{
+ MMPTE PteContents;
+
+ PteContents = *PointerPte;
+
+ //
+ // If the protection in the PTE is not decommitted, return FALSE.
+ //
+
+ if (PteContents.u.Soft.Protection != MM_DECOMMIT) {
+ return FALSE;
+ }
+
+ //
+ // Check to make sure the protection field is really being interpreted
+ // correctly.
+ //
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // The PTE is valid and therefore cannot be decommitted.
+ //
+
+ return FALSE;
+ }
+
+ if ((PteContents.u.Soft.Prototype == 1) &&
+ (PteContents.u.Soft.PageFileHigh != 0xFFFFF)) {
+
+ //
+ // The PTE's protection is not known as it is in
+ // prototype PTE format. Return FALSE.
+ //
+
+ return FALSE;
+ }
+
+ //
+ // It is a decommitted PTE.
+ //
+
+ return TRUE;
+}
+
+//
+// Data for MiIsProtectionCompatible.
+//
+
+ULONG MmCompatibleProtectionMask[8] = {
+ PAGE_NOACCESS,
+ PAGE_NOACCESS | PAGE_READONLY | PAGE_WRITECOPY,
+ PAGE_NOACCESS | PAGE_EXECUTE,
+ PAGE_NOACCESS | PAGE_READONLY | PAGE_WRITECOPY | PAGE_EXECUTE |
+ PAGE_EXECUTE_READ,
+ PAGE_NOACCESS | PAGE_READONLY | PAGE_WRITECOPY | PAGE_READWRITE,
+ PAGE_NOACCESS | PAGE_READONLY | PAGE_WRITECOPY,
+ PAGE_NOACCESS | PAGE_READONLY | PAGE_WRITECOPY | PAGE_READWRITE |
+ PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE |
+ PAGE_EXECUTE_WRITECOPY,
+ PAGE_NOACCESS | PAGE_READONLY | PAGE_WRITECOPY | PAGE_EXECUTE |
+ PAGE_EXECUTE_READ | PAGE_EXECUTE_WRITECOPY
+ };
+
+
+
+ULONG
+FASTCALL
+MiIsProtectionCompatible (
+ IN ULONG OldProtect,
+ IN ULONG NewProtect
+ )
+
+/*++
+
+Routine Description:
+
+ This function takes two user supplied page protections and checks
+    to see if the new protection is compatible with the old protection.
+
+ protection compatible protections
+ NoAccess NoAccess
+ ReadOnly NoAccess, ReadOnly, ReadWriteCopy
+ ReadWriteCopy NoAccess, ReadOnly, ReadWriteCopy
+ ReadWrite NoAccess, ReadOnly, ReadWriteCopy, ReadWrite
+ Execute NoAccess, Execute
+ ExecuteRead NoAccess, ReadOnly, ReadWriteCopy, Execute, ExecuteRead,
+ ExecuteWriteCopy
+ ExecuteWrite NoAccess, ReadOnly, ReadWriteCopy, Execute, ExecuteRead,
+ ExecuteWriteCopy, ReadWrite, ExecuteWrite
+ ExecuteWriteCopy NoAccess, ReadOnly, ReadWriteCopy, Execute, ExecuteRead,
+ ExecuteWriteCopy
+
+Arguments:
+
+ OldProtect - Supplies the protection to be compatible with.
+
+ NewProtect - Supplies the protection to check out.
+
+
+Return Value:
+
+    Returns TRUE if the protection is compatible.
+
+Environment:
+
+ Kernel Mode.
+
+--*/
+
+{
+ ULONG Mask;
+ ULONG ProtectMask;
+
+ Mask = MiMakeProtectionMask (OldProtect) & 0x7;
+ ProtectMask = MmCompatibleProtectionMask[Mask] | PAGE_GUARD | PAGE_NOCACHE;
+
+ if ((ProtectMask | NewProtect) != ProtectMask) {
+ return FALSE;
+ }
+ return TRUE;
+}
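+
+#if DBG
+
+//
+// Illustrative self-check of the table above (a sketch for documentation
+// only; nothing in the memory manager calls this).  Per the table, a
+// copy-on-write mapping may be narrowed to read only but never widened to
+// read write, and the PAGE_GUARD / PAGE_NOCACHE modifiers are always
+// considered compatible.
+//
+
+VOID
+MiCompatibleProtectionExample (
+    VOID
+    )
+{
+    ASSERT (MiIsProtectionCompatible (PAGE_WRITECOPY, PAGE_READONLY));
+    ASSERT (MiIsProtectionCompatible (PAGE_WRITECOPY, PAGE_READWRITE) == FALSE);
+    ASSERT (MiIsProtectionCompatible (PAGE_READWRITE,
+                                      PAGE_READONLY | PAGE_GUARD));
+}
+#endif //DBG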
+
+
+//
+// Protection data for MiMakeProtectionMask
+//
+
+CCHAR MmUserProtectionToMask1[16] = {
+ 0,
+ MM_NOACCESS,
+ MM_READONLY,
+ -1,
+ MM_READWRITE,
+ -1,
+ -1,
+ -1,
+ MM_WRITECOPY,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1 };
+
+CCHAR MmUserProtectionToMask2[16] = {
+ 0,
+ MM_EXECUTE,
+ MM_EXECUTE_READ,
+ -1,
+ MM_EXECUTE_READWRITE,
+ -1,
+ -1,
+ -1,
+ MM_EXECUTE_WRITECOPY,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1 };
+
+
+ULONG
+FASTCALL
+MiMakeProtectionMask (
+ IN ULONG Protect
+ )
+
+/*++
+
+Routine Description:
+
+ This function takes a user supplied protection and converts it
+ into a 5-bit protection code for the PTE.
+
+Arguments:
+
+ Protect - Supplies the protection.
+
+
+Return Value:
+
+ Returns the protection code for use in the PTE.
+ An exception is raised if the user supplied protection is invalid.
+
+Environment:
+
+ Kernel Mode.
+
+--*/
+
+{
+ ULONG Field1;
+ ULONG Field2;
+ ULONG ProtectCode;
+
+ if (Protect >= (PAGE_NOCACHE * 2)) {
+ ExRaiseStatus (STATUS_INVALID_PAGE_PROTECTION);
+ }
+
+ Field1 = Protect & 0xF;
+ Field2 = (Protect >> 4) & 0xF;
+
+ //
+ // Make sure at least one field is set.
+ //
+
+ if (Field1 == 0) {
+ if (Field2 == 0) {
+
+ //
+ // Both fields are zero, raise exception.
+ //
+
+ ExRaiseStatus (STATUS_INVALID_PAGE_PROTECTION);
+ return 0;
+ }
+ ProtectCode = MmUserProtectionToMask2[Field2];
+ } else {
+ if (Field2 != 0) {
+ //
+ // Both fields are non-zero, raise exception.
+ //
+
+ ExRaiseStatus (STATUS_INVALID_PAGE_PROTECTION);
+ return 0;
+ }
+ ProtectCode = MmUserProtectionToMask1[Field1];
+ }
+
+ if (ProtectCode == -1) {
+ ExRaiseStatus (STATUS_INVALID_PAGE_PROTECTION);
+ }
+
+ if (Protect & PAGE_GUARD) {
+ if (ProtectCode == MM_NOACCESS) {
+
+ //
+            // Invalid protection, no access and guard page.
+ //
+
+ ExRaiseStatus (STATUS_INVALID_PAGE_PROTECTION);
+ }
+
+ ProtectCode |= MM_GUARD_PAGE;
+ }
+
+ if (Protect & PAGE_NOCACHE) {
+
+ if (ProtectCode == MM_NOACCESS) {
+
+ //
+ // Invalid protection, no access and no cache.
+ //
+
+ ExRaiseStatus (STATUS_INVALID_PAGE_PROTECTION);
+ }
+
+ ProtectCode |= MM_NOCACHE;
+ }
+
+ return ProtectCode;
+}
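+
+#if DBG
+
+//
+// Illustrative self-check (a sketch for documentation only; nothing in
+// the memory manager calls this).  The two four-bit fields of the Win32
+// protection are mutually exclusive and the PAGE_GUARD modifier is
+// folded into the 5-bit protection code.
+//
+
+VOID
+MiProtectionMaskExample (
+    VOID
+    )
+{
+    ASSERT (MiMakeProtectionMask (PAGE_READWRITE) == MM_READWRITE);
+    ASSERT (MiMakeProtectionMask (PAGE_EXECUTE_READ) == MM_EXECUTE_READ);
+    ASSERT (MiMakeProtectionMask (PAGE_READONLY | PAGE_GUARD) ==
+                                        (MM_READONLY | MM_GUARD_PAGE));
+}
+#endif //DBG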
+
+ULONG
+MiDoesPdeExistAndMakeValid (
+ IN PMMPTE PointerPde,
+ IN PEPROCESS TargetProcess,
+ IN ULONG PfnMutexHeld
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the specified Page Directory Entry to determine
+ if the page table page mapped by the PDE exists.
+
+    If the page table page exists and is not currently in memory, the
+    working set mutex and, if held, the PFN mutex are released and the
+    page table page is faulted into the working set.  The mutexes are
+    then reacquired.
+
+    If the PDE exists, the function returns TRUE.
+
+Arguments:
+
+ PointerPde - Supplies a pointer to the PDE to examine and potentially
+ bring into the working set.
+
+ TargetProcess - Supplies a pointer to the current process.
+
+ PfnMutexHeld - Supplies the value TRUE if the PFN mutex is held, FALSE
+ otherwise.
+
+Return Value:
+
+ TRUE if the PDE exists, FALSE if the PDE is zero.
+
+Environment:
+
+    Kernel mode, APCs disabled, WorkingSetLock held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ KIRQL OldIrql = APC_LEVEL;
+
+ if (PointerPde->u.Long == 0) {
+
+ //
+        // This page directory entry doesn't exist, return FALSE.
+ //
+
+ return FALSE;
+ }
+
+ if (PointerPde->u.Hard.Valid == 1) {
+
+ //
+ // Already valid.
+ //
+
+ return TRUE;
+ }
+
+ //
+ // Page directory entry exists, it is either valid, in transition
+ // or in the paging file. Fault it in.
+ //
+
+ if (PfnMutexHeld) {
+ UNLOCK_PFN (OldIrql);
+ }
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ MiMakeSystemAddressValid (PointerPte, TargetProcess);
+
+ if (PfnMutexHeld) {
+ LOCK_PFN (OldIrql);
+ }
+ return TRUE;
+}
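+
+//
+// Typical caller sketch (the names are illustrative): routines which walk
+// a range of user address space use this check to cheaply skip page
+// tables that were never materialized.
+//
+//     PointerPde = MiGetPdeAddress (Va);
+//     if (!MiDoesPdeExistAndMakeValid (PointerPde, Process, FALSE)) {
+//
+//         //
+//         // No page table page here - advance Va to the start of the
+//         // range mapped by the next page directory entry and continue.
+//         //
+//     }
+//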
+
+ULONG
+MiMakePdeExistAndMakeValid (
+ IN PMMPTE PointerPde,
+ IN PEPROCESS TargetProcess,
+ IN ULONG PfnMutexHeld
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the specified Page Directory Entry to determine
+ if the page table page mapped by the PDE exists.
+
+    If the page table page exists and is not currently in memory, the
+    working set mutex and, if held, the PFN mutex are released and the
+    page table page is faulted into the working set.  The mutexes are
+    then reacquired.
+
+    If the PDE exists, the function returns TRUE.
+
+    If the PDE does not exist, a zero filled PTE is created and it
+    too is brought into the working set.  In this case the return
+    value is FALSE.
+
+Arguments:
+
+    PointerPde - Supplies a pointer to the PDE to examine and bring
+                 into the working set.
+
+ TargetProcess - Supplies a pointer to the current process.
+
+ PfnMutexHeld - Supplies the value TRUE if the PFN mutex is held, FALSE
+ otherwise.
+
+Return Value:
+
+ TRUE if the PDE exists, FALSE if the PDE was created.
+
+Environment:
+
+    Kernel mode, APCs disabled, WorkingSetLock held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ KIRQL OldIrql = APC_LEVEL;
+ ULONG ReturnValue;
+
+ if (PointerPde->u.Hard.Valid == 1) {
+
+ //
+ // Already valid.
+ //
+
+ return TRUE;
+ }
+
+ if (PointerPde->u.Long == 0) {
+ ReturnValue = FALSE;
+ } else {
+ ReturnValue = TRUE;
+ }
+
+ //
+    // Page directory entry not valid, make it valid.
+ //
+
+ if (PfnMutexHeld) {
+ UNLOCK_PFN (OldIrql);
+ }
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ //
+ // Fault it in.
+ //
+
+ MiMakeSystemAddressValid (PointerPte, TargetProcess);
+
+ ASSERT (PointerPde->u.Hard.Valid == 1);
+
+ if (PfnMutexHeld) {
+ LOCK_PFN (OldIrql);
+ }
+ return ReturnValue;
+}
+
+ULONG
+FASTCALL
+MiMakeSystemAddressValid (
+ IN PVOID VirtualAddress,
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to see if the virtual address is valid, and if
+ not makes it valid.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to make valid.
+
+ CurrentProcess - Supplies a pointer to the current process.
+
+Return Value:
+
+ Returns TRUE if lock released and wait performed, FALSE otherwise.
+
+Environment:
+
+ Kernel mode, APCs disabled, WorkingSetLock held.
+
+--*/
+
+{
+ NTSTATUS status;
+ ULONG Waited = FALSE;
+
+ ASSERT (VirtualAddress > MM_HIGHEST_USER_ADDRESS);
+
+ ASSERT ((VirtualAddress < MM_PAGED_POOL_START) ||
+ (VirtualAddress > MmPagedPoolEnd));
+
+ while (!MmIsAddressValid(VirtualAddress)) {
+
+ //
+ // The virtual address is not present. Release
+ // the working set mutex and fault it in.
+ //
+
+ UNLOCK_WS (CurrentProcess);
+
+ status = MmAccessFault (FALSE, VirtualAddress, KernelMode);
+ if (!NT_SUCCESS(status)) {
+ KdPrint (("MM:page fault status %lx\n",status));
+ KeBugCheckEx (KERNEL_DATA_INPAGE_ERROR,
+ 1,
+ (ULONG)status,
+ (ULONG)CurrentProcess,
+ (ULONG)VirtualAddress);
+
+ return FALSE;
+ }
+
+ LOCK_WS (CurrentProcess);
+
+ Waited = TRUE;
+ }
+
+ return Waited;
+}
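+
+//
+// Callers of the MiMakeSystemAddressValid family must observe the return
+// value: TRUE means the working set mutex was released and reacquired, so
+// any PTE contents captured before the call may be stale.  A typical
+// caller pattern (sketch only, the names are illustrative):
+//
+//     PteContents = *PointerPte;
+//     if (MiMakeSystemAddressValid (PointerPte, CurrentProcess)) {
+//
+//         //
+//         // The mutex was released - recapture the PTE contents.
+//         //
+//
+//         PteContents = *PointerPte;
+//     }
+//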
+
+
+ULONG
+FASTCALL
+MiMakeSystemAddressValidPfnWs (
+ IN PVOID VirtualAddress,
+ IN PEPROCESS CurrentProcess OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to see if the virtual address is valid, and if
+ not makes it valid.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to make valid.
+
+    CurrentProcess - Supplies a pointer to the current process; if the
+                     working set lock is not held, this value is NULL.
+
+Return Value:
+
+ Returns TRUE if lock released and wait performed, FALSE otherwise.
+
+Environment:
+
+ Kernel mode, APCs disabled, PFN lock held, working set lock held
+ if CurrentProcess != NULL.
+
+--*/
+
+{
+ NTSTATUS status;
+ ULONG Waited = FALSE;
+ KIRQL OldIrql = APC_LEVEL;
+
+ ASSERT (VirtualAddress > MM_HIGHEST_USER_ADDRESS);
+
+ while (!MmIsAddressValid(VirtualAddress)) {
+
+ //
+        // The virtual address is not present.  Release the PFN lock
+        // (and the working set mutex, if held) and fault the page in.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ if (CurrentProcess != NULL) {
+ UNLOCK_WS (CurrentProcess);
+ }
+ status = MmAccessFault (FALSE, VirtualAddress, KernelMode);
+ if (!NT_SUCCESS(status)) {
+ KdPrint (("MM:page fault status %lx\n",status));
+ KeBugCheckEx (KERNEL_DATA_INPAGE_ERROR,
+ 2,
+ (ULONG)status,
+ (ULONG)CurrentProcess,
+ (ULONG)VirtualAddress);
+ return FALSE;
+ }
+ if (CurrentProcess != NULL) {
+ LOCK_WS (CurrentProcess);
+ }
+ LOCK_PFN (OldIrql);
+
+ Waited = TRUE;
+ }
+ return Waited;
+}
+
+ULONG
+FASTCALL
+MiMakeSystemAddressValidPfn (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to see if the virtual address is valid, and if
+ not makes it valid.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to make valid.
+
+Return Value:
+
+ Returns TRUE if lock released and wait performed, FALSE otherwise.
+
+Environment:
+
+ Kernel mode, APCs disabled, only the PFN Lock held.
+
+--*/
+
+{
+ NTSTATUS status;
+ KIRQL OldIrql = APC_LEVEL;
+
+ ULONG Waited = FALSE;
+
+ ASSERT (VirtualAddress > MM_HIGHEST_USER_ADDRESS);
+
+ while (!MmIsAddressValid(VirtualAddress)) {
+
+ //
+        // The virtual address is not present.  Release
+        // the PFN lock and fault the page in.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ status = MmAccessFault (FALSE, VirtualAddress, KernelMode);
+ if (!NT_SUCCESS(status)) {
+ KdPrint (("MM:page fault status %lx\n",status));
+ KeBugCheckEx (KERNEL_DATA_INPAGE_ERROR,
+ 3,
+ (ULONG)status,
+ (ULONG)VirtualAddress,
+ 0);
+ return FALSE;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ Waited = TRUE;
+ }
+
+ return Waited;
+}
+
+ULONG
+FASTCALL
+MiLockPagedAddress (
+ IN PVOID VirtualAddress,
+ IN ULONG PfnLockHeld
+ )
+
+/*++
+
+Routine Description:
+
+    This routine makes the specified paged pool address valid, faulting
+    it in if necessary, and locks the underlying physical page by
+    incrementing its reference count.
+
+Arguments:
+
+    VirtualAddress - Supplies the paged pool virtual address to lock.
+
+    PfnLockHeld - Supplies TRUE if the caller already holds the PFN lock,
+                  FALSE otherwise.
+
+Return Value:
+
+ Returns TRUE if lock released and wait performed, FALSE otherwise.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+ ULONG Waited = FALSE;
+
+ PointerPte = MiGetPteAddress(VirtualAddress);
+
+ //
+ // The address must be within paged pool.
+ //
+
+ if (PfnLockHeld == FALSE) {
+ LOCK_PFN2 (OldIrql);
+ }
+
+ if (PointerPte->u.Hard.Valid == 0) {
+
+ Waited = MiMakeSystemAddressValidPfn (
+ MiGetVirtualAddressMappedByPte(PointerPte));
+
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ if (PfnLockHeld == FALSE) {
+ UNLOCK_PFN2 (OldIrql);
+ }
+ return Waited;
+}
+
+
+VOID
+FASTCALL
+MiUnlockPagedAddress (
+ IN PVOID VirtualAddress,
+ IN ULONG PfnLockHeld
+ )
+
+/*++
+
+Routine Description:
+
+    This routine unlocks a page previously locked with MiLockPagedAddress
+    by decrementing the reference count taken on the physical page.
+
+Arguments:
+
+    VirtualAddress - Supplies the virtual address whose page is to be
+                     unlocked.
+
+    PfnLockHeld - Supplies TRUE if the caller already holds the PFN lock,
+                  FALSE otherwise.
+
+
+Return Value:
+
+ None.
+
+Environment:
+
+    Kernel mode.  The PfnLockHeld argument indicates whether the caller
+    already holds the PFN lock.
+
+--*/
+
+{
+
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+
+ PointerPte = MiGetPteAddress(VirtualAddress);
+
+ //
+ // Address must be within paged pool.
+ //
+
+ if (PfnLockHeld == FALSE) {
+ LOCK_PFN2 (OldIrql);
+ }
+#if DBG
+ { PMMPFN Pfn;
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ ASSERT (Pfn->u3.e2.ReferenceCount > 1);
+ }
+#endif //DBG
+
+ MiDecrementReferenceCount (PointerPte->u.Hard.PageFrameNumber);
+
+ if (PfnLockHeld == FALSE) {
+ UNLOCK_PFN2 (OldIrql);
+ }
+ return;
+}
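+
+//
+// MiLockPagedAddress and MiUnlockPagedAddress bracket accesses to paged
+// pool structures which must stay resident while a lock that prevents
+// page faults is held.  Sketch only; the structure name is illustrative:
+//
+//     MiLockPagedAddress (PagedStructure, FALSE);
+//
+//     //
+//     // The page backing PagedStructure cannot be repaged until the
+//     // reference count taken above is returned.
+//     //
+//
+//     MiUnlockPagedAddress (PagedStructure, FALSE);
+//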
+
+VOID
+FASTCALL
+MiZeroPhysicalPage (
+ IN ULONG PageFrameIndex,
+ IN ULONG PageColor
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure maps the specified physical page into hyper space
+ and fills the page with zeros.
+
+Arguments:
+
+    PageFrameIndex - Supplies the physical page number to fill with
+                     zeroes.
+
+    PageColor - Supplies the color of the page (used on machines with
+                virtually indexed caches).
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PULONG va;
+ KIRQL OldIrql;
+
+#if defined(MIPS) || defined(_ALPHA_)
+ HalZeroPage((PVOID)((PageColor & MM_COLOR_MASK) << PAGE_SHIFT),
+ (PVOID)((PageColor & MM_COLOR_MASK) << PAGE_SHIFT),
+ PageFrameIndex);
+#elif defined(_PPC_)
+ KeZeroPage(PageFrameIndex);
+#else
+ va = (PULONG)MiMapPageInHyperSpace (PageFrameIndex, &OldIrql);
+
+ RtlZeroMemory (va, PAGE_SIZE);
+
+ MiUnmapPageInHyperSpace (OldIrql);
+#endif //MIPS || ALPHA
+
+ return;
+}
+
+VOID
+FASTCALL
+MiRestoreTransitionPte (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure restores the original contents into the PTE (which could
+ be a prototype PTE) referred to by the PFN database for the specified
+ physical page. It also updates all necessary data structures to
+    reflect the fact that the referenced PTE is no longer in transition.
+
+ The physical address of the referenced PTE is mapped into hyper space
+ of the current process and the PTE is then updated.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number which refers to a
+ transition PTE.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPTE PointerPte;
+ PSUBSECTION Subsection;
+ PCONTROL_AREA ControlArea;
+ KIRQL OldIrql = 99;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ ASSERT (Pfn1->u3.e1.PageLocation == StandbyPageList);
+
+ if (Pfn1->u3.e1.PrototypePte) {
+
+ if (MmIsAddressValid (Pfn1->PteAddress)) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+
+ //
+ // The page containing the prototype PTE is not valid,
+ // map the page into hyperspace and reference it that way.
+ //
+
+ PointerPte = MiMapPageInHyperSpace (Pfn1->PteFrame, &OldIrql);
+ PointerPte = (PMMPTE)((PCHAR)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ ASSERT ((PointerPte->u.Trans.PageFrameNumber == PageFrameIndex) &&
+ (PointerPte->u.Hard.Valid == 0));
+
+ //
+ // This page is referenced by a prototype PTE. The
+ // segment structures need to be updated when the page
+ // is removed from the transition state.
+ //
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype) {
+
+ //
+ // The prototype PTE is in subsection format, calculate the
+ // address of the control area for the subsection and decrement
+ // the number of PFN references to the control area.
+ //
+ // Calculate address of subsection for this prototype PTE.
+ //
+
+ Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ ControlArea = Subsection->ControlArea;
+ ControlArea->NumberOfPfnReferences -= 1;
+ ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
+
+ MiCheckForControlAreaDeletion (ControlArea);
+ }
+
+ } else {
+
+ //
+ // The page points to a page table page which may not be
+        // for the current process.  Map the page into hyperspace and
+        // reference it through hyperspace.  If the page resides in
+ // system space, it does not need to be mapped as all PTEs for
+ // system space must be resident.
+ //
+
+ PointerPte = Pfn1->PteAddress;
+ if (PointerPte < MiGetPteAddress (MM_SYSTEM_SPACE_START)) {
+ PointerPte = MiMapPageInHyperSpace (Pfn1->PteFrame, &OldIrql);
+ PointerPte = (PMMPTE)((PCHAR)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+ ASSERT ((PointerPte->u.Trans.PageFrameNumber == PageFrameIndex) &&
+ (PointerPte->u.Hard.Valid == 0));
+ }
+
+ ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
+ ASSERT (!((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Transition == 1)));
+
+ *PointerPte = Pfn1->OriginalPte;
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+
+ //
+ // The PTE has been restored to its original contents and is
+ // no longer in transition. Decrement the share count on
+ // the page table page which contains the PTE.
+ //
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ return;
+}
+
+PSUBSECTION
+MiGetSubsectionAndProtoFromPte (
+ IN PMMPTE PointerPte,
+ OUT PMMPTE *ProtoPte,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the contents of the supplied PTE (which must
+ map a page within a section) and determines the address of the
+ subsection in which the PTE is contained.
+
+Arguments:
+
+ PointerPte - Supplies a pointer to the PTE.
+
+ ProtoPte - Supplies a pointer to a PMMPTE which receives the
+ address of the prototype PTE which is mapped by the supplied
+ PointerPte.
+
+ Process - Supplies a pointer to the current process.
+
+Return Value:
+
+ Returns the pointer to the subsection for this PTE.
+
+Environment:
+
+ Kernel mode - Must be holding the PFN database lock and
+ working set mutex with APC's disabled.
+
+--*/
+
+{
+ PMMPTE PointerProto;
+ PMMPFN Pfn1;
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ *ProtoPte = Pfn1->PteAddress;
+ return MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ }
+
+ PointerProto = MiPteToProto (PointerPte);
+ *ProtoPte = PointerProto;
+
+ MiMakeSystemAddressValidPfnWs (PointerProto, Process);
+
+ if (PointerProto->u.Hard.Valid == 1) {
+ //
+ // Prototype Pte is valid.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PointerProto->u.Hard.PageFrameNumber);
+ return MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ }
+
+ if ((PointerProto->u.Soft.Transition == 1) &&
+ (PointerProto->u.Soft.Prototype == 0)) {
+
+ //
+ // Prototype Pte is in transition.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PointerProto->u.Trans.PageFrameNumber);
+ return MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ }
+
+ ASSERT (PointerProto->u.Soft.Prototype == 1);
+ return MiGetSubsectionAddress (PointerProto);
+}
+
+BOOLEAN
+MmIsNonPagedSystemAddressValid (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ For a given virtual address this function returns TRUE if the address
+ is within the nonpagable portion of the system's address space,
+ FALSE otherwise.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ TRUE if the address is within the nonpagable portion of the system
+ address space, FALSE otherwise.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ //
+    // Return TRUE if the address is within the nonpagable portion
+ // of the system. Check limits for paged pool and if not within
+ // those limits, return TRUE.
+ //
+
+ if ((VirtualAddress >= MmPagedPoolStart) &&
+ (VirtualAddress <= MmPagedPoolEnd)) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
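+
+//
+// Usage note (sketch; Buffer is illustrative): because the check above is
+// purely a range test against paged pool, a TRUE return only guarantees
+// the address does not lie within paged pool, not that it currently maps
+// a resident page:
+//
+//     if (MmIsNonPagedSystemAddressValid (Buffer)) {
+//         ... Buffer is outside the pagable pool range ...
+//     }
+//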
+
+BOOLEAN
+MmIsSystemAddressAccessable (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ For a given virtual address this function returns TRUE if the address
+    is accessible without an access violation (it may incur a page fault).
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+    TRUE if the address is accessible without an access violation.
+ FALSE otherwise.
+
+Environment:
+
+ Kernel mode. APC_LEVEL or below.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+
+ if (!MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) {
+ PointerPte = MiGetPdeAddress (VirtualAddress);
+ if ((PointerPte->u.Long == MM_ZERO_PTE) ||
+ (PointerPte->u.Long == MM_ZERO_KERNEL_PTE) ||
+ (PointerPte->u.Soft.Protection == 0)) {
+ return FALSE;
+ }
+ PointerPte = MiGetPteAddress (VirtualAddress);
+ if ((PointerPte->u.Long == MM_ZERO_PTE) ||
+ (PointerPte->u.Long == MM_ZERO_KERNEL_PTE) ||
+ (PointerPte->u.Soft.Protection == 0)) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
diff --git a/private/ntos/mm/modwrite.c b/private/ntos/mm/modwrite.c
new file mode 100644
index 000000000..926e3735c
--- /dev/null
+++ b/private/ntos/mm/modwrite.c
@@ -0,0 +1,4025 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ modwrite.c
+
+Abstract:
+
+ This module contains the modified page writer for memory management.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Jun-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+typedef struct _MMWORK_CONTEXT {
+ LARGE_INTEGER Size;
+ NTSTATUS Status;
+ KEVENT Event;
+} MMWORK_CONTEXT, *PMMWORK_CONTEXT;
+
+typedef struct _MM_WRITE_CLUSTER {
+ ULONG Count;
+ ULONG StartIndex;
+ ULONG Cluster[2 * (MM_MAXIMUM_DISK_IO_SIZE / PAGE_SIZE) + 1];
+} MM_WRITE_CLUSTER, *PMM_WRITE_CLUSTER;
+
+ULONG MmWriteAllModifiedPages;
+
+NTSTATUS
+MiCheckForCrashDump (
+ PFILE_OBJECT File,
+ IN ULONG FileNumber
+ );
+
+VOID
+MiCrashDumpWorker (
+ IN PVOID Context
+ );
+
+VOID
+MiClusterWritePages (
+ IN PMMPFN Pfn1,
+ IN ULONG PageFrameIndex,
+ IN PMM_WRITE_CLUSTER WriteCluster,
+ IN ULONG Size
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtCreatePagingFile)
+#pragma alloc_text(PAGE,MmGetPageFileInformation)
+#pragma alloc_text(PAGE,MiModifiedPageWriter)
+#pragma alloc_text(PAGE,MiCheckForCrashDump)
+#pragma alloc_text(PAGE,MmGetCrashDumpInformation)
+#pragma alloc_text(PAGE,MiCrashDumpWorker)
+#pragma alloc_text(PAGE,MiFlushAllPages)
+#endif
+
+
+PSECTION MmCrashDumpSection;
+
+extern POBJECT_TYPE IoFileObjectType;
+extern HANDLE PspInitialSystemProcessHandle;
+
+extern ULONG MmExtendedCommit;
+
+LIST_ENTRY MmMappedPageWriterList;
+
+KEVENT MmMappedPageWriterEvent;
+
+KEVENT MmMappedFileIoComplete;
+
+ULONG MmSystemShutdown;
+
+ULONG MmOverCommit2;
+
+ULONG MmPageFileFullExtend;
+
+ULONG MmPageFileFull;
+
+ULONG MmModNoWriteInsert;
+
+BOOLEAN MmSystemPageFileLocated;
+
+NTSTATUS
+MiCheckPageFileMapping (
+ IN PFILE_OBJECT File
+ );
+
+VOID
+MiInsertPageFileInList (
+ VOID
+ );
+
+VOID
+MiGatherMappedPages (
+ IN PMMPFN Pfn1,
+ IN ULONG PageFrameIndex
+ );
+
+VOID
+MiGatherPagefilePages (
+ IN PMMPFN Pfn1,
+ IN ULONG PageFrameIndex
+ );
+
+VOID
+MiPageFileFull (
+ VOID
+ );
+
+VOID
+MiCauseOverCommitPopup(
+ ULONG NumberOfPages,
+ ULONG Extension
+ );
+
+#if DBG
+ULONG MmPagingFileDebug[8192];
+#endif
+
+extern ULONG MmMoreThanEnoughFreePages;
+
+#define MINIMUM_PAGE_FILE_SIZE ((ULONG)(256*PAGE_SIZE))
+
+VOID
+MiModifiedPageWriterWorker (
+ VOID
+ );
+
+VOID
+MiMappedPageWriter (
+ IN PVOID StartContext
+ );
+
+ULONG
+MiAttemptPageFileExtension (
+ IN ULONG PageFileNumber,
+ IN ULONG ExtendSize,
+ IN ULONG Maximum
+ );
+
+
+NTSTATUS
+NtCreatePagingFile (
+ IN PUNICODE_STRING PageFileName,
+ IN PLARGE_INTEGER MinimumSize,
+ IN PLARGE_INTEGER MaximumSize,
+ IN ULONG Priority OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This routine opens the specified file, attempts to write a page
+    to the specified file, and creates the necessary structures to
+ use the file as a paging file.
+
+ If this file is the first paging file, the modified page writer
+ is started.
+
+ This system service requires the caller to have SeCreatePagefilePrivilege.
+
+Arguments:
+
+ PageFileName - Supplies the fully qualified file name.
+
+    MinimumSize - Supplies the starting size of the paging file.
+ This value is rounded up to the host page size.
+
+ MaximumSize - Supplies the maximum number of bytes to write to the file.
+ This value is rounded up to the host page size.
+
+ Priority - Supplies the relative priority of this paging file.
+
+Return Value:
+
+    Returns the status of the operation.
+
+--*/
+
+{
+ PFILE_OBJECT File;
+ NTSTATUS Status;
+ OBJECT_ATTRIBUTES PagingFileAttributes;
+ HANDLE FileHandle;
+ IO_STATUS_BLOCK IoStatus;
+ UNICODE_STRING CapturedName;
+ PWSTR CapturedBuffer = NULL;
+ LARGE_INTEGER CapturedMaximumSize;
+ LARGE_INTEGER CapturedMinimumSize;
+ FILE_END_OF_FILE_INFORMATION EndOfFileInformation;
+ KPROCESSOR_MODE PreviousMode;
+ BOOLEAN Attached = FALSE;
+ BOOLEAN HasPrivilege;
+ HANDLE SystemProcess;
+ FILE_FS_DEVICE_INFORMATION FileDeviceInfo;
+ ULONG ReturnedLength;
+ ULONG FinalStatus;
+ ULONG PageFileNumber;
+
+ DBG_UNREFERENCED_PARAMETER (Priority);
+
+ PAGED_CODE();
+
+ if (MmNumberOfPagingFiles == MAX_PAGE_FILES) {
+
+ //
+ // The maximum number of paging files is already in use.
+ //
+
+ Status = STATUS_TOO_MANY_PAGING_FILES;
+ goto ErrorReturn0;
+ }
+
+ PreviousMode = KeGetPreviousMode();
+
+ try {
+
+ if (PreviousMode != KernelMode) {
+
+ //
+ // I allowed anyone to create a page file. (markl 2/10/93)
+ //
+ // I put the privilege check back in as per bug 6919 for
+ // Daytona Beta II.
+ //
+
+ //
+ // Make sure the caller has the proper privilege to make
+ // this call.
+ //
+ HasPrivilege = SeSinglePrivilegeCheck(
+ SeCreatePagefilePrivilege,
+ PreviousMode
+ );
+
+ if (!HasPrivilege) {
+
+ Status = STATUS_PRIVILEGE_NOT_HELD;
+ goto ErrorReturn0;
+ }
+ //
+ // Probe arguments.
+ //
+
+ ProbeForRead( PageFileName, sizeof(*PageFileName), sizeof(UCHAR));
+ ProbeForRead( MaximumSize, sizeof(LARGE_INTEGER), 4);
+ ProbeForRead( MinimumSize, sizeof(LARGE_INTEGER), 4);
+ }
+
+ //
+ // Capture arguments.
+ //
+
+ CapturedMinimumSize = *MinimumSize;
+
+ if ((CapturedMinimumSize.HighPart != 0) ||
+ (CapturedMinimumSize.LowPart < MINIMUM_PAGE_FILE_SIZE)) {
+ Status = STATUS_INVALID_PARAMETER_2;
+ goto ErrorReturn0;
+ }
+
+ CapturedMaximumSize = *MaximumSize;
+
+ if ((CapturedMaximumSize.HighPart != 0) ||
+ (CapturedMinimumSize.QuadPart > CapturedMaximumSize.QuadPart)) {
+ Status = STATUS_INVALID_PARAMETER_3;
+ goto ErrorReturn0;
+ }
+
+ CapturedName = *PageFileName;
+ CapturedName.MaximumLength = CapturedName.Length;
+
+ if ((CapturedName.Length == 0) ||
+ (CapturedName.Length > MAXIMUM_FILENAME_LENGTH )) {
+ Status = STATUS_OBJECT_NAME_INVALID;
+ goto ErrorReturn0;
+ }
+
+ if (PreviousMode != KernelMode) {
+ ProbeForRead (CapturedName.Buffer,
+ CapturedName.Length,
+ sizeof( UCHAR ));
+ }
+
+ CapturedBuffer = ExAllocatePoolWithTag (PagedPool,
+ (ULONG)CapturedName.Length,
+ ' mM');
+
+ if (CapturedBuffer == NULL) {
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn0;
+ }
+
+ //
+ // Copy the string to the allocated buffer.
+ //
+
+ RtlMoveMemory (CapturedBuffer,
+ CapturedName.Buffer,
+ CapturedName.Length);
+
+ //
+ // Point the buffer to the string that was just copied.
+ //
+
+ CapturedName.Buffer = CapturedBuffer;
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ if (CapturedBuffer != NULL) {
+ ExFreePool (CapturedBuffer);
+ }
+
+ Status = GetExceptionCode();
+ goto ErrorReturn0;
+ }
+
+ //
+ // Open a paging file and get the size.
+ //
+
+ InitializeObjectAttributes( &PagingFileAttributes,
+ &CapturedName,
+ OBJ_CASE_INSENSITIVE,
+ NULL,
+ NULL );
+
+ EndOfFileInformation.EndOfFile.HighPart = 0;
+ EndOfFileInformation.EndOfFile.LowPart =
+ ROUND_TO_PAGES (CapturedMinimumSize.LowPart);
+
+ Status = IoCreateFile( &FileHandle,
+ FILE_READ_DATA | FILE_WRITE_DATA | SYNCHRONIZE,
+ &PagingFileAttributes,
+ &IoStatus,
+ &CapturedMinimumSize,
+ 0L,
+ 0L,
+ FILE_SUPERSEDE,
+ FILE_NO_INTERMEDIATE_BUFFERING | FILE_NO_COMPRESSION,
+ (PVOID) NULL,
+ 0L,
+ CreateFileTypeNone,
+ (PVOID) NULL,
+ IO_OPEN_PAGING_FILE | IO_NO_PARAMETER_CHECKING );
+
+ if (!NT_SUCCESS(Status)) {
+#if DBG
+ if (Status != STATUS_DISK_FULL) {
+ DbgPrint("MM MODWRITE: unable to open paging file %wZ - status = %X \n", &CapturedName, Status);
+ }
+#endif
+ goto ErrorReturn1;
+ }
+
+ if (!NT_SUCCESS(IoStatus.Status)) {
+ KdPrint(("MM MODWRITE: unable to open paging file %wZ - iosb %lx\n", &CapturedName, IoStatus.Status));
+ Status = IoStatus.Status;
+ goto ErrorReturn1;
+ }
+
+ Status = ZwSetInformationFile (FileHandle,
+ &IoStatus,
+ &EndOfFileInformation,
+ sizeof(EndOfFileInformation),
+ FileEndOfFileInformation);
+
+ if (!NT_SUCCESS(Status)) {
+ KdPrint(("MM MODWRITE: unable to set length of paging file %wZ status = %X \n",
+ &CapturedName, Status));
+ goto ErrorReturn2;
+ }
+
+ if (!NT_SUCCESS(IoStatus.Status)) {
+ KdPrint(("MM MODWRITE: unable to set length of paging file %wZ - iosb %lx\n",
+ &CapturedName, IoStatus.Status));
+ Status = IoStatus.Status;
+ goto ErrorReturn2;
+ }
+
+ Status = ObReferenceObjectByHandle ( FileHandle,
+ FILE_READ_DATA | FILE_WRITE_DATA,
+ IoFileObjectType,
+ KernelMode,
+ (PVOID *)&File,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ KdPrint(("MM MODWRITE: Unable to reference paging file - %wZ\n",
+ &CapturedName));
+ goto ErrorReturn2;
+ }
+
+ //
+ // Make sure the specified file is not currently being used
+ // as a mapped data file.
+ //
+
+ Status = MiCheckPageFileMapping (File);
+ if (!NT_SUCCESS(Status)) {
+ goto ErrorReturn3;
+ }
+
+ //
+ // Make sure the volume is not a floppy disk.
+ //
+
+ Status = IoQueryVolumeInformation ( File,
+ FileFsDeviceInformation,
+ sizeof(FILE_FS_DEVICE_INFORMATION),
+ &FileDeviceInfo,
+ &ReturnedLength
+ );
+
+ if (FILE_FLOPPY_DISKETTE & FileDeviceInfo.Characteristics) {
+ Status = STATUS_FLOPPY_VOLUME;
+ goto ErrorReturn3;
+ }
+
+ //
+ // Acquire the global page file creation mutex.
+ //
+
+ ExAcquireFastMutex (&MmPageFileCreationLock);
+
+ MmPagingFile[MmNumberOfPagingFiles] = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMPAGING_FILE),
+ ' mM');
+ if (MmPagingFile[MmNumberOfPagingFiles] == NULL) {
+
+ //
+ // Allocate pool failed.
+ //
+
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn4;
+ }
+
+ RtlZeroMemory (MmPagingFile[MmNumberOfPagingFiles], sizeof(MMPAGING_FILE));
+ MmPagingFile[MmNumberOfPagingFiles]->File = File;
+ MmPagingFile[MmNumberOfPagingFiles]->Size = (ULONG)(
+ CapturedMinimumSize.QuadPart
+ >> PAGE_SHIFT);
+
+ MmPagingFile[MmNumberOfPagingFiles]->MinimumSize =
+ MmPagingFile[MmNumberOfPagingFiles]->Size;
+ MmPagingFile[MmNumberOfPagingFiles]->FreeSpace =
+ MmPagingFile[MmNumberOfPagingFiles]->Size - 1;
+
+ MmPagingFile[MmNumberOfPagingFiles]->MaximumSize = (ULONG)(
+ CapturedMaximumSize.QuadPart >>
+ PAGE_SHIFT);
+
+ MmPagingFile[MmNumberOfPagingFiles]->PageFileNumber = MmNumberOfPagingFiles;
+
+ //
+ // Adjust the commit page limit to reflect the new page file space.
+ //
+
+ MmPagingFile[MmNumberOfPagingFiles]->Entry[0] = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMMOD_WRITER_MDL_ENTRY) +
+ MmModifiedWriteClusterSize *
+ sizeof(ULONG),
+ ' mM');
+
+ if (MmPagingFile[MmNumberOfPagingFiles]->Entry[0] == NULL) {
+
+ //
+ // Allocate pool failed.
+ //
+
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn5;
+ }
+
+ RtlZeroMemory (MmPagingFile[MmNumberOfPagingFiles]->Entry[0],
+ sizeof(MMMOD_WRITER_MDL_ENTRY));
+
+ MmPagingFile[MmNumberOfPagingFiles]->Entry[0]->PagingListHead =
+ &MmPagingFileHeader;
+ MmPagingFile[MmNumberOfPagingFiles]->Entry[0]->PagingFile =
+ MmPagingFile[MmNumberOfPagingFiles];
+
+ MmPagingFile[MmNumberOfPagingFiles]->Entry[1] = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMMOD_WRITER_MDL_ENTRY) +
+ MmModifiedWriteClusterSize *
+ sizeof(ULONG),
+ ' mM');
+
+ if (MmPagingFile[MmNumberOfPagingFiles]->Entry[1] == NULL) {
+
+ //
+ // Allocate pool failed.
+ //
+
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn6;
+ }
+
+ RtlZeroMemory (MmPagingFile[MmNumberOfPagingFiles]->Entry[1],
+ sizeof(MMMOD_WRITER_MDL_ENTRY));
+
+ MmPagingFile[MmNumberOfPagingFiles]->Entry[1]->PagingListHead =
+ &MmPagingFileHeader;
+ MmPagingFile[MmNumberOfPagingFiles]->Entry[1]->PagingFile =
+ MmPagingFile[MmNumberOfPagingFiles];
+
+ MmPagingFile[MmNumberOfPagingFiles]->PageFileName = CapturedName;
+
+ MiCreateBitMap (&MmPagingFile[MmNumberOfPagingFiles]->Bitmap,
+ MmPagingFile[MmNumberOfPagingFiles]->MaximumSize,
+ NonPagedPool);
+
+ if (MmPagingFile[MmNumberOfPagingFiles]->Bitmap == NULL) {
+
+ //
+ // Allocate pool failed.
+ //
+
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto ErrorReturn7;
+ }
+
+ RtlSetAllBits (MmPagingFile[MmNumberOfPagingFiles]->Bitmap);
+
+ //
+    // Leave the first bit set, since offset 0 is an invalid page
+    // location, and clear the following bits.
+ //
+
+ RtlClearBits (MmPagingFile[MmNumberOfPagingFiles]->Bitmap,
+ 1,
+ MmPagingFile[MmNumberOfPagingFiles]->Size - 1);
+
+ PageFileNumber = MmNumberOfPagingFiles;
+ MiInsertPageFileInList ();
+
+ FinalStatus = MiCheckForCrashDump (File, PageFileNumber);
+
+ if (PageFileNumber == 0) {
+
+ //
+ // The first paging file has been created, start the modified
+ // page writer.
+ //
+
+ KeSetEvent (MmPagingFileCreated, 0 ,FALSE);
+ }
+
+ ExReleaseFastMutex (&MmPageFileCreationLock);
+
+ //
+ // Note that the file handle is not closed to prevent the
+ // paging file from being deleted or opened again. (Actually,
+ // the file handle is duped to the system process so process
+ // termination will not close the handle).
+ //
+
+ Status = ObOpenObjectByPointer(
+ PsInitialSystemProcess,
+ 0,
+ NULL,
+ 0,
+ PsProcessType,
+ KernelMode,
+ &SystemProcess
+ );
+
+ if ( !NT_SUCCESS(Status)) {
+ ZwClose (FileHandle);
+ return FinalStatus;
+ }
+
+ Status = ZwDuplicateObject(
+ NtCurrentProcess(),
+ FileHandle,
+ SystemProcess,
+ NULL,
+ 0,
+ 0,
+ DUPLICATE_SAME_ATTRIBUTES | DUPLICATE_SAME_ACCESS
+ );
+
+ ASSERT(NT_SUCCESS(Status));
+
+ if (!MmSystemPageFileLocated) {
+ MmSystemPageFileLocated = IoPageFileCreated(FileHandle);
+ }
+
+ ZwClose (SystemProcess);
+ ZwClose (FileHandle);
+
+ return FinalStatus;
+
+ //
+ // Error returns:
+ //
+
+ErrorReturn7:
+ ExFreePool (MmPagingFile[MmNumberOfPagingFiles]->Entry[0]);
+
+ErrorReturn6:
+ ExFreePool (MmPagingFile[MmNumberOfPagingFiles]->Entry[1]);
+
+ErrorReturn5:
+ ExFreePool (MmPagingFile[MmNumberOfPagingFiles]);
+
+ErrorReturn4:
+ ExReleaseFastMutex (&MmPageFileCreationLock);
+
+ErrorReturn3:
+ ObDereferenceObject (File);
+
+ErrorReturn2:
+ ZwClose (FileHandle);
+
+ErrorReturn1:
+ ExFreePool (CapturedBuffer);
+
+ErrorReturn0:
+ return(Status);
+}
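+
+//
+// Caller sketch (illustrative only): a user mode caller holding
+// SeCreatePagefilePrivilege would invoke the service roughly as below.
+// Sizes are supplied in bytes and are rounded to pages by the service;
+// the smallest accepted minimum is MINIMUM_PAGE_FILE_SIZE.
+//
+//     UNICODE_STRING Name;
+//     LARGE_INTEGER MinSize;
+//     LARGE_INTEGER MaxSize;
+//
+//     RtlInitUnicodeString (&Name, L"\\??\\C:\\pagefile.sys");
+//     MinSize.QuadPart = 20 * 1024 * 1024;
+//     MaxSize.QuadPart = 60 * 1024 * 1024;
+//     Status = NtCreatePagingFile (&Name, &MinSize, &MaxSize, 0);
+//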
+
+
+NTSTATUS
+MiCheckForCrashDump (
+ PFILE_OBJECT File,
+ IN ULONG FileNumber
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks the first page of the paging file to
+ determine if a crash dump exists. If a crash dump is found
+ a section is created which maps the crash dump. A handle
+ to the section is created via NtQuerySystemInformation specifying
+ SystemCrashDumpInformation.
+
+Arguments:
+
+ File - Supplies a pointer to the file object for the paging file.
+
+ FileNumber - Supplies the index into the paging file array.
+
+Return Value:
+
+ Returns STATUS_CRASH_DUMP if a crash dump exists, success otherwise.
+
+--*/
+
+{
+ PMDL Mdl;
+ LARGE_INTEGER Offset = {0,0};
+ PULONG Block;
+ IO_STATUS_BLOCK IoStatus;
+ NTSTATUS Status;
+ PPHYSICAL_MEMORY_DESCRIPTOR Memory;
+ ULONG j;
+ PULONG Page;
+ NTSTATUS FinalStatus = STATUS_SUCCESS;
+ PMMPTE PointerPte;
+ PMMPFN Pfn;
+ ULONG MdlHack[(sizeof(MDL)/4) + 1];
+ WORK_QUEUE_ITEM WorkItem;
+ MMWORK_CONTEXT Context;
+
+ Mdl = (PMDL)&MdlHack[0];
+ MmCreateMdl( Mdl, NULL, PAGE_SIZE);
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+
+ Page = (PULONG)(Mdl + 1);
+ *Page = MiGetPageForHeader ();
+ Block = MmGetSystemAddressForMdl (Mdl);
+
+ KeInitializeEvent (&Context.Event, NotificationEvent, FALSE);
+
+ Status = IoPageRead (File,
+ Mdl,
+ &Offset,
+ &Context.Event,
+ &IoStatus);
+
+ if (Status == STATUS_PENDING) {
+ KeWaitForSingleObject( &Context.Event,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ }
+
+ KeClearEvent (&Context.Event);
+ Memory = (PPHYSICAL_MEMORY_DESCRIPTOR)&Block[DH_PHYSICAL_MEMORY_BLOCK];
+
+ if ((Block[0] == 'EGAP') &&
+ (Block[1] == 'PMUD') &&
+ (Memory->NumberOfPages < MmPagingFile[FileNumber]->Size)) {
+
+ //
+ // A crash dump already exists, don't let pager use
+ // it and build named section for it.
+ //
+
+ Context.Size.QuadPart = (LONGLONG)(Memory->NumberOfPages + 1) <<
+ PAGE_SHIFT;
+
+ ExInitializeWorkItem(&WorkItem,
+ MiCrashDumpWorker,
+ (PVOID)&Context);
+
+ ExQueueWorkItem( &WorkItem, DelayedWorkQueue );
+
+ KeWaitForSingleObject( &Context.Event,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ KeClearEvent (&Context.Event);
+
+ if (!NT_SUCCESS(Context.Status)) {
+ goto Failed;
+ }
+
+ //
+ // Make the section point to the paging file.
+ //
+
+ PointerPte = MmCrashDumpSection->Segment->PrototypePte;
+ *PointerPte = MmCrashDumpSection->Segment->SegmentPteTemplate;
+
+ Pfn = MI_PFN_ELEMENT (*Page);
+ Pfn->u3.e1.Modified = 1;
+
+ PointerPte += 1;
+
+ for (j = 1; j <= Memory->NumberOfPages; j++) {
+
+ PointerPte->u.Long = SET_PAGING_FILE_INFO (
+ MmCrashDumpSection->Segment->SegmentPteTemplate,
+ FileNumber,
+ j);
+#if DBG
+ if ((j < 8192) && (FileNumber == 0)) {
+ ASSERT ((MmPagingFileDebug[j] & 1) == 0);
+ MmPagingFileDebug[j] = (((ULONG)PointerPte & 0xFFFFFFF) | 1);
+ }
+#endif //DBG
+ PointerPte += 1;
+ }
+
+ //
+ // Change the original PTE contents to refer to
+ // the paging file offset where this was written.
+ //
+
+ RtlSetBits (MmPagingFile[FileNumber]->Bitmap,
+ 1,
+ Memory->NumberOfPages);
+
+ MmPagingFile[FileNumber]->FreeSpace -= Memory->NumberOfPages;
+ MmPagingFile[FileNumber]->CurrentUsage += Memory->NumberOfPages;
+ FinalStatus = STATUS_CRASH_DUMP;
+
+Failed:
+
+ //
+ // Indicate that no crash dump is in file so if system is
+ // rebooted the page file is available.
+ //
+
+ Block[1] = 'EGAP';
+ } else {
+
+ //
+ // Set new pattern into file.
+ //
+
+ RtlFillMemoryUlong (Block,
+ PAGE_SIZE,
+ 'EGAP');
+
+ Block[4] = PsInitialSystemProcess->Pcb.DirectoryTableBase[0];
+ Block[5] = (ULONG)MmPfnDatabase;
+ Block[6] = (ULONG)&PsLoadedModuleList;
+ Block[7] = (ULONG)&PsActiveProcessHead;
+ Block[8] =
+#ifdef _X86_
+ IMAGE_FILE_MACHINE_I386;
+#endif //_X86_
+
+#ifdef _MIPS_
+ IMAGE_FILE_MACHINE_R4000;
+#endif //_MIPS_
+
+#ifdef _PPC_
+ IMAGE_FILE_MACHINE_POWERPC;
+#endif //_PPC_
+
+#ifdef _ALPHA_
+ IMAGE_FILE_MACHINE_ALPHA;
+#endif //_ALPHA_
+
+ RtlCopyMemory (&Block[DH_PHYSICAL_MEMORY_BLOCK],
+ MmPhysicalMemoryBlock,
+ (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
+ (sizeof(PHYSICAL_MEMORY_RUN) *
+ (MmPhysicalMemoryBlock->NumberOfRuns - 1))));
+ }
+
+ Status = IoSynchronousPageWrite (
+ File,
+ Mdl,
+ &Offset,
+ &Context.Event,
+ &IoStatus );
+
+ KeWaitForSingleObject (&Context.Event,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ if (FinalStatus == STATUS_CRASH_DUMP) {
+
+ //
+ // Set the first page to point to the page that was just operated
+ // upon.
+ //
+
+ MiUpdateImageHeaderPage (MmCrashDumpSection->Segment->PrototypePte,
+ *Page,
+ MmCrashDumpSection->Segment->ControlArea);
+ } else {
+ MiRemoveImageHeaderPage(*Page);
+ }
+ return FinalStatus;
+}
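+
+//
+// Layout note derived from the checks above: the first two ULONGs of page
+// zero read "PAGEDUMP" in memory when a crash dump is present ('EGAP'
+// followed by 'PMUD' as little-endian character constants), while a clean
+// paging file carries the "PAGEPAGE" pattern produced by the
+// RtlFillMemoryUlong call and by the Block[1] = 'EGAP' store above.
+//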
+
+
+VOID
+MiCrashDumpWorker (
+ IN PVOID Context
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called in the context of a delayed worker thread.
+ Its function is to create a section which will be used to map the
+ crash dump in the paging file.
+
+Arguments:
+
+    Context - Supplies the context record which contains the section's
+ size, an event to set at completion and a status value
+ to be returned.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMWORK_CONTEXT Work;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+
+ PAGED_CODE();
+
+ Work = (PMMWORK_CONTEXT)Context;
+
+ InitializeObjectAttributes( &ObjectAttributes,
+ NULL,
+ 0,
+ NULL,
+ NULL );
+
+
+ Work->Status = MmCreateSection ( &MmCrashDumpSection,
+ SECTION_MAP_READ,
+ &ObjectAttributes,
+ &Work->Size,
+ PAGE_READONLY,
+ SEC_COMMIT,
+ NULL,
+ NULL );
+
+ KeSetEvent (&Work->Event, 0, FALSE);
+ return;
+}
+
+
+NTSTATUS
+MmGetCrashDumpInformation (
+ IN PSYSTEM_CRASH_DUMP_INFORMATION CrashInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This function checks to see if a crash dump section exists and
+ if so creates a handle to the section and returns that value
+ in the CrashDumpInformation structure. Once the handle to the
+    section has been created, no other references can be made
+ to the crash dump section, and when that handle is closed, the
+ crash dump section is deleted and the paging file space is
+ available for reuse.
+
+Arguments:
+
+ CrashInfo - Supplies a pointer to the crash dump information
+ structure.
+
+Return Value:
+
+ Status of the operation. A handle value of zero indicates no
+ crash dump was located.
+
+--*/
+
+{
+ NTSTATUS Status;
+ HANDLE Handle;
+
+ PAGED_CODE();
+
+ if (MmCrashDumpSection == NULL) {
+ Handle = 0;
+ Status = STATUS_SUCCESS;
+ } else {
+ Status = ObInsertObject (MmCrashDumpSection,
+ NULL,
+ SECTION_MAP_READ,
+ 0,
+ (PVOID *)NULL,
+ &Handle);
+ if (NT_SUCCESS(Status)) {
+
+ //
+ // One shot operation.
+ //
+
+ MmCrashDumpSection = NULL;
+ }
+ }
+
+ CrashInfo->CrashDumpSection = Handle;
+ return Status;
+}
+
+
+NTSTATUS
+MmGetCrashDumpStateInformation (
+ IN PSYSTEM_CRASH_STATE_INFORMATION CrashInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This function checks to see if a crash dump section exists and
+ returns a BOOLEAN value in the CrashStateInformation structure
+ based on the outcome.
+
+Arguments:
+
+ CrashInfo - Supplies a pointer to the crash dump state information
+ structure.
+
+Return Value:
+
+ Status of the operation. A BOOLEAN value of FALSE indicates no
+ crash dump was located.
+
+--*/
+
+{
+ PAGED_CODE();
+
+ CrashInfo->ValidCrashDump = (MmCrashDumpSection != NULL);
+ return STATUS_SUCCESS;
+}
+
+
+ULONG
+MiAttemptPageFileExtension (
+ IN ULONG PageFileNumber,
+ IN ULONG ExtendSize,
+ IN ULONG Maximum
+ )
+
+/*++
+
+Routine Description:
+
+ This routine attempts to extend the specified page file by
+ ExtendSize.
+
+Arguments:
+
+ PageFileNumber - Supplies the page file number to attempt to extend.
+
+ ExtendSize - Supplies the number of pages to extend the file by.
+
+ Maximum - Supplies TRUE if the page file should be extended
+ by the maximum size possible, but not to exceed
+ ExtendSize.
+
+Return Value:
+
+ Returns the size of the extension. Zero if the page file cannot
+ be extended.
+
+--*/
+
+{
+
+ NTSTATUS status;
+ FILE_FS_SIZE_INFORMATION FileInfo;
+ FILE_END_OF_FILE_INFORMATION EndOfFileInformation;
+ KIRQL OldIrql;
+ ULONG AllocSize;
+ ULONG AdditionalAllocation;
+ ULONG ReturnedLength;
+ ULONG PagesAvailable;
+ ULONG SizeToExtend;
+ LARGE_INTEGER BytesAvailable;
+
+ //
+ // Check to see if this page file is at the maximum.
+ //
+
+ if (MmPagingFile[PageFileNumber]->Size ==
+ MmPagingFile[PageFileNumber]->MaximumSize) {
+ return 0;
+ }
+
+ //
+ // Find out how much free space is on this volume.
+ //
+
+ status = IoQueryVolumeInformation ( MmPagingFile[PageFileNumber]->File,
+ FileFsSizeInformation,
+ sizeof(FileInfo),
+ &FileInfo,
+ &ReturnedLength
+ );
+
+ if (!NT_SUCCESS (status)) {
+
+ //
+ // The volume query did not succeed - return 0 indicating
+ // the paging file was not extended.
+ //
+
+ return 0;
+ }
+
+ //
+    // Always attempt to extend by at least MmPageFileExtension pages.
+ //
+
+ SizeToExtend = ExtendSize;
+ if (ExtendSize < MmPageFileExtension) {
+ SizeToExtend = MmPageFileExtension;
+ }
+
+ //
+ // Don't go over the maximum size for the paging file.
+ //
+
+ if ((SizeToExtend + MmPagingFile[PageFileNumber]->Size) >
+ MmPagingFile[PageFileNumber]->MaximumSize) {
+ SizeToExtend = (MmPagingFile[PageFileNumber]->MaximumSize -
+ MmPagingFile[PageFileNumber]->Size);
+ }
+
+ if ((Maximum == FALSE) && (SizeToExtend < ExtendSize)) {
+
+ //
+ // Can't meet the requirement.
+ //
+
+ return 0;
+ }
+ //
+ // See if there is enough space on the volume for the extension.
+ //
+
+ AllocSize = FileInfo.SectorsPerAllocationUnit * FileInfo.BytesPerSector;
+
+ BytesAvailable = RtlExtendedIntegerMultiply (
+ FileInfo.AvailableAllocationUnits,
+ AllocSize);
+
+ if (BytesAvailable.QuadPart > (LONGLONG)MmMinimumFreeDiskSpace) {
+
+ BytesAvailable.QuadPart = BytesAvailable.QuadPart -
+ (LONGLONG)MmMinimumFreeDiskSpace;
+
+ if (BytesAvailable.QuadPart > (LONGLONG)(SizeToExtend << PAGE_SHIFT)) {
+ BytesAvailable.QuadPart = (LONGLONG)(SizeToExtend << PAGE_SHIFT);
+ }
+
+ PagesAvailable = (ULONG)(BytesAvailable.QuadPart >> PAGE_SHIFT);
+
+ if ((Maximum == FALSE) && (PagesAvailable < ExtendSize)) {
+
+ //
+ // Can't satisfy this requirement.
+ //
+
+ return 0;
+ }
+
+ } else {
+
+ //
+ // Not enough space is available.
+ //
+
+ return 0;
+ }
+
+ EndOfFileInformation.EndOfFile.LowPart =
+ (MmPagingFile[PageFileNumber]->Size + PagesAvailable) * PAGE_SIZE;
+
+ //
+ // Set high part to zero, paging files are limited to 4gb.
+ //
+
+ EndOfFileInformation.EndOfFile.HighPart = 0;
+
+ //
+ // Attempt to extend the file by setting the end-of-file position.
+ //
+
+ ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
+ status = IoSetInformation (MmPagingFile[PageFileNumber]->File,
+ FileEndOfFileInformation,
+ sizeof(FILE_END_OF_FILE_INFORMATION),
+ &EndOfFileInformation
+ );
+
+ if (status != STATUS_SUCCESS) {
+        KdPrint(("MM MODWRITE: page file extension failed %lx\n",status));
+ return 0;
+ }
+
+ //
+ // Clear bits within the paging file bitmap to allow the extension
+ // to take effect.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ ASSERT (RtlCheckBit (MmPagingFile[PageFileNumber]->Bitmap,
+ MmPagingFile[PageFileNumber]->Size) == 1);
+
+ AdditionalAllocation = PagesAvailable;
+
+ RtlClearBits (MmPagingFile[PageFileNumber]->Bitmap,
+ MmPagingFile[PageFileNumber]->Size,
+ AdditionalAllocation );
+
+ MmPagingFile[PageFileNumber]->Size += AdditionalAllocation;
+ MmPagingFile[PageFileNumber]->FreeSpace += AdditionalAllocation;
+
+ MiUpdateModifiedWriterMdls (PageFileNumber);
+
+ UNLOCK_PFN (OldIrql);
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+
+ MmTotalCommitLimit += AdditionalAllocation;
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+
+ return AdditionalAllocation;
+}
+
+ULONG
+MiExtendPagingFiles (
+ IN ULONG DesiredQuota
+ )
+
+/*++
+
+Routine Description:
+
+ This routine attempts to extend the paging files to provide
+ ExtendSize bytes.
+
+ Note - Page file expansion and page file reduction are synchronized
+ because a single thread is responsible for performing the
+ operation. Hence, while expansion is occurring, a reduction
+ request will be queued to the thread.
+
+Arguments:
+
+ DesiredQuota - Supplies the quota in pages desired.
+
+Return Value:
+
+ Returns the size of the extension. Zero if the page file(s) cannot
+ be extended.
+
+--*/
+
+{
+ ULONG ExtendedSize = 0;
+ ULONG ExtendSize;
+ ULONG i;
+ KIRQL OldIrql;
+
+ if (MmNumberOfPagingFiles == 0) {
+ return 0;
+ }
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+
+ //
+    // Check to see if ample space already exists now that we have
+ // the spinlock.
+ //
+
+ ExtendSize = DesiredQuota + MmTotalCommittedPages;
+
+ if (MmTotalCommitLimit >= ExtendSize) {
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+ return 1;
+ }
+
+ //
+    // Calculate the additional pages needed.
+ //
+
+ ExtendSize -= MmTotalCommitLimit;
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+
+ //
+    // Make sure ample space exists within the paging files.
+ //
+
+ i = 0;
+ do {
+ ExtendedSize += MmPagingFile[i]->MaximumSize - MmPagingFile[i]->Size;
+ i += 1;
+ } while (i < MmNumberOfPagingFiles);
+
+ if (ExtendedSize < ExtendSize) {
+ return 0;
+ }
+
+ //
+ // Attempt to extend only one of the paging files.
+ //
+
+ i = 0;
+ do {
+ ExtendedSize = MiAttemptPageFileExtension (i, ExtendSize, FALSE);
+ if (ExtendedSize != 0) {
+ return ExtendedSize;
+ }
+ i += 1;
+ } while (i < MmNumberOfPagingFiles);
+
+ if (MmNumberOfPagingFiles == 1) {
+
+ //
+ // If the attempt didn't succeed for one (not enough disk space free) -
+ // don't try to set it to the maximum size.
+ //
+
+ return 0;
+ }
+
+ //
+ // Attempt to extend all paging files.
+ //
+
+ i = 0;
+ do {
+ ExtendedSize = MiAttemptPageFileExtension (i, ExtendSize, TRUE);
+ if (ExtendedSize >= ExtendSize) {
+ return ExtendSize;
+ }
+ ExtendSize -= ExtendedSize;
+ i += 1;
+ } while (i < MmNumberOfPagingFiles);
+
+ //
+ // Not enough space is available.
+ //
+
+ return 0;
+}
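+
+//
+// Worked example of the computation above (the numbers are illustrative):
+// with MmTotalCommittedPages = 5000, MmTotalCommitLimit = 5100 and a
+// DesiredQuota of 300 pages, ExtendSize starts at 5300, which exceeds the
+// limit, so the paging files must provide 5300 - 5100 = 200 additional
+// pages before the commit charge can succeed.
+//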
+
+VOID
+MiContractPagingFiles (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    This routine checks to see whether the committed space has dropped
+    well below the commit limit and whether enough free space exists in
+    any paging file.  If both conditions hold, a reduction in the
+    paging file size(s) is attempted.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ BOOLEAN Reduce = FALSE;
+ PMMPAGE_FILE_EXPANSION PageReduce;
+ KIRQL OldIrql;
+ ULONG i;
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+
+ if (MiOverCommitCallCount != 0) {
+
+ //
+ // Check to see if we can take the overcommitment back.
+ //
+
+ if ((MmTotalCommitLimit - MmExtendedCommit) >
+ MmTotalCommittedPages) {
+
+ MmTotalCommitLimit -= MmExtendedCommit;
+ MiOverCommitCallCount = 0;
+ MmExtendedCommit = 0;
+ }
+ }
+
+ if ((MmTotalCommitLimit + MmMinimumPageFileReduction) >
+ MmTotalCommittedPages) {
+
+ for (i = 0;i < MmNumberOfPagingFiles; i++) {
+ if (MmPagingFile[i]->Size != MmPagingFile[i]->MinimumSize) {
+ if (MmPagingFile[i]->FreeSpace > MmMinimumPageFileReduction) {
+ Reduce = TRUE;
+ break;
+ }
+ }
+ }
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+
+ if (!Reduce) {
+ return;
+ }
+
+ PageReduce = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMPAGE_FILE_EXPANSION),
+ ' mM');
+
+ if (PageReduce == NULL) {
+ return;
+ }
+
+ PageReduce->Segment = NULL;
+ PageReduce->RequestedExpansionSize = 0xFFFFFFFF;
+
+ ExAcquireSpinLock (&MmDereferenceSegmentHeader.Lock, &OldIrql);
+ InsertTailList ( &MmDereferenceSegmentHeader.ListHead,
+ &PageReduce->DereferenceList);
+ ExReleaseSpinLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+
+ KeReleaseSemaphore (&MmDereferenceSegmentHeader.Semaphore, 0L, 1L, FALSE);
+ return;
+ }
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+ return;
+}
+
+VOID
+MiAttemptPageFileReduction (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine attempts to reduce the size of the paging files to
+ their minimum levels.
+
+ Note - Page file expansion and page file reduction are synchronized
+ because a single thread is responsible for performing the
+ operation. Hence, while expansion is occurring, a reduction
+ request will be queued to the thread.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ BOOLEAN Reduce = FALSE;
+ KIRQL OldIrql;
+ ULONG i;
+ ULONG StartReduction;
+ ULONG ReductionSize;
+ ULONG TryBit;
+ ULONG TryReduction;
+ ULONG MaxReduce;
+ FILE_ALLOCATION_INFORMATION FileAllocationInfo;
+ NTSTATUS status;
+
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+
+ //
+ // Make sure the commit limit is greater than the number of committed
+ // pages by twice the minimum page file reduction. Keep the
+ // difference between the two at least minimum page file reduction.
+ //
+
+ if ((MmTotalCommittedPages + (2 * MmMinimumPageFileReduction)) <
+ MmTotalCommitLimit) {
+
+ MaxReduce = MmTotalCommitLimit -
+ (MmMinimumPageFileReduction + MmTotalCommittedPages);
+ ASSERT ((LONG)MaxReduce >= 0);
+
+ i = 0;
+ do {
+
+ if (MaxReduce < MmMinimumPageFileReduction) {
+
+ //
+ // Don't reduce any more paging files.
+ //
+
+ break;
+ }
+
+ if (MmPagingFile[i]->MinimumSize != MmPagingFile[i]->Size) {
+
+ if (MmPagingFile[i]->FreeSpace > MmMinimumPageFileReduction) {
+
+ //
+ // Attempt to reduce this paging file.
+ //
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+
+ //
+ // Lock the PFN database and check to see if ample pages
+ // are free at the end of the paging file.
+ //
+
+ TryBit = MmPagingFile[i]->Size - MmMinimumPageFileReduction;
+ TryReduction = MmMinimumPageFileReduction;
+
+ if (TryBit <= MmPagingFile[i]->MinimumSize) {
+ TryBit = MmPagingFile[i]->MinimumSize;
+ TryReduction = MmPagingFile[i]->Size -
+ MmPagingFile[i]->MinimumSize;
+ }
+
+ StartReduction = 0;
+ ReductionSize = 0;
+
+ LOCK_PFN (OldIrql);
+
+ while (TRUE) {
+
+ //
+ // Try to reduce.
+ //
+
+ if ((ReductionSize + TryReduction) > MaxReduce) {
+
+ //
+ // The reduction attempt would remove more
+ // than MaxReduce pages.
+ //
+
+ break;
+ }
+
+ if (RtlAreBitsClear (MmPagingFile[i]->Bitmap,
+ TryBit,
+ TryReduction)) {
+
+ //
+ // Can reduce it by TryReduction, see if it can
+ // be made smaller.
+ //
+
+ StartReduction = TryBit;
+ ReductionSize += TryReduction;
+
+ if (StartReduction == MmPagingFile[i]->MinimumSize) {
+ break;
+ }
+
+ TryBit = StartReduction - MmMinimumPageFileReduction;
+
+ if (TryBit <= MmPagingFile[i]->MinimumSize) {
+ TryReduction -=
+ MmPagingFile[i]->MinimumSize - TryBit;
+ TryBit = MmPagingFile[i]->MinimumSize;
+ } else {
+ TryReduction = MmMinimumPageFileReduction;
+ }
+ } else {
+
+ //
+ // Reduction has failed.
+ //
+
+ break;
+ }
+ } //end while
+
+ //
+ // Make sure there are no outstanding writes to
+ // pages within the start reduction range.
+ //
+
+ if (StartReduction != 0) {
+
+ //
+ // There is an outstanding write past where the
+ // new end of the paging file should be. This
+ // is a very rare condition, so just punt shrinking
+ // the file.
+ //
+
+ if ((MmPagingFile[i]->Entry[0]->LastPageToWrite >
+ StartReduction) ||
+ (MmPagingFile[i]->Entry[1]->LastPageToWrite >
+ StartReduction)) {
+ StartReduction = 0;
+ }
+ }
+
+ //
+ // Are there any pages to remove.
+ //
+
+ if (StartReduction != 0) {
+
+ //
+ // Reduce the paging file's size and free space.
+ //
+
+ ASSERT (ReductionSize == (MmPagingFile[i]->Size - StartReduction));
+
+ MmPagingFile[i]->Size = StartReduction;
+ MmPagingFile[i]->FreeSpace -= ReductionSize;
+ MaxReduce -= ReductionSize;
+ ASSERT ((LONG)MaxReduce >= 0);
+
+ RtlSetBits (MmPagingFile[i]->Bitmap,
+ StartReduction,
+ ReductionSize );
+
+ //
+ // Release the pfn mutex now that the size info
+ // has been updated.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Change the commit limit to reflect the returned
+ // page file space.
+ //
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+
+ MmTotalCommitLimit -= ReductionSize;
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+
+ FileAllocationInfo.AllocationSize.LowPart =
+ StartReduction * PAGE_SIZE;
+
+ //
+ // Set high part to zero, paging files are
+ // limited to 4gb.
+ //
+
+ FileAllocationInfo.AllocationSize.HighPart = 0;
+
+ //
+ // Reduce the allocated size of the paging file
+ // thereby actually freeing the space and
+ // setting a new end of file.
+ //
+
+
+ ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
+ status = IoSetInformation (
+ MmPagingFile[i]->File,
+ FileAllocationInformation,
+ sizeof(FILE_ALLOCATION_INFORMATION),
+ &FileAllocationInfo
+ );
+#if DBG
+
+ //
+ // Ignore errors on truncating the paging file
+ // as we can always have less space in the bitmap
+ // than the pagefile holds.
+ //
+
+ if (status != STATUS_SUCCESS) {
+ DbgPrint ("MM: pagefile truncate status %lx\n",
+ status);
+ }
+#endif
+ } else {
+ UNLOCK_PFN (OldIrql);
+ }
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+ }
+ }
+ i += 1;
+ } while (i < MmNumberOfPagingFiles);
+ }
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+ return;
+}
+
+VOID
+MiWriteComplete (
+ IN PVOID Context,
+ IN PIO_STATUS_BLOCK IoStatus,
+ IN ULONG Reserved
+ )
+
+/*++
+
+Routine Description:
+
+ This routine is the APC write completion procedure. It is invoked
+ at APC_LEVEL when a page write operation is completed.
+
+Arguments:
+
+ Context - Supplies a pointer to the MOD_WRITER_MDL_ENTRY which was
+ used for this I/O.
+
+ IoStatus - Supplies a pointer to the IO_STATUS_BLOCK which was used for this I/O.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC_LEVEL.
+
+--*/
+
+{
+
+ PMMMOD_WRITER_MDL_ENTRY WriterEntry;
+ PMMMOD_WRITER_MDL_ENTRY NextWriterEntry;
+ PULONG Page;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+ LONG ByteCount;
+ NTSTATUS status;
+ PCONTROL_AREA ControlArea;
+ ULONG FailAllIo = FALSE;
+ PFILE_OBJECT FileObject;
+ PERESOURCE FileResource;
+
+#if DBG
+ if (MmDebug & MM_DBG_MOD_WRITE) {
+        DbgPrint("MM MODWRITE: modified page write completed\n");
+ }
+#endif
+
+ //
+ // A page write has completed, at this time the pages are not
+ // on any lists, write-in-progress is set in the pfn database,
+ // and the reference count was incremented.
+ //
+
+ WriterEntry = (PMMMOD_WRITER_MDL_ENTRY)Context;
+ ByteCount = (LONG)WriterEntry->Mdl.ByteCount;
+ Page = &WriterEntry->Page[0];
+
+ if (WriterEntry->Mdl.MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (WriterEntry->Mdl.MappedSystemVa,
+ &WriterEntry->Mdl);
+ }
+
+ //
+ // Get the PFN mutex so the pfn database can be manipulated.
+ //
+
+ status = IoStatus->Status;
+ ControlArea = WriterEntry->ControlArea;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Indicate that the write is complete.
+ //
+
+ WriterEntry->LastPageToWrite = 0;
+
+
+ while (ByteCount > 0) {
+
+ Pfn1 = MI_PFN_ELEMENT (*Page);
+ ASSERT (Pfn1->u3.e1.WriteInProgress == 1);
+#if DBG
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 0) {
+
+ ULONG Offset;
+ Offset = GET_PAGING_FILE_OFFSET(Pfn1->OriginalPte);
+ if ((Offset < 8192) &&
+ (GET_PAGING_FILE_NUMBER(Pfn1->OriginalPte) == 0)) {
+ ASSERT ((MmPagingFileDebug[Offset] & 1) != 0);
+ if (!MI_IS_PFN_DELETED(Pfn1)) {
+ if ((GET_PAGING_FILE_NUMBER (Pfn1->OriginalPte)) == 0) {
+ if (((MmPagingFileDebug[Offset] -1) << 4) !=
+ ((ULONG)Pfn1->PteAddress << 4)) {
+ if (Pfn1->PteAddress != MiGetPteAddress(PDE_BASE)) {
+
+ //
+ // Make sure this isn't a PTE that was forked
+ // during the I/O.
+ //
+
+ if ((Pfn1->PteAddress < (PMMPTE)PDE_TOP) ||
+ ((Pfn1->OriginalPte.u.Soft.Protection &
+ MM_COPY_ON_WRITE_MASK) ==
+ MM_PROTECTION_WRITE_MASK)) {
+                                DbgPrint("MMWRITE: Mismatch Pfn1 %lx Offset %lx info %lx\n",
+ Pfn1, Offset,
+ MmPagingFileDebug[Offset]);
+ DbgBreakPoint();
+ } else {
+ MmPagingFileDebug[Offset] &= 0xF0000001;
+ MmPagingFileDebug[Offset] |=
+ ((ULONG)Pfn1->PteAddress & 0xfffffff);
+ }
+ }
+
+ }
+ }
+ }
+ }
+ }
+#endif //DBG
+
+ Pfn1->u3.e1.WriteInProgress = 0;
+
+ if (NT_ERROR(status)) {
+
+ //
+ // If the file object is over the network, assume that this
+ // I/O operation can never complete and mark the pages as
+ // clean and indicate in the control area all I/O should fail.
+ // Note that the modified bit in the PFN database is not set.
+ //
+
+ if (((status != STATUS_FILE_LOCK_CONFLICT) &&
+ (ControlArea != NULL) &&
+ (ControlArea->u.Flags.Networked == 1))
+ ||
+ (status == STATUS_FILE_INVALID)) {
+
+ if (ControlArea->u.Flags.FailAllIo == 0) {
+ ControlArea->u.Flags.FailAllIo = 1;
+ FailAllIo = TRUE;
+
+ KdPrint(("MM MODWRITE: failing all io, controlarea %lx status %lx\n",
+ ControlArea, status));
+ }
+ } else {
+
+ //
+ // The modified write operation failed, SET the modified bit
+ // for each page which was written and free the page file
+ // space.
+ //
+
+#if DBG
+ if ((status != STATUS_FILE_LOCK_CONFLICT) &&
+ ((MmDebug & MM_DBG_PRINTS_MODWRITES) == 0)) {
+                KdPrint(("MM MODWRITE: modified page write iosb failed - status 0x%lx\n",
+ status));
+ }
+#endif
+
+ Pfn1->u3.e1.Modified = 1;
+ }
+ }
+
+ if ((Pfn1->u3.e1.Modified == 1) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 0)) {
+
+ //
+ // This page was modified since the write was done,
+ // release the page file space.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+ MiDecrementReferenceCount (*Page);
+#if DBG
+ *Page = 0xF0FFFFFF;
+#endif //DBG
+
+ Page += 1;
+ ByteCount -= (LONG)PAGE_SIZE;
+ }
+
+ //
+    // Check which list to insert this entry into, depending on
+    // the amount of free space left in the paging file.
+ //
+
+ FileObject = WriterEntry->File;
+ FileResource = WriterEntry->FileResource;
+
+ if ((WriterEntry->PagingFile != NULL) &&
+ (WriterEntry->PagingFile->FreeSpace < MM_USABLE_PAGES_FREE)) {
+
+ if (MmNumberOfActiveMdlEntries == 1) {
+
+ //
+            // If we put this entry on the list, there will be
+            // no more paging I/O. Locate all entries whose paging
+            // files still have free space and pull them from the list.
+ //
+
+ InsertTailList (&MmFreePagingSpaceLow, &WriterEntry->Links);
+ WriterEntry->CurrentList = &MmFreePagingSpaceLow;
+
+ MmNumberOfActiveMdlEntries -= 1;
+
+ //
+ // Try to pull entries off the list.
+ //
+
+ WriterEntry = (PMMMOD_WRITER_MDL_ENTRY)MmFreePagingSpaceLow.Flink;
+
+ while ((PLIST_ENTRY)WriterEntry != &MmFreePagingSpaceLow) {
+
+ NextWriterEntry =
+ (PMMMOD_WRITER_MDL_ENTRY)WriterEntry->Links.Flink;
+
+ if (WriterEntry->PagingFile->FreeSpace != 0) {
+
+ RemoveEntryList (&WriterEntry->Links);
+
+ //
+ // Insert this into the active list.
+ //
+
+ if (IsListEmpty (&WriterEntry->PagingListHead->ListHead)) {
+ KeSetEvent (&WriterEntry->PagingListHead->Event,
+ 0,
+ FALSE);
+ }
+
+ InsertTailList (&WriterEntry->PagingListHead->ListHead,
+ &WriterEntry->Links);
+ WriterEntry->CurrentList = &MmPagingFileHeader.ListHead;
+ MmNumberOfActiveMdlEntries += 1;
+ }
+
+ WriterEntry = NextWriterEntry;
+ }
+
+ } else {
+
+ InsertTailList (&MmFreePagingSpaceLow, &WriterEntry->Links);
+ WriterEntry->CurrentList = &MmFreePagingSpaceLow;
+ MmNumberOfActiveMdlEntries -= 1;
+ }
+ } else {
+
+ //
+ // Ample space exists, put this on the active list.
+ //
+
+ if (IsListEmpty (&WriterEntry->PagingListHead->ListHead)) {
+ KeSetEvent (&WriterEntry->PagingListHead->Event, 0, FALSE);
+ }
+
+ InsertTailList (&WriterEntry->PagingListHead->ListHead,
+ &WriterEntry->Links);
+ }
+
+ ASSERT (((ULONG)WriterEntry->Links.Flink & 1) == 0);
+
+ UNLOCK_PFN (OldIrql);
+
+ if (FileResource != NULL) {
+ FsRtlReleaseFileForModWrite (FileObject, FileResource);
+ }
+
+ if (FailAllIo) {
+
+ if (ControlArea->FilePointer->FileName.Length &&
+ ControlArea->FilePointer->FileName.MaximumLength &&
+ ControlArea->FilePointer->FileName.Buffer) {
+
+ IoRaiseInformationalHardError(
+ STATUS_LOST_WRITEBEHIND_DATA,
+ &ControlArea->FilePointer->FileName,
+ NULL
+ );
+ }
+ }
+
+ if (ControlArea != NULL) {
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // A write to a mapped file just completed, check to see if
+        // there are any waiters on the completion of this I/O.
+ //
+
+ ControlArea->ModifiedWriteCount -= 1;
+ ASSERT ((SHORT)ControlArea->ModifiedWriteCount >= 0);
+ if (ControlArea->u.Flags.SetMappedFileIoComplete != 0) {
+ KePulseEvent (&MmMappedFileIoComplete,
+ 0,
+ FALSE);
+ }
+
+ ControlArea->NumberOfPfnReferences -= 1;
+
+ if (ControlArea->NumberOfPfnReferences == 0) {
+
+ //
+            // This routine returns with the PFN lock released!
+ //
+
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+ } else {
+ UNLOCK_PFN (OldIrql);
+ }
+ }
+
+ if (NT_ERROR(status)) {
+
+ //
+ // Wait for a short time so other processing can continue.
+ //
+
+ KeDelayExecutionThread (KernelMode, FALSE, &Mm30Milliseconds);
+ }
+
+ return;
+}
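+
+//
+// An illustrative sketch of the completion loop above: the page frame
+// array trailing the MDL is walked one entry per PAGE_SIZE bytes of
+// the transfer. The helper and its callback are hypothetical; only
+// the ByteCount-driven walk mirrors MiWriteComplete.
+//
+
+typedef VOID (*PMI_SKETCH_PER_PAGE) (ULONG PageFrameIndex);
+
+static VOID
+MiSketchWalkWritePages (
+    IN PULONG Page,
+    IN LONG ByteCount,
+    IN PMI_SKETCH_PER_PAGE PerPage
+    )
+{
+    while (ByteCount > 0) {
+
+        //
+        // For each page of the completed write, e.g. clear the
+        // write-in-progress bit and decrement the reference count.
+        //
+
+        PerPage (*Page);
+        Page += 1;
+        ByteCount -= (LONG)PAGE_SIZE;
+    }
+}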
+
+VOID
+MiModifiedPageWriter (
+ IN PVOID StartContext
+ )
+
+/*++
+
+Routine Description:
+
+    Implements the NT modified page writer thread. When the modified
+    page threshold is reached, or memory becomes overcommitted, the
+    modified page writer event is set and this thread becomes active.
+
+Arguments:
+
+ StartContext - not used.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ HANDLE ThreadHandle;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ ULONG i;
+
+ PAGED_CODE();
+
+ StartContext; //avoid compiler warning.
+
+ //
+ // Initialize listheads as empty.
+ //
+
+ MmSystemShutdown = 0;
+ KeInitializeEvent (&MmPagingFileHeader.Event, NotificationEvent, FALSE);
+ KeInitializeEvent (&MmMappedFileHeader.Event, NotificationEvent, FALSE);
+
+ InitializeListHead(&MmPagingFileHeader.ListHead);
+ InitializeListHead(&MmMappedFileHeader.ListHead);
+ InitializeListHead(&MmFreePagingSpaceLow);
+
+ for (i = 0; i < MM_MAPPED_FILE_MDLS; i++) {
+ MmMappedFileMdl[i] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ sizeof(MMMOD_WRITER_MDL_ENTRY) +
+ MmModifiedWriteClusterSize *
+ sizeof(ULONG),
+ ' mM');
+
+ MmMappedFileMdl[i]->PagingFile = NULL;
+ MmMappedFileMdl[i]->PagingListHead = &MmMappedFileHeader;
+
+ InsertTailList (&MmMappedFileHeader.ListHead,
+ &MmMappedFileMdl[i]->Links);
+ }
+
+ //
+ // Make this a real time thread.
+ //
+
+ (VOID) KeSetPriorityThread (&PsGetCurrentThread()->Tcb,
+ LOW_REALTIME_PRIORITY + 1);
+
+
+ //
+ // Wait for a paging file to be created.
+ //
+
+ KeWaitForSingleObject (MmPagingFileCreated,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ ExFreePool (MmPagingFileCreated);
+
+ //
+ // Start a secondary thread for writing mapped file pages. This
+ // is required as the writing of mapped file pages could cause
+ // page faults resulting in requests for free pages. But there
+    // could be no free pages - hence a deadlock. Rather than deadlock
+    // the whole system waiting on the modified page writer, creating
+    // a secondary thread allows that thread to block without affecting
+    // ongoing page file writes.
+ //
+
+ KeInitializeEvent (&MmMappedPageWriterEvent, NotificationEvent, FALSE);
+ InitializeListHead(&MmMappedPageWriterList);
+ InitializeObjectAttributes( &ObjectAttributes, NULL, 0, NULL, NULL );
+
+ PsCreateSystemThread (&ThreadHandle,
+ THREAD_ALL_ACCESS,
+ &ObjectAttributes,
+ 0L,
+ NULL,
+ MiMappedPageWriter,
+ NULL );
+ ZwClose (ThreadHandle);
+ MiModifiedPageWriterWorker();
+
+ //
+ // Shutdown in progress, wait forever.
+ //
+
+ {
+ LARGE_INTEGER Forever;
+
+ //
+ // System has shutdown, go into LONG wait.
+ //
+
+ Forever.LowPart = 0;
+ Forever.HighPart = 0xF000000;
+ KeDelayExecutionThread (KernelMode, FALSE, &Forever);
+ }
+
+ return;
+}
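+
+//
+// An illustrative sketch of the variable-length layout allocated
+// above, assuming a simplified entry type: the MDL entry carries a
+// trailing page frame array so Entry->Page[0..ClusterSize-1] can be
+// filled in by the gather routines. The type name is hypothetical.
+//
+
+typedef struct _MI_SKETCH_MDL_ENTRY {
+    LIST_ENTRY Links;
+    MDL Mdl;
+    ULONG Page[1];              // grows with the allocation below
+} MI_SKETCH_MDL_ENTRY, *PMI_SKETCH_MDL_ENTRY;
+
+//
+// Allocation size for a cluster of ClusterSize pages, mirroring the
+// sizing in MiModifiedPageWriter above:
+//
+//     sizeof(MI_SKETCH_MDL_ENTRY) + ClusterSize * sizeof(ULONG)
+//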
+
+VOID
+MiModifiedPageWriterWorker (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    Implements the main loop of the NT modified page writer thread.
+    When the modified page threshold is reached, or memory becomes
+    overcommitted, the modified page writer event is set and this
+    loop becomes active.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+ ULONG NextColor;
+ ULONG i;
+
+ //
+ // Wait for the modified page writer event AND the PFN mutex.
+ //
+
+
+ for (;;) {
+
+ KeWaitForSingleObject (&MmModifiedPageWriterEvent,
+ WrFreePage,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ //
+ // Indicate that the hint values have not been reset in
+ // the paging files.
+ //
+
+ i = 0;
+ do {
+ MmPagingFile[i]->HintSetToZero = FALSE;
+ i += 1;
+ } while (i < MmNumberOfPagingFiles);
+
+ NextColor = 0;
+
+ LOCK_PFN (OldIrql);
+
+ for (;;) {
+
+ //
+ // Modified page writer was signalled.
+ //
+
+ if ((MmAvailablePages < MmFreeGoal) &&
+ (MmModNoWriteInsert)) {
+
+ //
+ // Remove pages from the modified no write list
+ // that are waiting for the cache manager to flush them.
+ //
+
+ i = 0;
+ while ((MmModifiedNoWritePageListHead.Total != 0) &&
+ (i < 32)) {
+ PSUBSECTION Subsection;
+ PCONTROL_AREA ControlArea;
+
+ PageFrameIndex = MmModifiedNoWritePageListHead.Flink;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ ControlArea = Subsection->ControlArea;
+ if (ControlArea->u.Flags.NoModifiedWriting) {
+ MmModNoWriteInsert = FALSE;
+ break;
+ }
+ MiUnlinkPageFromList (Pfn1);
+ MiInsertPageInList (&MmModifiedPageListHead,
+ PageFrameIndex);
+ i += 1;
+ }
+ }
+
+ if (MmModifiedPageListHead.Total == 0) {
+
+ //
+ // No more pages, clear the event and wait again...
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ KeClearEvent (&MmModifiedPageWriterEvent);
+
+ break;
+ }
+
+ //
+            // Determine which type of page is the most popular:
+            // page file backed pages or mapped file backed pages.
+            // The test below holds when at least half of the
+            // modified pages are destined for the paging file.
+ //
+
+ if (MmTotalPagesForPagingFile >=
+ (MmModifiedPageListHead.Total - MmTotalPagesForPagingFile)) {
+
+ //
+ // More pages are destined for the paging file.
+ //
+
+ MI_GET_MODIFIED_PAGE_ANY_COLOR (PageFrameIndex, NextColor);
+
+ } else {
+
+ //
+ // More pages are destined for mapped files.
+ //
+
+ PageFrameIndex = MmModifiedPageListHead.Flink;
+ }
+
+ //
+            // Check what type of page this is (section file backed or
+            // page file backed) and write out that page, and more if
+            // possible.
+ //
+
+ //
+ // Check to see if this page is destined for a paging file or
+ // a mapped file.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 1) {
+ if (IsListEmpty (&MmMappedFileHeader.ListHead)) {
+
+ //
+                    // Make sure the page is destined for the paging file
+                    // as there are no free MDLs for mapped writes.
+ //
+
+ MI_GET_MODIFIED_PAGE_ANY_COLOR (PageFrameIndex, NextColor);
+
+ //
+ // No pages are destined for the paging file, get the
+ // first page destined for a mapped file.
+ //
+
+ if (PageFrameIndex == MM_EMPTY_LIST) {
+
+ //
+ // Select the first page from the list anyway.
+ //
+
+ PageFrameIndex = MmModifiedPageListHead.Flink;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ }
+ } else if (IsListEmpty(&MmPagingFileHeader.ListHead)) {
+
+ //
+ // Check to see if there are no paging file MDLs
+ // available.
+ //
+
+ while (Pfn1->OriginalPte.u.Soft.Prototype == 0) {
+ PageFrameIndex = Pfn1->u1.Flink;
+ if (PageFrameIndex == MM_EMPTY_LIST) {
+
+ MI_GET_MODIFIED_PAGE_ANY_COLOR (PageFrameIndex,
+ NextColor);
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ break;
+ }
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ }
+ }
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 1) {
+
+ if (IsListEmpty(&MmMappedFileHeader.ListHead)) {
+
+ //
+ // Reset the event indicating no mapped files in
+ // the list, drop the PFN lock and wait for an
+                    // I/O operation to complete, with a 30 millisecond
+                    // timeout.
+ //
+
+ KeClearEvent (&MmMappedFileHeader.Event);
+
+ UNLOCK_PFN (OldIrql);
+ KeWaitForSingleObject( &MmMappedFileHeader.Event,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ &Mm30Milliseconds);
+ LOCK_PFN (OldIrql);
+
+ //
+ // Don't go on as the old PageFrameIndex at the
+ // top of the ModifiedList may have changed states.
+ //
+
+ continue;
+ }
+
+ MiGatherMappedPages (Pfn1, PageFrameIndex);
+
+ } else {
+
+ MiGatherPagefilePages (Pfn1, PageFrameIndex);
+ }
+
+ if (MmSystemShutdown) {
+
+ //
+ // Shutdown has returned. Stop the modified page writer.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return;
+ }
+
+ if (!MmWriteAllModifiedPages) {
+ if (((MmAvailablePages > MmFreeGoal) &&
+ (MmModifiedPageListHead.Total < MmFreeGoal))
+ ||
+ (MmAvailablePages > MmMoreThanEnoughFreePages)) {
+
+ //
+ // There are ample pages, clear the event and wait again...
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ KeClearEvent (&MmModifiedPageWriterEvent);
+ break;
+ }
+ }
+ } // end for
+
+ } // end for
+}
+
+VOID
+MiGatherMappedPages (
+ IN PMMPFN Pfn1,
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+    This routine processes the specified modified page by examining
+    the prototype PTE for that page and the adjacent prototype PTEs,
+    building a cluster of modified pages destined for a mapped file.
+ Once the cluster is built, it is sent to the mapped writer thread
+ to be processed.
+
+Arguments:
+
+    Pfn1 - Supplies a pointer to the PFN element for the corresponding
+ page.
+
+ PageFrameIndex - Supplies the physical page frame to write.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ PFN lock held.
+
+--*/
+
+{
+ PMMPFN Pfn2;
+ PMMMOD_WRITER_MDL_ENTRY ModWriterEntry;
+ PSUBSECTION Subsection;
+ PCONTROL_AREA ControlArea;
+ PULONG Page;
+ PMMPTE LastPte;
+ PMMPTE BasePte;
+ PMMPTE NextPte;
+ PMMPTE PointerPte;
+ PMMPTE StartingPte;
+ MMPTE PteContents;
+ LARGE_INTEGER TempOffset;
+ KIRQL OldIrql = 0;
+ KIRQL OldIrql2;
+
+ //
+ // This page is destined for a mapped file, check to see if
+    // any physically adjacent pages are also in the
+ // modified page list and write them out at the same time.
+ //
+
+ Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ ControlArea = Subsection->ControlArea;
+
+ if (ControlArea->u.Flags.NoModifiedWriting) {
+
+ //
+ // This page should not be written out, add it to the
+ // tail of the modified NO WRITE list and get the next page.
+ //
+
+ MiUnlinkPageFromList (Pfn1);
+ MiInsertPageInList (MmPageLocationList[ModifiedNoWritePageList],
+ PageFrameIndex);
+ return;
+ }
+
+ if (ControlArea->u.Flags.Image) {
+
+ //
+ // Assert that there are no dangling shared global pages
+        // for an image section that is not being used.
+ //
+
+ ASSERT ((ControlArea->NumberOfMappedViews != 0) ||
+ (ControlArea->NumberOfSectionReferences != 0) ||
+ (ControlArea->u.Flags.FloppyMedia != 0));
+
+ //
+ // This is an image section, writes are not
+ // allowed to an image section.
+ //
+
+ //
+ // Change page contents to look like it's a demand zero
+ // page and put it back into the modified list.
+ //
+
+ //
+ // Decrement the count for PfnReferences to the
+ // segment as paging file pages are not counted as
+ // "image" references.
+ //
+
+ ControlArea->NumberOfPfnReferences -= 1;
+ ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
+ MiUnlinkPageFromList (Pfn1);
+
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ Pfn1->OriginalPte.u.Soft.Prototype = 0;
+ Pfn1->OriginalPte.u.Soft.Transition = 0;
+
+ //
+ // Insert the page at the tail of the list and get
+ // color update performed.
+ //
+
+ MiInsertPageInList (MmPageLocationList[ModifiedPageList],
+ PageFrameIndex);
+ return;
+ }
+
+ if ((ControlArea->u.Flags.HadUserReference == 0) &&
+ (MmAvailablePages > (MmFreeGoal + 40)) &&
+ (MmEnoughMemoryForWrite())) {
+
+ //
+ // This page was modified via the cache manager. Don't
+ // write it out at this time as there are ample pages.
+ //
+
+ MiUnlinkPageFromList (Pfn1);
+ MiInsertFrontModifiedNoWrite (PageFrameIndex);
+ MmModNoWriteInsert = TRUE;
+ return;
+ }
+
+ //
+    // Look backwards at the previous prototype PTEs to see if
+ // this can be clustered into a larger write operation.
+ //
+
+ PointerPte = Pfn1->PteAddress;
+ NextPte = PointerPte - (MmModifiedWriteClusterSize - 1);
+
+ //
+ // Make sure NextPte is in the same page.
+ //
+
+ if (NextPte < (PMMPTE)PAGE_ALIGN (PointerPte)) {
+ NextPte = (PMMPTE)PAGE_ALIGN (PointerPte);
+ }
+
+ //
+ // Make sure NextPte is within the subsection.
+ //
+
+ if (NextPte < Subsection->SubsectionBase) {
+ NextPte = Subsection->SubsectionBase;
+ }
+
+ //
+ // If the prototype PTEs are not currently mapped,
+    // map them via hyperspace. BasePte refers to the
+    // mapped prototype PTEs so they can be referenced
+    // without faulting.
+ //
+
+ OldIrql2 = 99;
+ if (MmIsAddressValid (PointerPte)) {
+ BasePte = PointerPte;
+ } else {
+ BasePte = MiMapPageInHyperSpace (Pfn1->PteFrame, &OldIrql2);
+ BasePte = (PMMPTE)((PCHAR)BasePte +
+ BYTE_OFFSET (PointerPte));
+ }
+
+ ASSERT (BasePte->u.Trans.PageFrameNumber == PageFrameIndex);
+
+ PointerPte -= 1;
+ BasePte -= 1;
+
+ //
+ // Don't go before the start of the subsection nor cross
+ // a page boundary.
+ //
+
+ while (PointerPte >= NextPte) {
+
+ PteContents = *BasePte;
+
+ //
+ // If the page is not in transition, exit loop.
+ //
+
+ if ((PteContents.u.Hard.Valid == 1) ||
+ (PteContents.u.Soft.Transition == 0) ||
+ (PteContents.u.Soft.Prototype == 1)) {
+
+ break;
+ }
+
+ Pfn2 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ //
+ // Make sure page is modified and on the modified list.
+ //
+
+ if ((Pfn2->u3.e1.Modified == 0 ) ||
+ (Pfn2->u3.e2.ReferenceCount != 0)) {
+ break;
+ }
+ PageFrameIndex = PteContents.u.Trans.PageFrameNumber;
+ PointerPte -= 1;
+ BasePte -= 1;
+ }
+
+ StartingPte = PointerPte + 1;
+ BasePte = BasePte + 1;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (StartingPte == Pfn1->PteAddress);
+ MiUnlinkPageFromList (Pfn1);
+
+ //
+ // Get an entry from the list and fill it in.
+ //
+
+ ModWriterEntry = (PMMMOD_WRITER_MDL_ENTRY)RemoveHeadList (
+ &MmMappedFileHeader.ListHead);
+
+ ModWriterEntry->File = ControlArea->FilePointer;
+ ModWriterEntry->ControlArea = ControlArea;
+
+ //
+ // Calculate the offset to read into the file.
+ // offset = base + ((thispte - basepte) << PAGE_SHIFT)
+ //
+
+ ModWriterEntry->WriteOffset.QuadPart = MI_STARTING_OFFSET (Subsection,
+ Pfn1->PteAddress);
+
+ MmInitializeMdl(&ModWriterEntry->Mdl,
+ (PVOID)(Pfn1->u3.e1.PageColor << PAGE_SHIFT),
+ PAGE_SIZE);
+
+ ModWriterEntry->Mdl.MdlFlags |= MDL_PAGES_LOCKED;
+
+ ModWriterEntry->Mdl.Size = (CSHORT)(sizeof(MDL) +
+ (sizeof(ULONG) * MmModifiedWriteClusterSize));
+
+ Page = &ModWriterEntry->Page[0];
+
+ //
+ // Up the reference count for the physical page as there
+ // is I/O in progress.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ //
+ // Clear the modified bit for the page and set the write
+ // in progress bit.
+ //
+
+ Pfn1->u3.e1.Modified = 0;
+ Pfn1->u3.e1.WriteInProgress = 1;
+
+ //
+ // Put this physical page into the MDL.
+ //
+
+ *Page = PageFrameIndex;
+
+ //
+ // See if any adjacent pages are also modified and in
+ // the transition state and if so, write them out at
+ // the same time.
+ //
+
+
+ //
+    // Compute the last PTE for the forward scan, ensuring a
+    // page boundary is not crossed.
+ //
+
+ LastPte = StartingPte + MmModifiedWriteClusterSize;
+
+ //
+    // If StartingPte is not in the same page as LastPte,
+    // set LastPte to be the last PTE in this page.
+ //
+
+ if (StartingPte < (PMMPTE)PAGE_ALIGN(LastPte)) {
+ LastPte = ((PMMPTE)PAGE_ALIGN(LastPte)) - 1;
+ }
+
+ //
+ // Make sure LastPte is within the subsection.
+ //
+
+ if (LastPte > &Subsection->SubsectionBase[
+ Subsection->PtesInSubsection]) {
+ LastPte = &Subsection->SubsectionBase[
+ Subsection->PtesInSubsection];
+ }
+
+ //
+ // Look forwards.
+ //
+
+ NextPte = BasePte + 1;
+ PointerPte = StartingPte + 1;
+
+ //
+ // Loop until an MDL is filled, the end of a subsection
+ // is reached, or a page boundary is reached.
+ // Note, PointerPte points to the PTE. NextPte points
+ // to where it is mapped in hyperspace (if required).
+ //
+
+ while (PointerPte < LastPte) {
+
+ PteContents = *NextPte;
+
+ //
+ // If the page is not in transition, exit loop.
+ //
+
+ if ((PteContents.u.Hard.Valid == 1) ||
+ (PteContents.u.Soft.Transition == 0) ||
+ (PteContents.u.Soft.Prototype == 1)) {
+
+ break;
+ }
+
+ Pfn2 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ if ((Pfn2->u3.e1.Modified == 0 ) ||
+ (Pfn2->u3.e2.ReferenceCount != 0)) {
+
+ //
+ // Page is not dirty or not on the modified list,
+ // end clustering operation.
+ //
+
+ break;
+ }
+ Page += 1;
+
+ //
+ // Add physical page to MDL.
+ //
+
+ *Page = PteContents.u.Trans.PageFrameNumber;
+ ASSERT (PointerPte == Pfn2->PteAddress);
+ MiUnlinkPageFromList (Pfn2);
+
+ //
+ // Up the reference count for the physical page as there
+ // is I/O in progress.
+ //
+
+ Pfn2->u3.e2.ReferenceCount += 1;
+
+ //
+ // Clear the modified bit for the page and set the
+ // write in progress bit.
+ //
+
+ Pfn2->u3.e1.Modified = 0;
+ Pfn2->u3.e1.WriteInProgress = 1;
+
+ ModWriterEntry->Mdl.ByteCount += PAGE_SIZE;
+
+ NextPte += 1;
+ PointerPte += 1;
+
+ } //end while
+
+ if (OldIrql2 != 99) {
+ MiUnmapPageInHyperSpace (OldIrql2);
+ }
+
+ ASSERT (BYTES_TO_PAGES (ModWriterEntry->Mdl.ByteCount) <= MmModifiedWriteClusterSize);
+
+ //
+ // Make sure the write does not go past the
+    // end of the file (the segment size).
+ //
+
+ ModWriterEntry->u.LastByte.QuadPart = ModWriterEntry->WriteOffset.QuadPart +
+ ModWriterEntry->Mdl.ByteCount;
+
+ TempOffset.QuadPart =
+ ((LONGLONG)Subsection->EndingSector << MMSECTOR_SHIFT) +
+ Subsection->u.SubsectionFlags.SectorEndOffset;
+
+ if (ModWriterEntry->u.LastByte.QuadPart > TempOffset.QuadPart) {
+
+ ASSERT ((ULONG)(TempOffset.QuadPart -
+ ModWriterEntry->WriteOffset.QuadPart) >
+ (ModWriterEntry->Mdl.ByteCount - PAGE_SIZE));
+
+ ModWriterEntry->Mdl.ByteCount =
+ (ULONG)(TempOffset.QuadPart -
+ ModWriterEntry->WriteOffset.QuadPart);
+ ModWriterEntry->u.LastByte.QuadPart = TempOffset.QuadPart;
+ }
+
+#if DBG
+ if ((ULONG)ModWriterEntry->Mdl.ByteCount >
+ ((1+MmModifiedWriteClusterSize)*PAGE_SIZE)) {
+ DbgPrint("Mdl %lx, TempOffset %lx %lx Subsection %lx\n",
+ ModWriterEntry->Mdl, TempOffset.LowPart, TempOffset.HighPart, Subsection);
+ DbgBreakPoint();
+ }
+#endif //DBG
+
+ MmInfoCounters.MappedWriteIoCount += 1;
+ MmInfoCounters.MappedPagesWriteCount +=
+ (ModWriterEntry->Mdl.ByteCount >> PAGE_SHIFT);
+
+ //
+ // Increment the count of modified page writes outstanding
+ // in the control area.
+ //
+
+ ControlArea->ModifiedWriteCount += 1;
+
+ //
+ // Increment the number of PFN references. This allows the file
+ // system to purge (i.e. call MmPurgeSection) modified writes.
+ //
+
+ ControlArea->NumberOfPfnReferences += 1;
+
+ ModWriterEntry->FileResource = NULL;
+
+ if (ControlArea->u.Flags.BeingPurged == 1) {
+ UNLOCK_PFN (OldIrql);
+ ModWriterEntry->u.IoStatus.Status = STATUS_FILE_LOCK_CONFLICT;
+ ModWriterEntry->u.IoStatus.Information = 0;
+ KeRaiseIrql (APC_LEVEL, &OldIrql);
+ MiWriteComplete ((PVOID)ModWriterEntry,
+ &ModWriterEntry->u.IoStatus,
+ 0 );
+ KeLowerIrql (OldIrql);
+ LOCK_PFN (OldIrql);
+ return;
+ }
+
+ //
+    // Send the entry to the mapped page writer.
+ //
+
+ InsertTailList (&MmMappedPageWriterList,
+ &ModWriterEntry->Links);
+
+ KeSetEvent (&MmMappedPageWriterEvent, 0, FALSE);
+
+
+#if 0
+
+ UNLOCK_PFN (OldIrql);
+
+ ModWriterEntry->FileResource = NULL;
+
+ if (ModWriterEntry->ControlArea->u.Flags.FailAllIo == 1) {
+ Status = STATUS_UNSUCCESSFUL;
+
+ } else if (FsRtlAcquireFileForModWrite (ModWriterEntry->File,
+ &ModWriterEntry->u.LastByte,
+ &ModWriterEntry->FileResource)) {
+
+ //
+ // Issue the write request.
+ //
+
+ Status = IoAsynchronousPageWrite (
+ ModWriterEntry->File,
+ &ModWriterEntry->Mdl,
+ &ModWriterEntry->WriteOffset,
+ MiWriteComplete,
+ (PVOID)ModWriterEntry,
+ &ModWriterEntry->IoStatus,
+ &ModWriterEntry->Irp );
+ } else {
+
+ //
+ // Unable to get the file system resources, set error status
+ // to lock conflict (ignored by MiWriteComplete) so the APC
+ // routine is explicitly called.
+ //
+
+ Status = STATUS_FILE_LOCK_CONFLICT;
+ }
+
+ if (NT_ERROR(Status)) {
+
+ //
+ // An error has occurred, disable APC's and
+ // call the write completion routine.
+ //
+
+ ModWriterEntry->IoStatus.Status = Status;
+ ModWriterEntry->IoStatus.Information = 0;
+ KeRaiseIrql (APC_LEVEL, &OldIrql);
+ MiWriteComplete ((PVOID)ModWriterEntry,
+ &ModWriterEntry->IoStatus,
+ 0 );
+ KeLowerIrql (OldIrql);
+ }
+
+ LOCK_PFN (OldIrql);
+#endif //0
+ return;
+}
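+
+//
+// An illustrative sketch of the clustering bounds used above: the
+// scan around a modified page's prototype PTE is clamped so that it
+// neither crosses the 4K page holding the PTEs nor leaves the
+// subsection's PTE range. The helper name is hypothetical; the
+// clamping rules mirror MiGatherMappedPages.
+//
+
+static VOID
+MiSketchClusterBounds (
+    IN PMMPTE PointerPte,
+    IN PMMPTE SubsectionBase,
+    IN PMMPTE SubsectionEnd,
+    IN ULONG ClusterSize,
+    OUT PMMPTE *FirstPte,
+    OUT PMMPTE *LastPte
+    )
+{
+    PMMPTE First;
+    PMMPTE Last;
+
+    //
+    // Backward limit: at most ClusterSize - 1 PTEs back, never
+    // before the page containing PointerPte, never before the
+    // subsection's first PTE.
+    //
+
+    First = PointerPte - (ClusterSize - 1);
+    if (First < (PMMPTE)PAGE_ALIGN (PointerPte)) {
+        First = (PMMPTE)PAGE_ALIGN (PointerPte);
+    }
+    if (First < SubsectionBase) {
+        First = SubsectionBase;
+    }
+
+    //
+    // Forward (exclusive) limit: at most ClusterSize PTEs ahead,
+    // never past the page containing PointerPte, never past the
+    // end of the subsection.
+    //
+
+    Last = PointerPte + ClusterSize;
+    if (PointerPte < (PMMPTE)PAGE_ALIGN (Last)) {
+        Last = (PMMPTE)PAGE_ALIGN (Last);
+    }
+    if (Last > SubsectionEnd) {
+        Last = SubsectionEnd;
+    }
+
+    *FirstPte = First;
+    *LastPte = Last;
+}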
+
+
+VOID
+MiGatherPagefilePages (
+ IN PMMPFN Pfn1,
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+    This routine processes the specified modified page by gathering
+    it and any other pages on the modified list destined for the
+    paging file into a large write cluster. This cluster is then
+    written to the paging file.
+
+Arguments:
+
+    Pfn1 - Supplies a pointer to the PFN element for the corresponding
+ page.
+
+ PageFrameIndex - Supplies the physical page frame to write.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ PFN lock held.
+
+--*/
+
+{
+ PFILE_OBJECT File;
+ PMMMOD_WRITER_MDL_ENTRY ModWriterEntry;
+ PMMPAGING_FILE CurrentPagingFile;
+ NTSTATUS Status;
+ PULONG Page;
+ ULONG StartBit;
+ LARGE_INTEGER StartingOffset;
+ ULONG ClusterSize;
+ ULONG ThisCluster;
+ ULONG LongPte;
+ KIRQL OldIrql = 0;
+ ULONG NextColor;
+ ULONG PageFileFull = FALSE;
+ //MM_WRITE_CLUSTER WriteCluster;
+
+    //
+    // This page is destined for the paging file.
+    //
+
+    NextColor = Pfn1->u3.e1.PageColor;
+
+    //
+    // Find the paging file with the most free space and get
+    // a cluster.
+    //
+
+ if (IsListEmpty(&MmPagingFileHeader.ListHead)) {
+
+ //
+        // Reset the event indicating no paging file MDLs in
+ // the list, drop the PFN lock and wait for an
+ // I/O operation to complete.
+ //
+
+ KeClearEvent (&MmPagingFileHeader.Event);
+ UNLOCK_PFN (OldIrql);
+ KeWaitForSingleObject( &MmPagingFileHeader.Event,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ &Mm30Milliseconds);
+ LOCK_PFN (OldIrql);
+
+ //
+ // Don't go on as the old PageFrameIndex at the
+ // top of the ModifiedList may have changed states.
+ //
+
+ return;
+ }
+
+ ModWriterEntry = (PMMMOD_WRITER_MDL_ENTRY)RemoveHeadList (
+ &MmPagingFileHeader.ListHead);
+#if DBG
+ ModWriterEntry->Links.Flink = MM_IO_IN_PROGRESS;
+#endif
+ CurrentPagingFile = ModWriterEntry->PagingFile;
+
+ File = ModWriterEntry->PagingFile->File;
+ ThisCluster = MmModifiedWriteClusterSize;
+
+ do {
+ //
+ // Attempt to cluster MmModifiedWriteClusterSize pages
+ // together. Reduce by one half until we succeed or
+ // can't find a single page free in the paging file.
+ //
+
+ if (((CurrentPagingFile->Hint + MmModifiedWriteClusterSize) >
+ CurrentPagingFile->MinimumSize)
+ &&
+ (CurrentPagingFile->HintSetToZero == FALSE)) {
+
+ CurrentPagingFile->HintSetToZero = TRUE;
+ CurrentPagingFile->Hint = 0;
+ }
+
+ StartBit = RtlFindClearBitsAndSet (
+ CurrentPagingFile->Bitmap,
+ ThisCluster,
+ CurrentPagingFile->Hint);
+
+ if (StartBit != 0xFFFFFFFF) {
+ break;
+ }
+ if (CurrentPagingFile->Hint != 0) {
+
+ //
+ // Start looking from front of the file.
+ //
+
+ CurrentPagingFile->Hint = 0;
+ } else {
+ ThisCluster = ThisCluster >> 1;
+ PageFileFull = 1;
+ }
+
+ } while (ThisCluster != 0);
+
+ if (StartBit == 0xFFFFFFFF) {
+
+ //
+ // Paging file must be full.
+ //
+
+ KdPrint(("MM MODWRITE: page file full\n"));
+ ASSERT(CurrentPagingFile->FreeSpace == 0);
+
+ //
+ // Move this entry to the not enough space list,
+ // and try again.
+ //
+
+ InsertTailList (&MmFreePagingSpaceLow,
+ &ModWriterEntry->Links);
+ ModWriterEntry->CurrentList = &MmFreePagingSpaceLow;
+ MmNumberOfActiveMdlEntries -= 1;
+ MiPageFileFull ();
+ return;
+ }
+
+ CurrentPagingFile->FreeSpace -= ThisCluster;
+ CurrentPagingFile->CurrentUsage += ThisCluster;
+ if (CurrentPagingFile->FreeSpace < 32) {
+ PageFileFull = 1;
+ }
+
+ StartingOffset.QuadPart = (LONGLONG)StartBit << PAGE_SHIFT;
+
+ MmInitializeMdl(&ModWriterEntry->Mdl,
+ (PVOID)(Pfn1->u3.e1.PageColor << PAGE_SHIFT),
+ PAGE_SIZE);
+
+ ModWriterEntry->Mdl.MdlFlags |= MDL_PAGES_LOCKED;
+
+ ModWriterEntry->Mdl.Size = (CSHORT)(sizeof(MDL) +
+ sizeof(ULONG) * MmModifiedWriteClusterSize);
+
+ Page = &ModWriterEntry->Page[0];
+
+ ClusterSize = 0;
+
+ //
+ // Search through the modified page list looking for other
+ // pages destined for the paging file and build a cluster.
+ //
+
+ while (ClusterSize != ThisCluster) {
+
+ //
+ // Is this page destined for a paging file?
+ //
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 0) {
+
+#if 0 //********* commented out
+
+ MiClusterWritePages (Pfn1,
+ PageFrameIndex,
+ &WriteCluster,
+ ThisCluster - ClusterSize);
+ do {
+
+ PageFrameIndex = WriteCluster.Cluster[WriteCluster.StartIndex];
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+#endif //0
+ *Page = PageFrameIndex;
+
+ //
+ // Remove the page from the modified list. Note that
+ // write-in-progress marks the state.
+ //
+
+ //
+ // Unlink the page so the same page won't be found
+ // on the modified page list by color.
+ //
+
+ MiUnlinkPageFromList (Pfn1);
+ NextColor = MI_GET_NEXT_COLOR(NextColor);
+
+ MI_GET_MODIFIED_PAGE_BY_COLOR (PageFrameIndex,
+ NextColor);
+
+ //
+ // Up the reference count for the physical page as there
+ // is I/O in progress.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ //
+ // Clear the modified bit for the page and set the
+ // write in progress bit.
+ //
+
+ Pfn1->u3.e1.Modified = 0;
+ Pfn1->u3.e1.WriteInProgress = 1;
+ ASSERT (Pfn1->OriginalPte.u.Soft.PageFileHigh == 0);
+
+ LongPte = SET_PAGING_FILE_INFO (
+ Pfn1->OriginalPte,
+ CurrentPagingFile->PageFileNumber,
+ StartBit);
+#if DBG
+ if ((StartBit < 8192) &&
+ (CurrentPagingFile->PageFileNumber == 0)) {
+ ASSERT ((MmPagingFileDebug[StartBit] & 1) == 0);
+ MmPagingFileDebug[StartBit] =
+ (((ULONG)Pfn1->PteAddress & 0xFFFFFFF) |
+ (ClusterSize << 28) | 1);
+ }
+#endif //DBG
+
+ //
+ // Change the original PTE contents to refer to
+ // the paging file offset where this was written.
+ //
+
+ Pfn1->OriginalPte.u.Long = LongPte;
+ ClusterSize += 1;
+ Page += 1;
+ StartBit += 1;
+#if 0 // COMMENTED OUT
+ WriteCluster.Count -= 1;
+ WriteCluster.StartIndex += 1;
+
+ } while (WriteCluster.Count != 0);
+#endif //0
+ } else {
+
+ //
+ // This page was not destined for a paging file,
+ // get another page.
+ //
+ // Get a page of the same color as the one which
+ // was not usable.
+ //
+
+ MI_GET_MODIFIED_PAGE_BY_COLOR (PageFrameIndex,
+ NextColor);
+ }
+
+ if (PageFrameIndex == MM_EMPTY_LIST) {
+ break;
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ } //end while
+
+ if (ClusterSize != ThisCluster) {
+
+ //
+ // A complete cluster could not be located, free the
+ // excess page file space that was reserved and adjust
+ // the size of the packet.
+ //
+
+ RtlClearBits (CurrentPagingFile->Bitmap,
+ StartBit,
+ ThisCluster - ClusterSize );
+
+ CurrentPagingFile->FreeSpace += ThisCluster - ClusterSize;
+ CurrentPagingFile->CurrentUsage -= ThisCluster - ClusterSize;
+
+ //
+        // If there are no pages to write, don't issue a write
+ // request and restart the scan loop.
+ //
+
+ if (ClusterSize == 0) {
+
+ //
+            // No pages to write. Insert the entry back into the
+            // list.
+ //
+
+ if (IsListEmpty (&ModWriterEntry->PagingListHead->ListHead)) {
+ KeSetEvent (&ModWriterEntry->PagingListHead->Event,
+ 0,
+ FALSE);
+ }
+
+ InsertTailList (&ModWriterEntry->PagingListHead->ListHead,
+ &ModWriterEntry->Links);
+
+ return;
+ }
+ }
+
+ if (CurrentPagingFile->PeakUsage <
+ CurrentPagingFile->CurrentUsage) {
+ CurrentPagingFile->PeakUsage =
+ CurrentPagingFile->CurrentUsage;
+ }
+
+ ModWriterEntry->Mdl.ByteCount = ClusterSize * PAGE_SIZE;
+ ModWriterEntry->LastPageToWrite = StartBit - 1;
+
+ MmInfoCounters.DirtyWriteIoCount += 1;
+ MmInfoCounters.DirtyPagesWriteCount += ClusterSize;
+
+ //
+    // Release the PFN mutex and issue the write; completion is
+    // handled asynchronously by the MiWriteComplete APC routine.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+#if DBG
+ if (MmDebug & MM_DBG_MOD_WRITE) {
+ DbgPrint("MM MODWRITE: modified page write begun @ %08lx by %08lx\n",
+ StartingOffset.LowPart, ModWriterEntry->Mdl.ByteCount);
+ }
+#endif
+
+ //
+ // Issue the write request.
+ //
+
+ Status = IoAsynchronousPageWrite ( File,
+ &ModWriterEntry->Mdl,
+ &StartingOffset,
+ MiWriteComplete,
+ (PVOID)ModWriterEntry,
+ &ModWriterEntry->u.IoStatus,
+ &ModWriterEntry->Irp );
+
+ if (NT_ERROR(Status)) {
+ KdPrint(("MM MODWRITE: modified page write failed %lx\n", Status));
+
+ //
+ // An error has occurred, disable APC's and
+ // call the write completion routine.
+ //
+
+ ModWriterEntry->u.IoStatus.Status = Status;
+ ModWriterEntry->u.IoStatus.Information = 0;
+ KeRaiseIrql (APC_LEVEL, &OldIrql);
+ MiWriteComplete ((PVOID)ModWriterEntry,
+ &ModWriterEntry->u.IoStatus,
+ 0 );
+ KeLowerIrql (OldIrql);
+ }
+
+ LOCK_PFN (OldIrql);
+
+ if (PageFileFull) {
+ MiPageFileFull ();
+ }
+
+ return;
+}
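+
+//
+// A condensed sketch of the cluster allocation policy above: try to
+// reserve ClusterSize contiguous free pages in the paging file's
+// bitmap, retry once from the front of the file, then halve the
+// request until a run is found or the request shrinks to nothing.
+// The helper name and the in/out cluster size are hypothetical; the
+// policy mirrors the do/while loop in MiGatherPagefilePages.
+//
+
+static ULONG
+MiSketchReserveCluster (
+    IN PRTL_BITMAP Bitmap,
+    IN OUT PULONG ClusterSize,
+    IN ULONG Hint
+    )
+{
+    ULONG StartBit = 0xFFFFFFFF;
+
+    while (*ClusterSize != 0) {
+
+        StartBit = RtlFindClearBitsAndSet (Bitmap,
+                                           *ClusterSize,
+                                           Hint);
+        if (StartBit != 0xFFFFFFFF) {
+            break;
+        }
+
+        if (Hint != 0) {
+            Hint = 0;                       // retry from the front once
+        } else {
+            *ClusterSize = *ClusterSize >> 1;   // halve and retry
+        }
+    }
+
+    return StartBit;                        // 0xFFFFFFFF if file is full
+}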
+
+
+#if 0 // COMMENTED OUT **************************************************
+ULONG ClusterCounts[20]; //fixfix
+ULONG ClusterSizes[20];
+VOID
+MiClusterWritePages (
+ IN PMMPFN Pfn1,
+ IN ULONG PageFrameIndex,
+ IN PMM_WRITE_CLUSTER WriteCluster,
+ IN ULONG Size
+ )
+
+{
+ PMMPTE PointerClusterPte;
+ PMMPTE OriginalPte;
+ PMMPTE StopPte;
+ PMMPTE ThisPage;
+ PMMPTE BasePage;
+ ULONG Start;
+ PMMPFN Pfn2;
+ KIRQL OldIrql = 99;
+
+ Start = MM_MAXIMUM_DISK_IO_SIZE / PAGE_SIZE;
+ WriteCluster->Cluster[Start] = PageFrameIndex;
+ WriteCluster->Count = 1;
+ ClusterSizes[Size] += 1; //fixfix
+ if (Size == 1) {
+ WriteCluster->StartIndex = Start;
+ return;
+ }
+
+ //
+ // The page points to a page table page which may not be
+ // for the current process. Map the page into hyperspace
+    // and reference it through hyperspace.
+ //
+
+ PointerClusterPte = Pfn1->PteAddress;
+ BasePage = (PMMPTE)((ULONG)PointerClusterPte & ~(PAGE_SIZE - 1));
+ ThisPage = BasePage;
+
+ if ((PointerClusterPte < (PMMPTE)PDE_TOP) ||
+ (!MmIsAddressValid (PointerClusterPte))) {
+
+ //
+ // Map page into hyperspace as it is either a page table
+ // page or nonresident paged pool.
+ //
+
+ PointerClusterPte = (PMMPTE)((PCHAR)MiMapPageInHyperSpace (
+ Pfn1->PteFrame, &OldIrql)
+ +
+ BYTE_OFFSET (PointerClusterPte));
+ ThisPage = (PMMPTE)((ULONG)PointerClusterPte & ~(PAGE_SIZE - 1));
+ }
+
+ OriginalPte = PointerClusterPte;
+ ASSERT (PointerClusterPte->u.Trans.PageFrameNumber == PageFrameIndex);
+
+ //
+    // Check backwards and forwards for other pages from this process
+ // destined for the paging file.
+ //
+
+ StopPte = PointerClusterPte - (Size - 1);
+ if (StopPte < ThisPage) {
+ StopPte = ThisPage;
+ }
+
+ while (PointerClusterPte > StopPte) {
+ PointerClusterPte -= 1;
+
+ //
+ // Look for the pointer at start of segment, quit as this is NOT
+ // a prototype PTE. Normal PTEs will not match this.
+ //
+
+ if (BasePage != (PMMPTE)
+ (ULONG)(PointerClusterPte->u.Long & ~(PAGE_SIZE - 1))) {
+
+ if ((PointerClusterPte->u.Hard.Valid == 0) &&
+ (PointerClusterPte->u.Soft.Prototype == 0) &&
+ (PointerClusterPte->u.Soft.Transition == 1)) {
+
+ //
+ // PTE is in transition state, see if it is modified.
+ //
+
+ PageFrameIndex = PointerClusterPte->u.Trans.PageFrameNumber;
+ Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
+ ASSERT (Pfn2->OriginalPte.u.Soft.Prototype == 0);
+ if ((Pfn2->u3.e1.Modified != 0 ) &&
+ (Pfn2->u3.e2.ReferenceCount == 0)) {
+
+ Start -= 1;
+ WriteCluster->Count += 1;
+ WriteCluster->Cluster[Start] = PageFrameIndex;
+ }
+ }
+ }
+ break;
+ }
+
+ WriteCluster->StartIndex = Start;
+ PointerClusterPte = OriginalPte + 1;
+ Start = MM_MAXIMUM_DISK_IO_SIZE / PAGE_SIZE;
+
+ //
+ // Remove pages looking forward from PointerClusterPte until
+ // a cluster is filled or a PTE is not on the modified list.
+ //
+
+ ThisPage = (PMMPTE)((PCHAR)ThisPage + PAGE_SIZE);
+
+ while ((WriteCluster->Count < Size) &&
+ (PointerClusterPte < ThisPage)) {
+
+ if ((PointerClusterPte->u.Hard.Valid == 0) &&
+ (PointerClusterPte->u.Soft.Prototype == 0) &&
+ (PointerClusterPte->u.Soft.Transition == 1)) {
+
+ //
+ // PTE is in transition state, see if it is modified.
+ //
+
+ PageFrameIndex = PointerClusterPte->u.Trans.PageFrameNumber;
+ Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
+ ASSERT (Pfn2->OriginalPte.u.Soft.Prototype == 0);
+ if ((Pfn2->u3.e1.Modified != 0 ) &&
+ (Pfn2->u3.e2.ReferenceCount == 0)) {
+
+ Start += 1;
+ WriteCluster->Count += 1;
+ WriteCluster->Cluster[Start] = PageFrameIndex;
+ PointerClusterPte += 1;
+ continue;
+ }
+ }
+ break;
+ }
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+ ClusterCounts[WriteCluster->Count] += 1;
+ return;
+}
+#endif // COMMENTED OUT **************************************************
+
+
+VOID
+MiMappedPageWriter (
+ IN PVOID StartContext
+ )
+
+/*++
+
+Routine Description:
+
+ Implements the NT secondary modified page writer thread.
+ Requests for writes to mapped files are sent to this thread.
+ This is required as the writing of mapped file pages could cause
+ page faults resulting in requests for free pages. But there
+ could be no free pages - hence a dead lock. Rather than deadlock
+    could be no free pages - hence a deadlock. Rather than deadlock
+    the whole system waiting on the modified page writer, creating
+    a secondary thread allows that thread to block without affecting
+    ongoing page file writes.
+Arguments:
+
+ StartContext - not used.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMMOD_WRITER_MDL_ENTRY ModWriterEntry;
+ KIRQL OldIrql = 0;
+ NTSTATUS Status;
+ KEVENT TempEvent;
+
+
+ //
+ // Make this a real time thread.
+ //
+
+ (VOID) KeSetPriorityThread (&PsGetCurrentThread()->Tcb,
+ LOW_REALTIME_PRIORITY + 1);
+
+ //
+ // Let the file system know that we are getting resources.
+ //
+
+ FsRtlSetTopLevelIrpForModWriter();
+
+ KeInitializeEvent (&TempEvent, NotificationEvent, FALSE);
+
+ while (TRUE) {
+ KeWaitForSingleObject (&MmMappedPageWriterEvent,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ LOCK_PFN (OldIrql);
+ if (IsListEmpty (&MmMappedPageWriterList)) {
+ KeClearEvent (&MmMappedPageWriterEvent);
+ UNLOCK_PFN (OldIrql);
+ } else {
+
+ ModWriterEntry = (PMMMOD_WRITER_MDL_ENTRY)RemoveHeadList (
+ &MmMappedPageWriterList);
+
+ UNLOCK_PFN (OldIrql);
+
+
+ if (ModWriterEntry->ControlArea->u.Flags.FailAllIo == 1) {
+ Status = STATUS_UNSUCCESSFUL;
+
+ } else if (FsRtlAcquireFileForModWrite (ModWriterEntry->File,
+ &ModWriterEntry->u.LastByte,
+ &ModWriterEntry->FileResource)) {
+
+ //
+ // Issue the write request.
+ //
+
+ Status = IoAsynchronousPageWrite (
+ ModWriterEntry->File,
+ &ModWriterEntry->Mdl,
+ &ModWriterEntry->WriteOffset,
+ MiWriteComplete,
+ (PVOID)ModWriterEntry,
+ &ModWriterEntry->u.IoStatus,
+ &ModWriterEntry->Irp );
+ } else {
+
+ //
+ // Unable to get the file system resources, set error status
+ // to lock conflict (ignored by MiWriteComplete) so the APC
+ // routine is explicitly called.
+ //
+
+ Status = STATUS_FILE_LOCK_CONFLICT;
+ }
+
+ if (NT_ERROR(Status)) {
+
+ //
+ // An error has occurred, disable APC's and
+ // call the write completion routine.
+ //
+
+ ModWriterEntry->u.IoStatus.Status = Status;
+ ModWriterEntry->u.IoStatus.Information = 0;
+ KeRaiseIrql (APC_LEVEL, &OldIrql);
+ MiWriteComplete ((PVOID)ModWriterEntry,
+ &ModWriterEntry->u.IoStatus,
+ 0 );
+ KeLowerIrql (OldIrql);
+ }
+#if 0
+ //TEMPORARY code to use synchronous I/O here.
+
+ //
+ // Issue the write request.
+ //
+
+ Status = IoSynchronousPageWrite (
+ ModWriterEntry->File,
+ &ModWriterEntry->Mdl,
+ &ModWriterEntry->WriteOffset,
+ &TempEvent,
+ &ModWriterEntry->u.IoStatus );
+
+ if (NT_ERROR(Status)) {
+ ModWriterEntry->u.IoStatus.Status = Status;
+ ModWriterEntry->u.IoStatus.Information = 0;
+ }
+
+ if (NT_ERROR(ModWriterEntry->u.IoStatus.Status)) {
+ KdPrint(("MM MODWRITE: modified page write failed %lx\n", Status));
+ }
+
+ //
+ // Call the write completion routine.
+ //
+
+ KeRaiseIrql (APC_LEVEL, &OldIrql);
+ MiWriteComplete ((PVOID)ModWriterEntry,
+ &ModWriterEntry->IoStatus,
+ 0 );
+ KeLowerIrql (OldIrql);
+#endif //0
+
+ }
+
+ }
+}
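+
+//
+// An illustrative sketch of the manual completion pattern used above
+// and in MiGatherPagefilePages: when a write cannot be issued, the
+// failure status is stored in the entry's IOSB and the APC completion
+// routine is called directly at APC_LEVEL, so one code path handles
+// both real and synthesized completions. The helper name is
+// hypothetical; the body mirrors the error path above.
+//
+
+static VOID
+MiSketchFailWrite (
+    IN PMMMOD_WRITER_MDL_ENTRY ModWriterEntry,
+    IN NTSTATUS FailureStatus
+    )
+{
+    KIRQL OldIrql;
+
+    ModWriterEntry->u.IoStatus.Status = FailureStatus;
+    ModWriterEntry->u.IoStatus.Information = 0;
+
+    KeRaiseIrql (APC_LEVEL, &OldIrql);
+    MiWriteComplete ((PVOID)ModWriterEntry,
+                     &ModWriterEntry->u.IoStatus,
+                     0 );
+    KeLowerIrql (OldIrql);
+}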
+
+BOOLEAN
+MmDisableModifiedWriteOfSection (
+ IN PSECTION_OBJECT_POINTERS SectionObjectPointer
+ )
+
+/*++
+
+Routine Description:
+
+ This function disables page writing by the modified page writer for
+ the section which is mapped by the specified file object pointer.
+
+ This should only be used for files which CANNOT be mapped by user
+ programs, e.g., volume files, directory files, etc.
+
+Arguments:
+
+    SectionObjectPointer - Supplies a pointer to the file's section object pointers.
+
+
+Return Value:
+
+    Returns TRUE if the operation was a success. Returns FALSE if
+    there is no section; if the section already has views, the
+    section's current no-modified-writing state is returned instead.
+
+--*/
+
+{
+ PCONTROL_AREA ControlArea;
+ KIRQL OldIrql;
+    BOOLEAN state = TRUE;
+
+ LOCK_PFN (OldIrql);
+
+ ControlArea = ((PCONTROL_AREA)(SectionObjectPointer->DataSectionObject));
+
+ if (ControlArea != NULL) {
+ if (ControlArea->NumberOfMappedViews == 0) {
+
+ //
+            // There are no views to this section, indicate no modified
+ // page writing is allowed.
+ //
+
+ ControlArea->u.Flags.NoModifiedWriting = 1;
+ } else {
+
+ //
+ // Return the current modified page writing state.
+ //
+
+ state = ControlArea->u.Flags.NoModifiedWriting;
+ }
+ } else {
+
+ //
+ // This file no longer has an associated segment.
+ //
+
+        state = FALSE;
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return state;
+}
+
+
+#define ROUND_UP(VALUE,ROUND) ((ULONG)(((ULONG)VALUE + \
+ ((ULONG)ROUND - 1L)) & (~((ULONG)ROUND - 1L))))
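+
+//
+// ROUND_UP assumes a power-of-two rounding granularity (the mask form
+// only works for powers of two); for example, ROUND_UP(10, 4) == 12
+// and ROUND_UP(12, 4) == 12. It is used below to keep each embedded
+// name buffer ULONG aligned.
+//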
+NTSTATUS
+MmGetPageFileInformation (
+ OUT PVOID SystemInformation,
+ IN ULONG SystemInformationLength,
+ OUT PULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns information about the currently active paging
+ files.
+
+Arguments:
+
+ SystemInformation - Returns the paging file information.
+
+    SystemInformationLength - Supplies the length of the SystemInformation
+ buffer.
+
+ Length - Returns the length of the paging file information placed in the
+ buffer.
+
+Return Value:
+
+ Returns the status of the operation.
+
+--*/
+
+{
+ PSYSTEM_PAGEFILE_INFORMATION PageFileInfo;
+ ULONG NextEntryOffset = 0;
+ ULONG TotalSize = 0;
+ ULONG i;
+
+ PAGED_CODE();
+
+ *Length = 0;
+ PageFileInfo = (PSYSTEM_PAGEFILE_INFORMATION)SystemInformation;
+
+ PageFileInfo->TotalSize = 0;
+
+ for (i = 0; i < MmNumberOfPagingFiles; i++) {
+ PageFileInfo = (PSYSTEM_PAGEFILE_INFORMATION)(
+ (PUCHAR)PageFileInfo + NextEntryOffset);
+ NextEntryOffset = sizeof(SYSTEM_PAGEFILE_INFORMATION);
+ TotalSize += sizeof(SYSTEM_PAGEFILE_INFORMATION);
+
+ if (TotalSize > SystemInformationLength) {
+ return STATUS_INFO_LENGTH_MISMATCH;
+ }
+
+ PageFileInfo->TotalSize = MmPagingFile[i]->Size;
+ PageFileInfo->TotalInUse = MmPagingFile[i]->CurrentUsage;
+ PageFileInfo->PeakUsage = MmPagingFile[i]->PeakUsage;
+ PageFileInfo->PageFileName.Length =
+ MmPagingFile[i]->PageFileName.Length;
+ PageFileInfo->PageFileName.MaximumLength =
+ MmPagingFile[i]->PageFileName.Length + sizeof(WCHAR);
+ PageFileInfo->PageFileName.Buffer = (PWCHAR)(PageFileInfo + 1);
+ TotalSize += ROUND_UP (PageFileInfo->PageFileName.MaximumLength,
+ sizeof(ULONG));
+ NextEntryOffset += ROUND_UP (PageFileInfo->PageFileName.MaximumLength,
+ sizeof(ULONG));
+
+ if (TotalSize > SystemInformationLength) {
+ return STATUS_INFO_LENGTH_MISMATCH;
+ }
+ RtlMoveMemory(PageFileInfo->PageFileName.Buffer,
+ MmPagingFile[i]->PageFileName.Buffer,
+ MmPagingFile[i]->PageFileName.Length);
+ PageFileInfo->PageFileName.Buffer[
+ MmPagingFile[i]->PageFileName.Length/sizeof(WCHAR)] = UNICODE_NULL;
+ PageFileInfo->NextEntryOffset = NextEntryOffset;
+ }
+ PageFileInfo->NextEntryOffset = 0;
+ *Length = TotalSize;
+ return(STATUS_SUCCESS);
+}
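+
+//
+// An illustrative sketch of how a caller walks the variable-length
+// records produced above: each SYSTEM_PAGEFILE_INFORMATION is
+// followed by its name buffer, and NextEntryOffset gives the byte
+// distance to the next record, with zero terminating the walk. The
+// helper name is hypothetical.
+//
+
+static VOID
+MiSketchWalkPageFileInfo (
+    IN PSYSTEM_PAGEFILE_INFORMATION Info
+    )
+{
+    for (;;) {
+
+        //
+        // Examine Info->TotalSize, Info->TotalInUse, Info->PeakUsage
+        // and Info->PageFileName here.
+        //
+
+        if (Info->NextEntryOffset == 0) {
+            break;
+        }
+        Info = (PSYSTEM_PAGEFILE_INFORMATION)
+                    ((PUCHAR)Info + Info->NextEntryOffset);
+    }
+}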
+
+
+NTSTATUS
+MiCheckPageFileMapping (
+ IN PFILE_OBJECT File
+ )
+
+/*++
+
+Routine Description:
+
+ Non-pagable routine to check to see if a given file has
+ no sections and therefore is eligible to become a paging file.
+
+Arguments:
+
+ File - Supplies a pointer to the file object.
+
+Return Value:
+
+    Returns STATUS_SUCCESS if the file can be used as a paging file.
+
+--*/
+
+{
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+
+ if ((File->SectionObjectPointer->DataSectionObject != NULL) ||
+ (File->SectionObjectPointer->ImageSectionObject != NULL)) {
+
+ UNLOCK_PFN (OldIrql);
+ return STATUS_INCOMPATIBLE_FILE_MAP;
+ }
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+
+}
+
+
+VOID
+MiInsertPageFileInList (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ Non-pagable routine to add a page file into the list
+ of system wide page files.
+
+Arguments:
+
+ None, implicitly found through page file structures.
+
+Return Value:
+
+ None. Operation cannot fail.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ ULONG Count;
+
+ LOCK_PFN (OldIrql);
+
+ MmNumberOfPagingFiles += 1;
+ Count = MmNumberOfPagingFiles;
+
+ if (IsListEmpty (&MmPagingFileHeader.ListHead)) {
+ KeSetEvent (&MmPagingFileHeader.Event, 0, FALSE);
+ }
+
+ InsertTailList (&MmPagingFileHeader.ListHead,
+ &MmPagingFile[MmNumberOfPagingFiles - 1]->Entry[0]->Links);
+
+ MmPagingFile[MmNumberOfPagingFiles - 1]->Entry[0]->CurrentList =
+ &MmPagingFileHeader.ListHead;
+
+ InsertTailList (&MmPagingFileHeader.ListHead,
+ &MmPagingFile[MmNumberOfPagingFiles - 1]->Entry[1]->Links);
+
+ MmPagingFile[MmNumberOfPagingFiles - 1]->Entry[1]->CurrentList =
+ &MmPagingFileHeader.ListHead;
+
+ MmNumberOfActiveMdlEntries += 2;
+
+ UNLOCK_PFN (OldIrql);
+
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+
+ if (Count == 1) {
+
+ //
+ // We have just created the first paging file. Start the
+ // modified page writer.
+ //
+
+ MmTotalCommitLimit =
+ MmPagingFile[MmNumberOfPagingFiles - 1]->FreeSpace + MmOverCommit;
+
+ //
+        // Keep the commit limit above roughly 20mb (5500 pages at 4K
+        // per page) so we can boot with a small paging file and clean
+        // things up.
+ //
+
+ if (MmTotalCommitLimit < 5500) {
+ MmOverCommit2 = 5500 - MmTotalCommitLimit;
+ MmTotalCommitLimit = 5500;
+ }
+
+
+ } else {
+
+ //
+ // Balance overcommitment in the case an extension was granted.
+ //
+
+ if (MmOverCommit2 > MmPagingFile[MmNumberOfPagingFiles - 1]->FreeSpace) {
+ MmOverCommit2 -= MmPagingFile[MmNumberOfPagingFiles - 1]->FreeSpace;
+ } else {
+ MmTotalCommitLimit +=
+ MmPagingFile[MmNumberOfPagingFiles - 1]->FreeSpace - MmOverCommit2;
+ MmOverCommit2 = 0;
+ }
+ }
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+ return;
+}
+
+VOID
+MiPageFileFull (
+    VOID
+    )
+
+/*++
+
+Routine Description:
+
+ This routine is called when no space can be found in a paging file.
+ It looks through all the paging files to see if ample space is
+ available and if not, tries to expand the paging files.
+
+    If more than 90% of the total paging file space is in use, the
+    commitment limit is set to the current usage and then 100 pages
+    are added to it.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ ULONG i;
+ ULONG Total = 0;
+ ULONG Free = 0;
+ KIRQL OldIrql = 0;
+
+ MM_PFN_LOCK_ASSERT();
+
+ i = 0;
+ do {
+ Total += MmPagingFile[i]->Size;
+ Free += MmPagingFile[i]->FreeSpace;
+ i += 1;
+ } while (i < MmNumberOfPagingFiles);
+
+ //
+ // Check to see if more than 90% of the total space has been used.
+ //
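+    //
+    // ((Total >> 5) + (Total >> 4)) is Total * 3/32, roughly 9.4%
+    // of the total, so the test below fires once free space has
+    // fallen to about a tenth of the total size.
+    //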
+
+ if (((Total >> 5) + (Total >> 4)) >= Free) {
+
+ //
+ // Try to expand the paging files.
+ //
+
+ //
+ // Check commit limits.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
+
+ //
+ // Check commit limits and set the limit to what is now used.
+ //
+
+ if (MmTotalCommittedPages <= (MmTotalCommitLimit + 50)) {
+
+ //
+            // The number of committed pages is within 50 pages of the
+            // commit limit (or below it). Reset the commit limit to the
+            // current usage.
+ //
+
+ if (MmTotalCommittedPages < MmTotalCommitLimit) {
+ MmPageFileFullExtend += MmTotalCommitLimit - MmTotalCommittedPages;
+ MmTotalCommittedPages = MmTotalCommitLimit;
+ }
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+
+ //
+ // Charge 100 pages against the commitment.
+ //
+
+ MiChargeCommitmentCantExpand (100, TRUE);
+
+ //
+ // Display a popup once.
+ //
+
+ if (!MmPageFileFull) {
+ try {
+ MiCauseOverCommitPopup (1, 0);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ NOTHING;
+ }
+ }
+
+ MmPageFileFull += 100;
+ } else {
+
+ //
+            // The committed pages already exceed the commit limit by more than 50.
+ //
+
+ ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
+ }
+
+ LOCK_PFN (OldIrql);
+ }
+ return;
+}
+
+VOID
+MiFlushAllPages (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ Forces a write of all modified pages.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+    Kernel mode. No locks held. APC level or less.
+
+--*/
+
+{
+ ULONG j = 40;
+
+ MmWriteAllModifiedPages = TRUE;
+ KeSetEvent (&MmModifiedPageWriterEvent, 0, FALSE);
+
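+    //
+    // Poll while the modified list drains: up to 40 iterations of
+    // 30 milliseconds each (about 1.2 seconds), or until the list
+    // drops to 50 pages or fewer.
+    //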
+ do {
+ KeDelayExecutionThread (KernelMode, FALSE, &Mm30Milliseconds);
+ j -= 1;
+ } while ((MmModifiedPageListHead.Total > 50) && (j > 0));
+
+ MmWriteAllModifiedPages = FALSE;
+ return;
+}
diff --git a/private/ntos/mm/mp/makefile b/private/ntos/mm/mp/makefile
new file mode 100644
index 000000000..6ee4f43fa
--- /dev/null
+++ b/private/ntos/mm/mp/makefile
@@ -0,0 +1,6 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the components of NT OS/2
+#
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/private/ntos/mm/mp/sources b/private/ntos/mm/mp/sources
new file mode 100644
index 000000000..dbeb18d62
--- /dev/null
+++ b/private/ntos/mm/mp/sources
@@ -0,0 +1,29 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+NT_UP=0
+
+TARGETPATH=..\..\mpobj
+
+!include ..\sources.inc
diff --git a/private/ntos/mm/pagfault.c b/private/ntos/mm/pagfault.c
new file mode 100644
index 000000000..db2d1c9ba
--- /dev/null
+++ b/private/ntos/mm/pagfault.c
@@ -0,0 +1,3406 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ pagfault.c
+
+Abstract:
+
+ This module contains the pager for memory management.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#define STATUS_ISSUE_PAGING_IO (0xC0033333)
+#define STATUS_PTE_CHANGED 0x87303000
+#define STATUS_REFAULT 0xC7303001
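+
+//
+// These status values are private to the pager: they carry fault
+// resolution state between the Mi routines in this module and are
+// translated (e.g. to STATUS_SUCCESS) before MiDispatchFault returns
+// to its callers.
+//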
+
+#if DBG
+extern ULONG MmPagingFileDebug[8192];
+#endif
+
+extern MMPTE MmSharedUserDataPte;
+
+MMINPAGE_SUPPORT_LIST MmInPageSupportList;
+
+
+VOID
+MiHandleBankedSection (
+ IN PVOID VirtualAddress,
+ IN PMMVAD Vad
+ );
+
+NTSTATUS
+MiCompleteProtoPteFault (
+ IN BOOLEAN StoreInstruction,
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerProtoPte
+ );
+
+
+NTSTATUS
+MiDispatchFault (
+ IN BOOLEAN StoreInstruction,
+ IN PVOID VirtualAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerProtoPte,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine dispatches a page fault to the appropriate
+ routine to complete the fault.
+
+Arguments:
+
+ StoreInstruction - Supplies TRUE if the instruction is trying
+ to modify the faulting address (i.e. write
+ access required).
+
+ VirtualAddress - Supplies the faulting address.
+
+ PointerPte - Supplies the PTE for the faulting address.
+
+ PointerProtoPte - Supplies a pointer to the prototype PTE to fault in,
+ NULL if no prototype PTE exists.
+
+ Process - Supplies a pointer to the process object. If this
+ parameter is NULL, then the fault is for system
+ space and the Process's working set lock is not held.
+
+Return Value:
+
+ status.
+
+Environment:
+
+ Kernel mode, working set lock held.
+
+--*/
+
+{
+ MMPTE TempPte;
+ NTSTATUS status;
+ PMMINPAGE_SUPPORT ReadBlock;
+ MMPTE SavedPte;
+ PMMINPAGE_SUPPORT CapturedEvent;
+ KIRQL OldIrql;
+ PULONG Page;
+ ULONG PageFrameIndex;
+ LONG NumberOfBytes;
+ PMMPTE CheckPte;
+ PMMPTE ReadPte;
+ PMMPFN PfnClusterPage;
+ PMMPFN Pfn1;
+
+ProtoPteNotResident:
+
+ if (PointerProtoPte != NULL) {
+
+ //
+ // Acquire the PFN lock to synchronize access to prototype PTEs.
+ // This is required as the working set lock will not prevent
+ // multiple processes from operating on the same prototype PTE.
+ //
+
+        LOCK_PFN (OldIrql);
+
+ //
+ // Make sure the prototptes are in memory. For
+        // Make sure the prototype PTEs are in memory. For
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte)) {
+ CheckPte = MiGetPteAddress (PointerProtoPte);
+
+ if (CheckPte->u.Hard.Valid == 0) {
+
+ ASSERT (Process == NULL);
+
+ //
+ // The page that contains the prototype PTE is not in memory.
+ //
+
+                VirtualAddress = PointerProtoPte;
+ PointerPte = CheckPte;
+ PointerProtoPte = NULL;
+ UNLOCK_PFN (OldIrql);
+ goto ProtoPteNotResident;
+ }
+ }
+
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+ // PTE was already made valid by the cache manager support
+ // routines.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+ }
+
+ ReadPte = PointerProtoPte;
+ status = MiResolveProtoPteFault (StoreInstruction,
+ VirtualAddress,
+ PointerPte,
+ PointerProtoPte,
+ &ReadBlock,
+ Process);
+ //
+ // Returns with PFN lock released.
+ //
+
+ ASSERT (KeGetCurrentIrql() == APC_LEVEL);
+
+ } else {
+
+ TempPte = *PointerPte;
+ ASSERT (TempPte.u.Long != 0);
+ ASSERT (TempPte.u.Hard.Valid == 0);
+
+ if (TempPte.u.Soft.Transition != 0) {
+
+ //
+ // This is a transition page.
+ //
+
+ status = MiResolveTransitionFault (VirtualAddress,
+ PointerPte,
+ Process,
+ FALSE);
+
+ } else if (TempPte.u.Soft.PageFileHigh == 0) {
+
+ //
+ // Demand zero fault.
+ //
+
+ status = MiResolveDemandZeroFault (VirtualAddress,
+ PointerPte,
+ Process,
+ FALSE);
+ } else {
+
+ //
+ // Page resides in paging file.
+ //
+
+ ReadPte = PointerPte;
+ LOCK_PFN (OldIrql);
+ status = MiResolvePageFileFault (VirtualAddress,
+ PointerPte,
+ &ReadBlock,
+ Process);
+ }
+ }
+
+ ASSERT (KeGetCurrentIrql() == APC_LEVEL);
+ if (NT_SUCCESS(status)) {
+ return status;
+ }
+
+ if (status == STATUS_ISSUE_PAGING_IO) {
+
+ SavedPte = *ReadPte;
+
+ CapturedEvent = (PMMINPAGE_SUPPORT)ReadBlock->Pfn->u1.Event;
+
+ if (Process != NULL) {
+ UNLOCK_WS (Process);
+ } else {
+ UNLOCK_SYSTEM_WS(APC_LEVEL);
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_PAGEFAULT) {
+ DbgPrint ("MMFAULT: va: %8lx size: %lx process: %s file: %Z\n",
+ VirtualAddress,
+ ReadBlock->Mdl.ByteCount,
+ Process ? Process->ImageFileName : "SystemVa",
+ &ReadBlock->FilePointer->FileName
+ );
+ }
+#endif //DBG
+
+ //
+ // Issue the read request.
+ //
+
+ status = IoPageRead ( ReadBlock->FilePointer,
+ &ReadBlock->Mdl,
+ &ReadBlock->ReadOffset,
+ &ReadBlock->Event,
+ &ReadBlock->IoStatus);
+
+
+ if (!NT_SUCCESS(status)) {
+
+ //
+ // Set the event as the I/O system doesn't set it on errors.
+ //
+
+
+ ReadBlock->IoStatus.Status = status;
+ ReadBlock->IoStatus.Information = 0;
+ KeSetEvent (&ReadBlock->Event,
+ 0,
+ FALSE);
+ }
+
+ //
+ // Wait for the I/O operation.
+ //
+
+ status = MiWaitForInPageComplete (ReadBlock->Pfn,
+ ReadPte,
+ VirtualAddress,
+ &SavedPte,
+ CapturedEvent,
+ Process);
+
+ //
+ // MiWaitForInPageComplete RETURNS WITH THE WORKING SET LOCK
+ // AND PFN LOCK HELD!!!
+ //
+
+ //
+ // This is the thread which owns the event, clear the event field
+ // in the PFN database.
+ //
+
+ Pfn1 = ReadBlock->Pfn;
+ Page = &ReadBlock->Page[0];
+ NumberOfBytes = (LONG)ReadBlock->Mdl.ByteCount;
+ CheckPte = ReadBlock->BasePte;
+
+ while (NumberOfBytes > 0) {
+
+ //
+            // Don't remove the page we just brought in to
+            // satisfy this page fault.
+ //
+
+ if (CheckPte != ReadPte) {
+ PfnClusterPage = MI_PFN_ELEMENT (*Page);
+ ASSERT (PfnClusterPage->PteFrame == Pfn1->PteFrame);
+
+ if (PfnClusterPage->u3.e1.ReadInProgress != 0) {
+
+ PfnClusterPage->u3.e1.ReadInProgress = 0;
+
+ if (PfnClusterPage->u3.e1.InPageError == 0) {
+ PfnClusterPage->u1.Event = (PKEVENT)NULL;
+ }
+ }
+ MiDecrementReferenceCount (*Page);
+ } else {
+ PageFrameIndex = *Page;
+ }
+
+ CheckPte += 1;
+ Page += 1;
+ NumberOfBytes -= PAGE_SIZE;
+ }
+
+ if (status != STATUS_SUCCESS) {
+ MiDecrementReferenceCount (PageFrameIndex);
+
+ if (status == STATUS_PTE_CHANGED) {
+
+ //
+ // State of the PTE changed during the I/O operation; just
+ // return success and refault.
+ //
+
+ UNLOCK_PFN (APC_LEVEL);
+ return STATUS_SUCCESS;
+
+ } else {
+
+ //
+ // An I/O error occurred during the page read
+ // operation. All the pages which were just
+ // put into transition should be put onto the
+ // free list if InPageError is set, and their
+ // PTEs restored to the proper contents.
+ //
+
+ Page = &ReadBlock->Page[0];
+
+ NumberOfBytes = ReadBlock->Mdl.ByteCount;
+
+ while (NumberOfBytes > 0) {
+
+ PfnClusterPage = MI_PFN_ELEMENT (*Page);
+
+ if (PfnClusterPage->u3.e1.InPageError == 1) {
+
+ if (PfnClusterPage->u3.e2.ReferenceCount == 0) {
+
+ PfnClusterPage->u3.e1.InPageError = 0;
+ ASSERT (PfnClusterPage->u3.e1.PageLocation ==
+ StandbyPageList);
+
+ MiUnlinkPageFromList (PfnClusterPage);
+ MiRestoreTransitionPte (*Page);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ *Page);
+ }
+ }
+ Page += 1;
+ NumberOfBytes -= PAGE_SIZE;
+ }
+ UNLOCK_PFN (APC_LEVEL);
+ return status;
+ }
+ }
+
+ //
+ // The PTE is still in the transition state, same protection, etc.
+ //
+
+ ASSERT (Pfn1->u3.e1.InPageError == 0);
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+
+ MI_MAKE_TRANSITION_PTE_VALID (TempPte, ReadPte);
+ if (StoreInstruction && TempPte.u.Hard.Write) {
+ MI_SET_PTE_DIRTY (TempPte);
+ }
+ *ReadPte = TempPte;
+
+ if (PointerProtoPte != NULL) {
+
+ //
+ // The prototype PTE has been made valid, now make the
+ // original PTE valid.
+ //
+
+ if (PointerPte->u.Hard.Valid == 0) {
+#if DBG
+ NTSTATUS oldstatus = status;
+#endif //DBG
+
+ //
+ // PTE is not valid, continue with operation.
+ //
+
+ status = MiCompleteProtoPteFault (StoreInstruction,
+ VirtualAddress,
+ PointerPte,
+ PointerProtoPte);
+
+ //
+ // Returns with the PFN lock released!
+ //
+
+#if DBG
+ if (PointerPte->u.Hard.Valid == 0) {
+ DbgPrint ("MM:PAGFAULT - va %lx %lx %lx status:%lx\n",
+ VirtualAddress, PointerPte, PointerProtoPte, oldstatus);
+ }
+#endif //DBG
+ }
+ } else {
+
+ if (Pfn1->u1.WsIndex == 0) {
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ }
+
+ UNLOCK_PFN (APC_LEVEL);
+ MiAddValidPageToWorkingSet (VirtualAddress,
+ ReadPte,
+ Pfn1,
+ 0);
+ }
+
+ //
+ // Note, this routine could release and reacquire the PFN lock!
+ //
+
+ LOCK_PFN (OldIrql);
+ MiFlushInPageSupportBlock();
+ UNLOCK_PFN (APC_LEVEL);
+
+ if (status == STATUS_SUCCESS) {
+ status = STATUS_PAGE_FAULT_PAGING_FILE;
+ }
+ }
+
+ if ((status == STATUS_REFAULT) ||
+ (status == STATUS_PTE_CHANGED)) {
+ status = STATUS_SUCCESS;
+ }
+ ASSERT (KeGetCurrentIrql() == APC_LEVEL);
+ return status;
+}
+
+
+NTSTATUS
+MiResolveDemandZeroFault (
+ IN PVOID VirtualAddress,
+ IN PMMPTE PointerPte,
+ IN PEPROCESS Process,
+ IN ULONG PrototypePte
+ )
+
+/*++
+
+Routine Description:
+
+ This routine resolves a demand zero page fault.
+
+ If the PrototypePte argument is true, the PFN lock is
+ held, the lock cannot be dropped, and the page should
+ not be added to the working set at this time.
+
+Arguments:
+
+ VirtualAddress - Supplies the faulting address.
+
+ PointerPte - Supplies the PTE for the faulting address.
+
+ Process - Supplies a pointer to the process object. If this
+ parameter is NULL, then the fault is for system
+ space and the Process's working set lock is not held.
+
+ PrototypePte - Supplies TRUE if this is a prototype PTE.
+
+Return Value:
+
+ status, either STATUS_PAGE_FAULT_DEMAND_ZERO or STATUS_REFAULT.
+
+Environment:
+
+ Kernel mode, PFN lock held conditionally.
+
+--*/
+
+
+{
+ PMMPFN Pfn1;
+ ULONG PageFrameIndex;
+ MMPTE TempPte;
+ ULONG PageColor;
+ KIRQL OldIrql;
+ ULONG NeedToZero = FALSE;
+
+ //
+ // Check to see if a page is available; if a wait was
+ // performed, do not continue, just refault.
+ //
+
+ if (!PrototypePte) {
+ LOCK_PFN (OldIrql);
+ }
+
+ MM_PFN_LOCK_ASSERT();
+
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ if (!MiEnsureAvailablePageOrWait (Process,
+ VirtualAddress)) {
+
+ if ((Process != NULL) && (!PrototypePte)) {
+
+ //
+ // If a fork operation is in progress and the faulting thread
+ // is not the thread performing the fork operation, block until
+ // the fork is completed.
+ //
+
+ if ((Process->ForkInProgress != NULL) &&
+ (Process->ForkInProgress != PsGetCurrentThread())) {
+ MiWaitForForkToComplete (Process);
+ UNLOCK_PFN (APC_LEVEL);
+ return STATUS_REFAULT;
+ }
+
+ Process->NumberOfPrivatePages += 1;
+ PageColor = MI_PAGE_COLOR_VA_PROCESS (VirtualAddress,
+ &Process->NextPageColor);
+ ASSERT (PointerPte <= (PMMPTE)PDE_TOP);
+
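+ //
+ // Prefer a page which is already zeroed; if none is available,
+ // take any free page and zero it after the PFN lock is dropped.
+ //
+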
+ PageFrameIndex = MiRemoveZeroPageIfAny (PageColor);
+ if (PageFrameIndex == 0) {
+ PageFrameIndex = MiRemoveAnyPage (PageColor);
+ NeedToZero = TRUE;
+ }
+
+ } else {
+ PageColor = MI_PAGE_COLOR_VA_PROCESS (VirtualAddress,
+ &MmSystemPageColor);
+ //
+ // As this is a system page, there is no need to
+ // remove a page of zeroes; it must be initialized by
+ // the system before it is used.
+ //
+
+ if (PrototypePte) {
+ PageFrameIndex = MiRemoveZeroPage (PageColor);
+ } else {
+ PageFrameIndex = MiRemoveAnyPage (PageColor);
+ }
+ }
+
+ MmInfoCounters.DemandZeroCount += 1;
+
+ MiInitializePfn (PageFrameIndex, PointerPte, 1);
+
+ if (!PrototypePte) {
+ UNLOCK_PFN (APC_LEVEL);
+ }
+
+ if (NeedToZero) {
+ MiZeroPhysicalPage (PageFrameIndex, PageColor);
+ }
+
+ //
+ // As this page is demand zero, set the modified bit in the
+ // PFN database element and set the dirty bit in the PTE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ PointerPte->u.Soft.Protection,
+ PointerPte);
+
+ if (TempPte.u.Hard.Write != 0) {
+ MI_SET_PTE_DIRTY (TempPte);
+ }
+
+ *PointerPte = TempPte;
+ if (!PrototypePte) {
+ ASSERT (Pfn1->u1.WsIndex == 0);
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ MiAddValidPageToWorkingSet (VirtualAddress,
+ PointerPte,
+ Pfn1,
+ 0);
+ }
+ return STATUS_PAGE_FAULT_DEMAND_ZERO;
+ }
+
+ if (!PrototypePte) {
+ UNLOCK_PFN (APC_LEVEL);
+ }
+ return STATUS_REFAULT;
+}
+
+
+NTSTATUS
+MiResolveTransitionFault (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PEPROCESS CurrentProcess,
+ IN ULONG PfnLockHeld
+ )
+
+/*++
+
+Routine Description:
+
+ This routine resolves a transition page fault.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies the PTE for the faulting address.
+
+ CurrentProcess - Supplies a pointer to the process object. If this
+ parameter is NULL, then the fault is for system
+ space and the process's working set lock is not held.
+
+ PfnLockHeld - Supplies TRUE if the PFN lock is held on entry, in
+ which case this routine does not acquire or release it.
+
+Return Value:
+
+ status, either STATUS_PAGE_FAULT_TRANSITION, STATUS_REFAULT or an I/O status
+ code.
+
+Environment:
+
+ Kernel mode, PFN lock held conditionally.
+
+--*/
+
+{
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ MMPTE TempPte;
+ NTSTATUS status;
+ NTSTATUS PfnStatus;
+ PMMINPAGE_SUPPORT CapturedEvent;
+ KIRQL OldIrql;
+
+ //
+ // ***********************************************************
+ // Transition PTE.
+ // ***********************************************************
+ //
+
+ //
+ // A transition PTE is either on the free or modified list,
+ // on neither list because of a nonzero ReferenceCount,
+ // or currently being read in from the disk (read in progress).
+ // If the page is read in progress, this is a collided page
+ // and must be handled accordingly.
+ //
+
+ if (!PfnLockHeld) {
+ LOCK_PFN (OldIrql);
+ }
+
+ TempPte = *PointerPte;
+
+ if ((TempPte.u.Soft.Valid == 0) &&
+ (TempPte.u.Soft.Prototype == 0) &&
+ (TempPte.u.Soft.Transition == 1)) {
+
+ //
+ // Still in transition format.
+ //
+
+ MmInfoCounters.TransitionCount += 1;
+
+ PageFrameIndex = TempPte.u.Trans.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ if (Pfn1->u3.e1.InPageError) {
+
+ //
+ // There was an in-page read error and there are other
+ // threads colliding for this page, delay to let the
+ // other threads complete and return.
+ //
+
+ ASSERT (!NT_SUCCESS(Pfn1->u1.ReadStatus));
+ if (!PfnLockHeld) {
+ UNLOCK_PFN (APC_LEVEL);
+ }
+ return Pfn1->u1.ReadStatus;
+ }
+
+ if (Pfn1->u3.e1.ReadInProgress) {
+
+ //
+ // Collided page fault.
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_COLLIDED_PAGE) {
+ DbgPrint("MM:collided page fault\n");
+ }
+#endif
+
+ //
+ // Increment the reference count for the page so it won't be
+ // reused until all collisions have been completed.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
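+ //
+ // Capture the inpage support block and count this waiter so the
+ // block is not recycled while this thread waits for the read.
+ //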
+ CapturedEvent = (PMMINPAGE_SUPPORT)Pfn1->u1.Event;
+ CapturedEvent->WaitCount += 1;
+
+ UNLOCK_PFN (APC_LEVEL);
+
+ if (CurrentProcess != NULL) {
+ UNLOCK_WS (CurrentProcess);
+ } else {
+ UNLOCK_SYSTEM_WS (APC_LEVEL);
+ }
+
+ status = MiWaitForInPageComplete (Pfn1,
+ PointerPte,
+ FaultingAddress,
+ &TempPte,
+ CapturedEvent,
+ CurrentProcess);
+ //
+ // MiWaitForInPageComplete RETURNS WITH THE WORKING SET LOCK
+ // AND PFN LOCK HELD!!!
+ //
+
+ ASSERT (Pfn1->u3.e1.ReadInProgress == 0);
+
+ if (status != STATUS_SUCCESS) {
+ PfnStatus = Pfn1->u1.ReadStatus;
+ MiDecrementReferenceCount (PageFrameIndex);
+
+ //
+ // Check to see if an I/O error occurred on this page.
+ // If so, try to free the physical page, wait a
+ // half second and return a status of PTE_CHANGED.
+ // This will result in a success being returned to
+ // the user and the fault will occur again and should
+ // not be a transition fault this time.
+ //
+
+ if (Pfn1->u3.e1.InPageError == 1) {
+ status = PfnStatus;
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ Pfn1->u3.e1.InPageError = 0;
+ ASSERT (Pfn1->u3.e1.PageLocation ==
+ StandbyPageList);
+
+ MiUnlinkPageFromList (Pfn1);
+ MiRestoreTransitionPte (PageFrameIndex);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PageFrameIndex);
+ }
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_COLLIDED_PAGE) {
+ DbgPrint("MM:decrement ref count - pte changed\n");
+ MiFormatPfn(Pfn1);
+ }
+#endif
+ if (!PfnLockHeld) {
+ UNLOCK_PFN (APC_LEVEL);
+ }
+ return status;
+ }
+
+ } else {
+
+ //
+ // PTE refers to a normal transition PTE.
+ //
+
+ ASSERT (Pfn1->u3.e1.InPageError == 0);
+ if (Pfn1->u3.e1.PageLocation == ActiveAndValid) {
+
+ //
+ // This PTE must be a page table page which was removed
+ // from the working set because none of the PTEs within the
+ // page table page were valid, but some are still in the
+ // transition state. Make the page valid without incrementing
+ // the reference count, but increment the share count.
+ //
+
+ ASSERT ((Pfn1->PteAddress >= (PMMPTE)PDE_BASE) &&
+ (Pfn1->PteAddress <= (PMMPTE)PDE_TOP));
+
+ //
+ // Don't increment the valid pte count for the
+ // page table page.
+ //
+
+ ASSERT (Pfn1->u2.ShareCount != 0);
+ ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
+
+ } else {
+
+ MiUnlinkPageFromList (Pfn1);
+
+ //
+ // Update the PFN database, the share count is now 1 and
+ // the reference count is incremented as the share count
+ // just went from zero to 1.
+ //
+ ASSERT (Pfn1->u2.ShareCount == 0);
+ Pfn1->u3.e2.ReferenceCount += 1;
+ }
+ }
+
+ //
+ // Join with collided page fault code to handle updating
+ // the transition PTE.
+ //
+
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+
+ MI_MAKE_TRANSITION_PTE_VALID (TempPte, PointerPte);
+
+ //
+ // If the modified field is set in the PFN database and this
+ // page is not copy on modify, then set the dirty bit.
+ // This can be done as the modified page will not be
+ // written to the paging file until this PTE is made invalid.
+ //
+
+ if (Pfn1->u3.e1.Modified && TempPte.u.Hard.Write &&
+ (TempPte.u.Hard.CopyOnWrite == 0)) {
+ MI_SET_PTE_DIRTY (TempPte);
+ } else {
+ MI_SET_PTE_CLEAN (TempPte);
+ }
+
+ *PointerPte = TempPte;
+
+ if (!PfnLockHeld) {
+
+ if (Pfn1->u1.WsIndex == 0) {
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ }
+
+ UNLOCK_PFN (APC_LEVEL);
+
+ MiAddValidPageToWorkingSet (FaultingAddress,
+ PointerPte,
+ Pfn1,
+ 0);
+ }
+ return STATUS_PAGE_FAULT_TRANSITION;
+ } else {
+ if (!PfnLockHeld) {
+ UNLOCK_PFN (APC_LEVEL);
+ }
+ }
+ return STATUS_REFAULT;
+}
+
+
+NTSTATUS
+MiResolvePageFileFault (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PMMINPAGE_SUPPORT *ReadBlock,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine builds the MDL and other structures to allow a
+ read operation on a page file for a page fault.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies the PTE for the faulting address.
+
+ ReadBlock - Supplies the address of the read block which
+ needs to be completed before an I/O can be
+ issued.
+
+ Process - Supplies a pointer to the process object. If this
+ parameter is NULL, then the fault is for system
+ space and the Process's working set lock is not held.
+
+Return Value:
+
+ status. A status value of STATUS_ISSUE_PAGING_IO is returned
+ if this function completes successfully.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ LARGE_INTEGER StartingOffset;
+ ULONG PageFrameIndex;
+ ULONG PageFileNumber;
+ ULONG WorkingSetIndex;
+ ULONG PageColor;
+ MMPTE TempPte;
+ PETHREAD CurrentThread;
+ PMMINPAGE_SUPPORT ReadBlockLocal;
+
+ // **************************************************
+ // Page File Read
+ // **************************************************
+
+ //
+ // Calculate the VBN for the in-page operation.
+ //
+
+ CurrentThread = PsGetCurrentThread();
+ TempPte = *PointerPte;
+
+ ASSERT (TempPte.u.Hard.Valid == 0);
+ ASSERT (TempPte.u.Soft.Prototype == 0);
+ ASSERT (TempPte.u.Soft.Transition == 0);
+
+ PageFileNumber = GET_PAGING_FILE_NUMBER (TempPte);
+ StartingOffset.LowPart = GET_PAGING_FILE_OFFSET (TempPte);
+
+ ASSERT (StartingOffset.LowPart <= MmPagingFile[PageFileNumber]->Size);
+
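+ //
+ // The offset taken from the PTE is in units of pages; convert
+ // it to a byte offset for the read request.
+ //
+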
+ StartingOffset.HighPart = 0;
+ StartingOffset.QuadPart = StartingOffset.QuadPart << PAGE_SHIFT;
+
+ MM_PFN_LOCK_ASSERT();
+ if (MiEnsureAvailablePageOrWait (Process,
+ FaultingAddress)) {
+
+ //
+ // A wait operation was performed which dropped the locks,
+ // repeat this fault.
+ //
+
+ UNLOCK_PFN (APC_LEVEL);
+ return STATUS_REFAULT;
+ }
+
+ ReadBlockLocal = MiGetInPageSupportBlock (FALSE);
+ if (ReadBlockLocal == NULL) {
+ UNLOCK_PFN (APC_LEVEL);
+ return STATUS_REFAULT;
+ }
+ MmInfoCounters.PageReadCount += 1;
+ MmInfoCounters.PageReadIoCount += 1;
+
+ *ReadBlock = ReadBlockLocal;
+
+ //
+ // FIXFIX: can any of this be moved to after the PFN lock is released?
+ //
+
+ ReadBlockLocal->FilePointer = MmPagingFile[PageFileNumber]->File;
+
+#if DBG
+
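+ //
+ // Consistency check for checked builds: the paging file debug
+ // map should record this PTE (or the PTE for the faulting
+ // address) as the owner of the page file slot being read.
+ //
+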
+ if (((StartingOffset.LowPart >> PAGE_SHIFT) < 8192) && (PageFileNumber == 0)) {
+
+ if (((MmPagingFileDebug[StartingOffset.LowPart>>PAGE_SHIFT] - 1) << 4) !=
+ ((ULONG)PointerPte << 4)) {
+ if (((MmPagingFileDebug[StartingOffset.LowPart>>PAGE_SHIFT] - 1) << 4) !=
+ ((ULONG)(MiGetPteAddress(FaultingAddress)) << 4)) {
+
+ DbgPrint("MMINPAGE: Mismatch PointerPte %lx Offset %lx info %lx\n",
+ PointerPte, StartingOffset.LowPart,
+ MmPagingFileDebug[StartingOffset.LowPart>>PAGE_SHIFT]);
+ DbgBreakPoint();
+ }
+ }
+ }
+#endif //DBG
+
+ ReadBlockLocal->ReadOffset = StartingOffset;
+
+ //
+ // Get a page and put the PTE into the transition state with the
+ // read-in-progress flag set.
+ //
+
+ if (Process == NULL) {
+ PageColor = MI_GET_PAGE_COLOR_FROM_VA(FaultingAddress);
+ } else {
+ PageColor = MI_PAGE_COLOR_VA_PROCESS (FaultingAddress,
+ &Process->NextPageColor);
+ }
+
+ ReadBlockLocal->BasePte = PointerPte;
+
+ KeClearEvent (&ReadBlockLocal->Event);
+
+ //
+ // Build MDL for request.
+ //
+
+ MmInitializeMdl(&ReadBlockLocal->Mdl, PAGE_ALIGN(FaultingAddress), PAGE_SIZE);
+ ReadBlockLocal->Mdl.MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
+
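+ //
+ // If the PTE lies outside the page table pages, it is a
+ // prototype PTE; pass a working set index of 0xFFFFFFFF so no
+ // WSLE is allocated for the page.
+ //
+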
+ if ((PointerPte < (PMMPTE)PTE_BASE) ||
+ (PointerPte > (PMMPTE)PDE_TOP)) {
+ WorkingSetIndex = 0xFFFFFFFF;
+ } else {
+ WorkingSetIndex = 1;
+ }
+
+ PageFrameIndex = MiRemoveAnyPage (PageColor);
+ ReadBlockLocal->Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+ ReadBlockLocal->Page[0] = PageFrameIndex;
+
+ MiInitializeReadInProgressPfn (
+ &ReadBlockLocal->Mdl,
+ PointerPte,
+ &ReadBlockLocal->Event,
+ WorkingSetIndex);
+ UNLOCK_PFN (APC_LEVEL);
+
+ return STATUS_ISSUE_PAGING_IO;
+}
+
+NTSTATUS
+MiResolveProtoPteFault (
+ IN BOOLEAN StoreInstruction,
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerProtoPte,
+ IN PMMINPAGE_SUPPORT *ReadBlock,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine resolves a prototype PTE fault.
+
+Arguments:
+
+ StoreInstruction - Supplies TRUE if the faulting instruction is a
+ store (i.e. write access required).
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies the PTE for the faulting address.
+
+ PointerProtoPte - Supplies a pointer to the prototype PTE to fault in.
+
+ ReadBlock - Supplies the address of the read block which
+ needs to be completed before an I/O can be
+ issued.
+
+ Process - Supplies a pointer to the process object. If this
+ parameter is NULL, then the fault is for system
+ space and the Process's working set lock is not held.
+
+Return Value:
+
+ status, either STATUS_SUCCESS, STATUS_REFAULT, or an I/O status
+ code.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+{
+
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ NTSTATUS status;
+ ULONG CopyOnWrite;
+ MMWSLE ProtoProtect;
+ PMMPTE ContainingPageTablePointer;
+ PMMPFN Pfn2;
+ KIRQL OldIrql;
+ ULONG PfnHeld = FALSE;
+
+ //
+ // The PFN lock must already be held, as the routine to locate a
+ // working set entry decrements the share count of PFN elements.
+ //
+
+ MM_PFN_LOCK_ASSERT();
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ DbgPrint("MM:actual fault %lx va %lx\n",PointerPte, FaultingAddress);
+ MiFormatPte(PointerPte);
+ }
+#endif //DBG
+
+ ASSERT (PointerPte->u.Soft.Prototype == 1);
+ TempPte = *PointerProtoPte;
+
+ //
+ // The page containing the prototype PTE is resident,
+ // handle the fault referring to the prototype PTE.
+ // If the prototype PTE is already valid, make this
+ // PTE valid and increment the share count, etc.
+ //
+
+ if (TempPte.u.Hard.Valid) {
+
+ //
+ // Prototype PTE is valid.
+ //
+
+ PageFrameIndex = TempPte.u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn1->u2.ShareCount += 1;
+ status = STATUS_SUCCESS;
+
+ //
+ // Count this as a transition fault.
+ //
+
+ MmInfoCounters.TransitionCount += 1;
+ PfnHeld = TRUE;
+
+ } else {
+
+ //
+ // Check to make sure the prototype PTE is committed.
+ //
+
+ if (TempPte.u.Long == 0) {
+
+#if DBG
+ if (MmDebug & MM_DBG_STOP_ON_ACCVIO) {
+ DbgPrint("MM:access vio2 - %lx\n",FaultingAddress);
+ MiFormatPte(PointerPte);
+ DbgBreakPoint();
+ }
+#endif //DBG
+
+ UNLOCK_PFN (APC_LEVEL);
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ //
+ // If the PTE indicates that the protection field to be
+ // checked is in the prototype PTE, check it now.
+ //
+
+ CopyOnWrite = FALSE;
+
+ if (PointerPte->u.Soft.PageFileHigh != 0xFFFFF) {
+ if (PointerPte->u.Proto.ReadOnly == 0) {
+
+ //
+ // Check for kernel mode access, we have already verified
+ // that the user has access to the virtual address.
+ //
+
+#if 0 // removed this assert since mapping drivers via MmMapViewInSystemSpace
+ // violates the assert.
+
+ {
+ PSUBSECTION Sub;
+ if (PointerProtoPte->u.Soft.Prototype == 1) {
+ Sub = MiGetSubsectionAddress (PointerProtoPte);
+ ASSERT (Sub->u.SubsectionFlags.Protection ==
+ PointerProtoPte->u.Soft.Protection);
+ }
+ }
+
+#endif // 0
+
+ status = MiAccessCheck (PointerProtoPte,
+ StoreInstruction,
+ KernelMode,
+ PointerProtoPte->u.Soft.Protection);
+
+ if (status != STATUS_SUCCESS) {
+#if DBG
+ if (MmDebug & MM_DBG_STOP_ON_ACCVIO) {
+ DbgPrint("MM:access vio3 - %lx\n",FaultingAddress);
+ MiFormatPte(PointerPte);
+ MiFormatPte(PointerProtoPte);
+ DbgBreakPoint();
+ }
+#endif
+ UNLOCK_PFN (APC_LEVEL);
+ return status;
+ }
+ if ((PointerProtoPte->u.Soft.Protection & MM_COPY_ON_WRITE_MASK) ==
+ MM_COPY_ON_WRITE_MASK) {
+ CopyOnWrite = TRUE;
+ }
+ }
+ } else {
+ if ((PointerPte->u.Soft.Protection & MM_COPY_ON_WRITE_MASK) ==
+ MM_COPY_ON_WRITE_MASK) {
+ CopyOnWrite = TRUE;
+ }
+ }
+
+ if ((!IS_PTE_NOT_DEMAND_ZERO(TempPte)) && (CopyOnWrite)) {
+
+ //
+ // The prototype PTE is demand zero and copy on
+ // write. Make this PTE a private demand zero PTE.
+ //
+
+ ASSERT (Process != NULL);
+
+ PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+
+ UNLOCK_PFN (APC_LEVEL);
+
+ status = MiResolveDemandZeroFault (FaultingAddress,
+ PointerPte,
+ Process,
+ FALSE);
+ return status;
+ }
+
+ //
+ // Make the prototype PTE valid, the prototype PTE is in
+ // Make the prototype PTE valid; the prototype PTE is in
+ // one of 4 cases:
+ // transition
+ // paging file
+ // mapped file
+ //
+
+ if (TempPte.u.Soft.Prototype == 1) {
+
+ //
+ // Mapped File.
+ //
+
+ status = MiResolveMappedFileFault (FaultingAddress,
+ PointerProtoPte,
+ ReadBlock,
+ Process);
+
+ //
+ // Returns with PFN lock held.
+ //
+
+ PfnHeld = TRUE;
+
+ } else if (TempPte.u.Soft.Transition == 1) {
+
+ //
+ // Transition.
+ //
+
+ status = MiResolveTransitionFault (FaultingAddress,
+ PointerProtoPte,
+ Process,
+ TRUE);
+
+ //
+ // Returns with PFN lock held.
+ //
+
+ PfnHeld = TRUE;
+
+ } else if (TempPte.u.Soft.PageFileHigh == 0) {
+
+ //
+ // Demand Zero
+ //
+
+ status = MiResolveDemandZeroFault (FaultingAddress,
+ PointerProtoPte,
+ Process,
+ TRUE);
+
+ //
+ // Returns with PFN lock held!
+ //
+
+ PfnHeld = TRUE;
+
+ } else {
+
+ //
+ // Paging file.
+ //
+
+ status = MiResolvePageFileFault (FaultingAddress,
+ PointerProtoPte,
+ ReadBlock,
+ Process);
+
+ //
+ // Returns with PFN lock released.
+ //
+ ASSERT (KeGetCurrentIrql() == APC_LEVEL);
+ }
+ }
+
+ if (NT_SUCCESS(status)) {
+
+ MM_PFN_LOCK_ASSERT();
+
+ //
+ // The prototype PTE is valid, complete the fault.
+ //
+
+ PageFrameIndex = PointerProtoPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn1->u3.e1.PrototypePte = 1;
+
+ //
+ // Prototype PTE is now valid, make the PTE valid.
+ //
+
+ ASSERT (PointerProtoPte->u.Hard.Valid == 1);
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ //
+ // A PTE just went from not present, not transition to
+ // present. The share count and valid count must be
+ // updated in the page table page which contains this
+ // Pte.
+ //
+
+ ContainingPageTablePointer = MiGetPteAddress(PointerPte);
+ Pfn2 = MI_PFN_ELEMENT(ContainingPageTablePointer->u.Hard.PageFrameNumber);
+ Pfn2->u2.ShareCount += 1;
+
+ ProtoProtect.u1.Long = 0;
+ if (PointerPte->u.Soft.PageFileHigh == 0xFFFFF) {
+
+ //
+ // The protection code for the prototype PTE comes from this
+ // PTE.
+ //
+
+ ProtoProtect.u1.e1.Protection = PointerPte->u.Soft.Protection;
+
+ } else {
+
+ //
+ // Take the protection from the prototype PTE.
+ //
+
+ ProtoProtect.u1.e1.Protection = Pfn1->OriginalPte.u.Soft.Protection;
+ ProtoProtect.u1.e1.SameProtectAsProto = 1;
+ }
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ ProtoProtect.u1.e1.Protection,
+ PointerPte);
+
+ //
+ // If this is a store instruction and the page is not copy on
+ // write, then set the modified bit in the PFN database and
+ // the dirty bit in the PTE. The PTE is not set dirty even
+ // if the modified bit is set so writes to the page can be
+ // tracked for FlushVirtualMemory.
+ //
+
+ if ((StoreInstruction) && (TempPte.u.Hard.CopyOnWrite == 0)) {
+ Pfn1->u3.e1.Modified = 1;
+ MI_SET_PTE_DIRTY (TempPte);
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+ }
+
+ *PointerPte = TempPte;
+
+ if (Pfn1->u1.WsIndex == 0) {
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ }
+
+ UNLOCK_PFN (APC_LEVEL);
+ MiAddValidPageToWorkingSet (FaultingAddress,
+ PointerPte,
+ Pfn1,
+ ProtoProtect.u1.Long);
+
+ ASSERT (PointerPte == MiGetPteAddress(FaultingAddress));
+ } else {
+ if (PfnHeld) {
+ UNLOCK_PFN (APC_LEVEL);
+ }
+ }
+
+ return status;
+}
+
+
+NTSTATUS
+MiCompleteProtoPteFault (
+ IN BOOLEAN StoreInstruction,
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPTE PointerProtoPte
+ )
+
+/*++
+
+Routine Description:
+
+ This routine completes a prototype PTE fault. It is invoked
+ after a read operation has completed bringing the data into
+ memory.
+
+Arguments:
+
+ StoreInstruction - Supplies TRUE if the instruction is trying
+ to modify the faulting address (i.e. write
+ access required).
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies the PTE for the faulting address.
+
+ PointerProtoPte - Supplies a pointer to the prototype PTE to fault in,
+ NULL if no prototype PTE exists.
+
+Return Value:
+
+ status.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+{
+ MMPTE TempPte;
+ MMWSLE ProtoProtect;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PMMPTE ContainingPageTablePointer;
+ KIRQL OldIrql;
+
+ MM_PFN_LOCK_ASSERT();
+
+ PageFrameIndex = PointerProtoPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+ Pfn1->u3.e1.PrototypePte = 1;
+
+ //
+ // Prototype PTE is now valid, make the PTE valid.
+ //
+
+ ASSERT (PointerProtoPte->u.Hard.Valid == 1);
+
+ //
+ // A PTE just went from not present, not transition to
+ // present. The share count and valid count must be
+ // updated in the page table page which contains this
+ // Pte.
+ //
+
+ ContainingPageTablePointer = MiGetPteAddress(PointerPte);
+ Pfn2 = MI_PFN_ELEMENT(ContainingPageTablePointer->u.Hard.PageFrameNumber);
+ Pfn2->u2.ShareCount += 1;
+
+ ProtoProtect.u1.Long = 0;
+ if (PointerPte->u.Soft.PageFileHigh == 0xFFFFF) {
+
+ //
+ // The protection code for the prototype PTE comes from this
+ // PTE.
+ //
+
+ ProtoProtect.u1.e1.Protection = PointerPte->u.Soft.Protection;
+
+ } else {
+
+ //
+ // Take the protection from the prototype PTE.
+ //
+
+ ProtoProtect.u1.e1.Protection = Pfn1->OriginalPte.u.Soft.Protection;
+ ProtoProtect.u1.e1.SameProtectAsProto = 1;
+ }
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ ProtoProtect.u1.e1.Protection,
+ PointerPte);
+
+ //
+ // If this is a store instruction and the page is not copy on
+ // write, then set the modified bit in the PFN database and
+ // the dirty bit in the PTE. The PTE is not set dirty even
+ // if the modified bit is set so writes to the page can be
+ // tracked for FlushVirtualMemory.
+ //
+
+ if ((StoreInstruction) && (TempPte.u.Hard.CopyOnWrite == 0)) {
+ Pfn1->u3.e1.Modified = 1;
+ MI_SET_PTE_DIRTY (TempPte);
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+ }
+
+ *PointerPte = TempPte;
+
+ if (Pfn1->u1.WsIndex == 0) {
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+ }
+
+ UNLOCK_PFN (APC_LEVEL);
+
+ MiAddValidPageToWorkingSet (FaultingAddress,
+ PointerPte,
+ Pfn1,
+ ProtoProtect.u1.Long);
+
+ ASSERT (PointerPte == MiGetPteAddress(FaultingAddress));
+
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+MiResolveMappedFileFault (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN PMMINPAGE_SUPPORT *ReadBlock,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine builds the MDL and other structures to allow a
+ read operation on a mapped file for a page fault.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies the PTE for the faulting address.
+
+ ReadBlock - Supplies the address of the read block which
+ needs to be completed before an I/O can be
+ issued.
+
+ Process - Supplies a pointer to the process object. If this
+ parameter is NULL, then the fault is for system
+ space and the process's working set lock is not held.
+
+Return Value:
+
+ status. A status value of STATUS_ISSUE_PAGING_IO is returned
+ if this function completes successfully.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ PSUBSECTION Subsection;
+ PMDL Mdl;
+ ULONG ReadSize;
+ PETHREAD CurrentThread;
+ PULONG Page;
+ PULONG EndPage;
+ PMMPTE BasePte;
+ PMMPTE CheckPte;
+ LARGE_INTEGER StartingOffset;
+ LARGE_INTEGER TempOffset;
+ PULONG FirstMdlPage;
+ PMMINPAGE_SUPPORT ReadBlockLocal;
+ ULONG PageColor;
+ ULONG ClusterSize = 0;
+
+ ASSERT (PointerPte->u.Soft.Prototype == 1);
+
+ // *********************************************
+ // Mapped File (subsection format)
+ // *********************************************
+
+
+ if (MiEnsureAvailablePageOrWait (Process, FaultingAddress)) {
+
+ //
+ // A wait operation was performed which dropped the locks,
+ // repeat this fault.
+ //
+
+ return STATUS_REFAULT;
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ MiFormatPte (PointerPte);
+ }
+#endif //DBG
+
+ //
+ // Calculate address of subsection for this prototype PTE.
+ //
+
+ Subsection = MiGetSubsectionAddress (PointerPte);
+
+#ifdef LARGE_PAGES
+
+ //
+ // Check to see if this subsection maps a large page, if
+ // so, just fill the TB and return a status of PTE_CHANGED.
+ //
+
+ if (Subsection->u.SubsectionFlags.LargePages == 1) {
+ KeFlushEntireTb (TRUE, TRUE);
+ KeFillLargeEntryTb ((PHARDWARE_PTE)(Subsection + 1),
+ FaultingAddress,
+ Subsection->StartingSector);
+
+ return STATUS_REFAULT;
+ }
+#endif //LARGE_PAGES
+
+ if (Subsection->ControlArea->u.Flags.FailAllIo) {
+ return STATUS_IN_PAGE_ERROR;
+ }
+
+ CurrentThread = PsGetCurrentThread();
+
+ ReadBlockLocal = MiGetInPageSupportBlock (FALSE);
+ if (ReadBlockLocal == NULL) {
+ return STATUS_REFAULT;
+ }
+ *ReadBlock = ReadBlockLocal;
+
+ //
+ // Build MDL for request.
+ //
+
+ Mdl = &ReadBlockLocal->Mdl;
+
+ FirstMdlPage = &ReadBlockLocal->Page[0];
+ Page = FirstMdlPage;
+
+#if DBG
+ RtlFillMemoryUlong( Page, (MM_MAXIMUM_READ_CLUSTER_SIZE+1) * 4, 0xf1f1f1f1);
+#endif //DBG
+
+ ReadSize = PAGE_SIZE;
+ BasePte = PointerPte;
+
+ //
+ // Should we attempt to perform page fault clustering?
+ //
+
+ if ((!CurrentThread->DisablePageFaultClustering) &&
+ (Subsection->ControlArea->u.Flags.NoModifiedWriting == 0)) {
+
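+ //
+ // Cluster when memory is plentiful, or when the previous
+ // prototype PTE fault was adjacent (sequential access), the
+ // section is an image, or forward-only clustering is set and
+ // a minimum number of pages remains available.
+ //
+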
+ if ((MmAvailablePages > (MmFreeGoal * 2))
+ ||
+ ((((PointerPte - 1) == (PMMPTE)(PsGetCurrentProcess()->LastProtoPteFault)) ||
+ (Subsection->ControlArea->u.Flags.Image != 0) ||
+ (CurrentThread->ForwardClusterOnly)) &&
+ (MmAvailablePages > (MM_MAXIMUM_READ_CLUSTER_SIZE + 16)))) {
+
+ //
+ // Cluster up to n pages. This one + n-1.
+ //
+
+ if (Subsection->ControlArea->u.Flags.Image == 0) {
+ ASSERT (CurrentThread->ReadClusterSize <=
+ MM_MAXIMUM_READ_CLUSTER_SIZE);
+ ClusterSize = CurrentThread->ReadClusterSize;
+ } else {
+ ClusterSize = MmDataClusterSize;
+ if (Subsection->u.SubsectionFlags.Protection &
+ MM_PROTECTION_EXECUTE_MASK ) {
+ ClusterSize = MmCodeClusterSize;
+ }
+ }
+ EndPage = Page + ClusterSize;
+
+ CheckPte = PointerPte + 1;
+
+ //
+ // Try to cluster within the page of PTEs.
+ //
+
+ while ((((ULONG)CheckPte & (PAGE_SIZE - 1)) != 0)
+ && (Page < EndPage) &&
+ (CheckPte <
+ &Subsection->SubsectionBase[Subsection->PtesInSubsection])
+ && (CheckPte->u.Long == BasePte->u.Long)) {
+
+ Subsection->ControlArea->NumberOfPfnReferences += 1;
+ ReadSize += PAGE_SIZE;
+ Page += 1;
+ CheckPte += 1;
+ }
+
+ if ((Page < EndPage) && (!CurrentThread->ForwardClusterOnly)) {
+
+ //
+ // Attempt to cluster going backwards from the PTE.
+ //
+
+ CheckPte = PointerPte - 1;
+
+ while ((((ULONG)CheckPte & (PAGE_SIZE - 1)) !=
+ (PAGE_SIZE - sizeof(MMPTE))) &&
+ (Page < EndPage) &&
+ (CheckPte >= Subsection->SubsectionBase) &&
+ (CheckPte->u.Long == BasePte->u.Long)) {
+
+ Subsection->ControlArea->NumberOfPfnReferences += 1;
+ ReadSize += PAGE_SIZE;
+ Page += 1;
+ CheckPte -= 1;
+ }
+ BasePte = CheckPte + 1;
+ }
+ }
+ }
+
+ //
+ // Calculate the offset to read into the file.
+ // offset = base + ((thispte - basepte) << PAGE_SHIFT)
+ //
+
+ StartingOffset.QuadPart = MI_STARTING_OFFSET (Subsection,
+ BasePte);
+
+ TempOffset.QuadPart = ((LONGLONG)Subsection->EndingSector << MMSECTOR_SHIFT) +
+ Subsection->u.SubsectionFlags.SectorEndOffset;
+
+ ASSERT (StartingOffset.QuadPart < TempOffset.QuadPart);
+
+ //
+ // Remove pages to fill in the MDL. This is done here as the
+ // base PTE has been determined and can be used for virtual
+ // aliasing checks.
+ //
+
+ EndPage = FirstMdlPage;
+ CheckPte = BasePte;
+
+ while (EndPage < Page) {
+ if (Process == NULL) {
+ PageColor = MI_GET_PAGE_COLOR_FROM_PTE(CheckPte);
+ } else {
+ PageColor = MI_PAGE_COLOR_PTE_PROCESS (CheckPte,
+ &Process->NextPageColor);
+ }
+ *EndPage = MiRemoveAnyPage (PageColor);
+ EndPage += 1;
+ CheckPte += 1;
+ }
+
+ if (Process == NULL) {
+ PageColor = MI_GET_PAGE_COLOR_FROM_PTE(CheckPte);
+ } else {
+ PageColor = MI_PAGE_COLOR_PTE_PROCESS (CheckPte,
+ &Process->NextPageColor);
+ }
+
+ //
+ // Check to see if the read will go past the end of the file,
+ // if so, correct the read size and get a zeroed page.
+ //
+
+ MmInfoCounters.PageReadIoCount += 1;
+ MmInfoCounters.PageReadCount += ReadSize >> PAGE_SHIFT;
+
+ if ((Subsection->ControlArea->u.Flags.Image) &&
+ ((StartingOffset.QuadPart + ReadSize) > TempOffset.QuadPart)) {
+
+ ASSERT ((ULONG)(TempOffset.QuadPart - StartingOffset.QuadPart)
+ > (ReadSize - PAGE_SIZE));
+
+ ReadSize = (ULONG)(TempOffset.QuadPart - StartingOffset.QuadPart);
+
+ PageFrameIndex = MiRemoveZeroPage (PageColor);
+
+ } else {
+
+ //
+ // We are reading a complete page, no need to get a zeroed page.
+ //
+
+ PageFrameIndex = MiRemoveAnyPage (PageColor);
+ }
+
+ //
+ // Increment the PFN reference count in the control area for
+ // the subsection (PFN MUTEX is required to modify this field).
+ //
+
+ Subsection->ControlArea->NumberOfPfnReferences += 1;
+ *Page = PageFrameIndex;
+
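+ //
+ // Locate the page frame which backs the originally faulting
+ // PTE; the MDL page array parallels the PTEs starting at
+ // BasePte.
+ //
+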
+ PageFrameIndex = *(FirstMdlPage + (PointerPte - BasePte));
+
+ //
+ // Get a page and put the PTE into the transition state with the
+ // read-in-progress flag set.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ KeClearEvent (&ReadBlockLocal->Event);
+
+ //
+ // Initialize MDL for request.
+ //
+
+ MmInitializeMdl(Mdl,
+ MiGetVirtualAddressMappedByPte (BasePte),
+ ReadSize);
+ Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
+
+#if DBG
+ if (ReadSize > ((ClusterSize + 1) << PAGE_SHIFT)) {
+ KeBugCheckEx (MEMORY_MANAGEMENT, 0x777,(ULONG)Mdl, (ULONG)Subsection,
+ (ULONG)TempOffset.LowPart);
+ }
+#endif //DBG
+
+ MiInitializeReadInProgressPfn (
+ Mdl,
+ BasePte,
+ &ReadBlockLocal->Event,
+ 0xFFFFFFFF);
+
+ ReadBlockLocal->ReadOffset = StartingOffset;
+ ReadBlockLocal->FilePointer = Subsection->ControlArea->FilePointer;
+ ReadBlockLocal->BasePte = BasePte;
+ ReadBlockLocal->Pfn = Pfn1;
+
+ return STATUS_ISSUE_PAGING_IO;
+}
+
+
+NTSTATUS
+MiWaitForInPageComplete (
+ IN PMMPFN Pfn2,
+ IN PMMPTE PointerPte,
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPteContents,
+ IN PMMINPAGE_SUPPORT InPageSupport,
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ Waits for a page read to complete.
+
+Arguments:
+
+ Pfn2 - Supplies a pointer to the PFN element for the page being read.
+
+ PointerPte - Supplies a pointer to the pte that is in the transition
+ state.
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPteContents - Supplies the contents of the PTE before the
+ working set lock was released.
+
+ InPageSupport - Supplies a pointer to the inpage support structure
+ for this read operation.
+
+ CurrentProcess - Supplies a pointer to the process object, or NULL
+ if the fault is for system space.
+
+Return Value:
+
+ Returns the status of the in page.
+
+Environment:
+
+ Kernel mode, APC's disabled. Neither the working set lock nor
+ the pfn lock may be held.
+
+--*/
+
+{
+ PMMPTE NewPointerPte;
+ PMMPTE ProtoPte;
+ PMMPFN Pfn1;
+ PMMPFN Pfn;
+ PULONG Va;
+ PULONG Page;
+ PULONG LastPage;
+ ULONG Offset;
+ ULONG Protection;
+ PMDL Mdl;
+ KIRQL OldIrql;
+ NTSTATUS status;
+
+ //
+ // Wait for the I/O to complete. Note that we can't wait for all
+ // the objects simultaneously as other threads/processes could be
+ // waiting for the same event. The first thread which completes
+ // the wait and gets the PFN mutex may reuse the event for another
+ // fault before this thread completes its wait.
+ //
+
+ KeWaitForSingleObject( &InPageSupport->Event,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
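+ //
+ // Reacquire the working set lock and then the PFN lock, in
+ // that order, before examining the inpage support block.
+ //
+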
+ if (CurrentProcess != NULL) {
+ LOCK_WS (CurrentProcess);
+ } else {
+ LOCK_SYSTEM_WS (OldIrql);
+ }
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Check to see if this is the first thread to complete the in-page
+ // operation.
+ //
+
+ Pfn = InPageSupport->Pfn;
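+
+ //
+ // If the page this thread faulted on is not the base page of
+ // the read (it may be another page within the cluster), clear
+ // its read-in-progress flag now.
+ //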
+ if (Pfn2 != Pfn) {
+ Pfn2->u3.e1.ReadInProgress = 0;
+ }
+ if (Pfn->u3.e1.ReadInProgress) {
+
+ Mdl = &InPageSupport->Mdl;
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+#if DBG
+ Mdl->MdlFlags |= MDL_LOCK_HELD;
+#endif //DBG
+
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+
+#if DBG
+ Mdl->MdlFlags &= ~MDL_LOCK_HELD;
+#endif //DBG
+ }
+
+ Pfn->u3.e1.ReadInProgress = 0;
+ Pfn->u1.Event = (PKEVENT)NULL;
+
+ //
+ // Check the IO_STATUS_BLOCK to ensure the in-page completed successfully.
+ //
+
+ if (!NT_SUCCESS(InPageSupport->IoStatus.Status)) {
+
+ if (InPageSupport->IoStatus.Status == STATUS_END_OF_FILE) {
+
+ //
+ // An attempt was made to read past the end of the file;
+ // zero all the remaining bytes in the read.
+ //
+
+ Page = (PULONG)(Mdl + 1);
+ LastPage = Page + ((Mdl->ByteCount - 1) >> PAGE_SHIFT);
+
+ while (Page <= LastPage) {
+#if MM_NUMBER_OF_COLORS > 1
+ {
+ PMMPFN PfnColor;
+ PfnColor = MI_PFN_ELEMENT(*Page);
+ MiZeroPhysicalPage (*Page, PfnColor->u3.e1.PageColor);
+ }
+#else
+ MiZeroPhysicalPage (*Page, 0);
+#endif
+ Page += 1;
+ }
+
+ } else {
+
+ //
+ // An in-page I/O error occurred.
+ //
+
+ if (InPageSupport->IoStatus.Status != STATUS_VERIFY_REQUIRED) {
+ KdPrint(("MM:in page i/o error %X\n",
+ InPageSupport->IoStatus.Status));
+
+ //
+ // If this page is for paged pool or for paged
+ // kernel code, or page table pages, bugcheck.
+ //
+
+ if (((PointerPte > MiGetPteAddress (MM_HIGHEST_USER_ADDRESS))
+ &&
+ (PointerPte < MiGetPteAddress (MM_SYSTEM_CACHE_START)))
+ ||
+ ((PointerPte < (PMMPTE)PDE_TOP)
+ &&
+ (PointerPte >= MiGetPteAddress (MM_SYSTEM_CACHE_END)))) {
+ KeBugCheckEx (KERNEL_DATA_INPAGE_ERROR,
+ (ULONG)PointerPte,
+ InPageSupport->IoStatus.Status,
+ (ULONG)FaultingAddress,
+ PointerPte->u.Long);
+ }
+
+ }
+
+ Page = (PULONG)(Mdl + 1);
+ LastPage = Page + ((Mdl->ByteCount - 1) >> PAGE_SHIFT);
+
+ while (Page <= LastPage) {
+ Pfn1 = MI_PFN_ELEMENT (*Page);
+ Pfn1->u3.e1.InPageError = 1;
+ Pfn1->u1.ReadStatus = InPageSupport->IoStatus.Status;
+#if DBG
+ {
+ KIRQL Old;
+ Va = (PULONG)((ULONG)MiMapPageInHyperSpace (*Page,&Old));
+ RtlFillMemoryUlong (Va, PAGE_SIZE, 0x50444142);
+ MiUnmapPageInHyperSpace (Old);
+ }
+#endif //DBG
+ Page += 1;
+ }
+ status = InPageSupport->IoStatus.Status;
+ MiFreeInPageSupportBlock (InPageSupport);
+ return status;
+ }
+ } else {
+
+ if (InPageSupport->IoStatus.Information != Mdl->ByteCount) {
+
+ ASSERT (InPageSupport->IoStatus.Information != 0);
+
+ //
+ // Less than a full page was read - zero the remainder
+ // of the page.
+ //
+
+ Page = (PULONG)(Mdl + 1);
+ LastPage = Page + ((Mdl->ByteCount - 1) >> PAGE_SHIFT);
+ Page = Page + ((InPageSupport->IoStatus.Information - 1) >> PAGE_SHIFT);
+
+ Offset = BYTE_OFFSET (InPageSupport->IoStatus.Information);
+
+ if (Offset != 0) {
+ KIRQL Old;
+ Va = (PULONG)((ULONG)MiMapPageInHyperSpace (*Page, &Old)
+ + Offset);
+
+ RtlZeroMemory (Va, PAGE_SIZE - Offset);
+ MiUnmapPageInHyperSpace (Old);
+ }
+
+ //
+ // Zero any remaining pages within the MDL.
+ //
+
+ Page += 1;
+
+ while (Page <= LastPage) {
+#if MM_NUMBER_OF_COLORS > 1
+ {
+ PMMPFN PfnColor;
+ PfnColor = MI_PFN_ELEMENT(*Page);
+ MiZeroPhysicalPage (*Page, PfnColor->u3.e1.PageColor);
+ }
+#else
+ MiZeroPhysicalPage (*Page, 0);
+#endif
+ Page += 1;
+ }
+ }
+ }
+ } else {
+
+ //
+ // Another thread has already serviced the read, check the
+ // io-error flag in the PFN database to ensure the in-page
+ // was successful.
+ //
+
+ if (Pfn2->u3.e1.InPageError == 1) {
+ ASSERT (!NT_SUCCESS(Pfn2->u1.ReadStatus));
+ MiFreeInPageSupportBlock (InPageSupport);
+ return Pfn2->u1.ReadStatus;
+ }
+ }
+
+ MiFreeInPageSupportBlock (InPageSupport);
+
+ //
+ // Check to see if the faulting PTE has changed.
+ //
+
+ NewPointerPte = MiFindActualFaultingPte (FaultingAddress);
+
+ //
+ // If this PTE is in prototype PTE format, make the pointer to the
+ // pte point to the prototype PTE.
+ //
+
+ if (NewPointerPte == (PMMPTE)NULL) {
+ return STATUS_PTE_CHANGED;
+ }
+
+ if (NewPointerPte != PointerPte) {
+
+ //
+ // Check to make sure the NewPointerPte is not a prototype PTE
+ // which refers to the page being made valid.
+ //
+
+ if (NewPointerPte->u.Soft.Prototype == 1) {
+ if (NewPointerPte->u.Soft.PageFileHigh == 0xFFFFF) {
+
+ ProtoPte = MiCheckVirtualAddress (FaultingAddress,
+ &Protection);
+
+ } else {
+ ProtoPte = MiPteToProto (NewPointerPte);
+ }
+
+ //
+ // Make sure the prototype PTE refers to the PTE made valid.
+ //
+
+ if (ProtoPte != PointerPte) {
+ return STATUS_PTE_CHANGED;
+ }
+
+ //
+ // If the only difference is the owner mask, everything is
+ // okay.
+ //
+
+ if (ProtoPte->u.Long != PointerPteContents->u.Long) {
+ return STATUS_PTE_CHANGED;
+ }
+ } else {
+ return STATUS_PTE_CHANGED;
+ }
+ } else {
+
+ if (NewPointerPte->u.Long != PointerPteContents->u.Long) {
+ return STATUS_PTE_CHANGED;
+ }
+ }
+ return STATUS_SUCCESS;
+}
+
+PMMPTE
+MiFindActualFaultingPte (
+ IN PVOID FaultingAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This routine locates the actual PTE which must be made resident
+ in order to complete this fault. Note that for certain cases multiple faults
+ are required to make the final page resident.
+
+Arguments:
+
+ FaultingAddress - Supplies the virtual address which caused the
+ fault.
+
+Return Value:
+
+ Returns a pointer to the PTE which must be made resident, or NULL
+ if the faulting address is already valid or is a physical address.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set mutex held.
+
+--*/
+
+{
+ PMMPTE ProtoPteAddress;
+ PMMPTE PointerPteForProto;
+ PMMPTE PointerPte;
+ PMMPTE PointerFaultingPte;
+ ULONG Protection;
+
+ if (MI_IS_PHYSICAL_ADDRESS(FaultingAddress)) {
+ return NULL;
+ }
+
+ PointerPte = MiGetPdeAddress (FaultingAddress);
+
+ if (PointerPte->u.Hard.Valid == 0) {
+
+ //
+ // Page table page is not valid.
+ //
+
+ return PointerPte;
+ }
+
+ PointerPte = MiGetPteAddress (FaultingAddress);
+
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+ // Page is already valid, no need to fault it in.
+ //
+
+ return (PMMPTE)NULL;
+ }
+
+ if (PointerPte->u.Soft.Prototype == 0) {
+
+ //
+ // Page is not a prototype PTE, make this PTE valid.
+ //
+
+ return PointerPte;
+ }
+
+ //
+ // Check to see if the PTE which maps the prototype PTE is valid.
+ //
+
+ if (PointerPte->u.Soft.PageFileHigh == 0xFFFFF) {
+
+ //
+ // Protection is here, PTE must be located in VAD.
+ //
+
+ ProtoPteAddress = MiCheckVirtualAddress (FaultingAddress,
+ &Protection);
+
+ ASSERT (ProtoPteAddress != NULL);
+
+ } else {
+
+ //
+ // Protection is in ProtoPte.
+ //
+
+ ProtoPteAddress = MiPteToProto (PointerPte);
+ }
+
+ PointerPteForProto = MiGetPteAddress (ProtoPteAddress);
+ PointerFaultingPte = MiFindActualFaultingPte (ProtoPteAddress);
+
+ if (PointerFaultingPte == (PMMPTE)NULL) {
+ return PointerPte;
+ } else {
+ return PointerFaultingPte;
+ }
+
+}
+
+PMMPTE
+MiCheckVirtualAddress (
+ IN PVOID VirtualAddress,
+ OUT PULONG ProtectCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function examines the virtual address descriptors to see
+ if the specified virtual address is contained within any of
+ the descriptors. If a virtual address descriptor is found
+ which contains the specified virtual address, a PTE is built
+ from information within the virtual address descriptor and
+ returned to the caller.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to locate within
+ a virtual address descriptor.
+
+ ProtectCode - Receives the protection code for the virtual address.
+
+Return Value:
+
+ Returns the PTE which corresponds to the supplied virtual address.
+ If no virtual address descriptor is found, a NULL PTE pointer is returned.
+
+ Note that a PTE address of 0xffffffff is returned if the page
+ fault was for
+
+Environment:
+
+ Kernel mode, APC's disabled, working set mutex held.
+
+--*/
+
+{
+ PMMVAD Vad;
+ PSUBSECTION Subsection;
+
+ if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) {
+
+ Vad = MiLocateAddress (VirtualAddress);
+ if (Vad == (PMMVAD)NULL) {
+
+#if defined(MM_SHARED_USER_DATA_VA)
+ if (PAGE_ALIGN(VirtualAddress) == (PVOID) MM_SHARED_USER_DATA_VA) {
+
+ //
+ // This is the page that is double mapped between
+ // user mode and kernel mode. Map in as read only.
+ // On MIPS this is hardwired in the TB.
+ //
+
+ *ProtectCode = MM_READONLY;
+ return &MmSharedUserDataPte;
+ }
+#endif
+
+ *ProtectCode = MM_NOACCESS;
+ return NULL;
+ }
+
+ //
+ // A virtual address descriptor which contains the virtual address
+ // has been located. Build the PTE from the information within
+ // the virtual address descriptor.
+ //
+
+#ifdef LARGE_PAGES
+
+ if (Vad->u.VadFlags.LargePages == 1) {
+
+ KIRQL OldIrql;
+
+ //
+ // The first prototype PTE points to the subsection for the
+ // large page mapping.
+ //
+
+ Subsection = (PSUBSECTION)Vad->FirstPrototypePte;
+
+ ASSERT (Subsection->u.SubsectionFlags.LargePages == 1);
+
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ KeFlushEntireTb (TRUE, TRUE);
+ KeFillLargeEntryTb ((PHARDWARE_PTE)(Subsection + 1),
+ VirtualAddress,
+ Subsection->StartingSector);
+ KeLowerIrql (OldIrql);
+ *ProtectCode = MM_LARGE_PAGES;
+ return NULL;
+ }
+#endif //LARGE_PAGES
+
+ if (Vad->u.VadFlags.PhysicalMapping == 1) {
+
+ //
+ // This is a banked section.
+ //
+
+ MiHandleBankedSection (VirtualAddress, Vad);
+ *ProtectCode = MM_NOACCESS;
+ return NULL;
+ }
+
+ if (Vad->u.VadFlags.PrivateMemory == 1) {
+
+ //
+ // This is a private region of memory. Check to make
+ // sure the virtual address has been committed. Note that
+ // addresses are dense from the bottom up.
+ //
+
+ if (Vad->u.VadFlags.MemCommit == 1) {
+ *ProtectCode = Vad->u.VadFlags.Protection;
+ return NULL;
+ }
+
+ //
+ // The address is reserved but not committed.
+ //
+
+ *ProtectCode = MM_NOACCESS;
+ return NULL;
+
+ } else {
+
+ //
+ // This virtual address descriptor refers to a
+ // section, calculate the address of the prototype PTE
+ // and construct a pointer to the PTE.
+ //
+ //*******************************************************
+ //*******************************************************
+ // well here's an interesting problem, how do we know
+ // how to set the attributes on the PTE we are creating
+ // when we can't look at the prototype PTE without
+ // potentially incurring a page fault. In this case
+ // PteTemplate would be zero.
+ //*******************************************************
+ //*******************************************************
+ //
+
+ if (Vad->u.VadFlags.ImageMap == 1) {
+
+ //
+ // PTE and proto PTEs have the same protection for images.
+ //
+
+ *ProtectCode = MM_UNKNOWN_PROTECTION;
+ } else {
+ *ProtectCode = Vad->u.VadFlags.Protection;
+ }
+ return (PMMPTE)MiGetProtoPteAddress(Vad,VirtualAddress);
+ }
+
+ } else if (((ULONG)VirtualAddress >= PTE_BASE) &&
+ ((ULONG)VirtualAddress < PDE_TOP)) {
+
+ //
+ // The virtual address is within the space occupied by PDEs,
+ // make the PDE valid.
+ //
+
+ if (((PMMPTE)VirtualAddress >= MiGetPteAddress (MM_PAGED_POOL_START)) &&
+ ((PMMPTE)VirtualAddress <= MmLastPteForPagedPool)) {
+
+ *ProtectCode = MM_NOACCESS;
+ return NULL;
+ }
+
+ *ProtectCode = MM_READWRITE;
+ return NULL;
+ }
+
+ //
+ // Address is in system space.
+ //
+
+ *ProtectCode = MM_NOACCESS;
+ return NULL;
+}
+
+NTSTATUS
+FASTCALL
+MiCheckPdeForPagedPool (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function copies the Page Table Entry for the corresponding
+ virtual address from the system process's page directory.
+
+ This allows page table pages to be lazily evaluated for things
+ like paged pool.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address in question.
+
+Return Value:
+
+ Either success or access violation.
+
+Environment:
+
+ Kernel mode, DISPATCH level or below.
+
+--*/
+{
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ NTSTATUS status = STATUS_SUCCESS;
+
+ if (((PMMPTE)VirtualAddress >= MiGetPteAddress (MM_SYSTEM_RANGE_START)) &&
+ ((PMMPTE)VirtualAddress <= (PMMPTE)PDE_TOP)) {
+
+ //
+ // Pte for paged pool.
+ //
+
+ PointerPde = MiGetPteAddress (VirtualAddress);
+ status = STATUS_WAIT_1;
+ } else if (VirtualAddress < (PVOID)MM_SYSTEM_RANGE_START) {
+
+ return STATUS_ACCESS_VIOLATION;
+
+ } else {
+
+ //
+ // Virtual address in paged pool range.
+ //
+
+ PointerPde = MiGetPdeAddress (VirtualAddress);
+ }
+
+ //
+ // Locate the PDE for this page and make it valid.
+ //
+
+ if (PointerPde->u.Hard.Valid == 0) {
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
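+
+ //
+ // Copy the entry from the system process's view of the page
+ // directory and flush the TB entry for the page table page.
+ //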
+ *PointerPde = MmSystemPagePtes [((ULONG)PointerPde &
+ ((sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)];
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPde, PointerPte, FALSE);
+ }
+ return status;
+}
+
+VOID
+MiInitializePfn (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG ModifiedState
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the specified PFN element to the
+ active and valid state.
+
+Arguments:
+
+ PageFrameIndex - Supplies the page frame number of which to initialize.
+
+ PointerPte - Supplies the pointer to the PTE which caused the
+ page fault.
+
+ ModifiedState - Supplies the state to set the modified field in the PFN
+ element for this page, either 0 or 1.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, PFN mutex held.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PMMPTE PteFramePointer;
+ ULONG PteFramePage;
+
+ MM_PFN_LOCK_ASSERT();
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->PteAddress = PointerPte;
+
+ //
+ // If the PTE is currently valid, an address space is being built,
+ // just make the original PTE demand zero.
+ //
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+ if (MI_IS_CACHING_DISABLED (PointerPte)) {
+ Pfn1->OriginalPte.u.Soft.Protection = MM_READWRITE | MM_NOCACHE;
+ }
+
+ } else {
+ Pfn1->OriginalPte = *PointerPte;
+ ASSERT (!((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Transition == 1)));
+ }
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+#if DBG
+ if (Pfn1->u3.e2.ReferenceCount > 1) {
+ DbgPrint("MM:incrementing ref count > 1 \n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+#endif
+
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.Modified = ModifiedState;
+
+ //
+ // Determine the page frame number of the page table page which
+ // contains this PTE.
+ //
+
+ PteFramePointer = MiGetPteAddress(PointerPte);
+ PteFramePage = PteFramePointer->u.Hard.PageFrameNumber;
+ ASSERT (PteFramePage != 0);
+ Pfn1->PteFrame = PteFramePage;
+
+ //
+ // Increment the share count for the page table page containing
+ // this PTE.
+ //
+
+ Pfn2 = MI_PFN_ELEMENT (PteFramePage);
+
+ Pfn2->u2.ShareCount += 1;
+
+ return;
+}
+
+VOID
+MiInitializeReadInProgressPfn (
+ IN PMDL Mdl,
+ IN PMMPTE BasePte,
+ IN PKEVENT Event,
+ IN ULONG WorkingSetIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the specified PFN element to the
+ transition / read-in-progress state for an in-page operation.
+
+
+Arguments:
+
+ Mdl - supplies a pointer to the MDL.
+
+ BasePte - Supplies the pointer to the PTE which the first page in
+ the MDL maps.
+
+ Event - Supplies the event which is to be set when the I/O operation
+ completes.
+
+ WorkingSetIndex - Supplies the working set index flag, a value of
+ 0xFFFFFFFF indicates no WSLE is required because
+ this is a prototype PTE.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, PFN mutex held.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PMMPTE PteFramePointer;
+ ULONG PteFramePage;
+ MMPTE TempPte;
+ LONG NumberOfBytes;
+ PULONG Page;
+
+ MM_PFN_LOCK_ASSERT();
+
+ Page = (PULONG)(Mdl + 1);
+
+ NumberOfBytes = Mdl->ByteCount;
+
+ while (NumberOfBytes > 0) {
+
+ Pfn1 = MI_PFN_ELEMENT (*Page);
+ Pfn1->u1.Event = Event;
+ Pfn1->PteAddress = BasePte;
+ Pfn1->OriginalPte = *BasePte;
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e1.ReadInProgress = 1;
+
+ if (WorkingSetIndex == -1) {
+ Pfn1->u3.e1.PrototypePte = 1;
+ }
+
+ //
+ // Determine the page frame number of the page table page which
+ // contains this PTE.
+ //
+
+ PteFramePointer = MiGetPteAddress(BasePte);
+ PteFramePage = PteFramePointer->u.Hard.PageFrameNumber;
+ Pfn1->PteFrame = PteFramePage;
+
+ //
+ // Put the PTE into the transition state, no cache flush needed as
+ // PTE is still not valid.
+ //
+
+ MI_MAKE_TRANSITION_PTE (TempPte,
+ *Page,
+ BasePte->u.Soft.Protection,
+ BasePte);
+ *BasePte = TempPte;
+
+ //
+ // Increment the share count for the page table page containing
+ // this PTE as the PTE just went into the transition state.
+ //
+
+ ASSERT (PteFramePage != 0);
+ Pfn2 = MI_PFN_ELEMENT (PteFramePage);
+ Pfn2->u2.ShareCount += 1;
+
+ NumberOfBytes -= PAGE_SIZE;
+ Page += 1;
+ BasePte += 1;
+ }
+
+ return;
+}
+
+VOID
+MiInitializeTransitionPfn (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG WorkingSetIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the specified PFN element to the
+ transition state. Main use is by MapImageFile to make the
+ page which contains the image header transition in the
+ prototype PTEs.
+
+Arguments:
+
+ PageFrameIndex - supplies the page frame index to be initialized.
+
+ PointerPte - supplies an invalid, non-transition PTE to initialize.
+
+ WorkingSetIndex - Supplies the working set index flag, a value of
+ 0xFFFFFFFF indicates no WSLE is required because
+ this is a prototype PTE.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, PFN mutex held.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PMMPTE PteFramePointer;
+ ULONG PteFramePage;
+ MMPTE TempPte;
+
+ MM_PFN_LOCK_ASSERT();
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u1.Event = NULL;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->OriginalPte = *PointerPte;
+ ASSERT (!((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Transition == 1)));
+
+ //
+ // Don't change the reference count (it should already be 1).
+ //
+
+ Pfn1->u2.ShareCount = 0;
+
+ if (WorkingSetIndex == -1) {
+ Pfn1->u3.e1.PrototypePte = 1;
+ }
+ Pfn1->u3.e1.PageLocation = TransitionPage;
+
+ //
+ // Determine the page frame number of the page table page which
+ // contains this PTE.
+ //
+
+ PteFramePointer = MiGetPteAddress(PointerPte);
+ PteFramePage = PteFramePointer->u.Hard.PageFrameNumber;
+ Pfn1->PteFrame = PteFramePage;
+
+ //
+ // Put the PTE into the transition state, no cache flush needed as
+ // PTE is still not valid.
+ //
+
+ MI_MAKE_TRANSITION_PTE (TempPte,
+ PageFrameIndex,
+ PointerPte->u.Soft.Protection,
+ PointerPte);
+
+ *PointerPte = TempPte;
+
+ //
+ // Increment the share count for the page table page containing
+ // this PTE as the PTE just went into the transition state.
+ //
+
+ Pfn2 = MI_PFN_ELEMENT (PteFramePage);
+ ASSERT (PteFramePage != 0);
+ Pfn2->u2.ShareCount += 1;
+
+ return;
+}
+
+VOID
+MiInitializeCopyOnWritePfn (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG WorkingSetIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the specified PFN element to the
+ active and valid state for a copy on write operation.
+
+ In this case the page table page which contains the PTE has
+ the proper ShareCount.
+
+Arguments:
+
+ PageFrameIndex - Supplies the page frame number of which to initialize.
+
+ PointerPte - Supplies the pointer to the PTE which caused the
+ page fault.
+
+ WorkingSetIndex - Supplies the working set index for the corresponding
+ virtual address.
+
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, PFN mutex held.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPTE PteFramePointer;
+ ULONG PteFramePage;
+ PVOID VirtualAddress;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->PteAddress = PointerPte;
+
+ //
+ // Get the protection for the page.
+ //
+
+ VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ Pfn1->OriginalPte.u.Long = 0;
+ Pfn1->OriginalPte.u.Soft.Protection =
+ MI_MAKE_PROTECT_NOT_WRITE_COPY (
+ MmWsle[WorkingSetIndex].u1.e1.Protection);
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u1.WsIndex = WorkingSetIndex;
+
+ //
+ // Determine the page frame number of the page table page which
+ // contains this PTE.
+ //
+
+ PteFramePointer = MiGetPteAddress(PointerPte);
+ PteFramePage = PteFramePointer->u.Hard.PageFrameNumber;
+ ASSERT (PteFramePage != 0);
+
+ Pfn1->PteFrame = PteFramePage;
+
+ //
+ // Set the modified flag in the PFN database as we are writing
+ // into this page and the dirty bit is already set in the PTE.
+ //
+
+ Pfn1->u3.e1.Modified = 1;
+
+ return;
+}
+
+BOOLEAN
+MmIsAddressValid (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ For a given virtual address this function returns TRUE if no page fault
+ will occur for a read operation on the address, FALSE otherwise.
+
+ Note that after this routine is called, if the appropriate locks are
+ not held, a previously non-faulting address could begin to fault.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ TRUE if no page fault would be generated by reading the virtual address,
+ FALSE otherwise.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+
+#if defined(_MIPS_) || defined(_ALPHA_) || defined(_PPC_)
+
+ //
+ // If this is within the physical addressing range, just return TRUE.
+ //
+
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return TRUE;
+ }
+#endif // _MIPS_ || _ALPHA_ || _PPC_
+
+ PointerPte = MiGetPdeAddress (VirtualAddress);
+ if (PointerPte->u.Hard.Valid == 0) {
+ return FALSE;
+ }
+#ifdef _X86_
+ if (PointerPte->u.Hard.LargePage == 1) {
+ return TRUE;
+ }
+#endif //_X86_
+ PointerPte = MiGetPteAddress (VirtualAddress);
+ if (PointerPte->u.Hard.Valid == 0) {
+ return FALSE;
+ }
+ return TRUE;
+}
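+
+//
+// A minimal user-mode model of the walk above, assuming the 32-bit i386
+// layout (4KB pages, two 10-bit table indices); the MODEL_* names and
+// ModelIsAddressValid are hypothetical stand-ins, not the real MMPTE:
+//
+#if 0
+typedef struct _MODEL_PTE {
+ ULONG Valid : 1;
+ ULONG LargePage : 1;
+} MODEL_PTE;
+
+BOOLEAN
+ModelIsAddressValid (
+ IN MODEL_PTE PageDirectory[1024],
+ IN MODEL_PTE PageTables[1024][1024],
+ IN ULONG VirtualAddress
+ )
+{
+ ULONG PdeIndex = VirtualAddress >> 22; // top 10 bits
+ ULONG PteIndex = (VirtualAddress >> 12) & 0x3FF; // next 10 bits
+
+ if (PageDirectory[PdeIndex].Valid == 0) {
+ return FALSE; // no page table page is mapped
+ }
+ if (PageDirectory[PdeIndex].LargePage == 1) {
+ return TRUE; // 4MB page, no second level exists
+ }
+ return (BOOLEAN)(PageTables[PdeIndex][PteIndex].Valid == 1);
+}
+#endif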
+
+VOID
+MiInitializePfnForOtherProcess (
+ IN ULONG PageFrameIndex,
+ IN PMMPTE PointerPte,
+ IN ULONG ContainingPageFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the specified PFN element to the
+ active and valid state with the dirty bit on in the PTE and
+ the PFN database element marked as modified.
+
+ As this PTE is not visible from the current process, the containing
+ page frame must be supplied; the PTE contents field of the
+ PFN database element is set to demand zero.
+
+Arguments:
+
+ PageFrameIndex - Supplies the page frame number of which to initialize.
+
+ PointerPte - Supplies the pointer to the PTE which caused the
+ page fault.
+
+ ContainingPageFrame - Supplies the page frame number of the page
+ table page which contains this PTE.
+ If the ContainingPageFrame is 0, then
+ the ShareCount for the
+ containing page is not incremented.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, PFN mutex held.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+#if DBG
+ if (Pfn1->u3.e2.ReferenceCount > 1) {
+ DbgPrint("MM:incrementing ref count > 1 \n");
+ MiFormatPfn(Pfn1);
+ MiFormatPte(PointerPte);
+ }
+#endif
+
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.Modified = 1;
+
+ //
+ // Increment the share count for the page table page containing
+ // this PTE.
+ //
+
+ if (ContainingPageFrame != 0) {
+ Pfn1->PteFrame = ContainingPageFrame;
+ Pfn2 = MI_PFN_ELEMENT (ContainingPageFrame);
+ Pfn2->u2.ShareCount += 1;
+ }
+ return;
+}
+
+VOID
+MiAddValidPageToWorkingSet (
+ IN PVOID VirtualAddress,
+ IN PMMPTE PointerPte,
+ IN PMMPFN Pfn1,
+ IN ULONG WsleMask
+ )
+
+/*++
+
+Routine Description:
+
+ This routine adds the specified virtual address into the
+ appropriate working set list.
+
+Arguments:
+
+ VirtualAddress - Supplies the address to add to the working set list.
+
+ PointerPte - Supplies a pointer to the pte that is now valid.
+
+ Pfn1 - Supplies the PFN database element for the physical page
+ mapped by the virtual address.
+
+ WsleMask - Supplies a mask (protection and flags) to OR into the
+ working set list entry.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ ULONG WorkingSetIndex;
+ PEPROCESS Process;
+ PMMSUPPORT WsInfo;
+ PMMWSLE Wsle;
+
+ ASSERT ((PointerPte >= (PMMPTE)PTE_BASE) &&
+ (PointerPte <= (PMMPTE)PDE_TOP));
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+
+ if ((VirtualAddress <= (PVOID)MM_HIGHEST_USER_ADDRESS) ||
+ ((VirtualAddress >= (PVOID)PTE_BASE) &&
+ (VirtualAddress < (PVOID)HYPER_SPACE_END))) {
+
+ //
+ // Per process working set.
+ //
+
+ Process = PsGetCurrentProcess();
+ WsInfo = &Process->Vm;
+ Wsle = MmWsle;
+
+ } else {
+
+ //
+ // System cache working set.
+ //
+
+ WsInfo = &MmSystemCacheWs;
+ Wsle = MmSystemCacheWsle;
+ }
+
+ WorkingSetIndex = MiLocateAndReserveWsle (WsInfo);
+ MiUpdateWsle (&WorkingSetIndex,
+ VirtualAddress,
+ WsInfo->VmWorkingSetList,
+ Pfn1);
+ Wsle[WorkingSetIndex].u1.Long |= WsleMask;
+
+#if DBG
+ if ((VirtualAddress >= (PVOID)MM_SYSTEM_CACHE_START) &&
+ (VirtualAddress < (PVOID)MM_SYSTEM_CACHE_END)) {
+ ASSERT (MmSystemCacheWsle[WorkingSetIndex].u1.e1.SameProtectAsProto);
+ }
+#endif //DBG
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPte, VirtualAddress, FALSE);
+ return;
+}
+
+PMMINPAGE_SUPPORT
+MiGetInPageSupportBlock (
+ ULONG OkToReleasePfn
+ )
+
+/*++
+
+Routine Description:
+
+ This routine obtains an in-page support block, removing one from the
+ free list when possible and otherwise allocating a new block from
+ nonpaged pool, which requires releasing and reacquiring the PFN lock.
+
+Arguments:
+
+ OkToReleasePfn - Supplies TRUE if the PFN lock may be released and then
+ reacquired. Supplies FALSE if, once the lock has been
+ released and reacquired, NULL should be returned instead.
+
+Return Value:
+
+ NULL if the PFN lock was released (unless OkToReleasePfn is TRUE),
+ otherwise a pointer to an MMINPAGE_SUPPORT block.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ PMMINPAGE_SUPPORT Support;
+ PLIST_ENTRY NextEntry;
+
+ MM_PFN_LOCK_ASSERT();
+
+ if (MmInPageSupportList.Count == 0) {
+ ASSERT (IsListEmpty(&MmInPageSupportList.ListHead));
+ UNLOCK_PFN (APC_LEVEL);
+ Support = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ sizeof(MMINPAGE_SUPPORT),
+ 'nImM');
+ KeInitializeEvent (&Support->Event, NotificationEvent, FALSE);
+ LOCK_PFN (OldIrql);
+ if (!OkToReleasePfn) {
+
+ MmInPageSupportList.Count += 1;
+ Support->u.Thread = NULL;
+ InsertTailList (&MmInPageSupportList.ListHead,
+ &Support->ListEntry);
+ return NULL;
+ }
+ } else {
+ ASSERT (!IsListEmpty(&MmInPageSupportList.ListHead));
+ MmInPageSupportList.Count -= 1;
+ NextEntry = RemoveHeadList (&MmInPageSupportList.ListHead);
+ Support = CONTAINING_RECORD (NextEntry,
+ MMINPAGE_SUPPORT,
+ ListEntry );
+ }
+ Support->WaitCount = 1;
+ Support->u.Thread = PsGetCurrentThread();
+ Support->ListEntry.Flink = NULL;
+ return Support;
+}
+
+
+VOID
+MiFreeInPageSupportBlock (
+ IN PMMINPAGE_SUPPORT Support
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns the in page support block to a list
+ of freed blocks.
+
+Arguments:
+
+ Support - Supplies the in page support block to put on the free list.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+
+ MM_PFN_LOCK_ASSERT();
+
+ ASSERT (Support->u.Thread != NULL);
+ ASSERT (Support->WaitCount != 0);
+ ASSERT (Support->ListEntry.Flink == NULL);
+ Support->WaitCount -= 1;
+ if (Support->WaitCount == 0) {
+ Support->u.Thread = NULL;
+ InsertTailList (&MmInPageSupportList.ListHead,
+ &Support->ListEntry);
+ MmInPageSupportList.Count += 1;
+ }
+ return;
+}
+
+
+VOID
+MiFlushInPageSupportBlock (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the number of freed in-page support blocks and,
+ if there are more than 4, frees the excess blocks back to nonpaged pool.
+
+
+ ****** NB: The PFN LOCK is RELEASED and reacquired during this call ******
+
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+#define MMMAX_INPAGE_SUPPORT 4
+
+{
+ KIRQL OldIrql;
+ PMMINPAGE_SUPPORT Support[10];
+ ULONG i = 0;
+ PLIST_ENTRY NextEntry;
+
+ MM_PFN_LOCK_ASSERT();
+
+ while ((MmInPageSupportList.Count > MMMAX_INPAGE_SUPPORT) && (i < 10)) {
+ NextEntry = RemoveHeadList (&MmInPageSupportList.ListHead);
+ Support[i] = CONTAINING_RECORD (NextEntry,
+ MMINPAGE_SUPPORT,
+ ListEntry );
+ Support[i]->ListEntry.Flink = NULL;
+ i += 1;
+ MmInPageSupportList.Count -= 1;
+ }
+
+ if (i == 0) {
+ return;
+ }
+
+ UNLOCK_PFN (APC_LEVEL);
+
+ do {
+ i -= 1;
+ ExFreePool(Support[i]);
+ } while (i > 0);
+
+ LOCK_PFN (OldIrql);
+
+ return;
+}
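+
+//
+// The routine above follows a common shape: unlink a bounded batch while
+// the hot PFN lock is held, then call the pool package only after the
+// lock has been dropped so the hold time stays short. A sketch of that
+// shape, assuming a hypothetical MODEL_CACHE of MMINPAGE_SUPPORT blocks:
+//
+#if 0
+typedef struct _MODEL_CACHE {
+ ULONG Count;
+ LIST_ENTRY ListHead;
+} MODEL_CACHE, *PMODEL_CACHE;
+
+VOID
+ModelTrimCache (
+ IN PMODEL_CACHE Cache,
+ IN ULONG Keep
+ )
+{
+ KIRQL OldIrql;
+ PMMINPAGE_SUPPORT Batch[10];
+ ULONG i = 0;
+ PLIST_ENTRY NextEntry;
+
+ LOCK_PFN (OldIrql);
+ while ((Cache->Count > Keep) && (i < 10)) {
+ NextEntry = RemoveHeadList (&Cache->ListHead);
+ Batch[i] = CONTAINING_RECORD (NextEntry, MMINPAGE_SUPPORT, ListEntry);
+ Cache->Count -= 1;
+ i += 1;
+ }
+ UNLOCK_PFN (OldIrql);
+
+ while (i > 0) {
+ i -= 1;
+ ExFreePool (Batch[i]); // pool work done outside the PFN lock
+ }
+}
+#endif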
+
+VOID
+MiHandleBankedSection (
+ IN PVOID VirtualAddress,
+ IN PMMVAD Vad
+ )
+
+/*++
+
+Routine Description:
+
+ This routine invalidates a bank of video memory, calls out to the
+ video driver and then enables the next bank of video memory.
+
+Arguments:
+
+ VirtualAddress - Supplies the address of the faulting page.
+
+ Vad - Supplies the VAD which maps the range.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ PMMBANKED_SECTION Bank;
+ PMMPTE PointerPte;
+ ULONG BankNumber;
+ ULONG size;
+
+ Bank = Vad->Banked;
+ size = Bank->BankSize;
+
+ RtlFillMemory (Bank->CurrentMappedPte,
+ size >> (PAGE_SHIFT - PTE_SHIFT),
+ (UCHAR)ZeroPte.u.Long);
+
+ //
+ // Flush the TB as we have invalidated all the PTEs in this range
+ //
+
+ KeFlushEntireTb (TRUE, FALSE);
+
+ //
+ // Calculate new bank address and bank number.
+ //
+
+ PointerPte = MiGetPteAddress (
+ (PVOID)((ULONG)VirtualAddress & ~(size - 1)));
+ Bank->CurrentMappedPte = PointerPte;
+
+ BankNumber = ((ULONG)PointerPte - (ULONG)Bank->BasedPte) >> Bank->BankShift;
+
+ (Bank->BankedRoutine)(BankNumber, BankNumber, Bank->Context);
+
+ //
+ // Set the new range valid.
+ //
+
+ RtlMoveMemory (PointerPte,
+ &Bank->BankTemplate[0],
+ size >> (PAGE_SHIFT - PTE_SHIFT));
+
+ return;
+}
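+
+//
+// Worked example of the shifts above (illustrative numbers only): with
+// 4KB pages (PAGE_SHIFT == 12) and 4-byte PTEs (PTE_SHIFT == 2), a 64KB
+// bank gives
+//
+// 0x10000 >> (12 - 2) == 64 bytes of PTEs == 16 PTEs,
+//
+// which is the length passed to RtlFillMemory and RtlMoveMemory. If
+// BankShift is then log2(64) == 6, the second bank (PTE byte offset 64
+// from BasedPte) yields BankNumber == 64 >> 6 == 1.
+//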
+#if DBG
+VOID
+MiCheckFileState (
+ IN PMMPFN Pfn
+ )
+
+{
+ PSUBSECTION Subsection;
+ LARGE_INTEGER StartingOffset;
+
+ if (Pfn->u3.e1.PrototypePte == 0) {
+ return;
+ }
+ if (Pfn->OriginalPte.u.Soft.Prototype == 0) {
+ return;
+ }
+ Subsection = MiGetSubsectionAddress (&(Pfn->OriginalPte));
+ if (Subsection->ControlArea->u.Flags.NoModifiedWriting) {
+ return;
+ }
+ StartingOffset.QuadPart = MI_STARTING_OFFSET (Subsection,
+ Pfn->PteAddress);
+ DbgPrint("file: %lx offset: %lx\n",
+ Subsection->ControlArea->FilePointer,
+ StartingOffset.LowPart);
+ return;
+}
+#endif //DBG
diff --git a/private/ntos/mm/pfndec.c b/private/ntos/mm/pfndec.c
new file mode 100644
index 000000000..7008e982e
--- /dev/null
+++ b/private/ntos/mm/pfndec.c
@@ -0,0 +1,613 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ pfndec.c
+
+Abstract:
+
+ This module contains the routines to decrement the share count and
+ the reference counts within the Page Frame Database.
+
+Author:
+
+ Lou Perazzoli (loup) 5-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+ULONG MmFrontOfList;
+
+
+VOID
+FASTCALL
+MiDecrementShareCount2 (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine decrements the share count within the PFN element
+ for the specified physical page. If the share count becomes
+ zero, the corresponding PTE is converted to the transition state,
+ the reference count is decremented, and the ValidPte count
+ of the PTE frame is decremented.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number of which to decrement
+ the share count.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PMMPFN PfnX;
+ KIRQL OldIrql;
+
+ ASSERT ((PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex > 0));
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount -= 1;
+
+ ASSERT (Pfn1->u2.ShareCount < 0xF000000);
+
+ if (Pfn1->u2.ShareCount == 0) {
+
+ //
+ // The share count is now zero, decrement the reference count
+ // for the PFN element and turn the referenced PTE into
+ // the transition state if it refers to a prototype PTE.
+ // PTEs which are not prototype PTEs do not need to be placed
+ // into transition as they are placed in transition when
+ // they are removed from the working set (working set free routine).
+ //
+
+ //
+ // If the PTE referenced by this PFN element is actually
+ // a prototype PTE, it must be mapped into hyperspace and
+ // then operated on.
+ //
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+
+ OldIrql = 99;
+ if (MmIsAddressValid (Pfn1->PteAddress)) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+
+ //
+ // The address is not valid in this process, map it into
+ // hyperspace so it can be operated upon.
+ //
+
+ PointerPte = (PMMPTE)MiMapPageInHyperSpace(Pfn1->PteFrame,
+ &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ TempPte = *PointerPte;
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn1->OriginalPte.u.Soft.Protection);
+ *PointerPte = TempPte;
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+
+ //
+ // There is no need to flush the translation buffer at this
+ // time as we only invalidated a prototype PTE.
+ //
+
+ }
+
+ //
+ // Change the page location to inactive (from active and valid).
+ //
+
+ Pfn1->u3.e1.PageLocation = TransitionPage;
+
+ //
+ // Decrement the reference count as the share count is now zero.
+ //
+
+ MiDecrementReferenceCount (PageFrameIndex);
+ }
+
+ return;
+}
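+
+//
+// The 99 above is a sentinel IRQL meaning "no hyperspace mapping was
+// made", so the unmap at the end is conditional. The recurring shape,
+// with the hypothetical ModelOperateOnPte standing in for whatever is
+// done to the PTE:
+//
+#if 0
+VOID
+ModelOperateOnPte (
+ IN PMMPTE PointerPte // hypothetical PTE manipulation
+ );
+
+VOID
+ModelTouchPrototypePte (
+ IN PMMPFN Pfn
+ )
+{
+ PMMPTE PointerPte;
+ KIRQL OldIrql = 99; // sentinel: nothing mapped yet
+
+ if (MmIsAddressValid (Pfn->PteAddress)) {
+ PointerPte = Pfn->PteAddress; // directly addressable
+ } else {
+
+ //
+ // Map the page table page into hyperspace and add the byte
+ // offset of the PTE within that page.
+ //
+
+ PointerPte = (PMMPTE)MiMapPageInHyperSpace (Pfn->PteFrame, &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset (Pfn->PteAddress));
+ }
+
+ ModelOperateOnPte (PointerPte);
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql); // only if we mapped above
+ }
+}
+#endif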
+#if 0
+
+VOID
+FASTCALL
+MiDecrementShareCount (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine decrements the share count within the PFN element
+ for the specified physical page. If the share count becomes
+ zero, the corresponding PTE is converted to the transition state,
+ the reference count is decremented, and the ValidPte count
+ of the PTE frame is decremented.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number of which to decrement
+ the share count.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PMMPFN PfnX;
+ KIRQL OldIrql;
+
+ ASSERT ((PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex > 0));
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount -= 1;
+
+ ASSERT (Pfn1->u2.ShareCount < 0xF000000);
+
+ if (Pfn1->u2.ShareCount == 0) {
+
+ //
+ // The share count is now zero, decrement the reference count
+ // for the PFN element and turn the referenced PTE into
+ // the transition state if it refers to a prototype PTE.
+ // PTEs which are not prototype PTEs do not need to be placed
+ // into transition as they are placed in transition when
+ // they are removed from the working set (working set free routine).
+ //
+
+ //
+ // If the PTE referenced by this PFN element is actually
+ // a prototype PTE, it must be mapped into hyperspace and
+ // then operated on.
+ //
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+
+ OldIrql = 99;
+ if (MmIsAddressValid (Pfn1->PteAddress)) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+
+ //
+ // The address is not valid in this process, map it into
+ // hyperspace so it can be operated upon.
+ //
+
+ PointerPte = (PMMPTE)MiMapPageInHyperSpace(Pfn1->PteFrame,
+ &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ TempPte = *PointerPte;
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn1->OriginalPte.u.Soft.Protection);
+ *PointerPte = TempPte;
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+
+ //
+ // There is no need to flush the translation buffer at this
+ // time as we only invalidated a prototype PTE.
+ //
+
+ }
+
+ //
+ // Change the page location to inactive (from active and valid).
+ //
+
+ Pfn1->u3.e1.PageLocation = TransitionPage;
+
+ //
+ // Decrement the valid pte count for the PteFrame page.
+ //
+
+#if DBG
+ PfnX = MI_PFN_ELEMENT (Pfn1->PteFrame);
+
+ ASSERT (PfnX->u2.ShareCount != 0);
+#endif //DBG
+
+ //
+ // Decrement the reference count as the share count is now zero.
+ //
+
+ MiDecrementReferenceCount (PageFrameIndex);
+ }
+
+ return;
+}
+
+VOID
+FASTCALL
+MiDecrementShareCountOnly (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine decrements the share count within the PFN element
+ for the specified physical page. If the share count becomes
+ zero, the corresponding PTE is converted to the transition state
+ and the reference count is decremented; the ValidPte count
+ of the corresponding PTE FRAME field is not updated.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number of which to decrement
+ the share count.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ ASSERT ((PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex > 0));
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount -= 1;
+
+ ASSERT (Pfn1->u2.ShareCount < 0xF000000);
+
+ if (Pfn1->u2.ShareCount == 0) {
+
+ //
+ // The share count is now zero, decrement the reference count
+ // for the PFN element and turn the referenced PTE into
+ // the transition state if it refers to a prototype PTE.
+ // PTEs which are not prototype PTEs do not need to be placed
+ // into transition as they are placed in transition when
+ // they are removed from the working set (working set free routine).
+ //
+
+ //
+ // If the PTE referenced by this PFN element is actually
+ // a prototype PTE, it must be mapped into hyperspace and
+ // then operated on.
+ //
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+
+ OldIrql = 99;
+ if (MmIsAddressValid (Pfn1->PteAddress)) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+
+ //
+ // The address is not valid in this process, map it into
+ // hyperspace so it can be operated upon.
+ //
+
+ PointerPte = (PMMPTE)MiMapPageInHyperSpace(Pfn1->PteFrame,
+ &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ TempPte = *PointerPte;
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn1->OriginalPte.u.Soft.Protection);
+ *PointerPte = TempPte;
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+
+ //
+ // There is no need to flush the translation buffer at this
+ // time as we only invalidated a prototype PTE.
+ //
+
+ }
+
+ //
+ // Change the page location to inactive (from active and valid).
+ //
+
+ Pfn1->u3.e1.PageLocation = TransitionPage;
+
+ //
+ // Decrement the reference count as the share count is now zero.
+ //
+
+ MiDecrementReferenceCount (PageFrameIndex);
+ }
+
+ return;
+
+}
+
+VOID
+FASTCALL
+MiDecrementShareAndValidCount (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine decrements the share count and the valid count within
+ the PFN element for the specified physical page. If the share count
+ becomes zero, the corresponding PTE is converted to the transition
+ state and the reference count is decremented.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number of which to decrement
+ the share count.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ ASSERT ((PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex > 0));
+
+ ASSERT (Pfn1->u2.ShareCount != 0);
+
+ Pfn1->u2.ShareCount -= 1;
+
+ ASSERT (Pfn1->u2.ShareCount < (ULONG)0xF000000);
+
+ if (Pfn1->u2.ShareCount == 0) {
+
+ //
+ // The share count is now zero, decrement the reference count
+ // for the PFN element and turn the referenced PTE into
+ // the transition state if it refers to a prototype PTE.
+ // PTEs which are not prototype PTEs do not need to be placed
+ // into transition as they are placed in transition when
+ // they are removed from the working set (working set free routine).
+ //
+
+ //
+ // If the PTE referenced by this PFN element is actually
+ // a prototype PTE, it must be mapped into hyperspace and
+ // then operated on.
+ //
+
+ if (Pfn1->u3.e1.PrototypePte) {
+
+ OldIrql = 99;
+ if (MmIsAddressValid (Pfn1->PteAddress)) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+
+ //
+ // The address is not valid in this process, map it into
+ // hyperspace so it can be operated upon.
+ //
+
+ PointerPte = (PMMPTE)MiMapPageInHyperSpace(Pfn1->PteFrame,
+ &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ TempPte = *PointerPte;
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn1->OriginalPte.u.Soft.Protection);
+ *PointerPte = TempPte;
+
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+
+ //
+ // There is no need to flush the translation buffer at this
+ // time as we only invalidated a prototype PTE.
+ //
+
+ }
+
+ //
+ // Change the page location to inactive (from active and valid).
+ //
+
+ Pfn1->u3.e1.PageLocation = TransitionPage;
+
+ //
+ // Decrement the reference count as the share count is now zero.
+ //
+
+ KdPrint(("MM:shareandvalid decremented share to 0 pteframe = %lx\n",
+ Pfn1->PteFrame));
+
+ MiDecrementReferenceCount (PageFrameIndex);
+ }
+
+ return;
+}
+#endif // 0
+
+VOID
+FASTCALL
+MiDecrementReferenceCount (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine decrements the reference count for the specified page.
+ If the reference count becomes zero, the page is placed on the
+ appropriate list (free, modified, standby or bad). If the page
+ is placed on the free or standby list, the number of available
+ pages is incremented and if it transitions from zero to one, the
+ available page event is set.
+
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number of which to
+ decrement the reference count.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+
+ MM_PFN_LOCK_ASSERT();
+
+ ASSERT ((PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex > 0));
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
+ Pfn1->u3.e2.ReferenceCount -= 1;
+
+
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+
+ //
+ // The reference count is not zero, return.
+ //
+
+ return;
+ }
+
+ //
+ // The reference count is now zero, put the page on some
+ // list.
+ //
+
+
+ if (Pfn1->u2.ShareCount != 0) {
+
+ KeBugCheckEx (PFN_LIST_CORRUPT,
+ 7,
+ PageFrameIndex,
+ Pfn1->u2.ShareCount,
+ 0);
+ return;
+ }
+
+ ASSERT (Pfn1->u3.e1.PageLocation != ActiveAndValid);
+
+#ifdef PARITY
+ if (Pfn1->u3.e1.ParityError == 1) {
+
+ //
+ // This page has parity (ECC) errors, put it on the
+ // bad page list.
+ //
+
+ MiInsertPageInList (MmPageLocationList[BadPageList], PageFrameIndex);
+ return;
+ }
+#endif
+
+ if (MI_IS_PFN_DELETED (Pfn1)) {
+
+ //
+ // There is no referenced PTE for this page, delete
+ // the page file space, if any, and place
+ // the page on the free list.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+
+ MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex);
+ return;
+ }
+
+ //
+ // Place the page on the modified or standby list depending
+ // on the state of the modify bit in the PFN element.
+ //
+
+ if (Pfn1->u3.e1.Modified == 1) {
+ MiInsertPageInList (MmPageLocationList[ModifiedPageList], PageFrameIndex);
+ } else {
+ if (!MmFrontOfList) {
+ MiInsertPageInList (MmPageLocationList[StandbyPageList],
+ PageFrameIndex);
+ } else {
+ MiInsertStandbyListAtFront (PageFrameIndex);
+ }
+ }
+
+ return;
+}
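+
+//
+// Ignoring the PARITY case, the list selection above reduces to a small
+// decision function; ModelTargetList is a hypothetical sketch using the
+// MMLISTS values this module already uses:
+//
+#if 0
+MMLISTS
+ModelTargetList (
+ IN PMMPFN Pfn
+ )
+{
+ if (MI_IS_PFN_DELETED (Pfn)) {
+ return FreePageList; // no PTE references the page
+ }
+ if (Pfn->u3.e1.Modified == 1) {
+ return ModifiedPageList; // contents not yet on backing store
+ }
+ return StandbyPageList; // a clean backing-store copy exists
+}
+#endif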
diff --git a/private/ntos/mm/pfnlist.c b/private/ntos/mm/pfnlist.c
new file mode 100644
index 000000000..b2e1daeaf
--- /dev/null
+++ b/private/ntos/mm/pfnlist.c
@@ -0,0 +1,1707 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ pfnlist.c
+
+Abstract:
+
+ This module contains the routines to manipulate pages within the
+ Page Frame Database.
+
+Author:
+
+ Lou Perazzoli (loup) 4-Apr-1989
+
+Revision History:
+
+--*/
+#include "mi.h"
+
+#define MM_LOW_LIMIT 2
+#define MM_HIGH_LIMIT 19
+
+KEVENT MmAvailablePagesEventHigh;
+
+extern ULONG MmPeakCommitment;
+
+extern ULONG MmExtendedCommit;
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ULONG MmColorSearch;
+#endif
+
+#if DBG
+VOID
+MiMemoryUsage (VOID);
+
+VOID
+MiDumpReferencedPages (VOID);
+
+#endif //DBG
+
+ULONG
+MiCompressPage (
+ IN PVOID Page
+ );
+
+
+#pragma alloc_text(PAGELK,MiUnlinkFreeOrZeroedPage)
+
+VOID
+MiRemovePageByColor (
+ IN ULONG Page,
+ IN ULONG PageColor
+ );
+
+
+VOID
+FASTCALL
+MiInsertPageInList (
+ IN PMMPFNLIST ListHead,
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure inserts a page at the end of the specified list (free,
+ standby, bad, zeroed, modified).
+
+
+Arguments:
+
+ ListHead - Supplies the head of the list in which to insert the
+ specified physical page.
+
+ PageFrameIndex - Supplies the physical page number to insert in the
+ list.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ ULONG last;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ ULONG Color;
+ ULONG PrimaryColor;
+
+ MM_PFN_LOCK_ASSERT();
+ ASSERT ((PageFrameIndex != 0) && (PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex >= MmLowestPhysicalPage));
+
+ //
+ // Check to ensure the reference count for the page
+ // is zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+
+#if DBG
+ if (MmDebug & MM_DBG_PAGE_REF_COUNT) {
+
+ PMMPTE PointerPte;
+ KIRQL OldIrql = 99;
+
+ if ((ListHead->ListName == StandbyPageList) ||
+ (ListHead->ListName == ModifiedPageList)) {
+
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (MmIsAddressValid (Pfn1->PteAddress))) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+
+ //
+ // The page containing the prototype PTE is not valid,
+ // map the page into hyperspace and reference it that way.
+ //
+
+ PointerPte = MiMapPageInHyperSpace (Pfn1->PteFrame, &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ ASSERT ((PointerPte->u.Trans.PageFrameNumber == PageFrameIndex) ||
+ (PointerPte->u.Hard.PageFrameNumber == PageFrameIndex));
+ ASSERT (PointerPte->u.Soft.Transition == 1);
+ ASSERT (PointerPte->u.Soft.Prototype == 0);
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+ }
+ }
+#endif //DBG
+
+#if DBG
+ if ((ListHead->ListName == StandbyPageList) ||
+ (ListHead->ListName == ModifiedPageList)) {
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Transition == 1)) {
+ KeBugCheckEx (MEMORY_MANAGEMENT, 0x8888, 0,0,0);
+ }
+ }
+#endif //DBG
+
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+
+ ListHead->Total += 1; // One more page on the list.
+
+ //
+ // On MIPS R4000 modified pages destined for the paging file are
+ // kept on separate lists which group pages of the same color
+ // together.
+ //
+
+ if ((ListHead == &MmModifiedPageListHead) &&
+ (Pfn1->OriginalPte.u.Soft.Prototype == 0)) {
+
+ //
+ // This page is destined for the paging file (not
+ // a mapped file). Change the list head to the
+ // appropriate colored list head.
+ //
+
+ ListHead = &MmModifiedPageListByColor [Pfn1->u3.e1.PageColor];
+ ListHead->Total += 1;
+ MmTotalPagesForPagingFile += 1;
+ }
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ if (ListHead->ListName <= FreePageList) {
+ ListHead = &MmFreePagesByPrimaryColor [ListHead->ListName] [Pfn1->u3.e1.PageColor];
+ }
+
+ if (ListHead == &MmStandbyPageListHead) {
+ ListHead = &MmStandbyPageListByColor [Pfn1->u3.e1.PageColor];
+ ListHead->Total += 1;
+ }
+#endif // > 1
+
+ last = ListHead->Blink;
+ if (last == MM_EMPTY_LIST) {
+
+ //
+ // List is empty; add the page to the ListHead.
+ //
+
+ ListHead->Flink = PageFrameIndex;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT (last);
+ Pfn2->u1.Flink = PageFrameIndex;
+ }
+
+ ListHead->Blink = PageFrameIndex;
+ Pfn1->u1.Flink = MM_EMPTY_LIST;
+ Pfn1->u2.Blink = last;
+ Pfn1->u3.e1.PageLocation = ListHead->ListName;
+
+ //
+ // If the page was placed on the free, standby or zeroed list,
+ // update the count of usable pages in the system. If the count
+ // transitions from 0 to 1, the event associated with available
+ // pages should become true.
+ //
+
+ if (ListHead->ListName <= StandbyPageList) {
+ MmAvailablePages += 1;
+
+ //
+ // A page has just become available, check to see if the
+ // page wait events should be signalled.
+ //
+
+ if (MmAvailablePages == MM_LOW_LIMIT) {
+ KeSetEvent (&MmAvailablePagesEvent, 0, FALSE);
+ } else if (MmAvailablePages == MM_HIGH_LIMIT) {
+ KeSetEvent (&MmAvailablePagesEventHigh, 0, FALSE);
+ }
+
+ if (ListHead->ListName <= FreePageList) {
+
+ //
+ // We are adding a page to the free or zeroed page list.
+ // Add the page to the end of the correct colored page list.
+ //
+
+ Color = MI_GET_SECONDARY_COLOR (PageFrameIndex, Pfn1);
+ ASSERT (Pfn1->u3.e1.PageColor == MI_GET_COLOR_FROM_SECONDARY(Color));
+
+ if (MmFreePagesByColor[ListHead->ListName][Color].Flink ==
+ MM_EMPTY_LIST) {
+
+ //
+ // This list is empty, add this as the first and last
+ // entry.
+ //
+
+ MmFreePagesByColor[ListHead->ListName][Color].Flink =
+ PageFrameIndex;
+ MmFreePagesByColor[ListHead->ListName][Color].Blink =
+ (PVOID)Pfn1;
+ } else {
+ Pfn2 = (PMMPFN)MmFreePagesByColor[ListHead->ListName][Color].Blink;
+ Pfn2->OriginalPte.u.Long = PageFrameIndex;
+ MmFreePagesByColor[ListHead->ListName][Color].Blink = (PVOID)Pfn1;
+ }
+ Pfn1->OriginalPte.u.Long = MM_EMPTY_LIST;
+ }
+
+ if ((ListHead->ListName == FreePageList) &&
+ (MmFreePageListHead.Total >= MmMinimumFreePagesToZero) &&
+ (MmZeroingPageThreadActive == FALSE)) {
+
+ //
+ // There are enough pages on the free list, start
+ // the zeroing page thread.
+ //
+
+ MmZeroingPageThreadActive = TRUE;
+ KeSetEvent (&MmZeroingPageEvent, 0, FALSE);
+ }
+ return;
+ }
+
+ //
+ // Check to see if there are too many modified pages.
+ //
+
+ if (ListHead->ListName == ModifiedPageList) {
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 0) {
+ ASSERT (Pfn1->OriginalPte.u.Soft.PageFileHigh == 0);
+ }
+ PsGetCurrentProcess()->ModifiedPageCount += 1;
+ if (MmModifiedPageListHead.Total >= MmModifiedPageMaximum ) {
+
+ //
+ // Start the modified page writer.
+ //
+
+ KeSetEvent (&MmModifiedPageWriterEvent, 0, FALSE);
+ }
+ }
+
+ return;
+}
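+
+//
+// PFN lists link by page frame index rather than by pointer, with
+// MM_EMPTY_LIST playing the role of NULL. The tail insertion above,
+// reduced to a sketch over a hypothetical minimal element type:
+//
+#if 0
+typedef struct _MODEL_PFN {
+ ULONG Flink;
+ ULONG Blink;
+} MODEL_PFN;
+
+VOID
+ModelInsertTail (
+ IN PMMPFNLIST ListHead,
+ IN MODEL_PFN *Database,
+ IN ULONG PageFrameIndex
+ )
+{
+ ULONG last = ListHead->Blink;
+
+ if (last == MM_EMPTY_LIST) {
+ ListHead->Flink = PageFrameIndex; // list was empty
+ } else {
+ Database[last].Flink = PageFrameIndex; // old tail points forward
+ }
+ ListHead->Blink = PageFrameIndex; // new tail
+ Database[PageFrameIndex].Flink = MM_EMPTY_LIST;
+ Database[PageFrameIndex].Blink = last;
+}
+#endif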
+
+
+VOID
+FASTCALL
+MiInsertStandbyListAtFront (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure inserts a page at the front of the standby list.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to insert in the
+ list.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ ULONG first;
+ PMMPFNLIST ListHead;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ KIRQL OldIrql;
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+
+ MM_PFN_LOCK_ASSERT();
+ ASSERT ((PageFrameIndex != 0) && (PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex >= MmLowestPhysicalPage));
+
+ //
+ // Check to ensure the reference count for the page
+ // is zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+
+#if DBG
+ if (MmDebug & MM_DBG_PAGE_REF_COUNT) {
+
+ PMMPTE PointerPte;
+ KIRQL OldIrql = 99;
+
+ if ((Pfn1->u3.e1.PrototypePte == 1) &&
+ (MmIsAddressValid (Pfn1->PteAddress))) {
+ PointerPte = Pfn1->PteAddress;
+ } else {
+
+ //
+ // The page containing the prototype PTE is not valid,
+ // map the page into hyperspace and reference it that way.
+ //
+
+ PointerPte = MiMapPageInHyperSpace (Pfn1->PteFrame, &OldIrql);
+ PointerPte = (PMMPTE)((ULONG)PointerPte +
+ MiGetByteOffset(Pfn1->PteAddress));
+ }
+
+ ASSERT ((PointerPte->u.Trans.PageFrameNumber == PageFrameIndex) ||
+ (PointerPte->u.Hard.PageFrameNumber == PageFrameIndex));
+ ASSERT (PointerPte->u.Soft.Transition == 1);
+ ASSERT (PointerPte->u.Soft.Prototype == 0);
+ if (OldIrql != 99) {
+ MiUnmapPageInHyperSpace (OldIrql);
+ }
+ }
+#endif //DBG
+
+#if DBG
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Transition == 1)) {
+ KeBugCheckEx (MEMORY_MANAGEMENT, 0x8889, 0,0,0);
+ }
+#endif //DBG
+
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u3.e1.PrototypePte == 1);
+
+ MmStandbyPageListHead.Total += 1; // One more page on the list.
+
+ ListHead = &MmStandbyPageListHead;
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+
+ ListHead = &MmStandbyPageListByColor [Pfn1->u3.e1.PageColor];
+ ListHead->Total += 1;
+#endif // > 1
+
+ first = ListHead->Flink;
+ if (first == MM_EMPTY_LIST) {
+
+ //
+ // List is empty; add the page to the ListHead.
+ //
+
+ ListHead->Blink = PageFrameIndex;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT (first);
+ Pfn2->u2.Blink = PageFrameIndex;
+ }
+
+ ListHead->Flink = PageFrameIndex;
+ Pfn1->u2.Blink = MM_EMPTY_LIST;
+ Pfn1->u1.Flink = first;
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+
+ //
+ // The page was placed on the standby list; update the count of
+ // usable pages in the system. If the count transitions from 0 to 1,
+ // the event associated with available pages should become true.
+ //
+
+ MmAvailablePages += 1;
+
+ //
+ // A page has just become available, check to see if the
+ // page wait events should be signalled.
+ //
+
+ if (MmAvailablePages == MM_LOW_LIMIT) {
+ KeSetEvent (&MmAvailablePagesEvent, 0, FALSE);
+ } else if (MmAvailablePages == MM_HIGH_LIMIT) {
+ KeSetEvent (&MmAvailablePagesEventHigh, 0, FALSE);
+ }
+ return;
+}
+
+ULONG //PageFrameIndex
+FASTCALL
+MiRemovePageFromList (
+ IN PMMPFNLIST ListHead
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure removes a page from the head of the specified list (free,
+ standby, zeroed, modified). Note that it makes no sense to remove
+ a page from the head of the bad list.
+
+ This routine clears the flags word in the PFN database, hence the
+ PFN information for this page must be initialized.
+
+Arguments:
+
+ ListHead - Supplies the head of the list from which to remove the
+ specified physical page.
+
+Return Value:
+
+ The physical page number removed from the specified list.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ ULONG Color;
+
+ MM_PFN_LOCK_ASSERT();
+
+ //
+ // If the specified list is empty, the PFN lists are corrupt; bugcheck.
+ //
+
+ if (ListHead->Total == 0) {
+
+ KdPrint(("MM:Attempting to remove page from empty list\n"));
+ KeBugCheckEx (PFN_LIST_CORRUPT, 1, (ULONG)ListHead, MmAvailablePages, 0);
+ return 0;
+ }
+
+ ASSERT (ListHead->ListName != ModifiedPageList);
+
+ //
+ // Decrement the count of pages on the list and remove the first
+ // page from the list.
+ //
+
+ ListHead->Total -= 1;
+ PageFrameIndex = ListHead->Flink;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ListHead->Flink = Pfn1->u1.Flink;
+
+ //
+ // Zero the flink and blink in the pfn database element.
+ //
+
+ Pfn1->u1.Flink = 0;
+ Pfn1->u2.Blink = 0;
+
+ //
+ // If the last page was removed (the ListHead->Flink is now
+ // MM_EMPTY_LIST) make the listhead->Blink MM_EMPTY_LIST as well.
+ //
+
+ if (ListHead->Flink == MM_EMPTY_LIST) {
+ ListHead->Blink = MM_EMPTY_LIST;
+ } else {
+
+ //
+ // Make the PFN element point to MM_EMPTY_LIST signifying this
+ // is the last page in the list.
+ //
+
+ Pfn2 = MI_PFN_ELEMENT (ListHead->Flink);
+ Pfn2->u2.Blink = MM_EMPTY_LIST;
+ }
+
+ //
+ // Check to see if we now have one less page available.
+ //
+
+ if (ListHead->ListName <= StandbyPageList) {
+ MmAvailablePages -= 1;
+
+ if (ListHead->ListName == StandbyPageList) {
+
+ //
+ // This page is currently in transition, restore the PTE to
+ // its original contents so this page can be reused.
+ //
+
+ MiRestoreTransitionPte (PageFrameIndex);
+ }
+
+ if (MmAvailablePages < MmMinimumFreePages) {
+
+ //
+ // Obtain free pages.
+ //
+
+ MiObtainFreePages();
+ }
+ }
+
+ ASSERT ((PageFrameIndex != 0) &&
+ (PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex >= MmLowestPhysicalPage));
+
+ //
+ // Zero the PFN flags longword.
+ //
+
+ Color = Pfn1->u3.e1.PageColor;
+ Pfn1->u3.e2.ShortFlags = 0;
+ Pfn1->u3.e1.PageColor = Color;
+ Color = MI_GET_SECONDARY_COLOR (PageFrameIndex, Pfn1);
+
+ if (ListHead->ListName <= FreePageList) {
+
+ //
+ // Update the color lists.
+ //
+
+ ASSERT (MmFreePagesByColor[ListHead->ListName][Color].Flink == PageFrameIndex);
+ MmFreePagesByColor[ListHead->ListName][Color].Flink =
+ Pfn1->OriginalPte.u.Long;
+ }
+
+ return PageFrameIndex;
+}
+
+VOID
+FASTCALL
+MiUnlinkPageFromList (
+ IN PMMPFN Pfn
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure removes a page from the middle of a list. This is
+ designed for the faulting of transition pages from the standby and
+ modified lists and making them active and valid again.
+
+Arguments:
+
+ Pfn - Supplies a pointer to the PFN database element for the physical
+ page to remove from the list.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ PMMPFNLIST ListHead;
+ ULONG Previous;
+ ULONG Next;
+ PMMPFN Pfn2;
+
+ MM_PFN_LOCK_ASSERT();
+
+ //
+ // If the page is not on the standby or modified list, check to see
+ // whether it is currently being written by the modified page
+ // writer; if so, just return this page. The reference
+ // count for the page will be incremented, so when the modified
+ // page write completes, the page will not be put back on
+ // the list; rather, it will remain active and valid.
+ //
+
+ if (Pfn->u3.e2.ReferenceCount > 0) {
+
+ //
+ // The page was not on any "transition lists", check to see
+ // if it has I/O in progress.
+ //
+
+ if (Pfn->u2.ShareCount == 0) {
+#if DBG
+ if (MmDebug & MM_DBG_PAGE_IN_LIST) {
+ DbgPrint("unlinking page not in list...\n");
+ MiFormatPfn(Pfn);
+ }
+#endif
+ return;
+ }
+ KdPrint(("MM:attempt to remove page from wrong page list\n"));
+ KeBugCheckEx (PFN_LIST_CORRUPT,
+ 2,
+ Pfn - MmPfnDatabase,
+ MmHighestPhysicalPage,
+ Pfn->u3.e2.ReferenceCount);
+ return;
+ }
+
+ ListHead = MmPageLocationList[Pfn->u3.e1.PageLocation];
+
+ //
+ // On MIPS R4000 modified pages destined for the paging file are
+ // kept on sperate lists which group pages of the same color
+ // together
+ //
+
+ if ((ListHead == &MmModifiedPageListHead) &&
+ (Pfn->OriginalPte.u.Soft.Prototype == 0)) {
+
+ //
+ // This page is destined for the paging file (not
+ // a mapped file). Change the list head to the
+ // appropriate colored list head.
+ //
+
+ ListHead->Total -= 1;
+ MmTotalPagesForPagingFile -= 1;
+ ListHead = &MmModifiedPageListByColor [Pfn->u3.e1.PageColor];
+ }
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ if (ListHead == &MmStandbyPageListHead) {
+
+ //
+ // This page is destined for the paging file (not
+ // a mapped file). Change the list head to the
+ // appropriate colored list head.
+ //
+
+ ListHead->Total -= 1;
+ ListHead = &MmStandbyPageListByColor [Pfn->u3.e1.PageColor];
+ }
+#endif //MM_MAXIMUM_NUMBER_OF_COLORS > 1
+
+ ASSERT (Pfn->u3.e1.WriteInProgress == 0);
+ ASSERT (Pfn->u3.e1.ReadInProgress == 0);
+ ASSERT (ListHead->Total != 0);
+
+ Next = Pfn->u1.Flink;
+ Pfn->u1.Flink = 0;
+ Previous = Pfn->u2.Blink;
+ Pfn->u2.Blink = 0;
+
+ if (Next == MM_EMPTY_LIST) {
+ ListHead->Blink = Previous;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT(Next);
+ Pfn2->u2.Blink = Previous;
+ }
+
+ if (Previous == MM_EMPTY_LIST) {
+ ListHead->Flink = Next;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT(Previous);
+ Pfn2->u1.Flink = Next;
+ }
+
+ ListHead->Total -= 1;
+
+ //
+ // Check to see if we now have one less page available.
+ //
+
+ if (ListHead->ListName <= StandbyPageList) {
+ MmAvailablePages -= 1;
+
+ if (MmAvailablePages < MmMinimumFreePages) {
+
+ //
+ // Obtain free pages.
+ //
+
+ MiObtainFreePages();
+
+ }
+ }
+
+ return;
+}
+
+VOID
+MiUnlinkFreeOrZeroedPage (
+ IN ULONG Page
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure removes a page from the middle of the free or zeroed
+ page list and updates the secondary color tables accordingly.
+
+Arguments:
+
+ Page - Supplies the physical page number to remove from the list.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ PMMPFNLIST ListHead;
+ ULONG Previous;
+ ULONG Next;
+ PMMPFN Pfn2;
+ PMMPFN Pfn;
+ ULONG Color;
+
+ Pfn = MI_PFN_ELEMENT (Page);
+
+ MM_PFN_LOCK_ASSERT();
+
+ ListHead = MmPageLocationList[Pfn->u3.e1.PageLocation];
+ ASSERT (ListHead->Total != 0);
+ ListHead->Total -= 1;
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ ListHead = &MmFreePagesByPrimaryColor [ListHead->ListName] [Pfn->u3.e1.PageColor];
+#endif
+
+ ASSERT (ListHead->ListName <= FreePageList);
+ ASSERT (Pfn->u3.e1.WriteInProgress == 0);
+ ASSERT (Pfn->u3.e1.ReadInProgress == 0);
+
+ Next = Pfn->u1.Flink;
+ Pfn->u1.Flink = 0;
+ Previous = Pfn->u2.Blink;
+ Pfn->u2.Blink = 0;
+
+ if (Next == MM_EMPTY_LIST) {
+ ListHead->Blink = Previous;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT(Next);
+ Pfn2->u2.Blink = Previous;
+ }
+
+ if (Previous == MM_EMPTY_LIST) {
+ ListHead->Flink = Next;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT(Previous);
+ Pfn2->u1.Flink = Next;
+ }
+
+ //
+ // We are removing a page from the middle of the free or zeroed page list.
+ // The secondary color tables must be updated at this time.
+ //
+
+ Color = MI_GET_SECONDARY_COLOR (Page, Pfn);
+ ASSERT (Pfn->u3.e1.PageColor == MI_GET_COLOR_FROM_SECONDARY(Color));
+
+ //
+ // Walk down the list and remove the page.
+ //
+
+ Next = MmFreePagesByColor[ListHead->ListName][Color].Flink;
+ if (Next == Page) {
+ MmFreePagesByColor[ListHead->ListName][Color].Flink =
+ Pfn->OriginalPte.u.Long;
+ } else {
+
+ //
+ // Walk the list to find the parent.
+ //
+
+ for (; ; ) {
+ Pfn2 = MI_PFN_ELEMENT (Next);
+ Next = Pfn2->OriginalPte.u.Long;
+ if (Page == Next) {
+ Pfn2->OriginalPte.u.Long = Pfn->OriginalPte.u.Long;
+ if (Pfn->OriginalPte.u.Long == MM_EMPTY_LIST) {
+ MmFreePagesByColor[ListHead->ListName][Color].Blink = Pfn2;
+ }
+ break;
+ }
+ }
+ }
+
+ MmAvailablePages -= 1;
+ return;
+}
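+
+//
+// The per-color free and zeroed chains are singly linked through
+// OriginalPte.u.Long, so removing an arbitrary page requires the walk
+// above to find the predecessor. A sketch over a hypothetical Next[]
+// array (the Blink fix-up for the tail case is omitted):
+//
+#if 0
+VOID
+ModelRemoveFromColorChain (
+ IN OUT ULONG *Head, // chain head, MM_EMPTY_LIST if empty
+ IN OUT ULONG Next[], // Next[i] is the page linked after page i
+ IN ULONG Page
+ )
+{
+ ULONG Walk;
+
+ if (*Head == Page) {
+ *Head = Next[Page]; // removing the head is O(1)
+ return;
+ }
+ for (Walk = *Head; ; Walk = Next[Walk]) {
+ if (Next[Walk] == Page) {
+ Next[Walk] = Next[Page]; // predecessor skips the page
+ return;
+ }
+ }
+}
+#endif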
+
+
+
+ULONG
+FASTCALL
+MiEnsureAvailablePageOrWait (
+ IN PEPROCESS Process,
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure ensures that a physical page is available on
+ the zeroed, free or standby list such that the next call to remove a
+ page absolutely will not block. This is necessary as blocking would
+ require a wait which could cause a deadlock condition.
+
+ If a page is available the function returns immediately with a value
+ of FALSE indicating no wait operation was performed. If no physical
+ page is available, the thread enters a wait state and the function
+ returns the value TRUE when the wait operation completes.
+
+Arguments:
+
+ Process - Supplies a pointer to the current process if, and only if,
+ the working set mutex is currently held and should
+ be released if a wait operation is issued. Supplies
+ the value NULL otherwise.
+
+ VirtualAddress - Supplies the virtual address for the faulting page.
+ If the value is NULL, the page is treated as a
+ user mode address.
+
+Return Value:
+
+ FALSE - if a page was immediately available.
+ TRUE - if a wait operation occurred before a page became available.
+
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ PVOID Event;
+ NTSTATUS Status;
+ KIRQL OldIrql;
+ KIRQL Ignore;
+ ULONG Limit;
+ ULONG Relock;
+
+ MM_PFN_LOCK_ASSERT();
+
+ if (MmAvailablePages >= MM_HIGH_LIMIT) {
+
+ //
+ // Pages are available.
+ //
+
+ return FALSE;
+ }
+
+ //
+ // If this fault is for paged pool (or pageable kernel space,
+ // including page table pages), let it use the last page.
+ //
+
+ if (((PMMPTE)VirtualAddress > MiGetPteAddress(HYPER_SPACE)) ||
+ ((VirtualAddress > MM_HIGHEST_USER_ADDRESS) &&
+ (VirtualAddress < (PVOID)PTE_BASE))) {
+
+ //
+ // This fault is in the system, use 1 page as the limit.
+ //
+
+ if (MmAvailablePages >= MM_LOW_LIMIT) {
+
+ //
+ // Pages are available.
+ //
+
+ return FALSE;
+ }
+ Limit = MM_LOW_LIMIT;
+ Event = (PVOID)&MmAvailablePagesEvent;
+ } else {
+ Limit = MM_HIGH_LIMIT;
+ Event = (PVOID)&MmAvailablePagesEventHigh;
+ }
+
+ while (MmAvailablePages < Limit) {
+ KeClearEvent ((PKEVENT)Event);
+ UNLOCK_PFN (APC_LEVEL);
+
+ if (Process != NULL) {
+ UNLOCK_WS (Process);
+ } else {
+ Relock = FALSE;
+ if (MmSystemLockOwner == PsGetCurrentThread()) {
+ UNLOCK_SYSTEM_WS (APC_LEVEL);
+ Relock = TRUE;
+ }
+ }
+
+ //
+ // Wait for a page to become available. If the wait times out
+ // after 7 minutes, bugcheck.
+ //
+
+ Status = KeWaitForSingleObject(Event,
+ WrFreePage,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)&MmSevenMinutes);
+
+ if (Status == STATUS_TIMEOUT) {
+ KeBugCheckEx (NO_PAGES_AVAILABLE,
+ MmModifiedPageListHead.Total,
+ MmNumberOfPhysicalPages,
+ MmExtendedCommit,
+ MmTotalCommittedPages);
+ return TRUE;
+ }
+
+ if (Process != NULL) {
+ LOCK_WS (Process);
+ } else {
+ if (Relock) {
+ LOCK_SYSTEM_WS (Ignore);
+ }
+ }
+
+ LOCK_PFN (OldIrql);
+ }
+
+ return TRUE;
+}
+
+
+ULONG //PageFrameIndex
+FASTCALL
+MiRemoveZeroPage (
+ IN ULONG PageColor
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure removes a zero page from either the zeroed, free
+ or standby lists (in that order). If no pages exist on the zeroed
+ or free list a transition page is removed from the standby list
+ and the PTE (may be a prototype PTE) which refers to this page is
+ changed from transition back to its original contents.
+
+ If the page is not obtained from the zeroed list, it is zeroed.
+
+ Note, if no pages exist to satisfy this request an exception is
+ raised.
+
+Arguments:
+
+ PageColor - Supplies the page color for which this page is destined.
+ This is used for checking virtual address alignments to
+ determine if the D cache needs flushing before the page
+ can be reused.
+
+Return Value:
+
+ The physical page number removed from the specified list.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ ULONG Page;
+ PMMPFN Pfn1;
+ ULONG i;
+ ULONG Color;
+ ULONG PrimaryColor;
+ PMMCOLOR_TABLES ColorTable;
+
+ MM_PFN_LOCK_ASSERT();
+ ASSERT(MmAvailablePages != 0);
+
+ //
+ // Attempt to remove a page from the zeroed page list. If a page
+ // is available, then remove it and return its page frame index.
+ // Otherwise, attempt to remove a page from the free page list or
+ // the standby list.
+ //
+ // N.B. It is not necessary to change page colors even if the old
+ // color is not equal to the new color. The zero page thread
+ // ensures that all zeroed pages are removed from all caches.
+ //
+
+ if (MmFreePagesByColor[ZeroedPageList][PageColor].Flink != MM_EMPTY_LIST) {
+
+ //
+ // Remove the first entry on the zeroed by color list.
+ //
+
+ Page = MmFreePagesByColor[ZeroedPageList][PageColor].Flink;
+
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageLocation == ZeroedPageList);
+#endif //DBG
+
+ MiRemovePageByColor (Page, PageColor);
+
+#if DBG
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ return Page;
+
+ } else {
+
+ //
+ // No zeroed page of the specified secondary color exists; try a
+ // zeroed page of the primary color.
+ //
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ PrimaryColor = MI_GET_COLOR_FROM_SECONDARY(PageColor);
+ if (MmFreePagesByPrimaryColor[ZeroedPageList][PrimaryColor].Flink != MM_EMPTY_LIST) {
+ Page = MmFreePagesByPrimaryColor[ZeroedPageList][PrimaryColor].Flink;
+#else
+ if (MmZeroedPageListHead.Flink != MM_EMPTY_LIST) {
+ Page = MmZeroedPageListHead.Flink;
+#endif
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageLocation == ZeroedPageList);
+#endif //DBG
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+ MiRemovePageByColor (Page, Color);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ return Page;
+ }
+ //
+ // No zeroed page of the right color exists; try a free page of the
+ // secondary color.
+ //
+
+ if (MmFreePagesByColor[FreePageList][PageColor].Flink != MM_EMPTY_LIST) {
+
+ //
+ // Remove the first entry on the free list by color.
+ //
+
+ Page = MmFreePagesByColor[FreePageList][PageColor].Flink;
+
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageLocation == FreePageList);
+#endif //DBG
+
+ MiRemovePageByColor (Page, PageColor);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ goto ZeroPage;
+ }
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ if (MmFreePagesByPrimaryColor[FreePageList][PrimaryColor].Flink != MM_EMPTY_LIST) {
+ Page = MmFreePagesByPrimaryColor[FreePageList][PrimaryColor].Flink;
+#else
+ if (MmFreePageListHead.Flink != MM_EMPTY_LIST) {
+ Page = MmFreePageListHead.Flink;
+#endif
+
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+ MiRemovePageByColor (Page, Color);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ goto ZeroPage;
+ }
+ }
+
+#if MM_NUMBER_OF_COLORS < 2
+ ASSERT (MmZeroedPageListHead.Total == 0);
+ ASSERT (MmFreePageListHead.Total == 0);
+#endif //NUMBER_OF_COLORS
+
+ if (MmZeroedPageListHead.Total != 0) {
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmColorSearch = (MmColorSearch + 1) & (MM_MAXIMUM_NUMBER_OF_COLORS - 1);
+ Page = MmFreePagesByPrimaryColor[ZeroedPageList][MmColorSearch].Flink;
+ if (Page != MM_EMPTY_LIST) {
+ break;
+ }
+ }
+ ASSERT (Page != MM_EMPTY_LIST);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageLocation == ZeroedPageList);
+#endif //DBG
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+ MiRemovePageByColor (Page, Color);
+#else
+ Page = MiRemovePageFromList(&MmZeroedPageListHead);
+#endif
+
+ MI_CHECK_PAGE_ALIGNMENT(Page, PageColor & MM_COLOR_MASK);
+
+ } else {
+
+ //
+ // Attempt to remove a page from the free list. If a page is
+ // available, then remove it. Otherwise, attempt to remove a
+ // page from the standby list.
+ //
+
+ if (MmFreePageListHead.Total != 0) {
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmColorSearch = (MmColorSearch + 1) & (MM_MAXIMUM_NUMBER_OF_COLORS - 1);
+ Page = MmFreePagesByPrimaryColor[FreePageList][MmColorSearch].Flink;
+ if (Page != MM_EMPTY_LIST) {
+ break;
+ }
+ }
+ ASSERT (Page != MM_EMPTY_LIST);
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageLocation == FreePageList);
+#endif //DBG
+ MiRemovePageByColor (Page, Color);
+#else
+ Page = MiRemovePageFromList(&MmFreePageListHead);
+#endif
+
+ } else {
+
+ //
+ // Remove a page from the standby list and restore the original
+ // contents of the PTE to free the last reference to the physical
+ // page.
+ //
+
+ ASSERT (MmStandbyPageListHead.Total != 0);
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ if (MmStandbyPageListByColor[PrimaryColor].Flink != MM_EMPTY_LIST) {
+ Page = MiRemovePageFromList(&MmStandbyPageListByColor[PrimaryColor]);
+ } else {
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmColorSearch = (MmColorSearch + 1) & (MM_MAXIMUM_NUMBER_OF_COLORS - 1);
+ if (MmStandbyPageListByColor[MmColorSearch].Flink != MM_EMPTY_LIST) {
+ Page = MiRemovePageFromList(&MmStandbyPageListByColor[MmColorSearch]);
+ break;
+ }
+ }
+ }
+ MmStandbyPageListHead.Total -= 1;
+#else
+ Page = MiRemovePageFromList(&MmStandbyPageListHead);
+#endif //MM_MAXIMUM_NUMBER_OF_COLORS > 1
+
+ }
+
+ //
+ // Zero the page removed from the free or standby list.
+ //
+
+ZeroPage:
+
+ Pfn1 = MI_PFN_ELEMENT(Page);
+#if defined(MIPS) || defined(_ALPHA_)
+ HalZeroPage((PVOID)((PageColor & MM_COLOR_MASK) << PAGE_SHIFT),
+ (PVOID)((ULONG)(Pfn1->u3.e1.PageColor) << PAGE_SHIFT),
+ Page);
+#elif defined(_PPC_)
+ KeZeroPage(Page);
+#else
+
+ MiZeroPhysicalPage (Page, 0);
+
+#endif //MIPS
+ Pfn1->u3.e1.PageColor = PageColor & MM_COLOR_MASK;
+
+ }
+
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT (Page);
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+
+ return Page;
+}
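+
+//
+// The search order above, cheapest first: zeroed page of the exact
+// secondary color, zeroed page of the primary color, free page of the
+// exact secondary color, free page of the primary color, any zeroed
+// page, any free page, and finally a standby page (whose transition PTE
+// is restored by MiRemovePageFromList). Only pages that did not come
+// from a zeroed list pass through the ZeroPage label.
+//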
+
+ULONG //PageFrameIndex
+FASTCALL
+MiRemoveAnyPage (
+ IN ULONG PageColor
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure removes a page from either the free, zeroed,
+ or standby lists (in that order). If no pages exist on the zeroed
+ or free list a transition page is removed from the standby list
+ and the PTE (may be a prototype PTE) which refers to this page is
+ changed from transition back to its original contents.
+
+ Note, if no pages exist to satisfy this request an exception is
+ raised.
+
+Arguments:
+
+ PageColor - Supplies the page color for which this page is destined.
+ This is used for checking virtual address alignments to
+ determine if the D cache needs flushing before the page
+ can be reused.
+
+Return Value:
+
+ The physical page number removed from the specified list.
+
+Environment:
+
+ Must be holding the PFN database mutex with APC's disabled.
+
+--*/
+
+{
+ ULONG Page;
+ PMMPFN Pfn1;
+ ULONG PrimaryColor;
+ ULONG Color;
+ PMMCOLOR_TABLES ColorTable;
+ ULONG i;
+
+ MM_PFN_LOCK_ASSERT();
+ ASSERT(MmAvailablePages != 0);
+
+ //
+ // Check the free page list, and if a page is available
+ // remove it and return its value.
+ //
+
+ if (MmFreePagesByColor[FreePageList][PageColor].Flink != MM_EMPTY_LIST) {
+
+ //
+ // Remove the first entry on the free by color list.
+ //
+
+ Page = MmFreePagesByColor[FreePageList][PageColor].Flink;
+ MiRemovePageByColor (Page, PageColor);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ return Page;
+
+ } else if (MmFreePagesByColor[ZeroedPageList][PageColor].Flink
+ != MM_EMPTY_LIST) {
+
+ //
+ // Remove the first entry on the zeroed by color list.
+ //
+
+ Page = MmFreePagesByColor[ZeroedPageList][PageColor].Flink;
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e1.PageLocation == ZeroedPageList);
+#endif //DBG
+
+ MiRemovePageByColor (Page, PageColor);
+ return Page;
+ } else {
+
+ //
+ // Try the free page list by primary color.
+ //
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ PrimaryColor = MI_GET_COLOR_FROM_SECONDARY(PageColor);
+ if (MmFreePagesByPrimaryColor[FreePageList][PrimaryColor].Flink != MM_EMPTY_LIST) {
+ Page = MmFreePagesByPrimaryColor[FreePageList][PrimaryColor].Flink;
+#else
+ if (MmFreePageListHead.Flink != MM_EMPTY_LIST) {
+ Page = MmFreePageListHead.Flink;
+#endif
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e1.PageLocation == FreePageList);
+#endif //DBG
+ MiRemovePageByColor (Page, Color);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ return Page;
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ } else if (MmFreePagesByPrimaryColor[ZeroedPageList][PrimaryColor].Flink != MM_EMPTY_LIST) {
+ Page = MmFreePagesByPrimaryColor[ZeroedPageList][PrimaryColor].Flink;
+#else
+ } else if (MmZeroedPageListHead.Flink != MM_EMPTY_LIST) {
+ Page = MmZeroedPageListHead.Flink;
+#endif
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+ MiRemovePageByColor (Page, Color);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageColor == (PageColor & MM_COLOR_MASK));
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ return Page;
+ }
+ }
+
+ if (MmFreePageListHead.Total != 0) {
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmColorSearch = (MmColorSearch + 1) & (MM_MAXIMUM_NUMBER_OF_COLORS - 1);
+ Page = MmFreePagesByPrimaryColor[FreePageList][MmColorSearch].Flink;
+ if (Page != MM_EMPTY_LIST) {
+ break;
+ }
+ }
+ ASSERT (Page != MM_EMPTY_LIST);
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageLocation == FreePageList);
+#endif //DBG
+ MiRemovePageByColor (Page, Color);
+#else
+ Page = MiRemovePageFromList(&MmFreePageListHead);
+#endif
+
+ } else {
+
+ //
+ // Check the zeroed page list, and if a page is available
+ // remove it and return its value.
+ //
+
+ if (MmZeroedPageListHead.Total != 0) {
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmColorSearch = (MmColorSearch + 1) & (MM_MAXIMUM_NUMBER_OF_COLORS - 1);
+ Page = MmFreePagesByPrimaryColor[ZeroedPageList][MmColorSearch].Flink;
+ if (Page != MM_EMPTY_LIST) {
+ break;
+ }
+ }
+ ASSERT (Page != MM_EMPTY_LIST);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT(Page);
+ ASSERT (Pfn1->u3.e1.PageLocation == ZeroedPageList);
+#endif //DBG
+ Color = MI_GET_SECONDARY_COLOR (Page, MI_PFN_ELEMENT(Page));
+ MiRemovePageByColor (Page, Color);
+#else
+ Page = MiRemovePageFromList(&MmZeroedPageListHead);
+#endif
+
+ } else {
+
+ //
+ // No pages exist on the free or zeroed list, use the
+ // standby list.
+ //
+
+ ASSERT(MmStandbyPageListHead.Total != 0);
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ if (MmStandbyPageListByColor[PrimaryColor].Flink != MM_EMPTY_LIST) {
+ Page = MiRemovePageFromList(&MmStandbyPageListByColor[PrimaryColor]);
+ } else {
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmColorSearch = (MmColorSearch + 1) & (MM_MAXIMUM_NUMBER_OF_COLORS - 1);
+ if (MmStandbyPageListByColor[MmColorSearch].Flink != MM_EMPTY_LIST) {
+ Page = MiRemovePageFromList(&MmStandbyPageListByColor[MmColorSearch]);
+ break;
+ }
+ }
+ }
+ MmStandbyPageListHead.Total -= 1;
+#else
+ Page = MiRemovePageFromList(&MmStandbyPageListHead);
+#endif //MM_MAXIMUM_NUMBER_OF_COLORS > 1
+
+ }
+ }
+
+ MI_CHECK_PAGE_ALIGNMENT(Page, PageColor & MM_COLOR_MASK);
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT (Page);
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+ ASSERT (Pfn1->u2.ShareCount == 0);
+#endif //DBG
+ return Page;
+}
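+
+//
+// N.B. (editor's note) The color-preference cascade above degrades
+// gracefully: it prefers an exact secondary-color match, then a
+// primary-color match on the free and zeroed lists, then any free
+// page, any zeroed page, and finally the standby list.
+//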
+
+
+VOID
+MiRemovePageByColor (
+ IN ULONG Page,
+ IN ULONG Color
+ )
+
+/*++
+
+Routine Description:
+
+    This procedure removes a page from the middle of the free or
+    zeroed page list.
+
+Arguments:
+
+    Page - Supplies the physical page number to unlink from the list.
+
+    Color - Supplies the secondary color of the list the page is on.
+
+Return Value:
+
+ none.
+
+Environment:
+
+    Must be holding the PFN database mutex with APCs disabled.
+
+--*/
+
+{
+ PMMPFNLIST ListHead;
+ PMMPFNLIST PrimaryListHead;
+ ULONG Previous;
+ ULONG Next;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ ULONG PrimaryColor;
+
+ MM_PFN_LOCK_ASSERT();
+
+ Pfn1 = MI_PFN_ELEMENT (Page);
+ PrimaryColor = Pfn1->u3.e1.PageColor;
+
+ ListHead = MmPageLocationList[Pfn1->u3.e1.PageLocation];
+ ListHead->Total -= 1;
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ PrimaryListHead =
+ &MmFreePagesByPrimaryColor[Pfn1->u3.e1.PageLocation][PrimaryColor];
+#else
+ PrimaryListHead = ListHead;
+#endif
+
+ Next = Pfn1->u1.Flink;
+ Pfn1->u1.Flink = 0;
+ Previous = Pfn1->u2.Blink;
+ Pfn1->u2.Blink = 0;
+
+ if (Next == MM_EMPTY_LIST) {
+ PrimaryListHead->Blink = Previous;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT(Next);
+ Pfn2->u2.Blink = Previous;
+ }
+
+ if (Previous == MM_EMPTY_LIST) {
+ PrimaryListHead->Flink = Next;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT(Previous);
+ Pfn2->u1.Flink = Next;
+ }
+
+ //
+ // Zero the flags longword, but keep the color information.
+ //
+
+ Pfn1->u3.e2.ShortFlags = 0;
+ Pfn1->u3.e1.PageColor = PrimaryColor;
+
+ //
+ // Update the color lists.
+ //
+
+ MmFreePagesByColor[ListHead->ListName][Color].Flink =
+ Pfn1->OriginalPte.u.Long;
+
+ //
+ // Note that we now have one less page available.
+ //
+
+ MmAvailablePages -= 1;
+
+ if (MmAvailablePages < MmMinimumFreePages) {
+
+ //
+ // Obtain free pages.
+ //
+
+ MiObtainFreePages();
+
+ }
+
+ return;
+}
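+
+//
+// N.B. (editor's sketch, not original source) The unlink above operates
+// on lists threaded through the PFN database by page frame index rather
+// than by pointer, with MM_EMPTY_LIST marking both ends.  A minimal
+// user-mode model of the same unlink, assuming simplified node and list
+// types:
+//
+#if 0 // illustrative only
+#define EMPTY ((ULONG)-1)
+
+typedef struct { ULONG Flink; ULONG Blink; } NODE;
+typedef struct { ULONG Flink; ULONG Blink; ULONG Total; } LIST;
+
+VOID
+UnlinkByIndex (LIST *Head, NODE *Db, ULONG Index)
+{
+    ULONG Next = Db[Index].Flink;       // successor, by index
+    ULONG Prev = Db[Index].Blink;       // predecessor, by index
+
+    if (Next == EMPTY) {
+        Head->Blink = Prev;             // removing the tail
+    } else {
+        Db[Next].Blink = Prev;
+    }
+
+    if (Prev == EMPTY) {
+        Head->Flink = Next;             // removing the head
+    } else {
+        Db[Prev].Flink = Next;
+    }
+
+    Db[Index].Flink = Db[Index].Blink = 0;
+    Head->Total -= 1;
+}
+#endif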
+
+
+VOID
+FASTCALL
+MiInsertFrontModifiedNoWrite (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure inserts a page at the FRONT of the modified no
+ write list.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to insert in the
+ list.
+
+Return Value:
+
+ none.
+
+Environment:
+
+    Must be holding the PFN database mutex with APCs disabled.
+
+--*/
+
+{
+ ULONG first;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+
+ MM_PFN_LOCK_ASSERT();
+ ASSERT ((PageFrameIndex != 0) && (PageFrameIndex <= MmHighestPhysicalPage) &&
+ (PageFrameIndex >= MmLowestPhysicalPage));
+
+ //
+ // Check to ensure the reference count for the page
+ // is zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
+
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
+
+ MmModifiedNoWritePageListHead.Total += 1; // One more page on the list.
+
+ first = MmModifiedNoWritePageListHead.Flink;
+ if (first == MM_EMPTY_LIST) {
+
+ //
+        // List is empty, add the page to the ListHead.
+ //
+
+ MmModifiedNoWritePageListHead.Blink = PageFrameIndex;
+ } else {
+ Pfn2 = MI_PFN_ELEMENT (first);
+ Pfn2->u2.Blink = PageFrameIndex;
+ }
+
+ MmModifiedNoWritePageListHead.Flink = PageFrameIndex;
+ Pfn1->u1.Flink = first;
+ Pfn1->u2.Blink = MM_EMPTY_LIST;
+ Pfn1->u3.e1.PageLocation = ModifiedNoWritePageList;
+ return;
+}
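+
+//
+// N.B. (editor's sketch, not original source) The same index-linked
+// list model as the sketch above, showing the push-front performed by
+// this routine:
+//
+#if 0 // illustrative only
+VOID
+PushFront (LIST *Head, NODE *Db, ULONG Index)
+{
+    ULONG First = Head->Flink;
+
+    if (First == EMPTY) {
+        Head->Blink = Index;            // list was empty; new tail too
+    } else {
+        Db[First].Blink = Index;
+    }
+
+    Head->Flink = Index;
+    Db[Index].Flink = First;
+    Db[Index].Blink = EMPTY;
+    Head->Total += 1;
+}
+#endif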
+
+
+#if 0
+PVOID MmCompressionWorkSpace;
+ULONG MmCompressionWorkSpaceSize;
+PCHAR MmCompressedBuffer;
+
+VOID
+MiInitializeCompression (VOID)
+{
+ NTSTATUS status;
+ ULONG Frag;
+
+ status = RtlGetCompressionWorkSpaceSize (COMPRESSION_FORMAT_LZNT1,
+ &MmCompressionWorkSpaceSize,
+ &Frag
+ );
+ ASSERT (NT_SUCCESS (status));
+ MmCompressionWorkSpace = ExAllocatePoolWithTag (NonPagedPool,
+                         MmCompressionWorkSpaceSize, ' mM');
+    MmCompressedBuffer = ExAllocatePoolWithTag (NonPagedPool, PAGE_SIZE, ' mM');
+ return;
+}
+
+ULONG MmCompressionStats[(PAGE_SIZE/256) + 1];
+
+ULONG
+MiCompressPage (
+ IN PVOID Input
+ )
+
+{
+ ULONG Size;
+ NTSTATUS status;
+
+ status = RtlCompressBuffer (COMPRESSION_FORMAT_LZNT1,
+ (PCHAR)Input,
+ PAGE_SIZE,
+ MmCompressedBuffer,
+ PAGE_SIZE,
+ 4096,
+ &Size,
+ (PVOID)MmCompressionWorkSpace);
+ if (!NT_SUCCESS (status)) {
+ KdPrint(("MM:compress failed %lx\n",status));
+ MmCompressionStats[4096/256] += 1;
+ } else {
+ MmCompressionStats[Size/256] += 1;
+ }
+
+ return Size;
+}
+#endif //0
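+
+//
+// N.B. (editor's note) MmCompressionStats above is a histogram of
+// compressed page sizes in 256-byte buckets; a page whose compression
+// fails is counted in the final (4096/256) bucket.
+//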
diff --git a/private/ntos/mm/ppc/datappc.c b/private/ntos/mm/ppc/datappc.c
new file mode 100644
index 000000000..bc7946fc5
--- /dev/null
+++ b/private/ntos/mm/ppc/datappc.c
@@ -0,0 +1,136 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ datappc.c
+
+Abstract:
+
+ This module contains the private hardware specific global storage for
+ the memory management subsystem.
+
+Author:
+
+ Lou Perazzoli (loup) 27-Mar-1990
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-93
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//
+// A zero Pte.
+//
+
+MMPTE ZeroPte = { 0 };
+
+
+//
+// A kernel zero PTE.
+//
+
+MMPTE ZeroKernelPte = { 0 };
+
+
+MMPTE ValidKernelPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidUserPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidPtePte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidPdePde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidKernelPde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE DemandZeroPde = { MM_READWRITE << 3 };
+
+
+MMPTE DemandZeroPte = { MM_READWRITE << 3 };
+
+
+MMPTE TransitionPde = { MM_PTE_TRANSITION_MASK | (MM_READWRITE << 3) };
+
+
+MMPTE PrototypePte = { 0xFFFFF000 | (MM_READWRITE << 3) | MM_PTE_PROTOTYPE_MASK };
+
+
+//
+// PTE which generates an access violation when referenced.
+//
+
+MMPTE NoAccessPte = {MM_NOACCESS << 3};
+
+
+//
+// Pool start and end.
+//
+
+PVOID MmNonPagedPoolStart;
+
+PVOID MmNonPagedPoolEnd = ((PVOID)MM_NONPAGED_POOL_END);
+
+PVOID MmPagedPoolStart = ((PVOID)MM_PAGED_POOL_START);
+
+PVOID MmPagedPoolEnd;
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+
+MMPFNLIST MmStandbyPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST
+ };
+
+
+#endif
+
+PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+
+//
+// Color tables for modified pages destined for the paging file.
+//
+
+MMPFNLIST MmModifiedPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST};
+
+ULONG MmSecondaryColorMask;
+
+//
+// Count of the number of modified pages destined for the paging file.
+//
+
+ULONG MmTotalPagesForPagingFile = 0;
+
+//
+// PTE reserved for mapping physical data for debugger.
+// Use 1 page from last 4MB of virtual address space
+// reserved for the HAL.
+//
+
+PMMPTE MmDebugPte = (MiGetPteAddress((PVOID)MM_HAL_RESERVED));
+
+
+//
+// 16 PTEs reserved for mapping MDLs (64k max).
+//
+
+PMMPTE MmCrashDumpPte = (MiGetPteAddress((PVOID)MM_HAL_RESERVED));
+
diff --git a/private/ntos/mm/ppc/debugsup.c b/private/ntos/mm/ppc/debugsup.c
new file mode 100644
index 000000000..bd71d496a
--- /dev/null
+++ b/private/ntos/mm/ppc/debugsup.c
@@ -0,0 +1,199 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ debugsup.c
+
+Abstract:
+
+ This module contains routines which provide support for the
+ kernel debugger.
+
+Author:
+
+ Lou Perazzoli (loup) 02-Aug-90
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-93
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+PVOID
+MmDbgReadCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ PowerPC implementation specific:
+
+ This routine returns the virtual address which is valid (mapped)
+ for read access.
+
+ The address may be within the PowerPC kernel BAT or may be
+ otherwise valid and readable.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ Returns NULL if the address is not valid or readable, otherwise
+ returns the virtual address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if ((VirtualAddress >= (PVOID)KIPCR) &&
+ (VirtualAddress < (PVOID)(KIPCR2 + PAGE_SIZE))) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
+
+PVOID
+MmDbgWriteCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ PowerPC implementation specific:
+
+ This routine returns the virtual address which is valid (mapped)
+ for write access.
+
+ The address may be within the PowerPC kernel BAT or may be
+ otherwise valid and writable.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ Returns NULL if the address is not valid or writable, otherwise
+ returns the virtual address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if ((VirtualAddress >= (PVOID)KIPCR) &&
+ (VirtualAddress < (PVOID)(KIPCR2 + PAGE_SIZE))) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ //
+ // This is being added back in permanently since the PowerPC
+ // hardware debug registers break in before the instruction
+ // is executed. This will generally allow the kernel debugger
+ // to step over the instruction that triggered the hardware
+ // debug register breakpoint.
+ //
+
+ if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) {
+
+ // This code is similar in spirit to that in the MIPS version.
+ // It returns a writable alias for breakpoints in user pages.
+ // However, it uses the virtual address reserved for the debugger,
+ // rather than the wired-in KSEG0 translation available in MIPS.
+ //
+ // N.B. Microsoft says kernel debugger can't do user code at all.
+
+ return MmDbgTranslatePhysicalAddress (
+ MmGetPhysicalAddress (VirtualAddress) );
+ }
+
+ PointerPte = MiGetPteAddress (VirtualAddress);
+ if (PointerPte->u.Hard.Write == 0) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
+
+PVOID
+MmDbgTranslatePhysicalAddress (
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ PowerPC implementation specific:
+
+ This routine maps the specified physical address and returns
+ the virtual address which maps the physical address.
+
+    The next call to MmDbgTranslatePhysicalAddress removes the
+    previous physical address translation, hence only a single
+    physical address can be examined at a time (it can't cross page
+    boundaries).
+
+Arguments:
+
+    PhysicalAddress - Supplies the physical address to map and translate.
+
+Return Value:
+
+    The virtual address which corresponds to the physical address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PVOID BaseAddress;
+
+ BaseAddress = MiGetVirtualAddressMappedByPte (MmDebugPte);
+
+ KiFlushSingleTb (TRUE, BaseAddress);
+
+ *MmDebugPte = ValidKernelPte;
+ MmDebugPte->u.Hard.PageFrameNumber = PhysicalAddress.LowPart >> PAGE_SHIFT;
+
+ return (PVOID)((ULONG)BaseAddress + BYTE_OFFSET(PhysicalAddress.LowPart));
+}
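+
+//
+// N.B. (editor's sketch, not original source) Reading a physical word
+// through the debugger translation; the address and locals here are
+// hypothetical:
+//
+#if 0 // illustrative only
+    {
+        PHYSICAL_ADDRESS Pa;
+        ULONG Value;
+
+        Pa.QuadPart = 0x123450;                        // hypothetical
+        Value = *(PULONG) MmDbgTranslatePhysicalAddress (Pa);
+    }
+#endif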
diff --git a/private/ntos/mm/ppc/hypermap.c b/private/ntos/mm/ppc/hypermap.c
new file mode 100644
index 000000000..fd07b80af
--- /dev/null
+++ b/private/ntos/mm/ppc/hypermap.c
@@ -0,0 +1,344 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ hypermap.c
+
+Abstract:
+
+ This module contains the routines which map physical pages into
+ reserved PTEs within hyper space.
+
+ This module is machine dependent. This version is targetted
+ for PowerPC.
+
+Author:
+
+ Lou Perazzoli (loup) 5-Apr-1989
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 11-Oct-1993
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+PVOID
+MiMapPageInHyperSpace (
+ IN ULONG PageFrameIndex,
+ IN PKIRQL OldIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps the specified physical page into hyperspace
+ and returns the virtual address that maps the page.
+
+ ************************************
+ * *
+ * Returns with a spin lock held!!! *
+ * *
+ ************************************
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Virtual address in hyperspace that maps the page.
+
+ RETURNS WITH THE HYPERSPACE SPIN LOCK HELD!!!!
+
+ The routine MiUnmapPageInHyperSpace MUST be called to release the lock!!!!
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ ULONG i;
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ //
+ // Find the proper location in hyper space and map the page there.
+ //
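+    // N.B. (editor's note) The first reserved PTE doubles as an
+    // allocation cursor: its PageFrameNumber field counts down as
+    // slots are handed out, and the final allocation maps the first
+    // PTE itself, so the next caller finds it valid.  That is the
+    // condition tested below, which triggers a TB flush and a reset
+    // of the entire reserved range.
+    //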
+
+ LOCK_HYPERSPACE(OldIrql);
+ PointerPte = MmFirstReservedMappingPte;
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+        // All the pages reserved for mapping have been used,
+ // flush the TB and reinitialize the pages.
+ //
+
+ RtlZeroMemory ((PVOID)MmFirstReservedMappingPte,
+ (NUMBER_OF_MAPPING_PTES + 1) * sizeof(MMPTE));
+ PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
+ KeFlushEntireTb (TRUE, FALSE);
+
+ }
+
+ //
+ // Get the offset to the first free PTE.
+ //
+
+ i = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Change the offset for the next time through.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = i - 1;
+
+ //
+ // Point to the free entry and make it valid.
+ //
+
+ PointerPte += i;
+
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+
+ return MiGetVirtualAddressMappedByPte (PointerPte);
+}
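+
+//
+// N.B. (editor's sketch, not original source) Typical usage pairs the
+// map with MiUnmapPageInHyperSpace, which releases the spin lock taken
+// above; Buffer and PageFrameIndex are hypothetical locals:
+//
+#if 0 // illustrative only
+    {
+        KIRQL OldIrql;
+        PVOID Va;
+
+        Va = MiMapPageInHyperSpace (PageFrameIndex, &OldIrql);
+        RtlCopyMemory (Buffer, Va, PAGE_SIZE);     // touch the mapped page
+        MiUnmapPageInHyperSpace (OldIrql);         // drops the spin lock
+    }
+#endif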
+
+PVOID
+MiMapImageHeaderInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps the specified physical page into hyperspace
+ at the address reserved explicitly for image page header mapping
+ and returns the virtual address that maps the page. No other
+ hyperspace maps will affect this map. If another thread attempts
+ to map an image at the same time, it will be forced to wait until
+ this header is unmapped.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Virtual address in hyperspace that maps the page.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ LOCK_PFN (OldIrql);
+
+ while (PointerPte->u.Long != 0) {
+
+ //
+ // If there is no event specified, set one up.
+ //
+
+ if (MmWorkingSetList->WaitingForImageMapping == (PKEVENT)NULL) {
+
+ //
+ // Set the global event into the field and wait for it.
+ //
+
+ MmWorkingSetList->WaitingForImageMapping = &MmImageMappingPteEvent;
+ }
+
+ //
+ // Release the PFN lock and wait on the event in an
+ // atomic operation.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(MmWorkingSetList->WaitingForImageMapping,
+ Executive,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ LOCK_PFN (OldIrql);
+ }
+
+ ASSERT (PointerPte->u.Long == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ UNLOCK_PFN (OldIrql);
+
+ return (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+}
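+
+//
+// N.B. (editor's note) The image-mapping PTE itself acts as the lock
+// word in the routine above: a nonzero PTE means another thread owns
+// the mapping, and waiters block on MmImageMappingPteEvent, which
+// MiUnmapImageHeaderInHyperSpace pulses after clearing the PTE under
+// the PFN lock.
+//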
+
+VOID
+MiUnmapImageHeaderInHyperSpace (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure unmaps the PTE reserved for mapping the image
+ header, flushes the TB, and, if the WaitingForImageMapping field
+ is not NULL, sets the specified event.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+ PKEVENT Event;
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ TempPte.u.Long = 0;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Capture the current state of the event field and clear it out.
+ //
+
+ Event = MmWorkingSetList->WaitingForImageMapping;
+
+ MmWorkingSetList->WaitingForImageMapping = (PKEVENT)NULL;
+
+ ASSERT (PointerPte->u.Long != 0);
+
+ KeFlushSingleTb (IMAGE_MAPPING_PTE,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Hard);
+
+ UNLOCK_PFN (OldIrql);
+
+ if (Event != (PKEVENT)NULL) {
+
+ //
+ // If there was an event specified, set the event.
+ //
+
+ KePulseEvent (Event, 0, FALSE);
+ }
+
+ return;
+}
+
+PVOID
+MiMapPageToZeroInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure maps the specified physical page into hyper space
+ and returns the virtual address which maps the page.
+
+ NOTE: it maps it into the same location reserved for fork operations!!
+ This is only to be used by the zeroing page thread.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the virtual address where the specified physical page was
+ mapped.
+
+Environment:
+
+ Must be holding the PFN lock.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ PointerPte = MiGetPteAddress (ZEROING_PAGE_PTE);
+
+ TempPte.u.Long = 0;
+
+ KeFlushSingleTb (ZEROING_PAGE_PTE,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Hard);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ return ZEROING_PAGE_PTE;
+}
diff --git a/private/ntos/mm/ppc/initppc.c b/private/ntos/mm/ppc/initppc.c
new file mode 100644
index 000000000..e2a9f972e
--- /dev/null
+++ b/private/ntos/mm/ppc/initppc.c
@@ -0,0 +1,874 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ initppc.c
+
+Abstract:
+
+ This module contains the machine dependent initialization for the
+ memory management component. It is specifically tailored to the
+ PowerPC environment.
+
+Author:
+
+ Lou Perazzoli (loup) 3-Apr-1990
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 8-Oct-1993
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//
+// Local definitions
+//
+
+#define _16MB ((16*1024*1024)/PAGE_SIZE)
+
+
+VOID
+MiInitMachineDependent (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This routine performs the necessary operations to enable virtual
+ memory. This includes building the page directory page, building
+    page table pages to map the code section, the data section, the
+    stack section, and the trap handler.
+
+ It also initializes the PFN database and populates the free list.
+
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ ULONG i, j;
+ ULONG HighPage;
+ ULONG PdePageNumber;
+ ULONG PdePage;
+ ULONG PageFrameIndex;
+ ULONG NextPhysicalPage;
+ ULONG PfnAllocation;
+ ULONG NumberOfPages;
+ ULONG MaxPool;
+ KIRQL OldIrql;
+ PEPROCESS CurrentProcess;
+ ULONG DirBase;
+ ULONG MostFreePage = 0;
+ ULONG MostFreeLowMem = 0;
+ PLIST_ENTRY NextMd;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor = NULL;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem = NULL;
+ PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
+ MMPTE TempPte;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE Pde;
+ PMMPTE StartPde;
+ PMMPTE EndPde;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ ULONG va;
+
+ PointerPte = MiGetPdeAddress (PDE_BASE);
+
+// N.B. this will cause first HPT miss fault, DSI in real0.s should fix it!
+ PdePageNumber = PointerPte->u.Hard.PageFrameNumber;
+
+ DirBase = PdePageNumber << PAGE_SHIFT;
+
+ PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = DirBase;
+
+ KeSweepDcache (FALSE);
+
+ //
+ // Get the lower bound of the free physical memory and the
+ // number of physical pages by walking the memory descriptor lists.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount - 1;
+ MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
+
+ if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
+ MmLowestPhysicalPage = MemoryDescriptor->BasePage;
+ }
+
+ if (HighPage > MmHighestPhysicalPage) {
+ MmHighestPhysicalPage = HighPage;
+ }
+
+ //
+ // Locate the largest free block and the largest free block below 16MB.
+ //
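+        // N.B. (editor's note) For the low-memory candidate only the
+        // portion of the block below 16MB is useful, so MostFreeLowMem
+        // is bounded by (16MB - BasePage) before being clamped to the
+        // descriptor's page count.
+        //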
+
+ if ((MemoryDescriptor->MemoryType == LoaderFree) ||
+ (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
+ (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
+ (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
+
+ if ((MemoryDescriptor->BasePage < _16MB) &&
+ (MostFreeLowMem < MemoryDescriptor->PageCount) &&
+ (MostFreeLowMem < ((ULONG)_16MB - MemoryDescriptor->BasePage))) {
+
+ MostFreeLowMem = (ULONG)_16MB - MemoryDescriptor->BasePage;
+ if (MemoryDescriptor->PageCount < MostFreeLowMem) {
+ MostFreeLowMem = MemoryDescriptor->PageCount;
+ }
+ FreeDescriptorLowMem = MemoryDescriptor;
+
+ } else if (MemoryDescriptor->PageCount > MostFreePage) {
+
+ MostFreePage = MemoryDescriptor->PageCount;
+ FreeDescriptor = MemoryDescriptor;
+ }
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+ // This printout must be updated when the HAL goes to unicode
+ //
+
+ if (MmNumberOfPhysicalPages < 1024) {
+ KeBugCheckEx (INSTALL_MORE_MEMORY,
+ MmNumberOfPhysicalPages,
+ MmLowestPhysicalPage,
+ MmHighestPhysicalPage,
+ 0);
+ }
+
+ //
+    // Build non-paged pool using the physical pages following the
+    // data page. Non-paged pool grows from the high range of the
+    // virtual address space and expands downward.
+ //
+ // At this time non-paged pool is constructed so virtual addresses
+ // are also physically contiguous.
+ //
+
+ if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
+                        (7 * (MmNumberOfPhysicalPages >> 3))) {
+
+ //
+        // More than 7/8 of memory allocated to nonpaged pool, reset to 0.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = 0;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
+
+ //
+ // Calculate the size of nonpaged pool.
+        // Use the minimum size, then for every MB above 4MB add extra
+        // pages.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
+
+ MmSizeOfNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMinAdditionNonPagedPoolPerMb;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
+ MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
+ }
+
+ //
+ // Align to page size boundary.
+ //
+
+ MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
+
+ //
+ // Calculate the maximum size of pool.
+ //
+
+ if (MmMaximumNonPagedPoolInBytes == 0) {
+
+ //
+        // Calculate the size of nonpaged pool. If 4MB or less, use
+        // the minimum size, then for every MB above 4MB add extra
+        // pages.
+ //
+
+ MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
+
+ //
+ // Make sure enough expansion for pfn database exists.
+ //
+
+ MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ MmMaximumNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMaxAdditionNonPagedPoolPerMb;
+ }
+
+ MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
+ (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ if (MmMaximumNonPagedPoolInBytes < MaxPool) {
+ MmMaximumNonPagedPoolInBytes = MaxPool;
+ }
+
+ if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
+ MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
+ }
+
+ MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
+ - (MmMaximumNonPagedPoolInBytes - 1));
+
+ MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
+
+ //
+ // Calculate the starting PDE for the system PTE pool which is
+ // right below the nonpaged pool.
+ //
+
+ MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
+ (~PAGE_DIRECTORY_MASK));
+
+ if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
+ MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
+ MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart -
+ (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
+ ASSERT (MmNumberOfSystemPtes > 1000);
+ }
+
+ StartPde = MiGetPdeAddress (MmNonPagedSystemStart);
+
+ EndPde = MiGetPdeAddress((PVOID)((PCHAR)MmNonPagedPoolEnd - 1));
+
+ ASSERT ((ULONG)(EndPde - StartPde) < FreeDescriptorLowMem->PageCount);
+
+ //
+ // Start building the nonpaged pool with the largest free chunk of memory
+ // below 16MB.
+ //
+
+ NextPhysicalPage = FreeDescriptorLowMem->BasePage;
+ NumberOfPages = FreeDescriptorLowMem->PageCount;
+ TempPte = ValidKernelPte;
+
+ while (StartPde <= EndPde) {
+ if (StartPde->u.Hard.Valid == 0) {
+
+ //
+ // Map in a page directory page.
+ //
+
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ *StartPde = TempPte;
+
+ }
+ StartPde += 1;
+ }
+
+ ASSERT(NumberOfPages > 0);
+
+ //
+ // Zero the PTEs before nonpaged pool.
+ //
+
+ StartPde = MiGetPteAddress(MmNonPagedSystemStart);
+ PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
+
+ RtlZeroMemory (StartPde, ((ULONG)PointerPte - (ULONG)StartPde));
+
+ //
+ // Fill in the PTEs for non-paged pool.
+ //
+
+ LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes - 1);
+ while (PointerPte <= LastPte) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ PointerPte++;
+ }
+
+ //
+ // Zero the remaining PTEs (if any).
+ //
+
+ while (((ULONG)PointerPte & (PAGE_SIZE - 1)) != 0) {
+ *PointerPte = ZeroKernelPte;
+ PointerPte++;
+ }
+
+ MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
+
+ //
+ // Non-paged pages now exist, build the pool structures.
+ //
+
+ MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes);
+ MiInitializeNonPagedPool (MmNonPagedPoolStart);
+
+ //
+ // Before Non-paged pool can be used, the PFN database must
+ // be built. This is due to the fact that the start and end of
+ // allocation bits for nonpaged pool are maintained in the
+ // PFN elements for the corresponding pages.
+ //
+
+ //
+ // Calculate the number of pages required from page zero to
+ // the highest page.
+ //
+    // Get the number of secondary colors and add the array for tracking
+ // secondary colors to the end of the PFN database.
+ //
+
+ //
+ // Get secondary color value from registry.
+ //
+
+ if (MmSecondaryColors == 0) {
+ MmSecondaryColors = PCR->SecondLevelDcacheSize;
+ }
+
+ MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
+
+ //
+ // Make sure value is power of two and within limits.
+ //
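+    // N.B. (editor's note) A nonzero power of two has exactly one bit
+    // set, so (x & (x - 1)) is zero precisely for powers of two; any
+    // other value fails the first test below and falls back to the
+    // default.
+    //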
+
+ if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) ||
+ (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
+ (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
+ MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
+ }
+
+ MmSecondaryColorMask = (MmSecondaryColors - 1) & ~MM_COLOR_MASK;
+
+ PfnAllocation = 1 + ((((MmHighestPhysicalPage + 1) * sizeof(MMPFN)) +
+ (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
+ >> PAGE_SHIFT);
+
+ //
+    // Calculate the start of the PFN database (it starts at physical
+    // page zero, even if the lowest physical page is not zero).
+ //
+
+ PointerPte = MiReserveSystemPtes (PfnAllocation,
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ TRUE);
+
+ MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
+
+ //
+ // Go through the memory descriptors and for each physical page
+    // make sure the PFN database has a valid PTE to map it. This allows
+ // machines with sparse physical memory to have a minimal PFN
+ // database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage));
+
+ LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage +
+ MemoryDescriptor->PageCount))) - 1);
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
+ &MmPfnDatabase[MmHighestPhysicalPage + 1];
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ //
+ // Make sure the PTEs are mapped.
+ //
+
+
+ if (!MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0])) {
+ PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
+
+ LastPte = MiGetPteAddress (
+ (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ }
+
+ for (i = 0; i < MmSecondaryColors; i++) {
+ MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ }
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList;
+ MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST;
+ }
+#endif
+
+ //
+ // Go through the page table entries and for any page which is
+ // valid, update the corresponding PFN database element.
+ //
+
+ Pde = MiGetPdeAddress (NULL);
+ PointerPde = MiGetPdeAddress (PTE_BASE);
+ va = 0;
+
+ for (i = 0; i < PDE_PER_PAGE; i++) {
+ if (Pde->u.Hard.Valid == 1) {
+
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PdePage);
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (Pde));
+
+ PointerPte = MiGetPteAddress (va);
+
+ for (j = 0 ; j < PTE_PER_PAGE; j++) {
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ Pfn1->u2.ShareCount += 1;
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ if (PageFrameIndex <= MmHighestPhysicalPage) {
+
+ Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
+
+ if (MmIsAddressValid(Pfn2) &&
+ MmIsAddressValid((PUCHAR)(Pfn2+1)-1)) {
+
+ Pfn2->PteFrame = PdePage;
+ Pfn2->PteAddress = PointerPte;
+ Pfn2->u2.ShareCount += 1;
+ Pfn2->u3.e2.ReferenceCount = 1;
+ Pfn2->u3.e1.PageLocation = ActiveAndValid;
+ Pfn2->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (
+ PointerPte));
+ }
+ }
+ }
+ va += PAGE_SIZE;
+ PointerPte++;
+ }
+ } else {
+ va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
+ }
+ Pde++;
+ }
+
+ //
+ // If page zero is still unused, mark it as in use. This is
+ // temporary as we want to find bugs where a physical page
+ // is specified as zero.
+ //
+
+ Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Make the reference count non-zero and point it into a
+ // page directory.
+ //
+
+ Pde = MiGetPdeAddress (0xb0000000);
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (Pde));
+ }
+
+ // end of temporary set to physical page zero.
+
+ //
+ //
+ // Walk through the memory descriptors and add pages to the
+ // free list in the PFN database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ i = MemoryDescriptor->PageCount;
+ NextPhysicalPage = MemoryDescriptor->BasePage;
+
+ switch (MemoryDescriptor->MemoryType) {
+ case LoaderBad:
+ while (i != 0) {
+ MiInsertPageInList (MmPageLocationList[BadPageList],
+ NextPhysicalPage);
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ case LoaderFree:
+ case LoaderLoadedProgram:
+ case LoaderFirmwareTemporary:
+ case LoaderOsloaderStack:
+
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Set the PTE address to the phyiscal page for
+ // virtual address alignment checking.
+ //
+
+ Pfn1->PteAddress =
+ (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
+
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (
+ Pfn1->PteAddress));
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ NextPhysicalPage);
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ default:
+
+ PointerPte = MiGetPteAddress (KSEG0_BASE +
+ (NextPhysicalPage << PAGE_SHIFT));
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+
+ //
+ // Set page as in use.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (
+ PointerPte));
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ PointerPte += 1;
+ }
+
+ break;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+ // Indicate that the PFN database is allocated in NonPaged pool.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmLowestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmHighestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ //
+    // Indicate that the nonpaged must-succeed pool is allocated in
+    // nonpaged pool.
+ //
+
+ i = MmSizeOfNonPagedMustSucceed;
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(MmNonPagedMustSucceed)->u.Hard.PageFrameNumber);
+
+ while ((LONG)i > 0) {
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ i -= PAGE_SIZE;
+ Pfn1 += 1;
+ }
+
+ KeInitializeSpinLock (&MmSystemSpaceLock);
+ KeInitializeSpinLock (&MmPfnLock);
+
+ //
+ // Initialize the nonpaged available PTEs for mapping I/O space
+ // and kernel stacks.
+ //
+
+ PointerPte = MiGetPteAddress (MmNonPagedSystemStart);
+
+ PointerPte = (PMMPTE)PAGE_ALIGN (PointerPte);
+
+ MmNumberOfSystemPtes = MiGetPteAddress(MmNonPagedPoolStart) - PointerPte - 1;
+
+ MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
+
+ //
+ // Initialize the nonpaged pool.
+ //
+
+ InitializePool(NonPagedPool,0);
+
+ //
+ // Initialize memory management structures for this process.
+ //
+
+ //
+ // Build working set list. System initialization has created
+ // a PTE for hyperspace.
+ //
+ // Note, we can't remove a zeroed page as hyper space does not
+ // exist and we map non-zeroed pages into hyper space to zero.
+ //
+
+ PointerPte = MiGetPdeAddress(HYPER_SPACE);
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PointerPte->u.Hard.Write = 1;
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Point to the page table page we just created and zero it.
+ //
+
+ PointerPte = MiGetPteAddress(HYPER_SPACE);
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ //
+ // Hyper space now exists, set the necessary variables.
+ //
+
+ MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
+ MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
+
+ MmWorkingSetList = WORKING_SET_LIST;
+ MmWsle = (PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL));
+
+ //
+ // Initialize this process's memory management structures including
+ // the working set list.
+ //
+
+ //
+ // The pfn element for the page directory has already been initialized,
+ // zero the reference count and the share count so they won't be
+ // wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ Pfn1->u3.e1.PageColor = 0;
+
+ //
+ // The pfn element for the PDE which maps hyperspace has already
+ // been initialized, zero the reference count and the share count
+ // so they won't be wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ Pfn1->u3.e1.PageColor = 1;
+
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+ //
+ // Get a page for the working set list and map it into the Page
+ // directory at the page after hyperspace.
+ //
+
+ PointerPte = MiGetPteAddress (HYPER_SPACE);
+ PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE(PointerPte));
+ CurrentProcess->WorkingSetPage = PageFrameIndex;
+
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ PointerPde = MiGetPdeAddress (HYPER_SPACE) + 1;
+
+ *PointerPde = TempPte;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ TempPte = *PointerPde;
+ TempPte.u.Hard.Valid = 0;
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeFlushSingleTb (PointerPte,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPde,
+ TempPte.u.Hard);
+
+ KeLowerIrql(OldIrql);
+
+ CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
+ CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
+
+ MmInitializeProcessAddressSpace (CurrentProcess,
+ (PEPROCESS)NULL,
+ (PVOID)NULL);
+
+ *PointerPde = ZeroKernelPte;
+
+ //
+    // Check to see if keeping the secondary page structures at the end
+    // of the PFN database is a waste of memory, and if so, copy them
+    // to nonpaged pool.
+    //
+    // If the PFN database ends on a page-aligned boundary and the
+    // size of the two arrays is less than a page, free the page
+    // and allocate nonpaged pool for this.
+ //
+
+ if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
+ ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
+
+ PMMCOLOR_TABLES c;
+
+ c = MmFreePagesByColor[0];
+
+ MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
+ ' mM');
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ RtlMoveMemory (MmFreePagesByColor[0],
+ c,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
+
+ //
+ // Free the page.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(c)) {
+ PointerPte = MiGetPteAddress(c);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ *PointerPte = ZeroKernelPte;
+ } else {
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c);
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT ((Pfn1->u3.e2.ReferenceCount <= 1) && (Pfn1->u2.ShareCount <= 1));
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ MI_SET_PFN_DELETED (Pfn1);
+#if DBG
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+#endif //DBG
+ MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex);
+ }
+
+ return;
+}
+
diff --git a/private/ntos/mm/ppc/mippc.h b/private/ntos/mm/ppc/mippc.h
new file mode 100644
index 000000000..c8f2ea751
--- /dev/null
+++ b/private/ntos/mm/ppc/mippc.h
@@ -0,0 +1,2034 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ mippc.h
+
+Abstract:
+
+ This module contains the private data structures and procedure
+ prototypes for the hardware dependent portion of the
+ memory management system.
+
+ It is specifically tailored for PowerPC.
+
+Author:
+
+ Lou Perazzoli (loup) 9-Jan-1991
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-1993
+
+Revision History:
+
+--*/
+
+/*++
+
+ Virtual Memory Layout for PowerPC is:
+
+ +------------------------------------+
+ 00000000 | |
+ | |
+ | |
+ | User Mode Addresses |
+ | |
+ | All pages within this range |
+ | are potentially accessible while |
+ | the CPU is in USER mode. |
+ | |
+ | |
+ +------------------------------------+
+ 7fff0000 | 64k No Access Area |
+ +------------------------------------+
+ 80000000 | | KSEG0
+ | OsLoader loads critical parts |
+ | of boot code and data in |
+ | this region. Mapped by BAT0. |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ 8xxx0000 | | KSEG1 KSEG2
+ | OsLoader loads remaining boot |
+ | code and data here. Mapped |
+ | by segment register 8. |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ 8yyy0000 | |
+ | |
+ | Unused NO ACCESS |
+ | |
+ | |
+ +------------------------------------+
+ 90000000 | System Cache Working Set |
+ 90400000 | System Cache |
+ | |
+ | |
+ | |
+ AE000000 | Kernel mode access only. |
+ +------------------------------------+
+ C0000000 | Page Table Pages mapped through |
+ | this 4mb region |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ C0400000 | HyperSpace - working set lists |
+ | and per process memory management |
+ | structures mapped in this 4mb |
+ | region. |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C0800000 | NO ACCESS AREA |
+ | |
+ +------------------------------------+
+ D0000000 | System mapped views |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ D3000000 | Start of paged system area |
+ | Kernel mode access only. |
+ | |
+ | |
+ | |
+ +------------------------------------+
+ E0000000 | |
+ | Kernel mode access only. |
+ | |
+ | |
+ EFBFFFFF | NonPaged System area |
+ +------------------------------------+
+ EFC00000 | Last 4mb reserved for HAL usage |
+ +------------------------------------+
+ F0000000 | Unused, No access. |
+ | |
+ FFFFD000 | Per Processor PCR |
+ FFFFE000 | Shared PCR2 |
+ FFFFF000 | Debugger Page for physical memory |
+ +------------------------------------+
+
+ Segment Register usage
+
+ 0 - 7 User mode addresses, switched at Process Switch time
+ 8 Constant, shared amongst processors and processes.
+ No change on switch to user mode but always invalid for
+ user mode. Very low part of this range is KSEG0, mapped
+ by a BAT register.
+ 9 - A Constant, Shared amongst processors and processes,
+ invalidated while in user mode.
+ C Per process kernel data. invalidated while in user mode.
+ D Constant, Shared amongst processors and processes,
+ invalidated while in user mode.
+ E Constant, shared amongst processors and processes.
+ No change on switch to user mode but always invalid for
+ user mode.
+ F Per processor. Kernel mode access only.
+
+--*/
+
+//
+// PAGE_SIZE for PowerPC is 4k, virtual page is 20 bits with a PAGE_SHIFT
+// byte offset.
+//
+
+#define MM_VIRTUAL_PAGE_SHIFT 20
+
+//
+// Address space layout definitions.
+//
+
+//#define PDE_BASE ((ULONG)0xC0300000)
+
+//#define PTE_BASE ((ULONG)0xC0000000)
+
+#define MM_SYSTEM_RANGE_START (0x80000000)
+
+#define MM_SYSTEM_SPACE_START (0xD0000000)
+
+//
+// N.B. This should ONLY be used for copying PDEs.
+// Segment 15 is only used for PCR pages,
+// hardwired PDE for the debuggers, and
+// crash dump.
+//
+
+#define MM_SYSTEM_SPACE_END (0xFFFFFFFF)
+
+#define MM_HAL_RESERVED (0xFFC00000)
+
+#define PDE_TOP 0xC03FFFFF
+
+#define HYPER_SPACE ((PVOID)0xC0400000)
+
+#define HYPER_SPACE_END 0xC07fffff
+
+//
+// Define the start and maximum size for the system cache.
+// Maximum size 476MB.
+//
+
+#define MM_SYSTEM_CACHE_AND_POOL_DISJOINT 1
+
+#define MM_SYSTEM_CACHE_WORKING_SET (0x90000000)
+
+#define MM_SYSTEM_CACHE_START (0x90400000)
+
+#define MM_SYSTEM_CACHE_END (0xAE000000)
+
+#define MM_MAXIMUM_SYSTEM_CACHE_SIZE \
+ (((ULONG)MM_SYSTEM_CACHE_END - (ULONG)MM_SYSTEM_CACHE_START) >> PAGE_SHIFT)
+
+//
+// Tell MM that boot code and data is pageable.
+//
+
+#define MM_BOOT_CODE_PAGEABLE 1
+
+#define MM_BOOT_CODE_START (0x80000000)
+#define MM_BOOT_CODE_END (0x90000000)
+
+//
+// Define MM_SYSTEM_CACHE_AND_POOL_DISJOINT so that MmCreateProcessAddressSpace
+// knows that it has to do two RtlCopyMemorys to copy the PDEs for the cache
+// and the rest of system space.
+//
+
+#define MM_SYSTEM_CACHE_AND_POOL_DISJOINT 1
+
+
+//
+// Define area for mapping views into system space.
+//
+
+#define MM_SYSTEM_VIEW_START (0xD0000000)
+
+#define MM_SYSTEM_VIEW_SIZE (48*1024*1024)
+
+#define MM_PAGED_POOL_START ((PVOID)(0xD3000000))
+
+#define MM_LOWEST_NONPAGED_SYSTEM_START ((PVOID)(0xE0000000))
+
+#define MmProtopte_Base ((ULONG)0xD3000000)
+
+#define MM_NONPAGED_POOL_END ((PVOID)(0xEFC00000))
+
+#define NON_PAGED_SYSTEM_END ((ULONG)0xEFFFFFF0) //quadword aligned.
+
+//
+// Define absolute minimum and maximum count for system PTEs.
+//
+
+#define MM_MINIMUM_SYSTEM_PTES 9000
+
+#define MM_MAXIMUM_SYSTEM_PTES 35000
+
+#define MM_DEFAULT_SYSTEM_PTES 15000
+
+//
+// Pool limits
+//
+
+//
+// The maximum amount of nonpaged pool that can be initially created.
+//
+
+#define MM_MAX_INITIAL_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The total amount of nonpaged pool (initial pool + expansion + system PTEs).
+//
+
+#define MM_MAX_ADDITIONAL_NONPAGED_POOL ((ULONG)(192*1024*1024))
+
+//
+// The maximum amount of paged pool that can be created.
+//
+
+#define MM_MAX_PAGED_POOL ((ULONG)(176*1024*1024))
+
+#define MM_MAX_TOTAL_POOL (((ULONG)MM_NONPAGED_POOL_END) - ((ULONG)(MM_PAGED_POOL_START)))
+
+
+//
+// Structure layout definitions.
+//
+
+#define PAGE_DIRECTORY_MASK ((ULONG)0x003FFFFF)
+
+#define MM_VA_MAPPED_BY_PDE (0x400000)
+
+// N.B. this is probably a real address, for what purpose?
+#define LOWEST_IO_ADDRESS (0x80000000)
+
+#define PTE_SHIFT (2)
+
+//
+// The number of bits in a physical address.
+//
+
+#define PHYSICAL_ADDRESS_BITS (32)
+
+#define MM_PROTO_PTE_ALIGNMENT ((ULONG)MM_MAXIMUM_NUMBER_OF_COLORS * (ULONG)PAGE_SIZE)
+
+//
+// Maximum number of paging files.
+//
+
+#define MAX_PAGE_FILES 16
+
+//
+// Hyper space definitions.
+//
+
+#define FIRST_MAPPING_PTE ((ULONG)0xC0400000)
+
+#define NUMBER_OF_MAPPING_PTES 255
+
+#define LAST_MAPPING_PTE \
+ ((ULONG)((ULONG)FIRST_MAPPING_PTE + (NUMBER_OF_MAPPING_PTES * PAGE_SIZE)))
+
+#define IMAGE_MAPPING_PTE ((PMMPTE)((ULONG)LAST_MAPPING_PTE + PAGE_SIZE))
+
+#define ZEROING_PAGE_PTE ((PMMPTE)((ULONG)IMAGE_MAPPING_PTE + PAGE_SIZE))
+
+#define WORKING_SET_LIST ((PVOID)((ULONG)ZEROING_PAGE_PTE + PAGE_SIZE))
+
+#define MM_MAXIMUM_WORKING_SET \
+ ((ULONG)((ULONG)2*1024*1024*1024 - 64*1024*1024) >> PAGE_SHIFT) //2Gb-64Mb
+
+#define MM_WORKING_SET_END ((ULONG)0xC07FF000)
+
+//
+// Define masks for fields within the PTE.
+//
+
+#define MM_PTE_PROTOTYPE_MASK 0x1
+#define MM_PTE_VALID_MASK 0x4
+#define MM_PTE_CACHE_DISABLE_MASK 0x28 // CacheInhibit | Guard
+#define MM_PTE_TRANSITION_MASK 0x2
+#define MM_PTE_WRITE_MASK 0x200
+#define MM_PTE_COPY_ON_WRITE_MASK 0x400
+
+//
+// Bit fields to or into PTE to make a PTE valid based on the
+// protection field of the invalid PTE.
+//
+
+#define MM_PTE_NOACCESS 0x0 // not expressable on PowerPC
+#define MM_PTE_READONLY 0x3
+#define MM_PTE_READWRITE (0x3 | MM_PTE_WRITE_MASK)
+#define MM_PTE_WRITECOPY (0x3 | MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_EXECUTE 0x3 // read-only on PowerPC
+#define MM_PTE_EXECUTE_READ 0x3
+#define MM_PTE_EXECUTE_READWRITE (0x3 | MM_PTE_WRITE_MASK)
+#define MM_PTE_EXECUTE_WRITECOPY (0x3 | MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_NOCACHE (MM_PTE_CACHE_DISABLE_MASK)
+#define MM_PTE_GUARD 0x0 // not expressable on PowerPC
+#define MM_PTE_CACHE 0x0
+
+#define MM_PROTECT_FIELD_SHIFT 3
+
+//
+// Zero PTE
+//
+
+#define MM_ZERO_PTE 0
+
+//
+// Zero Kernel PTE
+//
+
+#define MM_ZERO_KERNEL_PTE 0
+
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE.
+//
+
+#define MM_DEMAND_ZERO_WRITE_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE for system space.
+//
+
+#define MM_KERNEL_DEMAND_ZERO_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+//
+// A no access PTE for system space.
+//
+
+#define MM_KERNEL_NOACCESS_PTE (MM_NOACCESS << MM_PROTECT_FIELD_SHIFT)
+
+//
+// Dirty bit definitions for clean and dirty.
+//
+
+#define MM_PTE_CLEAN 3
+#define MM_PTE_DIRTY 0
+
+
+//
+// Kernel stack alignment requirements.
+//
+
+#define MM_STACK_ALIGNMENT 0x0
+#define MM_STACK_OFFSET 0x0
+
+//
+// System process definitions
+//
+
+#define PDE_PER_PAGE ((ULONG)1024)
+
+#define PTE_PER_PAGE ((ULONG)1024)
+
+//
+// Number of page table pages for user addresses.
+//
+
+#define MM_USER_PAGE_TABLE_PAGES (512)
+
+//
+// Indicate the number of page colors required.
+//
+
+#define MM_NUMBER_OF_COLORS 2
+#define MM_MAXIMUM_NUMBER_OF_COLORS 2
+
+//
+// Mask for obtaining color from a physical page number.
+//
+
+#define MM_COLOR_MASK 1
+
+//
+// Define secondary color stride.
+//
+
+#define MM_COLOR_STRIDE 3
+
+//
+// Boundary upon which pages of like color are aligned.
+//
+
+#define MM_COLOR_ALIGNMENT 0x2000
+
+//
+// Mask for isolating color from virtual address.
+//
+
+#define MM_COLOR_MASK_VIRTUAL 0x1000
+
+//
+// Define 256K worth of secondary colors.
+//
+
+#define MM_SECONDARY_COLORS_DEFAULT ((256*1024) >> PAGE_SHIFT)
+
+#define MM_SECONDARY_COLORS_MIN (2)
+
+#define MM_SECONDARY_COLORS_MAX (2048)
+
+//
+// Mask for isolating secondary color from physical page number.
+//
+
+extern ULONG MmSecondaryColorMask;
+
+//
+// Define macro to initialize directory table base.
+//
+
+#define INITIALIZE_DIRECTORY_TABLE_BASE(dirbase,pfn) \
+ *((PULONG)(dirbase)) = ((pfn) << PAGE_SHIFT)
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE (
+// OUT OUTPTE,
+// IN FRAME,
+// IN PMASK,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro makes a valid PTE from a page frame number, protection mask,
+// and owner.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// FRAME - Supplies the page frame number for the PTE.
+//
+// PMASK - Supplies the protection to set in the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE which is being made valid.
+// For prototype PTEs NULL should be specified.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE(OUTPTE,FRAME,PMASK,PPTE) \
+ { \
+ (OUTPTE).u.Long = ((FRAME << 12) | \
+ (MmProtectToPteMask[PMASK]) | \
+ MM_PTE_VALID_MASK); \
+ if (((OUTPTE).u.Hard.Write == 1) && \
+ (((PMMPTE)PPTE) >= MiGetPteAddress(MM_LOWEST_NONPAGED_SYSTEM_START)))\
+ { \
+ (OUTPTE).u.Hard.Dirty = MM_PTE_DIRTY; \
+ } \
+ }
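+
+//
+// N.B. (editor's sketch, not original source) Typical use:
+//
+//      MMPTE TempPte;
+//      MI_MAKE_VALID_PTE (TempPte, PageFrameIndex, MM_READWRITE, PointerPte);
+//      *PointerPte = TempPte;
+//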
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_TRANSITION (
+// IN OUT OUTPTE
+// IN PROTECT
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid pte and turns it into a transition PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the current valid PTE. This PTE is then
+// modified to become a transition PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Valid = 0; \
+ (OUTPTE).u.Trans.Prototype = 0; \
+ (OUTPTE).u.Trans.Protection = PROTECT;
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE (
+// OUT OUTPTE,
+// IN PAGE,
+// IN PROTECT,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro builds a transition PTE for a page frame number with
+// the specified protection.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// PAGE - Supplies the page frame number for the PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// PPTE - Supplies a pointer to the PTE, this is used to determine
+// the owner of the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE(OUTPTE,PAGE,PROTECT,PPTE) \
+ (OUTPTE).u.Long = 0; \
+ (OUTPTE).u.Trans.PageFrameNumber = PAGE; \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Protection = PROTECT;
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE_VALID (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a transition pte and makes it a valid PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE) \
+ (OUTPTE).u.Long = (((PPTE)->u.Long & 0xFFFFF000) | \
+ (MmProtectToPteMask[(PPTE)->u.Trans.Protection]) | \
+ MM_PTE_VALID_MASK);
+
+//++
+//VOID
+//MI_SET_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set dirty.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_DIRTY(PTE) (PTE).u.Hard.Dirty = MM_PTE_DIRTY
+
+
+//++
+//VOID
+//MI_SET_PTE_CLEAN (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro clears the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set clean.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_CLEAN(PTE) (PTE).u.Hard.Dirty = MM_PTE_CLEAN
+
+
+
+//++
+//VOID
+//MI_IS_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to check.
+//
+// Return Value:
+//
+// TRUE if the page is dirty (modified), FALSE otherwise.
+//
+//--
+
+#define MI_IS_PTE_DIRTY(PTE) ((PTE).u.Hard.Dirty != MM_PTE_CLEAN)
+
+
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_BIT_IF_SYSTEM (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global bit if the pointer PTE is within
+// system space.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE becoming valid.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_BIT_IF_SYSTEM(OUTPTE,PPTE)
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_STATE (
+// IN MMPTE PTE,
+// IN ULONG STATE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global state in the PTE. This is a no-op on
+// PowerPC; the macro expands to nothing.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set global state into.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_STATE(PTE,STATE)
+
+
+
+//++
+//VOID
+//MI_ENABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// enabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_ENABLE_CACHING(PTE) \
+ ((PTE).u.Hard.CacheDisable = (PTE).u.Hard.GuardedStorage = 0)
+
+
+//++
+//VOID
+//MI_DISABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// disabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_DISABLE_CACHING(PTE) \
+ ((PTE).u.Hard.CacheDisable = (PTE).u.Hard.GuardedStorage = 1)
+
+
+//++
+//BOOLEAN
+//MI_IS_CACHING_DISABLED (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and returns TRUE if caching is
+// disabled.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the valid PTE.
+//
+// Return Value:
+//
+// TRUE if caching is disabled, FALSE if it is enabled.
+//
+//--
+
+#define MI_IS_CACHING_DISABLED(PPTE) \
+ ((PPTE)->u.Hard.CacheDisable == 1)
+
+
+//++
+//VOID
+//MI_SET_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and indicates that
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPFN - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PFN_DELETED(PPFN) ((PPFN)->PteAddress = (PMMPTE)0xFFFFFFFF)
+
+
+//++
+//BOOLEAN
+//MI_IS_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and determines if
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPFN - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// TRUE if PFN is no longer used, FALSE if it is still being used.
+//
+//--
+
+#define MI_IS_PFN_DELETED(PPFN) \
+ ((PPFN)->PteAddress == (PMMPTE)0xFFFFFFFF)
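+
+// Together these give a one-compare liveness test (illustrative
+// sketch; Pfn1 stands for a caller's PFN element pointer):
+//
+// MI_SET_PFN_DELETED (Pfn1);
+// ASSERT (MI_IS_PFN_DELETED (Pfn1));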
+
+
+//++
+//VOID
+//MI_CHECK_PAGE_ALIGNMENT (
+// IN ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro takes a PFN element number (Page) and checks to see
+// if the virtual alignment for the previous address of the page
+// is compatible with the new address of the page. If they are
+// not compatible, the D cache is flushed.
+//
+// Arguments
+//
+// PAGE - Supplies the PFN element number.
+//
+// COLOR - Supplies the color (virtual alignment) of the new mapping.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_CHECK_PAGE_ALIGNMENT(PAGE,COLOR) \
+{ \
+ PMMPFN PPFN; \
+ ULONG OldColor; \
+ PPFN = MI_PFN_ELEMENT(PAGE); \
+ OldColor = PPFN->u3.e1.PageColor; \
+ if ((COLOR) != OldColor) { \
+ PPFN->u3.e1.PageColor = COLOR; \
+ } \
+}
+
+
+//++
+//VOID
+//MI_INITIALIZE_HYPERSPACE_MAP (
+// HYPER_PAGE
+// );
+//
+// Routine Description:
+//
+// This macro initializes the PTEs reserved for double mapping within
+// hyperspace.
+//
+// Arguments
+//
+// HYPER_PAGE - Physical page number for the page to become hyperspace.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_INITIALIZE_HYPERSPACE_MAP(HYPER_PAGE) \
+ { \
+ PMMPTE Base; \
+ KIRQL OldIrql; \
+ Base = MiMapPageInHyperSpace (HYPER_PAGE, &OldIrql); \
+ Base->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES; \
+ MiUnmapPageInHyperSpace (OldIrql); \
+ }
+
+
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_PTE (
+// IN PMMPTE PTEADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+// PTEADDRESS - Supplies the PTE address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_PTE(PTEADDRESS) \
+ ((ULONG)((MmSystemPageColor += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(PTEADDRESS)) >> 2) & MM_COLOR_MASK))
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_VA (
+// IN PVOID ADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the virtual
+// address that maps the page.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_VA(ADDRESS) \
+ ((ULONG)((MmSystemPageColor += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(ADDRESS)) >> PAGE_SHIFT) & MM_COLOR_MASK))
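+
+// A minimal sketch (illustrative; VirtualAddress stands for the VA the
+// page will map) of how the color macros combine with the page removal
+// routines; the zero-page-first fallback mirrors the pattern used in
+// procsup.c later in this change.
+//
+// ULONG Color;
+// ULONG PageFrameIndex;
+//
+// Color = MI_GET_PAGE_COLOR_FROM_VA (VirtualAddress);
+// PageFrameIndex = MiRemoveZeroPageIfAny (Color);
+// if (PageFrameIndex == 0) {
+// PageFrameIndex = MiRemoveAnyPage (Color);
+// }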
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_PTE_PROCESS (
+// IN PCHAR COLOR,
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the PTE address
+// that maps the page, using (and advancing) the supplied process
+// color seed.
+//
+// Arguments
+//
+// PTE - Supplies the PTE address the page is (or was) mapped at.
+//
+// COLOR - Supplies a pointer to the process color seed.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_PAGE_COLOR_PTE_PROCESS(PTE,COLOR) \
+ ((ULONG)(((*(COLOR)) += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(PTE)) >> 2) & MM_COLOR_MASK))
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_VA_PROCESS (
+// IN PVOID ADDRESS,
+// IN PEPROCESS COLOR
+// );
+//
+// Routine Description:
+//
+// This macro determines the page's color based on the virtual
+// address that maps the page, using (and advancing) the supplied
+// process color seed.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// COLOR - Supplies a pointer to the process color seed.
+//
+// Return Value:
+//
+// The page's color.
+//
+//--
+
+#define MI_PAGE_COLOR_VA_PROCESS(ADDRESS,COLOR) \
+ ((ULONG)(((*(COLOR)) += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(ADDRESS)) >> PAGE_SHIFT) & MM_COLOR_MASK))
+
+
+//++
+//ULONG
+//MI_GET_NEXT_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the next color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the next of.
+//
+// Return Value:
+//
+// Next color in sequence.
+//
+//--
+
+#define MI_GET_NEXT_COLOR(COLOR) ((COLOR + 1) & MM_COLOR_MASK)
+
+
+//++
+//ULONG
+//MI_GET_PREVIOUS_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the previous color in the sequence.
+//
+// Argments
+//
+// COLOR - Supplies the color to return the previous of.
+//
+// Return Value:
+//
+// Previous color in sequence.
+//
+//--
+
+#define MI_GET_PREVIOUS_COLOR(COLOR) ((COLOR - 1) & MM_COLOR_MASK)
+
+#define MI_GET_SECONDARY_COLOR(PAGE,PFN) \
+ ((((ULONG)(PAGE) & MmSecondaryColorMask)) | (PFN)->u3.e1.PageColor)
+
+#define MI_GET_COLOR_FROM_SECONDARY(COLOR) ((COLOR) & MM_COLOR_MASK)
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_BY_COLOR (
+// OUT ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. It does NOT remove the page
+// from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_BY_COLOR(PAGE,COLOR) \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_ANY_COLOR (
+// OUT ULONG PAGE,
+// IN OUT ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. If no page of the desired
+// color exists, all colored lists are searched for a page.
+// It does NOT remove the page from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate and returns the
+// color of the page located.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_ANY_COLOR(PAGE,COLOR) \
+ { \
+ if (MmTotalPagesForPagingFile == 0) { \
+ PAGE = MM_EMPTY_LIST; \
+ } else { \
+ while (MmModifiedPageListByColor[COLOR].Flink == \
+ MM_EMPTY_LIST) { \
+ COLOR = MI_GET_NEXT_COLOR(COLOR); \
+ } \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink; \
+ } \
+ }
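+
+// Usage sketch (illustrative; SecondaryColor stands for a caller's
+// secondary color value): try the preferred color first, then let the
+// any-color form walk the remaining lists. COLOR is updated in place,
+// so the caller learns which list supplied the page.
+//
+// ULONG Page;
+// ULONG Color;
+//
+// Color = MI_GET_COLOR_FROM_SECONDARY (SecondaryColor);
+// MI_GET_MODIFIED_PAGE_BY_COLOR (Page, Color);
+// if (Page == MM_EMPTY_LIST) {
+// MI_GET_MODIFIED_PAGE_ANY_COLOR (Page, Color);
+// }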
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_WRITE_COPY (
+// IN OUT PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if the PTE indicates that the
+// page is writable and if so it clears the write bit and
+// sets the copy-on-write bit.
+//
+// Arguments
+//
+// PTE - Supplies a pointer to the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_WRITE_COPY(PPTE) \
+ if ((PPTE)->u.Hard.Write == 1) { \
+ (PPTE)->u.Hard.CopyOnWrite = 1; \
+ (PPTE)->u.Hard.Dirty = MM_PTE_CLEAN; \
+ }
+
+
+//++
+//ULONG
+//MI_DETERMINE_OWNER (
+// IN MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro examines the virtual address of the PTE and determines
+// if the PTE resides in system space or user space.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE whose owner is to be determined.
+//
+// Return Value:
+//
+// 1 if the owner is USER_MODE, 0 if the owner is KERNEL_MODE.
+//
+//--
+
+#define MI_DETERMINE_OWNER(PPTE) \
+ ((((PPTE) <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) || \
+ ((PPTE) >= MiGetPdeAddress(NULL) && \
+ ((PPTE) <= MiGetPdeAddress(MM_HIGHEST_USER_ADDRESS)))) ? 1 : 0)
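+
+// For example (illustrative): the PTE that maps a user address is
+// owned by user mode, so
+//
+// MI_DETERMINE_OWNER (MiGetPteAddress ((PVOID)0x10000)) == 1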
+
+
+//++
+//VOID
+//MI_SET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE to operate upon.
+//
+// ACCESSED - Supplies the accessed state to set.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED)
+
+
+//++
+//ULONG
+//MI_GET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro returns the state of the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the ACCESSED field.
+//
+//--
+
+#define MI_GET_ACCESSED_IN_PTE(PPTE) 0
+
+
+//++
+//VOID
+//MI_SET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// IN ULONG OWNER
+// );
+//
+// Routine Description:
+//
+// This macro sets the owner field in the PTE.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE to operate upon.
+//
+// OWNER - Supplies the owner state to set.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_OWNER_IN_PTE(PPTE,OWNER)
+
+
+//++
+//ULONG
+//MI_GET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro gets the owner field from the PTE.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the OWNER field.
+//
+//--
+
+#define MI_GET_OWNER_IN_PTE(PPTE) KernelMode
+
+
+// Bit mask used to clear out fields in a PTE before ORing in a paging file location.
+
+#define CLEAR_FOR_PAGE_FILE ((ULONG)(0x0F8))
+
+
+//++
+//VOID
+//SET_PAGING_FILE_INFO (
+// IN OUT MMPTE PPTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// FILEINFO - Supplies the number of the paging file.
+//
+// OFFSET - Supplies the offset into the paging file.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define SET_PAGING_FILE_INFO(PTE,FILEINFO,OFFSET) \
+ ((((PTE).u.Long & CLEAR_FOR_PAGE_FILE) | \
+ (((FILEINFO) << 8) | \
+ (OFFSET << 12))))
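+
+// Encode/decode sketch (illustrative; TempPte stands for the PTE being
+// rewritten): store paging-file 2, offset 0x1234, then read the fields
+// back with the extraction macros defined further below.
+//
+// TempPte.u.Long = SET_PAGING_FILE_INFO (TempPte, 2, 0x1234);
+// ASSERT (GET_PAGING_FILE_NUMBER (TempPte) == 2);
+// ASSERT (GET_PAGING_FILE_OFFSET (TempPte) == 0x1234);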
+
+
+//++
+//PMMPTE
+//MiPteToProto (
+// IN OUT MMPTE PPTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro returns the address of the corresponding prototype PTE
+// which was encoded earlier into the supplied PTE.
+//
+// NOTE THAT AS PROTOPTE CAN RESIDE IN BOTH PAGED AND NONPAGED POOL
+// THIS MACRO LIMITS THE COMBINED SIZES OF TWO POOLS AND REQUIRES THEM
+// TO BE WITHIN THE MAX SIZE CONSTRAINTS
+//
+// MAX SIZE = 2^(2+8+20) = 2^30 = 1GB
+//
+// NOTE, that the valid bit must be zero!
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Pointer to the prototype PTE that backs this PTE.
+//
+//--
+
+#define MiPteToProto(lpte) ((PMMPTE)((((lpte)->u.Long >> 4) << 2) + \
+ MmProtopte_Base))
+
+
+//++
+//ULONG
+//MiProtoAddressForPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// MiProtoAddressForPte returns the bit field to OR into a PTE to
+// make it reference the supplied prototype PTE, and sets the
+// protoPTE bit, MM_PTE_PROTOTYPE_MASK.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForPte(proto_va) \
+ ((ULONG)((((ULONG)proto_va - MmProtopte_Base) << 2) | MM_PTE_PROTOTYPE_MASK))
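+
+// Round-trip sketch (illustrative; ProtoPte and TempPte stand for
+// caller variables): because prototype PTEs are 4-byte aligned, the
+// encoding is lossless and MiPteToProto recovers the address.
+//
+// TempPte.u.Long = MiProtoAddressForPte (ProtoPte);
+// ASSERT (MiPteToProto (&TempPte) == ProtoPte);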
+
+
+//++
+//ULONG
+//MiProtoAddressForKernelPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// MiProtoAddressForKernelPte returns the bit field to OR into a PTE
+// to make it reference the supplied prototype PTE, and sets the
+// protoPTE bit, MM_PTE_PROTOTYPE_MASK.
+//
+// This macro also sets any other information (such as global bits)
+// required for kernel mode PTEs.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForKernelPte(proto_va) MiProtoAddressForPte(proto_va)
+
+
+//++
+//PSUBSECTION
+//MiGetSubsectionAddress (
+// IN PMMPTE lpte
+// );
+//
+// Routine Description:
+//
+// This macro takes a PTE and returns the address of the subsection that
+// the PTE refers to. Subsections are quadword structures allocated
+// from nonpaged pool.
+//
+// NOTE THIS MACRO LIMITS THE SIZE OF NONPAGED POOL!
+// MAXIMUM NONPAGED POOL = 2^(3+1+24) = 2^28 = 256mb.
+//
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// A pointer to the subsection referred to by the supplied PTE.
+//
+//--
+
+#define MiGetSubsectionAddress(lpte) \
+ ((PSUBSECTION)((ULONG)MM_NONPAGED_POOL_END - \
+ (((((lpte)->u.Long) >> 8) << 4) | \
+ ((((lpte)->u.Long) << 2) & 0x8))))
+
+
+//++
+//ULONG
+//MiGetSubsectionAddressForPte (
+// IN PSUBSECTION VA
+// );
+//
+// Routine Description:
+//
+// This macro takes the address of a subsection and encodes it for use
+// in a PTE.
+//
+// NOTE - THE SUBSECTION ADDRESS MUST BE QUADWORD ALIGNED!
+//
+// Arguments
+//
+// VA - Supplies a pointer to the subsection to encode.
+//
+// Return Value:
+//
+// The mask to set into the PTE to make it reference the supplied
+// subsection.
+//
+//--
+
+#define MiGetSubsectionAddressForPte(VA) \
+ (((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA) << 4) & (ULONG)0xffffff00) | \
+ ((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA) >> 2) & (ULONG)0x2))
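+
+// Round-trip sketch (illustrative; Subsection stands for a quadword
+// aligned subsection allocated from nonpaged pool):
+//
+// TempPte.u.Long = MiGetSubsectionAddressForPte (Subsection) |
+// MM_PTE_PROTOTYPE_MASK;
+// ASSERT (MiGetSubsectionAddress (&TempPte) == Subsection);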
+
+
+//++
+//PMMPTE
+//MiGetPdeAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeAddress returns the address of the PDE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PDE for.
+//
+// Return Value:
+//
+// The address of the PDE.
+//
+//--
+
+#define MiGetPdeAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 22) << 2) + PDE_BASE))
+
+
+//++
+//PMMPTE
+//MiGetPteAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteAddress returns the address of the PTE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PTE for.
+//
+// Return Value:
+//
+// The address of the PTE.
+//
+//--
+
+#define MiGetPteAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 12) << 2) + PTE_BASE))
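+
+// With the standard self-mapped page directory (a sketch, assuming
+// PDE_BASE == PTE_BASE + (PTE_BASE >> 10), so the directory serves as
+// the page table for the PTE region), the two macros compose:
+//
+// MiGetPdeAddress (Va) == MiGetPteAddress (MiGetPteAddress (Va))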
+
+
+//++
+//ULONG
+//MiGetPdeOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeOffset returns the offset into a page directory
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page directory table the corresponding PDE is at.
+//
+//--
+
+#define MiGetPdeOffset(va) (((ULONG)(va)) >> 22)
+
+
+//++
+//ULONG
+//MiGetPteOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteOffset returns the offset into a page table page
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page table page the corresponding PTE is at.
+//
+//--
+
+#define MiGetPteOffset(va) ((((ULONG)(va)) << 10) >> 22)
+
+
+//++
+//PMMPTE
+//MiGetProtoPteAddress (
+// IN PMMPTE VAD,
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// MiGetProtoPteAddress returns a pointer to the prototype PTE which
+// is mapped by the given virtual address descriptor and address within
+// the virtual address descriptor.
+//
+// Arguments
+//
+// VAD - Supplies a pointer to the virtual address descriptor that contains
+// the VA.
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// A pointer to the proto PTE which corresponds to the VA.
+//
+//--
+
+#define MiGetProtoPteAddress(VAD,VA) \
+ (((((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte) <= (ULONG)(VAD)->LastContiguousPte) ? \
+ ((PMMPTE)(((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte)) : \
+ MiGetProtoPteAddressExtended ((VAD),(VA)))
+
+
+//++
+//PVOID
+//MiGetVirtualAddressMappedByPte (
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// MiGetVirtualAddressMappedByPte returns the virtual address
+// which is mapped by a given PTE address.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to get the virtual address for.
+//
+// Return Value:
+//
+// Virtual address mapped by the PTE.
+//
+//--
+
+#define MiGetVirtualAddressMappedByPte(va) ((PVOID)((ULONG)(va) << 10))
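+
+// This is the inverse of MiGetPteAddress for page-aligned addresses;
+// the PTE_BASE term shifts out of the low 32 bits (a sketch, assuming
+// PTE_BASE is 4MB aligned):
+//
+// MiGetVirtualAddressMappedByPte (MiGetPteAddress (Va)) == PAGE_ALIGN (Va)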
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_NUMBER (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the paging file number from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file number.
+//
+//--
+
+#define GET_PAGING_FILE_NUMBER(PTE) ((((PTE).u.Long) >> 8) & 0xF)
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_OFFSET (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the offset into the paging file from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file offset.
+//
+//--
+
+#define GET_PAGING_FILE_OFFSET(PTE) ((((PTE).u.Long) >> 12) & 0x000FFFFF)
+
+
+//++
+//ULONG
+//IS_PTE_NOT_DEMAND_ZERO (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if a given PTE is NOT a demand zero PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Returns 0 if the PTE is demand zero, non-zero otherwise.
+//
+//--
+
+#define IS_PTE_NOT_DEMAND_ZERO(PTE) ((PTE).u.Long & (ULONG)0xFFFFF007)
+
+
+//++
+//VOID
+//MI_MAKING_VALID_PTE_INVALID(
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// Prepare to make a single valid PTE invalid.
+// No action is required on PowerPC.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKING_VALID_PTE_INVALID(SYSTEM_WIDE)
+
+
+//++
+//VOID
+//MI_MAKING_MULTIPLE_PTES_INVALID(
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// Prepare to make multiple valid PTEs invalid.
+// No action is required on PowerPC.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKING_MULTIPLE_PTES_INVALID(SYSTEM_WIDE)
+
+
+//
+// Make a writable PTE a writable-copy PTE. This takes advantage of
+// the fact that the protection field in the PTE (5 bit protection) is
+// set up such that write is a bit.
+//
+
+#define MI_MAKE_PROTECT_WRITE_COPY(PTE) \
+ if ((PTE).u.Long & 0x20) { \
+ ((PTE).u.Long |= 0x8); \
+ }
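+
+// For example (illustrative, assuming the usual protection encodings
+// MM_READWRITE == 4 and MM_WRITECOPY == 5, which differ only in the
+// low bit of the 5-bit protection field):
+//
+// TempPte.u.Soft.Protection = MM_READWRITE;
+// MI_MAKE_PROTECT_WRITE_COPY (TempPte);
+// ASSERT (TempPte.u.Soft.Protection == MM_WRITECOPY);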
+
+
+//++
+//VOID
+//MI_SET_PAGE_DIRTY(
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit (and release page file space).
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PAGE_DIRTY(PPTE,VA,PFNHELD) \
+ if ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ }
+
+
+//++
+//VOID
+//MI_NO_FAULT_FOUND(
+// IN TEMP,
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro handles the case when a page fault is taken and no
+// PTE with the valid bit clear is found.
+//
+// Arguments
+//
+// TEMP - Supplies a temporary for usage.
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_NO_FAULT_FOUND(TEMP,PPTE,VA,PFNHELD) \
+ if (StoreInstruction && ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN)) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ } else { \
+ KeFillEntryTb ((PHARDWARE_PTE)PPTE, VA, FALSE); \
+ }
+// KeFillEntryTb((PHARDWARE_PTE)(MiGetPdeAddress(VA)),(PVOID)PPTE,FALSE);
+ //
+ // If the PTE was already valid, assume that the PTE
+ // in the TB is stale and just reload the PTE.
+ //
+
+
+//++
+//VOID
+//MI_CAPTURE_DIRTY_BIT_TO_PFN (
+// IN PMMPTE PPTE,
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro captures the state of the dirty bit into the PFN
+// element and frees any associated page file space if the PTE has
+// been modified.
+//
+// NOTE - THE PFN LOCK MUST BE HELD!
+//
+// Arguments
+//
+// PPTE - Supplies the PTE to operate upon.
+//
+// PPFN - Supplies a pointer to the PFN database element that corresponds
+// to the page mapped by the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_CAPTURE_DIRTY_BIT_TO_PFN(PPTE,PPFN) \
+ if (((PPFN)->u3.e1.Modified == 0) && \
+ ((PPTE)->u.Hard.Dirty == MM_PTE_DIRTY)) { \
+ (PPFN)->u3.e1.Modified = 1; \
+ if (((PPFN)->OriginalPte.u.Soft.Prototype == 0) && \
+ ((PPFN)->u3.e1.WriteInProgress == 0)) { \
+ MiReleasePageFileSpace ((PPFN)->OriginalPte); \
+ (PPFN)->OriginalPte.u.Soft.PageFileHigh = 0; \
+ } \
+ }
+
+
+//++
+//BOOLEAN
+//MI_IS_PHYSICAL_ADDRESS (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro determines if a given virtual address is really a
+// physical address.
+//
+// Arguments
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// FALSE if it is not a physical address, TRUE if it is.
+//
+//--
+
+#define MI_IS_PHYSICAL_ADDRESS(Va) \
+ (((ULONG)Va >= KSEG0_BASE) && ((ULONG)Va < KSEG2_BASE))
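+
+// For example (illustrative): KSEG0 addresses are identity mapped, so
+//
+// MI_IS_PHYSICAL_ADDRESS ((PVOID)KSEG0_BASE) is TRUE
+// MI_IS_PHYSICAL_ADDRESS ((PVOID)0x10000) is FALSE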
+
+
+//++
+//ULONG
+//MI_CONVERT_PHYSICAL_TO_PFN (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro converts a physical address (see MI_IS_PHYSICAL_ADDRESS)
+// to its corresponding physical frame number.
+//
+// Arguments
+//
+// VA - Supplies a pointer to the physical address.
+//
+// Return Value:
+//
+// Returns the PFN for the page.
+//
+//--
+
+#define MI_CONVERT_PHYSICAL_TO_PFN(Va) (((ULONG)Va << 2) >> 14)
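+
+// The shift pair strips the two high KSEG selector bits and then
+// divides by the page size; equivalently (a sketch, assuming
+// PAGE_SHIFT == 12):
+//
+// MI_CONVERT_PHYSICAL_TO_PFN (Va) == (((ULONG)Va & 0x3FFFFFFF) >> PAGE_SHIFT)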
+
+
+typedef struct _MMCOLOR_TABLES {
+ ULONG Flink;
+ PVOID Blink;
+} MMCOLOR_TABLES, *PMMCOLOR_TABLES;
+
+typedef struct _MMPRIMARY_COLOR_TABLES {
+ LIST_ENTRY ListHead;
+} MMPRIMARY_COLOR_TABLES, *PMMPRIMARY_COLOR_TABLES;
+
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+extern MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+#endif
+
+extern PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+extern ULONG MmTotalPagesForPagingFile;
+
+
+
+//
+// A valid Page Table Entry has the following definition.
+//
+
+// N.B. HARDWARE_PTE is defined in ../public/sdk/inc/ntppc.h as in the comments below.
+
+// typedef struct _HARDWARE_PTE {
+// ULONG Dirty : 2;
+// ULONG Valid : 1; // software
+// ULONG GuardedStorage : 1;
+// ULONG MemoryCoherence : 1;
+// ULONG CacheDisable : 1;
+// ULONG WriteThrough : 1;
+// ULONG Change : 1;
+// ULONG Reference : 1;
+// ULONG Write : 1; // software
+// ULONG CopyOnWrite : 1; // software
+// ULONG rsvd1 : 1;
+// ULONG PageFrameNumber : 20;
+// } HARDWARE_PTE, *PHARDWARE_PTE;
+
+
+//
+// Invalid Page Table Entries have the following definitions.
+//
+
+typedef struct _MMPTE_TRANSITION {
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG Valid : 1;
+ ULONG Protection : 5;
+ ULONG filler4 : 4;
+ ULONG PageFrameNumber : 20;
+} MMPTE_TRANSITION;
+
+typedef struct _MMPTE_SOFTWARE {
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG Valid : 1;
+ ULONG Protection : 5;
+ ULONG PageFileLow : 4;
+ ULONG PageFileHigh : 20;
+} MMPTE_SOFTWARE;
+
+typedef struct _MMPTE_PROTOTYPE {
+ ULONG Prototype : 1;
+ ULONG filler1 : 1;
+ ULONG Valid : 1;
+ ULONG ReadOnly : 1;
+ ULONG ProtoAddressLow : 8;
+ ULONG ProtoAddressHigh : 20;
+} MMPTE_PROTOTYPE;
+
+typedef struct _MMPTE_SUBSECTION {
+ ULONG Prototype : 1;
+ ULONG SubsectionAddressLow : 1;
+ ULONG Valid : 1;
+ ULONG Protection : 5;
+ ULONG SubsectionAddressHigh : 24;
+} MMPTE_SUBSECTION;
+
+typedef struct _MMPTE_LIST {
+ ULONG filler2 : 2;
+ ULONG Valid : 1;
+ ULONG OneEntry : 1;
+ ULONG filler8 : 8;
+ ULONG NextEntry : 20;
+} MMPTE_LIST;
+
+
+//
+// A Page Table Entry has the following definition.
+//
+
+typedef struct _MMPTE {
+ union {
+ ULONG Long;
+ HARDWARE_PTE Hard;
+ HARDWARE_PTE Flush;
+ MMPTE_TRANSITION Trans;
+ MMPTE_SOFTWARE Soft;
+ MMPTE_PROTOTYPE Proto;
+ MMPTE_SUBSECTION Subsect;
+ MMPTE_LIST List;
+ } u;
+} MMPTE;
+
+typedef MMPTE *PMMPTE;
+
diff --git a/private/ntos/mm/ppc/setdirty.c b/private/ntos/mm/ppc/setdirty.c
new file mode 100644
index 000000000..9217bb39a
--- /dev/null
+++ b/private/ntos/mm/ppc/setdirty.c
@@ -0,0 +1,124 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ setdirty.c
+
+Abstract:
+
+ This module contains the setting dirty bit routine for memory management.
+
+ PowerPC specific.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1990.
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-1993
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+VOID
+MiSetDirtyBit (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN ULONG PfnHeld
+ )
+
+/*++
+
+Routine Description:
+
+ This routine sets the dirty bit in the specified PTE and the modified
+ bit in the corresponding PFN element. If any page file space is allocated, it
+ is deallocated.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies a pointer to the corresponding valid PTE.
+
+ PfnHeld - Supplies TRUE if the PFN mutex is already held.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working set mutex held.
+
+--*/
+
+{
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ //
+ // The page is NOT copy on write, update the PTE setting both the
+ // dirty bit and the accessed bit. Note that, as this PTE is in
+ // the TB, the TB must be flushed.
+ //
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ TempPte = *PointerPte;
+ TempPte.u.Hard.Dirty = MM_PTE_DIRTY;
+ MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
+ *PointerPte = TempPte;
+
+ //
+ // Check state of PFN mutex and if not held, don't update PFN database.
+ //
+
+
+ if (PfnHeld) {
+
+ //
+ // Set the modified field in the PFN database, also, if the physical
+ // page is currently in a paging file, free up the page file space
+ // as the contents are now worthless.
+ //
+
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+
+ //
+ // This page is in page file format, deallocate the page file space.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+
+ //
+ // Change original PTE to indicate no page file space is reserved,
+ // otherwise the space will be deallocated when the PTE is
+ // deleted.
+ //
+
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+ Pfn1->u3.e1.Modified = 1;
+ }
+
+ //
+ // The TB entry must be flushed as the valid PTE with the dirty bit clear
+ // has been fetched into the TB. If it isn't flushed, another fault
+ // is generated as the dirty bit is not set in the cached TB entry.
+ //
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPte, FaultingAddress, TRUE);
+
+ return;
+}
diff --git a/private/ntos/mm/ppc/sources b/private/ntos/mm/ppc/sources
new file mode 100644
index 000000000..5cfcd8eb6
--- /dev/null
+++ b/private/ntos/mm/ppc/sources
@@ -0,0 +1,5 @@
+PPC_SOURCES=..\ppc\initppc.c \
+ ..\ppc\datappc.c \
+ ..\ppc\debugsup.c \
+ ..\ppc\hypermap.c \
+ ..\ppc\setdirty.c
diff --git a/private/ntos/mm/procsup.c b/private/ntos/mm/procsup.c
new file mode 100644
index 000000000..f97a7ee49
--- /dev/null
+++ b/private/ntos/mm/procsup.c
@@ -0,0 +1,3327 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ procsup.c
+
+Abstract:
+
+ This module contains routines which support the process structure.
+
+Author:
+
+ Lou Perazzoli (loup) 25-Apr-1989
+
+Revision History:
+
+--*/
+
+
+#include "mi.h"
+
+#define MM_PROCESS_COMMIT_CHARGE 3
+
+#define MM_PROCESS_CREATE_CHARGE 5
+
+#define HEADER_FILE
+
+extern ULONG MmSharedCommit;
+extern ULONG MmHeapSegmentReserve;
+extern ULONG MmHeapSegmentCommit;
+extern ULONG MmHeapDeCommitTotalFreeThreshold;
+extern ULONG MmHeapDeCommitFreeBlockThreshold;
+extern ULONG MmProductType;
+
+extern ULONG MmWorkingSetReductionMax;
+
+extern MM_SYSTEMSIZE MmSystemSize;
+
+ULONG MmProcessCommit;
+
+ULONG MmKernelStackPages;
+ULONG MmKernelStackResident;
+ULONG MmLargeStacks;
+ULONG MmSmallStacks;
+
+MMPTE KernelDemandZeroPte = {MM_KERNEL_DEMAND_ZERO_PTE};
+
+CCHAR MmRotatingUniprocessorNumber;
+
+ULONG
+MiMakeOutswappedPageResident (
+ IN PMMPTE ActualPteAddress,
+ IN PMMPTE PointerTempPte,
+ IN ULONG Global,
+ IN ULONG ContainingPage,
+ OUT PULONG ActiveTransition
+ );
+
+
+
+PVOID
+MiCreatePebOrTeb (
+ IN PEPROCESS TargetProcess,
+ IN ULONG Size
+ );
+
+VOID
+MiDeleteAddressesInWorkingSet (
+ IN PEPROCESS Process
+ );
+
+VOID
+MiDeleteValidAddress (
+ IN PVOID Va,
+ IN PEPROCESS CurrentProcess
+ );
+
+VOID
+MiDeleteFreeVm (
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress
+ );
+
+VOID
+VadTreeWalk (
+ IN PMMVAD Start
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,MmCreateTeb)
+#pragma alloc_text(PAGE,MmCreatePeb)
+#pragma alloc_text(PAGE,MiCreatePebOrTeb)
+#pragma alloc_text(PAGE,MmDeleteTeb)
+#endif
+
+
+BOOLEAN
+MmCreateProcessAddressSpace (
+ IN ULONG MinimumWorkingSetSize,
+ IN PEPROCESS NewProcess,
+ OUT PULONG DirectoryTableBase
+ )
+
+/*++
+
+Routine Description:
+
+ This routine creates an address space which maps the system
+ portion and contains a hyper space entry.
+
+Arguments:
+
+ MinimumWorkingSetSize - Supplies the minimum working set size for
+ this address space. This value is only used
+ to ensure that ample physical pages exist
+ to create this process.
+
+ NewProcess - Supplies a pointer to the process object being created.
+
+ DirectoryTableBase - Returns the value of the newly created
+ address space's Page Directory (PD) page and
+ hyper space page.
+
+Return Value:
+
+ Returns TRUE if an address space was successfully created, FALSE
+ if ample physical pages do not exist.
+
+Environment:
+
+ Kernel mode. APC's Disabled.
+
+--*/
+
+{
+ ULONG PageDirectoryIndex;
+ PMMPTE PointerPte;
+ ULONG HyperSpaceIndex;
+ ULONG PageContainingWorkingSet;
+ MMPTE TempPte;
+ PMMPTE LastPte;
+ PMMPTE PointerFillPte;
+ PMMPTE CurrentAddressSpacePde;
+ PEPROCESS CurrentProcess;
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+ ULONG Color;
+
+ //
+ // Get the PFN LOCK to prevent another thread in this
+ // process from using hyper space and to get physical pages.
+ //
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+ //
+ // Charge 3 pages of commitment for the page directory page,
+ // working set page table page, and working set list.
+ //
+
+ try {
+ MiChargeCommitment (MM_PROCESS_COMMIT_CHARGE, NULL);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return FALSE;
+ }
+
+ NewProcess->NextPageColor = (USHORT)(RtlRandom(&MmProcessColorSeed));
+ KeInitializeSpinLock (&NewProcess->HyperSpaceLock);
+ Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE,
+ &CurrentProcess->NextPageColor);
+
+ LOCK_WS (CurrentProcess);
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Check to make sure the physical pages are available.
+ //
+
+ if (MmResidentAvailablePages <= (LONG)MinimumWorkingSetSize) {
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_WS (CurrentProcess);
+ MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);
+
+ //
+ // Indicate no directory base was allocated.
+ //
+
+ return FALSE;
+ }
+
+ MmResidentAvailablePages -= MinimumWorkingSetSize;
+ MmProcessCommit += MM_PROCESS_COMMIT_CHARGE;
+
+ MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
+
+ PageDirectoryIndex = MiRemoveZeroPageIfAny (Color);
+ if (PageDirectoryIndex == 0) {
+ PageDirectoryIndex = MiRemoveAnyPage (Color);
+ UNLOCK_PFN (OldIrql);
+ MiZeroPhysicalPage (PageDirectoryIndex, Color);
+ LOCK_PFN (OldIrql);
+ }
+
+ INITIALIZE_DIRECTORY_TABLE_BASE(&DirectoryTableBase[0], PageDirectoryIndex);
+
+ MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
+
+ Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress(HYPER_SPACE),
+ &CurrentProcess->NextPageColor);
+
+ HyperSpaceIndex = MiRemoveZeroPageIfAny (Color);
+ if (HyperSpaceIndex == 0) {
+ HyperSpaceIndex = MiRemoveAnyPage (Color);
+ UNLOCK_PFN (OldIrql);
+ MiZeroPhysicalPage (HyperSpaceIndex, Color);
+ LOCK_PFN (OldIrql);
+ }
+
+ INITIALIZE_DIRECTORY_TABLE_BASE(&DirectoryTableBase[1], HyperSpaceIndex);
+
+ //
+ // Remove page for the working set list.
+ //
+
+ MiEnsureAvailablePageOrWait (CurrentProcess, NULL);
+
+ Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
+ &CurrentProcess->NextPageColor);
+
+ PageContainingWorkingSet = MiRemoveZeroPageIfAny (Color);
+ if (PageContainingWorkingSet == 0) {
+ PageContainingWorkingSet = MiRemoveAnyPage (Color);
+ UNLOCK_PFN (OldIrql);
+ MiZeroPhysicalPage (PageContainingWorkingSet, Color);
+ LOCK_PFN (OldIrql);
+ }
+
+ //
+ // Release the PFN mutex as the needed pages have been allocated.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ NewProcess->WorkingSetPage = PageContainingWorkingSet;
+
+ //
+ // Initialize the page reserved for hyper space.
+ //
+
+ MI_INITIALIZE_HYPERSPACE_MAP (HyperSpaceIndex);
+
+ //
+ // Set the PTE address in the PFN for hyper space mapping.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);
+
+ ASSERT (Pfn1->u3.e1.PageColor == 0);
+
+ Pfn1->PteAddress = (PMMPTE)PDE_BASE;
+
+
+ TempPte = ValidPdePde;
+
+ TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex;
+ MI_SET_GLOBAL_STATE (TempPte, 0);
+#ifdef R4000
+ TempPte.u.Hard.Write = 1;
+#endif
+
+ //
+ // Map in page table page for hyper space.
+ //
+
+ PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageDirectoryIndex, &OldIrql);
+ PointerPte[MiGetPdeOffset(HYPER_SPACE)] = TempPte;
+
+ //
+ // Map in the page directory page so it points to itself.
+ //
+
+ TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
+
+ PointerPte[MiGetPdeOffset(PTE_BASE)] = TempPte;
+
+ //
+ // Map in the non paged portion of the system.
+ //
+
+#if defined(_MIPS_) || defined(_ALPHA_) || defined(_PPC_)
+
+ PointerFillPte = &PointerPte[MiGetPdeOffset(MM_SYSTEM_SPACE_START)];
+ CurrentAddressSpacePde = MiGetPdeAddress(MM_SYSTEM_SPACE_START);
+ RtlCopyMemory (PointerFillPte,
+ CurrentAddressSpacePde,
+ ((1 + (MiGetPdeAddress(MM_SYSTEM_SPACE_END) -
+ MiGetPdeAddress(MM_SYSTEM_SPACE_START))) * sizeof(MMPTE)));
+
+#if defined(MM_SYSTEM_CACHE_AND_POOL_DISJOINT)
+ PointerFillPte = &PointerPte[MiGetPdeOffset(MM_SYSTEM_CACHE_WORKING_SET)];
+ CurrentAddressSpacePde = MiGetPdeAddress(MM_SYSTEM_CACHE_WORKING_SET);
+ RtlCopyMemory (PointerFillPte,
+ CurrentAddressSpacePde,
+ (((1 + (MiGetPdeAddress(MM_SYSTEM_CACHE_END) -
+ CurrentAddressSpacePde))) * sizeof(MMPTE)));
+#endif
+
+#if defined(MM_BOOT_CODE_PAGEABLE)
+ PointerFillPte = &PointerPte[MiGetPdeOffset(MM_BOOT_CODE_START)];
+ CurrentAddressSpacePde = MiGetPdeAddress(MM_BOOT_CODE_START);
+ RtlCopyMemory (PointerFillPte,
+ CurrentAddressSpacePde,
+ ((1 + (MiGetPdeAddress(MM_BOOT_CODE_END) -
+ MiGetPdeAddress(MM_BOOT_CODE_START))) * sizeof(MMPTE)));
+#endif
+
+#else // the following is for x86 only
+
+ PointerFillPte = &PointerPte[MiGetPdeOffset(CODE_START)];
+ CurrentAddressSpacePde = MiGetPdeAddress(CODE_START);
+
+ RtlCopyMemory (PointerFillPte,
+ CurrentAddressSpacePde,
+ (((1 + CODE_END) - CODE_START) / MM_VA_MAPPED_BY_PDE));
+
+
+ LastPte = &PointerPte[MiGetPdeOffset(NON_PAGED_SYSTEM_END)];
+ PointerFillPte = &PointerPte[MiGetPdeOffset(MmNonPagedSystemStart)];
+ CurrentAddressSpacePde = MiGetPdeAddress(MmNonPagedSystemStart);
+
+ RtlCopyMemory (PointerFillPte,
+ CurrentAddressSpacePde,
+ ((1 + (MiGetPdeAddress(NON_PAGED_SYSTEM_END) -
+ CurrentAddressSpacePde))) * sizeof(MMPTE));
+
+ //
+ // Map in the system cache page table pages.
+ //
+
+ LastPte = &PointerPte[MiGetPdeOffset(MmSystemCacheEnd)];
+ PointerFillPte = &PointerPte[MiGetPdeOffset(MM_SYSTEM_CACHE_WORKING_SET)];
+ CurrentAddressSpacePde = MiGetPdeAddress(MM_SYSTEM_CACHE_WORKING_SET);
+
+ RtlCopyMemory (PointerFillPte,
+ CurrentAddressSpacePde,
+ ((1 + (MiGetPdeAddress(MmSystemCacheEnd) -
+ CurrentAddressSpacePde))) * sizeof(MMPTE));
+
+
+#endif // else _MIPS_ || _ALPHA_ || _PPC_
+
+ MiUnmapPageInHyperSpace (OldIrql);
+
+ //
+ // Release working set mutex and lower IRQL.
+ //
+
+ UNLOCK_WS (CurrentProcess);
+
+ return TRUE;
+}
+
+NTSTATUS
+MmInitializeProcessAddressSpace (
+ IN PEPROCESS ProcessToInitialize,
+ IN PEPROCESS ProcessToClone OPTIONAL,
+ IN PVOID SectionToMap OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This routine initializes the working set and mutexes within a
+ newly created address space to support paging.
+
+ No page faults may occur in a new process until this routine is
+ completed.
+
+Arguments:
+
+ ProcessToInitialize - Supplies a pointer to the process to initialize.
+
+ ProcessToClone - Optionally supplies a pointer to the process whose
+ address space should be copied into the
+ ProcessToInitialize address space.
+
+ SectionToMap - Optionally supplies a section to map into the newly
+ initialized address space.
+
+ Only one of ProcessToClone and SectionToMap may be specified.
+
+
+Return Value:
+
+ Returns the status of the operation.
+
+
+Environment:
+
+ Kernel mode. APC's Disabled.
+
+--*/
+
+
+{
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ PVOID BaseAddress;
+ ULONG ViewSize;
+ KIRQL OldIrql;
+ NTSTATUS Status;
+ ULONG PdePhysicalPage;
+ ULONG PageContainingWorkingSet;
+ LARGE_INTEGER SectionOffset;
+
+ //
+ // Initialize Working Set Mutex in process header.
+ //
+
+ KeAttachProcess (&ProcessToInitialize->Pcb);
+ ProcessToInitialize->AddressSpaceInitialized = TRUE;
+
+ ExInitializeFastMutex(&ProcessToInitialize->AddressCreationLock);
+
+ ExInitializeFastMutex(&ProcessToInitialize->WorkingSetLock);
+
+ //
+ // NOTE: The process block has been zeroed when allocated, so
+ // there is no need to zero fields and set pointers to NULL.
+ //
+ //
+
+ ASSERT (ProcessToInitialize->VadRoot == NULL);
+
+ KeQuerySystemTime(&ProcessToInitialize->Vm.LastTrimTime);
+ ProcessToInitialize->Vm.VmWorkingSetList = MmWorkingSetList;
+
+ //
+ // Obtain a page to map the working set and initialize the
+ // working set. Get PFN mutex to allocate physical pages.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Initialize the PFN database for the Page Directory and the
+ // PDE which maps hyper space.
+ //
+
+ PointerPte = MiGetPteAddress (PDE_BASE);
+ PdePhysicalPage = PointerPte->u.Hard.PageFrameNumber;
+
+ MiInitializePfn (PdePhysicalPage, PointerPte, 1);
+
+ PointerPte = MiGetPdeAddress (HYPER_SPACE);
+ MiInitializePfn (PointerPte->u.Hard.PageFrameNumber, PointerPte, 1);
+
+ PageContainingWorkingSet = ProcessToInitialize->WorkingSetPage;
+
+ PointerPte = MiGetPteAddress (MmWorkingSetList);
+ PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+
+ MiInitializePfn (PageContainingWorkingSet, PointerPte, 1);
+
+ UNLOCK_PFN (OldIrql);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageContainingWorkingSet,
+ MM_READWRITE,
+ PointerPte );
+
+ MI_SET_PTE_DIRTY (TempPte);
+ *PointerPte = TempPte;
+
+ MiInitializeWorkingSetList (ProcessToInitialize);
+
+ //
+ // Page faults may be taken now.
+ //
+
+ if (SectionToMap != (PSECTION)NULL) {
+#if DEVL
+ UNICODE_STRING UnicodeString;
+ ULONG n;
+ PWSTR Src;
+ PCHAR Dst;
+
+ UnicodeString = ((PSECTION)SectionToMap)->Segment->ControlArea->FilePointer->FileName;
+ Src = (PWSTR)((PCHAR)UnicodeString.Buffer + UnicodeString.Length);
+ n = 0;
+ if (UnicodeString.Buffer != NULL) {
+ while (Src > UnicodeString.Buffer) {
+ if (*--Src == OBJ_NAME_PATH_SEPARATOR) {
+ Src++;
+ break;
+ }
+ else {
+ n += 1;
+ }
+ }
+ }
+ Dst = ProcessToInitialize->ImageFileName;
+ if (n >= sizeof( ProcessToInitialize->ImageFileName )) {
+ n = sizeof( ProcessToInitialize->ImageFileName ) - 1;
+ }
+
+ while (n--) {
+ *Dst++ = (UCHAR)*Src++;
+ }
+ *Dst = '\0';
+#endif // DEVL
+
+ ProcessToInitialize->SubSystemMajorVersion = (UCHAR)((PSECTION)SectionToMap)->Segment->ImageInformation.SubSystemMajorVersion;
+ ProcessToInitialize->SubSystemMinorVersion = (UCHAR)((PSECTION)SectionToMap)->Segment->ImageInformation.SubSystemMinorVersion;
+
+ //
+ // Map the specified section into the address space of the
+ // process but only if it is an image section
+ //
+
+ if (!((PSECTION)SectionToMap)->u.Flags.Image) {
+ Status = STATUS_SECTION_NOT_IMAGE;
+ } else {
+ BaseAddress = NULL;
+ ViewSize = 0;
+ ZERO_LARGE (SectionOffset);
+
+ Status = MmMapViewOfSection ( (PSECTION)SectionToMap,
+ ProcessToInitialize,
+ &BaseAddress,
+ 0, // ZeroBits,
+ 0, // CommitSize,
+ &SectionOffset, //SectionOffset,
+ &ViewSize,
+ ViewShare, //InheritDisposition,
+ 0, //allocation type
+ PAGE_READWRITE // Protect
+ );
+
+ ProcessToInitialize->SectionBaseAddress = BaseAddress;
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ DbgPrint("mapped image section vads\n");
+ VadTreeWalk(ProcessToInitialize->VadRoot);
+ }
+#endif //DBG
+ }
+
+ KeDetachProcess ();
+ return Status;
+ }
+
+ if (ProcessToClone != (PEPROCESS)NULL) {
+#if DEVL
+ strcpy( ProcessToInitialize->ImageFileName, ProcessToClone->ImageFileName );
+#endif // DEVL
+
+ //
+ // Clone the address space of the specified process.
+ //
+
+ //
+ // As the page directory and page tables are private to each
+ // process, the physical pages which map the directory page
+ // and the page table usage must be mapped into system space
+ // so they can be updated while in the context of the process
+ // we are cloning.
+ //
+
+ KeDetachProcess ();
+ return MiCloneProcessAddressSpace (ProcessToClone,
+ ProcessToInitialize,
+ PdePhysicalPage,
+ PageContainingWorkingSet
+ );
+
+ }
+
+ //
+ // System Process.
+ //
+
+ KeDetachProcess ();
+ return STATUS_SUCCESS;
+}
+
+VOID
+MmDeleteProcessAddressSpace (
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine deletes a process's Page Directory and working set page.
+
+Arguments:
+
+ Process - Supplies a pointer to the deleted process.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode. APC's Disabled.
+
+--*/
+
+{
+
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+ ULONG PageFrameIndex;
+
+ //
+ // Return commitment.
+ //
+
+ MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);
+ ASSERT (Process->CommitCharge == 0);
+
+ //
+ // Remove working set list page from the deleted process.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (Process->WorkingSetPage);
+
+ LOCK_PFN (OldIrql);
+ MmProcessCommit -= MM_PROCESS_COMMIT_CHARGE;
+
+ if (Process->AddressSpaceInitialized) {
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+ MiDecrementShareCountOnly (Process->WorkingSetPage);
+
+ ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
+
+ //
+ // Remove hyper space page table page from deleted process.
+ //
+
+ PageFrameIndex =
+ ((PHARDWARE_PTE)(&(Process->Pcb.DirectoryTableBase[1])))->PageFrameNumber;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+ MiDecrementShareCountOnly (PageFrameIndex);
+ ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
+
+ //
+ // Remove page directory page.
+ //
+
+ PageFrameIndex =
+ ((PHARDWARE_PTE)(&(Process->Pcb.DirectoryTableBase[0])))->PageFrameNumber;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareAndValidCount (PageFrameIndex);
+ MiDecrementShareCountOnly (PageFrameIndex);
+
+ ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress));
+
+ } else {
+
+ //
+ // Process initialization never completed, just return the pages
+ // to the free list.
+ //
+
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ Process->WorkingSetPage);
+
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ ((PHARDWARE_PTE)(&(Process->Pcb.DirectoryTableBase[1])))->PageFrameNumber);
+
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ ((PHARDWARE_PTE)(&(Process->Pcb.DirectoryTableBase[0])))->PageFrameNumber);
+ }
+
+ MmResidentAvailablePages += MM_PROCESS_CREATE_CHARGE;
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Check to see if the paging files should be contracted.
+ //
+
+ MiContractPagingFiles ();
+
+ return;
+}
+
+VOID
+MmCleanProcessAddressSpace (
+ )
+
+/*++
+
+Routine Description:
+
+ This routine cleans an address space by deleting all the
+ user and pageable portion of the address space. At the
+ completion of this routine, no page faults may occur within
+ the process.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PEPROCESS Process;
+ PMMVAD Vad;
+ KEVENT Event;
+ KIRQL OldIrql;
+ KIRQL OldIrql2;
+ PMMPTE PointerPte;
+ PVOID TempVa;
+ LONG AboveWsMin;
+ MMPTE_FLUSH_LIST PteFlushList;
+
+ PteFlushList.Count = 0;
+ Process = PsGetCurrentProcess();
+ if ((Process->AddressSpaceDeleted != 0) ||
+ (Process->AddressSpaceInitialized == FALSE)) {
+
+ //
+ // This process's address space has already been deleted.
+ //
+
+ return;
+ }
+
+ //
+ // If working set expansion for this process is allowed, disable
+ // it and remove the process from expanded process list if it
+ // is on it.
+ //
+
+ LOCK_EXPANSION (OldIrql);
+
+ if (Process->Vm.WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION) {
+
+ //
+ // Check to see if trimming is in progress.
+ //
+
+ if (Process->Vm.WorkingSetExpansionLinks.Blink ==
+ MM_WS_EXPANSION_IN_PROGRESS) {
+
+ //
+ // Initialize an event and put the event address
+ // in the blink field. When the trimming is complete,
+ // this event will be set.
+ //
+
+ KeInitializeEvent(&Event, NotificationEvent, FALSE);
+
+ Process->Vm.WorkingSetExpansionLinks.Blink = (PLIST_ENTRY)&Event;
+
+ //
+ // Release the mutex and wait for the event.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_EXPANSION_AND_THEN_WAIT (OldIrql);
+
+ KeWaitForSingleObject(&Event,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ } else {
+
+ //
+ // Expansion is already disallowed, so the process is not on the list.
+ //
+
+ UNLOCK_EXPANSION (OldIrql);
+ }
+ } else {
+
+ RemoveEntryList (&Process->Vm.WorkingSetExpansionLinks);
+
+ //
+ // Disable expansion.
+ //
+
+ Process->Vm.WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION;
+
+ //
+ // Release the expansion lock.
+ //
+
+ UNLOCK_EXPANSION (OldIrql);
+ }
+
+ //
+ // Delete all the user owned pagable virtual addresses in the process.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Synchronize address space delete with NtReadVirtualMemory and
+ // NtWriteVirtualMemory.
+ //
+
+ ExAcquireSpinLock (&MmSystemSpaceLock, &OldIrql);
+ Process->AddressSpaceDeleted = 1;
+ if ( Process->VmOperation != 0) {
+
+ //
+ // A Vm operation is in progress, set the event and
+ // indicate this process is being deleted to stop other
+ // vm operations.
+ //
+
+ KeInitializeEvent(&Event, NotificationEvent, FALSE);
+ Process->VmOperationEvent = &Event;
+
+ do {
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+ KeWaitForSingleObject(&Event,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Synchronize address space delete with NtReadVirtualMemory and
+ // NtWriteVirtualMemory.
+ //
+
+ ExAcquireSpinLock (&MmSystemSpaceLock, &OldIrql);
+
+ } while (Process->VmOperation != 0);
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+ } else {
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+ }
+
+ //
+ // Delete all the valid user mode addresses from the working set
+ // list. At this point NO page faults are allowed on user space
+ // addresses.
+ //
+
+ MiDeleteAddressesInWorkingSet (Process);
+
+ //
+ // Delete the virtual address descriptors and dereference any
+ // section objects.
+ //
+
+ Vad = Process->VadRoot;
+
+ while (Vad != (PMMVAD)NULL) {
+
+ MiRemoveVad (Vad);
+
+ if ((Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea != NULL)) {
+
+ //
+ // This Vad represents a mapped view - delete the
+ // view and perform any section related cleanup
+ // operations.
+ //
+
+ MiRemoveMappedView (Process, Vad);
+
+ } else {
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Don't specify address space deletion as TRUE as
+ // the working set must be consistent as page faults may
+ // be taken during clone removal, protoPTE lookup, etc.
+ //
+
+ MiDeleteVirtualAddresses (Vad->StartingVa,
+ Vad->EndingVa,
+ FALSE,
+ Vad);
+
+ UNLOCK_PFN (OldIrql);
+ }
+
+ ExFreePool (Vad);
+ Vad = Process->VadRoot;
+ }
+
+ //
+ // Delete the shared data page, if any.
+ //
+
+ LOCK_PFN (OldIrql);
+
+#if defined(MM_SHARED_USER_DATA_VA)
+ MiDeleteVirtualAddresses ((PVOID) MM_SHARED_USER_DATA_VA,
+ (PVOID) MM_SHARED_USER_DATA_VA,
+ FALSE,
+ NULL);
+#endif
+
+ //
+ // Delete the system portion of the address space.
+ //
+
+ LOCK_EXPANSION_IF_ALPHA (OldIrql2);
+ Process->Vm.AddressSpaceBeingDeleted = TRUE;
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql2);
+
+ //
+ // Adjust the count of pages above working set maximum. This
+ // must be done here because the working set list is not
+ // updated during this deletion.
+ //
+
+ AboveWsMin = (LONG)Process->Vm.WorkingSetSize - (LONG)Process->Vm.MinimumWorkingSetSize;
+ if (AboveWsMin > 0) {
+ MmPagesAboveWsMinimum -= AboveWsMin;
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Return commitment for page table pages.
+ //
+
+ MiReturnCommitment (MmWorkingSetList->NumberOfCommittedPageTables);
+ PsGetCurrentProcess()->CommitCharge -=
+ MmWorkingSetList->NumberOfCommittedPageTables;
+
+ //
+ // Check to make sure all the clone descriptors went away.
+ //
+
+ ASSERT (Process->CloneRoot == (PMMCLONE_DESCRIPTOR)NULL);
+
+#if DBG
+ if (Process->NumberOfLockedPages != 0) {
+ KdPrint(("number of locked pages is not zero - %lx",
+ Process->NumberOfLockedPages));
+ KeBugCheckEx (PROCESS_HAS_LOCKED_PAGES,
+ (ULONG)Process,
+ Process->NumberOfLockedPages,
+ Process->NumberOfPrivatePages,
+ 0);
+ return;
+ }
+#endif //DBG
+
+#if DBG
+ if ((Process->NumberOfPrivatePages != 0) && (MmDebug & MM_DBG_PRIVATE_PAGES)) {
+ DbgPrint("MM: Process contains private pages %ld\n",
+ Process->NumberOfPrivatePages);
+ DbgBreakPoint();
+ }
+#endif //DBG
+
+ //
+ // Remove the working set list pages (except for the first one).
+ // These pages are not removed because DPCs could still occur within
+ // the address space. In a DPC, nonpaged pool could be allocated
+ // which could require removing a page from the standby list, requiring
+ // hyperspace to map the previous PTE.
+ //
+
+ PointerPte = MiGetPteAddress (MmWorkingSetList) + 1;
+
+ PteFlushList.Count = 0;
+
+ LOCK_PFN (OldIrql)
+ while (PointerPte->u.Hard.Valid) {
+ TempVa = MiGetVirtualAddressMappedByPte(PointerPte);
+ MiDeletePte (PointerPte,
+ TempVa,
+ TRUE,
+ Process,
+ NULL,
+ &PteFlushList);
+
+ PointerPte += 1;
+ }
+
+ //
+ // Remove hash table pages, if any.
+ //
+
+ PointerPte = MiGetPteAddress (&MmWsle[MM_MAXIMUM_WORKING_SET]) + 1;
+
+ while (PointerPte->u.Hard.Valid) {
+ TempVa = MiGetVirtualAddressMappedByPte(PointerPte);
+ MiDeletePte (PointerPte,
+ TempVa,
+ TRUE,
+ Process,
+ NULL,
+ &PteFlushList);
+
+ PointerPte += 1;
+ }
+
+ MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
+
+ //
+ // Update the count of available resident pages.
+ //
+
+ ASSERT (Process->Vm.MinimumWorkingSetSize >= MM_PROCESS_CREATE_CHARGE);
+ MmResidentAvailablePages += Process->Vm.MinimumWorkingSetSize -
+ MM_PROCESS_CREATE_CHARGE;
+ ASSERT (Process->Vm.WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION);
+ UNLOCK_PFN (OldIrql);
+
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+ return;
+}
+
+
+#if DBG
+typedef struct _MMKSTACK {
+ PMMPFN Pfn;
+ PMMPTE Pte;
+} MMKSTACK, *PMMKSTACK;
+MMKSTACK MmKstacks[10];
+#endif //DBG
+
+PVOID
+MmCreateKernelStack (
+ IN BOOLEAN LargeStack
+ )
+
+/*++
+
+Routine Description:
+
+ This routine allocates a kernel stack and a no-access page within
+ the non-pagable portion of the system address space.
+
+Arguments:
+
+ LargeStack - Supplies the value TRUE if a large stack should be
+ created. FALSE if a small stack is to be created.
+
+Return Value:
+
+ Returns a pointer to the base of the kernel stack. Note, that the
+ base address points to the guard page, so space must be allocated
+ on the stack before accessing the stack.
+
+ If a kernel stack cannot be created, the value NULL is returned.
+
+Environment:
+
+ Kernel mode. APC's Disabled.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ ULONG NumberOfPages;
+ ULONG NumberOfPtes;
+ ULONG PageFrameIndex;
+ ULONG i;
+ PVOID StackVa;
+ KIRQL OldIrql;
+
+ //
+ // Acquire the PFN mutex to synchronize access to the dead stack
+ // list and to the pfn database.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Check to see if any "unused" stacks are available.
+ //
+
+ if ((!LargeStack) && (MmNumberDeadKernelStacks != 0)) {
+
+#if DBG
+ {
+ ULONG i = MmNumberDeadKernelStacks;
+ PMMPFN PfnList = MmFirstDeadKernelStack;
+
+ while (i > 0) {
+ i--;
+ if ((PfnList != MmKstacks[i].Pfn) ||
+ (PfnList->PteAddress != MmKstacks[i].Pte)) {
+ DbgPrint("MMPROCSUP: kstacks %lx %ld. %lx\n",
+ PfnList, i, MmKstacks[i].Pfn);
+ DbgBreakPoint();
+ }
+ PfnList = PfnList->u1.NextStackPfn;
+ }
+ }
+ NumberOfPages = BYTES_TO_PAGES (KERNEL_STACK_SIZE);
+#endif //DBG
+
+ MmNumberDeadKernelStacks -= 1;
+ PointerPte = MmFirstDeadKernelStack->PteAddress;
+ MmFirstDeadKernelStack = MmFirstDeadKernelStack->u1.NextStackPfn;
+
+ } else {
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Make sure there are at least 100 free system PTEs.
+ //
+
+ if (MmTotalFreeSystemPtes[SystemPteSpace] < 100) {
+ return NULL;
+ }
+
+ if (LargeStack) {
+ NumberOfPtes = BYTES_TO_PAGES (KERNEL_LARGE_STACK_SIZE);
+ NumberOfPages = BYTES_TO_PAGES (KERNEL_LARGE_STACK_COMMIT);
+ } else {
+ NumberOfPtes = BYTES_TO_PAGES (KERNEL_STACK_SIZE);
+ NumberOfPages = NumberOfPtes;
+ }
+
+ //
+ // Charge commitment for the page file space for the kernel stack.
+ //
+
+ try {
+
+ MiChargeCommitment (NumberOfPtes, NULL);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // Commitment exceeded, return NULL, indicating no kernel
+ // stacks are available.
+ //
+
+ return NULL;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Obtain enough pages to contain the stack plus a guard page from
+ // the system PTE pool. The system PTE pool contains non-paged PTEs
+ // which are currently empty.
+ //
+
+
+ //
+ // Check to make sure the physical pages are available.
+ //
+
+ if (MmResidentAvailablePages <= (LONG)NumberOfPages) {
+ UNLOCK_PFN (OldIrql);
+ MiReturnCommitment (NumberOfPtes);
+ return NULL;
+ }
+
+ MmKernelStackPages += NumberOfPtes + 1 + (MM_STACK_ALIGNMENT?1:0);
+ PointerPte = MiReserveSystemPtes (NumberOfPtes + 1 + (MM_STACK_ALIGNMENT?1:0),
+ SystemPteSpace,
+ MM_STACK_ALIGNMENT,
+ MM_STACK_OFFSET,
+ FALSE);
+ if (PointerPte == NULL) {
+ UNLOCK_PFN (OldIrql);
+ MiReturnCommitment (NumberOfPtes);
+ return NULL;
+ }
+
+ MmResidentAvailablePages -= NumberOfPages;
+ PointerPte += (NumberOfPtes - NumberOfPages);
+
+ for (i=0; i < NumberOfPages; i++) {
+ PointerPte += 1;
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+
+ PointerPte->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+//fixfix (see mmfault as well.)
+ PointerPte->u.Soft.Protection = 31;
+// end fixfix
+ MiInitializePfn (PageFrameIndex, PointerPte, 1);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ MM_READWRITE,
+ PointerPte );
+ MI_SET_PTE_DIRTY (TempPte);
+
+ *PointerPte = TempPte;
+ }
+ MmProcessCommit += NumberOfPtes;
+ MmKernelStackResident += NumberOfPages;
+ MmLargeStacks += LargeStack;
+ MmSmallStacks += !LargeStack;
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ PointerPte += 1;
+ StackVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+#if DBG
+ {
+ PULONG p;
+ ULONG i;
+
+ p = (PULONG)((ULONG)StackVa - (NumberOfPages * PAGE_SIZE));
+ i = (NumberOfPages * PAGE_SIZE) >> 2;
+ while(i--) {
+ *p++ = 0x12345678;
+ }
+
+ }
+#endif // DBG
+
+ return StackVa;
+}
+
+VOID
+MmDeleteKernelStack (
+ IN PVOID PointerKernelStack,
+ IN BOOLEAN LargeStack
+ )
+
+/*++
+
+Routine Description:
+
+ This routine deletes a kernel stack and the no-access page within
+    the non-pageable portion of the system address space.
+
+Arguments:
+
+ PointerKernelStack - Supplies a pointer to the base of the kernel stack.
+
+ LargeStack - Supplies the value TRUE if a large stack is being deleted.
+ FALSE if a small stack is to be deleted.
+
+Return Value:
+
+ None.
+
+Environment:
+
+    Kernel mode. APCs disabled.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ ULONG NumberOfPages = 0;
+ ULONG NumberOfPtes;
+ ULONG PageFrameIndex;
+ ULONG i;
+ KIRQL OldIrql;
+ MMPTE PteContents;
+
+ if (LargeStack) {
+ NumberOfPtes = BYTES_TO_PAGES (KERNEL_LARGE_STACK_SIZE);
+ } else {
+ NumberOfPtes = BYTES_TO_PAGES (KERNEL_STACK_SIZE);
+ }
+
+ PointerPte = MiGetPteAddress (PointerKernelStack);
+
+ //
+ // PointerPte points to the guard page, point to the previous
+ // page before removing physical pages.
+ //
+
+ PointerPte -= 1;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Check to see if the stack page should be placed on the dead
+ // kernel stack page list. The dead kernel stack list is a
+ // singly linked list of kernel stacks from terminated threads.
+ // The stacks are saved on a linked list up to a maximum number
+ // to avoid the overhead of flushing the entire TB on all processors
+    // every time a thread terminates.  The TB on all processors must
+ // be flushed as kernel stacks reside in the non paged system part
+ // of the address space.
+ //
+
+ if ((!LargeStack) &&
+ (MmNumberDeadKernelStacks < MmMaximumDeadKernelStacks)) {
+
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+#if DBG
+ {
+ ULONG i = MmNumberDeadKernelStacks;
+ PMMPFN PfnList = MmFirstDeadKernelStack;
+
+ while (i > 0) {
+ i--;
+ if ((PfnList != MmKstacks[i].Pfn) ||
+ (PfnList->PteAddress != MmKstacks[i].Pte)) {
+ DbgPrint("MMPROCSUP: kstacks %lx %ld. %lx\n",
+ PfnList, i, MmKstacks[i].Pfn);
+ DbgBreakPoint();
+ }
+ PfnList = PfnList->u1.NextStackPfn;
+ }
+ MmKstacks[MmNumberDeadKernelStacks].Pte = Pfn1->PteAddress;
+ MmKstacks[MmNumberDeadKernelStacks].Pfn = Pfn1;
+ }
+#endif //DBG
+
+ MmNumberDeadKernelStacks += 1;
+ Pfn1->u1.NextStackPfn = MmFirstDeadKernelStack;
+ MmFirstDeadKernelStack = Pfn1;
+
+ UNLOCK_PFN (OldIrql);
+
+ return;
+ }
+
+ //
+ // We have exceeded the limit of dead kernel stacks or this is a large
+ // stack, delete this kernel stack.
+ //
+
+ for (i=0; i < NumberOfPtes; i++) {
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+ PageFrameIndex = PteContents.u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+
+ //
+ // Set the pointer to PTE as empty so the page
+ // is deleted when the reference count goes to zero.
+ //
+
+ MI_SET_PFN_DELETED (Pfn1);
+ MiDecrementShareCountOnly (PteContents.u.Hard.PageFrameNumber);
+ NumberOfPages += 1;
+ }
+ PointerPte -= 1;
+ }
+
+ MmKernelStackPages -= NumberOfPtes + 1 + (MM_STACK_ALIGNMENT?1:0);
+ MiReleaseSystemPtes (PointerPte,
+ NumberOfPtes + 1 + (MM_STACK_ALIGNMENT?1:0),
+ SystemPteSpace);
+
+ //
+ // Update the count of available resident pages.
+ //
+
+ MmKernelStackResident -= NumberOfPages;
+ MmResidentAvailablePages += NumberOfPages;
+ MmProcessCommit -= NumberOfPtes;
+
+ MmLargeStacks -= LargeStack;
+ MmSmallStacks -= !LargeStack;
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Return commitment.
+ //
+
+ MiReturnCommitment (NumberOfPtes);
+
+ return;
+}
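+
+// Editor's note: the dead kernel stack cache above is a LIFO threaded
+// through the PFN elements; a minimal sketch of the push and pop steps
+// (both performed under the PFN lock in the code above):
+#if 0
+    // Push, from MmDeleteKernelStack (Pfn1 maps the top stack page):
+    Pfn1->u1.NextStackPfn = MmFirstDeadKernelStack;
+    MmFirstDeadKernelStack = Pfn1;
+    MmNumberDeadKernelStacks += 1;
+
+    // Pop, from MmCreateKernelStack (reuse the most recently freed stack):
+    PointerPte = MmFirstDeadKernelStack->PteAddress;
+    MmFirstDeadKernelStack = MmFirstDeadKernelStack->u1.NextStackPfn;
+    MmNumberDeadKernelStacks -= 1;
+#endif //0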
+
+
+NTSTATUS
+MmGrowKernelStack (
+ IN PVOID CurrentStack
+ )
+
+/*++
+
+Routine Description:
+
+    This function attempts to grow the current thread's kernel stack
+    so that there are always KERNEL_LARGE_STACK_COMMIT bytes below
+ the current stack pointer.
+
+Arguments:
+
+ CurrentStack - Supplies a pointer to the current stack pointer.
+
+Return Value:
+
+ STATUS_SUCCESS is returned if the stack was grown,
+ STATUS_STACK_OVERFLOW is returned if there was not enough space reserved
+ for the commitment.
+
+--*/
+
+{
+ PMMPTE NewLimit;
+ PMMPTE StackLimit;
+ PMMPTE EndStack;
+ PETHREAD Thread;
+ ULONG NumberOfPages = 0;
+ KIRQL OldIrql;
+ ULONG PageFrameIndex;
+ MMPTE TempPte;
+
+ Thread = PsGetCurrentThread ();
+ ASSERT (((PCHAR)Thread->Tcb.StackBase - (PCHAR)Thread->Tcb.StackLimit) <=
+ (KERNEL_LARGE_STACK_SIZE + PAGE_SIZE));
+ NewLimit = MiGetPteAddress ((PVOID)((PUCHAR)CurrentStack -
+ KERNEL_LARGE_STACK_COMMIT));
+
+ StackLimit = MiGetPteAddress (Thread->Tcb.StackLimit);
+
+ //
+    // If the new stack limit exceeds the reserved region for the kernel
+ // stack, then return an error.
+ //
+
+ EndStack = MiGetPteAddress ((PVOID)((PUCHAR)Thread->Tcb.StackBase -
+ KERNEL_LARGE_STACK_SIZE));
+
+ if (NewLimit < EndStack) {
+
+ //
+ // Don't go into guard page.
+ //
+
+ return STATUS_STACK_OVERFLOW;
+
+ }
+
+ ASSERT (StackLimit->u.Hard.Valid == 1);
+
+ //
+ // Lock the PFN database and attempt to expand the kernel stack.
+ //
+
+ StackLimit -= 1;
+
+ LOCK_PFN (OldIrql);
+
+ while (StackLimit >= NewLimit) {
+
+ ASSERT (StackLimit->u.Hard.Valid == 0);
+
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+ PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (StackLimit));
+ StackLimit->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+//fixfix (see mmfault as well.)
+ StackLimit->u.Soft.Protection = 31;
+// end fixfix
+ MiInitializePfn (PageFrameIndex, StackLimit, 1);
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ MM_READWRITE,
+ StackLimit );
+
+ MI_SET_PTE_DIRTY (TempPte);
+ *StackLimit = TempPte;
+ NumberOfPages += 1;
+ StackLimit -= 1;
+ }
+
+ MmKernelStackResident += NumberOfPages;
+ MmResidentAvailablePages -= NumberOfPages;
+ UNLOCK_PFN (OldIrql);
+
+ ASSERT (NewLimit->u.Hard.Valid == 1);
+ ASSERT ((NewLimit - 1)->u.Hard.Valid == 0);
+ Thread->Tcb.StackLimit = MiGetVirtualAddressMappedByPte (NewLimit);
+
+ return STATUS_SUCCESS;
+}
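+
+// Editor's note: an illustrative sketch (not part of the original
+// module) of the intended caller - a stack-probe path that detects
+// less than KERNEL_LARGE_STACK_COMMIT bytes of headroom and asks Mm
+// to commit more; the surrounding names are hypothetical.
+#if 0
+    if (MmGrowKernelStack (CurrentStackPointer) != STATUS_SUCCESS) {
+        // The reserved KERNEL_LARGE_STACK_SIZE region is exhausted.
+        return STATUS_STACK_OVERFLOW;
+    }
+#endif //0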
+
+
+VOID
+MmOutPageKernelStack (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This routine makes the specified kernel stack non-resident and
+ puts the pages on the transition list. Note, that if the
+ CurrentStackPointer is within the first page of the stack, the
+ contents of the second page of the stack is no useful and the
+ page is freed.
+
+Arguments:
+
+ Thread - Supplies a pointer to the thread whose stack should be
+ removed.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+#define MAX_STACK_PAGES (KERNEL_LARGE_STACK_SIZE / PAGE_SIZE)
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE EndOfStackPte;
+ PMMPFN Pfn1;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+ MMPTE TempPte;
+ PVOID BaseOfKernelStack;
+ PMMPTE FlushPte[MAX_STACK_PAGES];
+ PVOID FlushVa[MAX_STACK_PAGES];
+ MMPTE FlushPteSave[MAX_STACK_PAGES];
+ ULONG StackSize;
+ ULONG Count;
+
+ ASSERT (((PCHAR)Thread->StackBase - (PCHAR)Thread->StackLimit) <=
+ (KERNEL_LARGE_STACK_SIZE + PAGE_SIZE));
+
+ if (NtGlobalFlag & FLG_DISABLE_PAGE_KERNEL_STACKS) {
+ return;
+ }
+
+ //
+ // The first page of the stack is the page before the base
+ // of the stack.
+ //
+
+ BaseOfKernelStack = (PVOID)((ULONG)Thread->StackBase - PAGE_SIZE);
+ PointerPte = MiGetPteAddress (BaseOfKernelStack);
+ LastPte = MiGetPteAddress ((PULONG)Thread->KernelStack - 1);
+ if (Thread->LargeStack) {
+ StackSize = KERNEL_LARGE_STACK_SIZE >> PAGE_SHIFT;
+ } else {
+ StackSize = KERNEL_STACK_SIZE >> PAGE_SHIFT;
+ }
+ EndOfStackPte = PointerPte - StackSize;
+
+ //
+ // Put a signature at the current stack location - 4.
+ //
+
+ *((PULONG)Thread->KernelStack - 1) = (ULONG)Thread;
+
+ Count = 0;
+
+ LOCK_PFN (OldIrql);
+
+ do {
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ TempPte = *PointerPte;
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte, 0);
+//fixfix (see mmfault as well.)
+ TempPte.u.Soft.Protection = 31;
+ {
+ PMMPFN x;
+ x = MI_PFN_ELEMENT(PageFrameIndex);
+ x->OriginalPte.u.Soft.Protection = 31;
+ }
+// end fixfix
+ FlushPteSave[Count] = TempPte;
+ FlushPte[Count] = PointerPte;
+ FlushVa[Count] = BaseOfKernelStack;
+
+ MiDecrementShareCount (PageFrameIndex);
+ PointerPte -= 1;
+ Count += 1;
+ BaseOfKernelStack = (PVOID)((ULONG)BaseOfKernelStack - PAGE_SIZE);
+ } while (PointerPte >= LastPte);
+
+ while (PointerPte != EndOfStackPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ break;
+ }
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+ MI_SET_PFN_DELETED (Pfn1);
+ MiDecrementShareCountOnly (PointerPte->u.Hard.PageFrameNumber);
+
+ FlushPteSave[Count] = KernelDemandZeroPte;
+//fixfix (see mmfault as well.)
+ FlushPteSave[Count].u.Soft.Protection = 31;
+// end fixfix
+ FlushPte[Count] = PointerPte;
+
+ FlushVa[Count] = BaseOfKernelStack;
+ Count += 1;
+ MmResidentAvailablePages += 1;
+
+ PointerPte -= 1;
+ BaseOfKernelStack = (PVOID)((ULONG)BaseOfKernelStack - PAGE_SIZE);
+ }
+
+ ASSERT (Count <= MAX_STACK_PAGES);
+
+ if (Count < MM_MAXIMUM_FLUSH_COUNT) {
+ KeFlushMultipleTb (Count,
+ &FlushVa[0],
+ TRUE,
+ TRUE,
+ &((PHARDWARE_PTE)FlushPte[0]),
+ ZeroPte.u.Flush);
+ } else {
+ KeFlushEntireTb (TRUE, TRUE);
+ }
+
+ //
+    // Increase the available pages by the number of pages that were
+ // deleted and turned into demand zero.
+ //
+
+ MmKernelStackResident -= Count;
+
+ //
+ // Put the right contents back into the PTEs
+ //
+
+ do {
+ Count -= 1;
+ *FlushPte[Count] = FlushPteSave[Count];
+ } while (Count != 0);
+
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
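+
+// Editor's note: the TB flush above is batched - when Count is below
+// MM_MAXIMUM_FLUSH_COUNT the stack pages are flushed individually with
+// KeFlushMultipleTb, otherwise a single KeFlushEntireTb is cheaper than
+// broadcasting many per-entry flushes to every processor.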
+
+VOID
+MmInPageKernelStack (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This routine makes the specified kernel stack resident.
+
+Arguments:
+
+    Thread - Supplies a pointer to the thread whose stack should be
+        made resident.
+
+Return Value:
+
+    None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PVOID BaseOfKernelStack;
+ PMMPTE PointerPte;
+ PMMPTE EndOfStackPte;
+ ULONG Temp;
+ ULONG ContainingPage;
+ KIRQL OldIrql;
+
+ ASSERT (((PCHAR)Thread->StackBase - (PCHAR)Thread->StackLimit) <=
+ (KERNEL_LARGE_STACK_SIZE + PAGE_SIZE));
+
+ if (NtGlobalFlag & FLG_DISABLE_PAGE_KERNEL_STACKS) {
+ return;
+ }
+
+ //
+ // The first page of the stack is the page before the base
+ // of the stack.
+ //
+
+ if (Thread->LargeStack) {
+ PointerPte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->StackLimit));
+
+ EndOfStackPte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->InitialStack -
+ KERNEL_LARGE_STACK_COMMIT));
+ //
+ // Trim back the stack. Make sure that the stack does not grow, i.e.
+ // StackLimit remains the limit.
+ //
+
+ if (EndOfStackPte < PointerPte) {
+ EndOfStackPte = PointerPte;
+ }
+ Thread->StackLimit = MiGetVirtualAddressMappedByPte (EndOfStackPte);
+ } else {
+ EndOfStackPte = MiGetPteAddress (Thread->StackLimit);
+ }
+
+ BaseOfKernelStack = (PVOID)((ULONG)Thread->StackBase - PAGE_SIZE);
+ PointerPte = MiGetPteAddress (BaseOfKernelStack);
+
+ LOCK_PFN (OldIrql);
+ while (PointerPte >= EndOfStackPte) {
+
+
+//fixfix (see mmfault as well.)
+ if (!((PointerPte->u.Long == KernelDemandZeroPte.u.Long) ||
+ (PointerPte->u.Soft.Protection == 31))) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x3451,
+ (ULONG)PointerPte,
+ (ULONG)Thread,
+ 0);
+ }
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+ if (PointerPte->u.Soft.Protection == 31) {
+ PointerPte->u.Soft.Protection = PAGE_READWRITE;
+ }
+// end fixfix
+ ContainingPage = (MiGetPteAddress (PointerPte))->u.Hard.PageFrameNumber;
+ if (PointerPte->u.Long == MM_KERNEL_DEMAND_ZERO_PTE) {
+ MmResidentAvailablePages -= 1;
+ }
+ MiMakeOutswappedPageResident (PointerPte,
+ PointerPte,
+ 1,
+ ContainingPage,
+ &Temp);
+ PointerPte -= 1;
+ MmKernelStackResident += 1;
+ }
+
+ //
+ // Check the signature at the current stack location - 4.
+ //
+
+ if (*((PULONG)Thread->KernelStack - 1) != (ULONG)Thread) {
+ KeBugCheckEx (KERNEL_STACK_INPAGE_ERROR,
+ 0,
+ *((PULONG)Thread->KernelStack - 1),
+ 0,
+ (ULONG)Thread->KernelStack);
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return;
+}
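+
+// Editor's note: this check pairs with the signature that
+// MmOutPageKernelStack wrote at (PULONG)KernelStack - 1; if the value
+// read back is not the thread address, the stack page came back from
+// the paging file corrupt, which is unrecoverable, hence the
+// KERNEL_STACK_INPAGE_ERROR bugcheck.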
+
+VOID
+MmOutSwapProcess (
+ IN PKPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine out swaps the specified process.
+
+Arguments:
+
+ Process - Supplies a pointer to the process that is swapped out
+ of memory.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ KIRQL OldIrql2;
+ PEPROCESS OutProcess;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ ULONG HyperSpacePageTable;
+ PMMPTE HyperSpacePageTableMap;
+ ULONG PdePage;
+ PMMPTE PageDirectoryMap;
+ ULONG ProcessPage;
+ MMPTE TempPte;
+
+ OutProcess = CONTAINING_RECORD( Process,
+ EPROCESS,
+ Pcb);
+
+ OutProcess->ProcessOutswapEnabled = TRUE;
+
+#if DBG
+ if ((MmDebug & MM_DBG_SWAP_PROCESS) != 0) {
+ return;
+ }
+#endif //DBG
+
+ if ((OutProcess->Vm.WorkingSetSize == 3) &&
+ (OutProcess->Vm.AllowWorkingSetAdjustment)) {
+
+ //
+ // Swap the process working set info and page directory from
+ // memory.
+ //
+
+ LOCK_EXPANSION_IF_ALPHA (OldIrql);
+ ASSERT (OutProcess->ProcessOutswapped == FALSE);
+ OutProcess->ProcessOutswapped = TRUE;
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql);
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Remove working set list page from the process.
+ //
+
+ HyperSpacePageTable =
+ ((PHARDWARE_PTE)(&(OutProcess->Pcb.DirectoryTableBase[1])))->PageFrameNumber;
+ HyperSpacePageTableMap = MiMapPageInHyperSpace (HyperSpacePageTable, &OldIrql2);
+
+ TempPte = HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)];
+
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ MM_READWRITE);
+
+ HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)] = TempPte;
+ MiUnmapPageInHyperSpace (OldIrql2);
+
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT (OutProcess->WorkingSetPage);
+ ASSERT (Pfn1->u3.e1.Modified == 1);
+#endif
+ MiDecrementShareCount (OutProcess->WorkingSetPage);
+
+ //
+ // Remove the hyper space page from the process.
+ //
+
+ PdePage =
+ ((PHARDWARE_PTE)(&(OutProcess->Pcb.DirectoryTableBase[0])))->PageFrameNumber;
+ PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2);
+
+ TempPte = PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)];
+
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ MM_READWRITE);
+
+ PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)] = TempPte;
+
+#if DBG
+ Pfn1 = MI_PFN_ELEMENT (HyperSpacePageTable);
+ ASSERT (Pfn1->u3.e1.Modified == 1);
+#endif
+
+ MiDecrementShareCount (HyperSpacePageTable);
+
+ //
+ // Remove the page directory page.
+ //
+
+ TempPte = PageDirectoryMap[MiGetPdeOffset(PDE_BASE)];
+
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ MM_READWRITE);
+
+ PageDirectoryMap[MiGetPdeOffset(PDE_BASE)] = TempPte;
+ MiUnmapPageInHyperSpace (OldIrql2);
+
+ Pfn1 = MI_PFN_ELEMENT (PdePage);
+
+ //
+ // Decrement share count so page directory page gets removed.
+ // This can cause the PteCount to equal the sharecount as the
+ // page directory page no longer contains itself, yet can have
+ // itself as a transition page.
+ //
+
+ Pfn1->u2.ShareCount -= 2;
+ Pfn1->PteAddress = (PMMPTE)&OutProcess->PageDirectoryPte;
+
+ OutProcess->PageDirectoryPte = TempPte.u.Flush;
+
+ if (MI_IS_PHYSICAL_ADDRESS(OutProcess)) {
+ ProcessPage = MI_CONVERT_PHYSICAL_TO_PFN (OutProcess);
+ } else {
+ PointerPte = MiGetPteAddress (OutProcess);
+ ProcessPage = PointerPte->u.Hard.PageFrameNumber;
+ }
+
+ Pfn1->PteFrame = ProcessPage;
+ Pfn1 = MI_PFN_ELEMENT (ProcessPage);
+
+ //
+ // Increment the share count for the process page.
+ //
+
+ Pfn1->u2.ShareCount += 1;
+ UNLOCK_PFN (OldIrql);
+
+ LOCK_EXPANSION (OldIrql);
+ if (OutProcess->Vm.WorkingSetExpansionLinks.Flink >
+ MM_IO_IN_PROGRESS) {
+
+ //
+ // The entry must be on the list.
+ //
+ RemoveEntryList (&OutProcess->Vm.WorkingSetExpansionLinks);
+ OutProcess->Vm.WorkingSetExpansionLinks.Flink = MM_WS_SWAPPED_OUT;
+ }
+ UNLOCK_EXPANSION (OldIrql);
+
+ OutProcess->WorkingSetPage = 0;
+ OutProcess->Vm.WorkingSetSize = 0;
+#if defined(_PPC_)
+
+ //
+ // Force assignment of new PID as we have removed
+ // the page directory page.
+ // Note that a TB flush would not work here as we
+ // are in the wrong process context.
+ //
+
+ Process->ProcessSequence = 0;
+#endif // _PPC_
+
+ }
+ return;
+}
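+
+// Editor's note: the WorkingSetSize == 3 test above means the process
+// is already trimmed to its minimal footprint - the page directory
+// page, the hyperspace page table page, and the working set list page -
+// which are exactly the three pages this routine converts to
+// transition so the whole process can leave memory.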
+
+VOID
+MmInSwapProcess (
+ IN PKPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine in swaps the specified process.
+
+Arguments:
+
+ Process - Supplies a pointer to the process that is to be swapped
+ into memory.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ KIRQL OldIrql2;
+ PEPROCESS OutProcess;
+ ULONG PdePage;
+ PMMPTE PageDirectoryMap;
+ MMPTE TempPte;
+ ULONG HyperSpacePageTable;
+ PMMPTE HyperSpacePageTableMap;
+ ULONG WorkingSetPage;
+ PMMPFN Pfn1;
+ PMMPTE PointerPte;
+ ULONG ProcessPage;
+ ULONG Transition;
+
+ OutProcess = CONTAINING_RECORD( Process,
+ EPROCESS,
+ Pcb);
+
+ if (OutProcess->ProcessOutswapped != FALSE) {
+
+ //
+        // The process was swapped out of memory; rebuild the page
+        // directory, hyperspace, and working set list pages.
+ //
+
+ if (MI_IS_PHYSICAL_ADDRESS(OutProcess)) {
+ ProcessPage = MI_CONVERT_PHYSICAL_TO_PFN (OutProcess);
+ } else {
+ PointerPte = MiGetPteAddress (OutProcess);
+ ProcessPage = PointerPte->u.Hard.PageFrameNumber;
+ }
+
+ LOCK_PFN (OldIrql);
+ PdePage = MiMakeOutswappedPageResident (MiGetPteAddress (PDE_BASE),
+ (PMMPTE)&OutProcess->PageDirectoryPte,
+ 0,
+ ProcessPage,
+ &Transition);
+
+ //
+ // Adjust the counts for the process page.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (ProcessPage);
+ Pfn1->u2.ShareCount -= 1;
+
+ ASSERT ((LONG)Pfn1->u2.ShareCount >= 1);
+
+ //
+ // Adjust the counts properly for the Page directory page.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PdePage);
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u1.WsIndex = (ULONG)OutProcess;
+ Pfn1->PteFrame = PdePage;
+ Pfn1->PteAddress = MiGetPteAddress (PDE_BASE);
+
+ //
+ // Locate the page table page for hyperspace and make it resident.
+ //
+
+ PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2);
+
+ TempPte = PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)];
+ MiUnmapPageInHyperSpace (OldIrql2);
+
+ HyperSpacePageTable = MiMakeOutswappedPageResident (
+ MiGetPdeAddress (HYPER_SPACE),
+ &TempPte,
+ 0,
+ PdePage,
+ &Transition);
+
+ ASSERT (Pfn1->u2.ShareCount >= 3);
+
+ PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2);
+ PageDirectoryMap[MiGetPdeOffset(PDE_BASE)].u.Flush =
+ OutProcess->PageDirectoryPte;
+ PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)] = TempPte;
+
+ MiUnmapPageInHyperSpace (OldIrql2);
+
+ //
+        // Map in the hyper space page table page and retrieve the
+ // PTE that maps the working set list.
+ //
+
+ HyperSpacePageTableMap = MiMapPageInHyperSpace (HyperSpacePageTable, &OldIrql2);
+ TempPte = HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)];
+ MiUnmapPageInHyperSpace (OldIrql2);
+ Pfn1 = MI_PFN_ELEMENT (HyperSpacePageTable);
+
+ Pfn1->u1.WsIndex = 1;
+
+ WorkingSetPage = MiMakeOutswappedPageResident (
+ MiGetPteAddress (MmWorkingSetList),
+ &TempPte,
+ 0,
+ HyperSpacePageTable,
+ &Transition);
+
+ HyperSpacePageTableMap = MiMapPageInHyperSpace (HyperSpacePageTable, &OldIrql2);
+ HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)] = TempPte;
+ MiUnmapPageInHyperSpace (OldIrql2);
+
+ Pfn1 = MI_PFN_ELEMENT (WorkingSetPage);
+
+ Pfn1->u1.WsIndex = 2;
+
+ UNLOCK_PFN (OldIrql);
+
+ LOCK_EXPANSION (OldIrql);
+
+ //
+ // Allow working set trimming on this process.
+ //
+
+ OutProcess->Vm.AllowWorkingSetAdjustment = TRUE;
+ if (OutProcess->Vm.WorkingSetExpansionLinks.Flink == MM_WS_SWAPPED_OUT) {
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &OutProcess->Vm.WorkingSetExpansionLinks);
+ }
+ UNLOCK_EXPANSION (OldIrql);
+
+ //
+ // Set up process structures.
+ //
+
+ OutProcess->WorkingSetPage = WorkingSetPage;
+ OutProcess->Vm.WorkingSetSize = 3;
+
+ INITIALIZE_DIRECTORY_TABLE_BASE (&Process->DirectoryTableBase[0],
+ PdePage);
+ INITIALIZE_DIRECTORY_TABLE_BASE (&Process->DirectoryTableBase[1],
+ HyperSpacePageTable);
+
+ OutProcess->ProcessOutswapped = FALSE;
+ }
+ OutProcess->ProcessOutswapEnabled = FALSE;
+ return;
+}
+
+PVOID
+MiCreatePebOrTeb (
+ IN PEPROCESS TargetProcess,
+ IN ULONG Size
+ )
+
+/*++
+
+Routine Description:
+
+ This routine creates a TEB or PEB page within the target process.
+
+Arguments:
+
+ TargetProcess - Supplies a pointer to the process in which to create
+ the structure.
+
+    Size - Supplies the size of the structure to create a VAD for.
+
+Return Value:
+
+ Returns the address of the base of the newly created TEB or PEB.
+
+Environment:
+
+ Kernel mode, attached to the specified process.
+
+--*/
+
+{
+
+ PVOID Base;
+ PMMVAD Vad;
+
+ //
+ // Get the address creation mutex to block multiple threads from
+ // creating or deleting address space at the same time and
+ // get the working set mutex so virtual address descriptors can
+ // be inserted and walked.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (TargetProcess);
+
+ try {
+ Vad = (PMMVAD)NULL;
+
+ //
+        // Find a VA for the PEB or TEB on a page-size boundary.
+ //
+
+ Base = MiFindEmptyAddressRangeDown (
+ ROUND_TO_PAGES (Size),
+ (PVOID)((ULONG)MM_HIGHEST_VAD_ADDRESS + 1),
+ PAGE_SIZE);
+
+ //
+        // An unoccupied address range has been found, build the virtual
+ // address descriptor to describe this range.
+ //
+
+ Vad = (PMMVAD)ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MMVAD),
+ ' daV');
+ Vad->StartingVa = Base;
+ Vad->EndingVa = (PVOID)((ULONG)Base + ROUND_TO_PAGES (Size - 1) - 1);
+
+ Vad->u.LongFlags = 0;
+
+ Vad->u.VadFlags.CommitCharge = BYTES_TO_PAGES (Size);
+ Vad->u.VadFlags.MemCommit = 1;
+ Vad->u.VadFlags.PrivateMemory = 1;
+ Vad->u.VadFlags.Protection = MM_EXECUTE_READWRITE;
+
+ //
+ // Mark VAD as not deletable, no protection change.
+ //
+
+ Vad->u.VadFlags.NoChange = 1;
+ Vad->u2.LongFlags2 = 0;
+ Vad->u2.VadFlags2.OneSecured = 1;
+ Vad->u2.VadFlags2.StoredInVad = 1;
+ Vad->u2.VadFlags2.ReadOnly = 0;
+ Vad->u3.Secured.StartVa = Base;
+ Vad->u3.Secured.EndVa = Vad->EndingVa;
+
+ MiInsertVad (Vad);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+        // An exception occurred; if pool was allocated, deallocate
+ // it and raise an exception for the caller.
+ //
+
+ if (Vad != (PMMVAD)NULL) {
+ ExFreePool (Vad);
+ }
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+ ExRaiseStatus (GetExceptionCode ());
+ }
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+
+ return Base;
+}
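+
+// Editor's note: the NoChange/OneSecured bits set above are what later
+// cause NtProtectVirtualMemory (protect.c below) to route any attempted
+// protection change on a TEB or PEB range through MiCheckSecuredVad
+// before permitting it.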
+
+PTEB
+MmCreateTeb (
+ IN PEPROCESS TargetProcess,
+ IN PINITIAL_TEB InitialTeb,
+ IN PCLIENT_ID ClientId
+ )
+
+/*++
+
+Routine Description:
+
+ This routine creates a TEB page within the target process
+ and copies the initial TEB values into it.
+
+Arguments:
+
+ TargetProcess - Supplies a pointer to the process in which to create
+ and initialize the TEB.
+
+ InitialTeb - Supplies a pointer to the initial TEB to copy into the
+        newly created TEB.
+
+    ClientId - Supplies a pointer to the client id of the new thread;
+        it is copied into the TEB's ClientId and RealClientId fields.
+
+Return Value:
+
+ Returns the address of the base of the newly created TEB.
+
+ Can raise exceptions if no address space is available for the TEB or
+ the user has exceeded quota (non-paged, pagefile, commit).
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PTEB TebBase;
+
+ //
+ // If the specified process is not the current process, attach
+ // to the specified process.
+ //
+
+ KeAttachProcess (&TargetProcess->Pcb);
+
+ TebBase = (PTEB)MiCreatePebOrTeb (TargetProcess,
+ (ULONG)sizeof(TEB));
+
+ //
+ // Initialize the TEB.
+ //
+
+ TebBase->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
+ TebBase->NtTib.SubSystemTib = NULL;
+ TebBase->NtTib.Version = OS2_VERSION;
+ TebBase->NtTib.ArbitraryUserPointer = NULL;
+ TebBase->NtTib.Self = (PNT_TIB)TebBase;
+ TebBase->EnvironmentPointer = NULL;
+ TebBase->ProcessEnvironmentBlock = TargetProcess->Peb;
+ TebBase->ClientId = *ClientId;
+ TebBase->RealClientId = *ClientId;
+ if (InitialTeb->OldInitialTeb.OldStackBase == NULL &&
+ InitialTeb->OldInitialTeb.OldStackLimit == NULL
+ ) {
+ TebBase->NtTib.StackBase = InitialTeb->StackBase;
+ TebBase->NtTib.StackLimit = InitialTeb->StackLimit;
+ TebBase->DeallocationStack = InitialTeb->StackAllocationBase;
+    } else {
+ TebBase->NtTib.StackBase = InitialTeb->OldInitialTeb.OldStackBase;
+ TebBase->NtTib.StackLimit = InitialTeb->OldInitialTeb.OldStackLimit;
+ }
+ TebBase->StaticUnicodeString.Buffer = TebBase->StaticUnicodeBuffer;
+ TebBase->StaticUnicodeString.MaximumLength = (USHORT)sizeof( TebBase->StaticUnicodeBuffer );
+ TebBase->StaticUnicodeString.Length = (USHORT)0;
+
+ KeDetachProcess();
+ return TebBase;
+}
+
+PPEB
+MmCreatePeb (
+ IN PEPROCESS TargetProcess,
+ IN PINITIAL_PEB InitialPeb
+ )
+
+/*++
+
+Routine Description:
+
+ This routine creates a PEB page within the target process
+ and copies the initial PEB values into it.
+
+Arguments:
+
+ TargetProcess - Supplies a pointer to the process in which to create
+ and initialize the PEB.
+
+ InitialPeb - Supplies a pointer to the initial PEB to copy into the
+ newly created PEB.
+
+Return Value:
+
+ Returns the address of the base of the newly created PEB.
+
+ Can raise exceptions if no address space is available for the PEB or
+ the user has exceeded quota (non-paged, pagefile, commit).
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PPEB PebBase;
+ NTSTATUS Status;
+ PVOID ViewBase;
+ LARGE_INTEGER SectionOffset;
+ PIMAGE_NT_HEADERS NtHeaders;
+ ULONG ViewSize, ReturnedSize;
+ PIMAGE_LOAD_CONFIG_DIRECTORY ImageConfigData;
+
+ ViewBase = NULL;
+ SectionOffset.LowPart = 0;
+ SectionOffset.HighPart = 0;
+ ViewSize = 0;
+
+ //
+ // If the specified process is not the current process, attach
+ // to the specified process.
+ //
+
+ KeAttachProcess (&TargetProcess->Pcb);
+
+ //
+ // Map the NLS tables into the applications address space
+ //
+
+ Status = MmMapViewOfSection(
+ InitNlsSectionPointer,
+ TargetProcess,
+ &ViewBase,
+ 0L,
+ 0L,
+ &SectionOffset,
+ &ViewSize,
+ ViewShare,
+ MEM_TOP_DOWN | SEC_NO_CHANGE,
+ PAGE_READONLY
+ );
+
+ if ( !NT_SUCCESS(Status) ) {
+ KeDetachProcess();
+ ExRaiseStatus(Status);
+ }
+
+ PebBase = (PPEB)MiCreatePebOrTeb (TargetProcess,
+ (ULONG)sizeof( PEB ));
+
+ //
+ // Initialize the Peb.
+ //
+
+ PebBase->InheritedAddressSpace = InitialPeb->InheritedAddressSpace;
+ PebBase->Mutant = InitialPeb->Mutant;
+ PebBase->ImageBaseAddress = TargetProcess->SectionBaseAddress;
+
+ PebBase->AnsiCodePageData = (PVOID)((PUCHAR)ViewBase+InitAnsiCodePageDataOffset);
+ PebBase->OemCodePageData = (PVOID)((PUCHAR)ViewBase+InitOemCodePageDataOffset);
+ PebBase->UnicodeCaseTableData = (PVOID)((PUCHAR)ViewBase+InitUnicodeCaseTableDataOffset);
+
+ PebBase->NumberOfProcessors = KeNumberProcessors;
+ PebBase->BeingDebugged = (BOOLEAN)(TargetProcess->DebugPort != NULL ? TRUE : FALSE);
+ PebBase->NtGlobalFlag = NtGlobalFlag;
+ PebBase->CriticalSectionTimeout = MmCriticalSectionTimeout;
+ PebBase->HeapSegmentReserve = MmHeapSegmentReserve;
+ PebBase->HeapSegmentCommit = MmHeapSegmentCommit;
+ PebBase->HeapDeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
+ PebBase->HeapDeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
+ PebBase->NumberOfHeaps = 0;
+ PebBase->MaximumNumberOfHeaps = (PAGE_SIZE - sizeof( PEB )) / sizeof( PVOID );
+ PebBase->ProcessHeaps = (PVOID *)(PebBase+1);
+
+ PebBase->OSMajorVersion = NtMajorVersion;
+ PebBase->OSMinorVersion = NtMinorVersion;
+ PebBase->OSBuildNumber = NtBuildNumber & 0x3FFF;
+ PebBase->OSPlatformId = 2; // VER_PLATFORM_WIN32_NT from winbase.h
+
+ NtHeaders = RtlImageNtHeader( PebBase->ImageBaseAddress );
+ if (NtHeaders != NULL) {
+ PebBase->ImageSubsystem = NtHeaders->OptionalHeader.Subsystem;
+ PebBase->ImageSubsystemMajorVersion = NtHeaders->OptionalHeader.MajorSubsystemVersion;
+ PebBase->ImageSubsystemMinorVersion = NtHeaders->OptionalHeader.MinorSubsystemVersion;
+
+ //
+ // See if this image wants GetVersion to lie about who the system is
+ // If so, capture the lie into the PEB for the process.
+ //
+
+ if (NtHeaders->OptionalHeader.Win32VersionValue != 0) {
+ PebBase->OSMajorVersion = NtHeaders->OptionalHeader.Win32VersionValue & 0xFF;
+ PebBase->OSMinorVersion = (NtHeaders->OptionalHeader.Win32VersionValue >> 8) & 0xFF;
+ PebBase->OSBuildNumber = (NtHeaders->OptionalHeader.Win32VersionValue >> 16) & 0x3FFF;
+
+ //
+ // Win32 API GetVersion returns the following bogus bit definitions
+ // in the high two bits:
+ //
+ // 00 - Windows NT
+ // 01 - reserved
+ // 10 - Win32s running on Windows 3.x
+ // 11 - Windows 95
+ //
+ //
+ // Win32 API GetVersionEx returns a dwPlatformId with the following values
+ // defined in winbase.h
+ //
+ // 00 - VER_PLATFORM_WIN32s
+ // 01 - VER_PLATFORM_WIN32_WINDOWS
+ // 10 - VER_PLATFORM_WIN32_NT
+ // 11 - reserved
+ //
+ //
+ // So convert the former from the Win32VersionValue field into the OSPlatformId
+            // field.  This is done by XORing with 0x2.  The translation is symmetric so there
+ // is the same code to do the reverse in windows\base\client\module.c (GetVersion)
+ //
+
+ PebBase->OSPlatformId = (NtHeaders->OptionalHeader.Win32VersionValue >> 30) ^ 0x2;
+ }
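+
+        //
+        // Editor's example: for a Win32VersionValue of 0xC0000004 the
+        // low byte yields OSMajorVersion 4, bits 8-15 yield
+        // OSMinorVersion 0, bits 16-29 yield OSBuildNumber 0, and the
+        // top two bits (11b = 3, the GetVersion encoding for Windows 95)
+        // become 3 ^ 2 = 1, i.e. VER_PLATFORM_WIN32_WINDOWS, in
+        // OSPlatformId.
+        //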
+
+ if ( MmProductType == 0 ) {
+ if (NtHeaders->FileHeader.Characteristics & IMAGE_FILE_AGGRESIVE_WS_TRIM ) {
+ TargetProcess->MmAgressiveWsTrimMask = PS_WS_TRIM_FROM_EXE_HEADER;
+ }
+#if defined(_X86_)
+ if ( MmSystemSize == MmSmallSystem ) {
+ TargetProcess->MmAgressiveWsTrimMask |= PS_WS_TRIM_BACKGROUND_ONLY_APP;
+ }
+#endif // _X86_
+ }
+
+ //
+ // See if image wants to override the default processor affinity mask
+ //
+ if (NtHeaders->FileHeader.Characteristics & IMAGE_FILE_UP_SYSTEM_ONLY) {
+ //
+ // Image is NOT MP safe. So assign it a processor on a rotating
+ // basis to spread these processes around on MP systems
+ //
+ do {
+ PebBase->ImageProcessAffinityMask = (KAFFINITY)(0x1 << MmRotatingUniprocessorNumber);
+ if (++MmRotatingUniprocessorNumber >= KeNumberProcessors) {
+ MmRotatingUniprocessorNumber = 0;
+ }
+ } while ((PebBase->ImageProcessAffinityMask & KeActiveProcessors) == 0);
+ } else {
+ ImageConfigData = RtlImageDirectoryEntryToData( PebBase->ImageBaseAddress,
+ TRUE,
+ IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG,
+ &ReturnedSize
+ );
+ if (ImageConfigData != NULL && ImageConfigData->ProcessAffinityMask != 0) {
+ //
+ // Yes, get it and pass it up to LdrpInitializeProcess via the PEB
+ //
+ PebBase->ImageProcessAffinityMask = ImageConfigData->ProcessAffinityMask;
+ }
+ }
+ }
+
+ KeDetachProcess();
+ return PebBase;
+}
+
+VOID
+MmDeleteTeb (
+ IN PEPROCESS TargetProcess,
+ IN PVOID TebBase
+ )
+
+/*++
+
+Routine Description:
+
+    This routine deletes a TEB page within the target process.
+
+Arguments:
+
+ TargetProcess - Supplies a pointer to the process in which to delete
+ the TEB.
+
+ TebBase - Supplies the base address of the TEB to delete.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PVOID EndingAddress;
+ PMMVAD Vad;
+
+ EndingAddress = (PVOID)((ULONG)TebBase +
+ ROUND_TO_PAGES (sizeof(TEB)) - 1);
+
+ //
+ // Attach to the specified process.
+ //
+
+ KeAttachProcess (&TargetProcess->Pcb);
+
+ //
+ // Get the address creation mutex to block multiple threads from
+ // creating or deleting address space at the same time and
+ // get the working set mutex so virtual address descriptors can
+ // be inserted and walked.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (TargetProcess);
+
+ Vad = MiLocateAddress (TebBase);
+
+ ASSERT (Vad != (PMMVAD)NULL);
+
+ ASSERT ((Vad->StartingVa == TebBase) && (Vad->EndingVa == EndingAddress));
+
+ MiRemoveVad (Vad);
+ ExFreePool (Vad);
+
+ MiDeleteFreeVm (TebBase, EndingAddress);
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+ return;
+
+}
+
+VOID
+MmAllowWorkingSetExpansion (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine updates the working set list head FLINK field to
+ indicate that working set adjustment is allowed.
+
+ NOTE: This routine may be called more than once per process.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ PEPROCESS CurrentProcess;
+ KIRQL OldIrql;
+
+ //
+ // Check the current state of the working set adjustment flag
+ // in the process header.
+ //
+
+ CurrentProcess = PsGetCurrentProcess();
+
+ LOCK_EXPANSION (OldIrql);
+
+ if (!CurrentProcess->Vm.AllowWorkingSetAdjustment) {
+ CurrentProcess->Vm.AllowWorkingSetAdjustment = TRUE;
+
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &CurrentProcess->Vm.WorkingSetExpansionLinks);
+ }
+
+ UNLOCK_EXPANSION (OldIrql);
+ return;
+}
+
+
+VOID
+MiDeleteAddressesInWorkingSet (
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine deletes all user mode addresses from the working set
+ list.
+
+Arguments:
+
+ Process = Pointer to the current process.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, Working Set Lock held.
+
+--*/
+
+{
+ PMMWSLE Wsle;
+ ULONG index;
+ PVOID Va;
+ KIRQL OldIrql;
+#if DBG
+ ULONG LastEntry;
+ PMMWSLE LastWsle;
+#endif
+
+ //
+ // Go through the working set and for any page which is in the
+    // working set tree, rip it out of the tree by zeroing its
+ // link pointers and set the WasInTree bit to indicate that
+ // this has been done.
+ //
+
+ Wsle = &MmWsle[2];
+ index = 2;
+#if DBG
+ LastEntry = MmWorkingSetList->LastEntry;
+#endif
+ while (index <= MmWorkingSetList->LastEntry) {
+ if ((Wsle->u1.e1.Valid == 1) &&
+ (Wsle->u1.e1.Direct == 0)) {
+
+ if (Wsle->u1.VirtualAddress > (PVOID)MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // System space address, set the WasInTree bit.
+ //
+
+ ASSERT (Wsle->u1.VirtualAddress > (PVOID)PDE_TOP);
+ Wsle->u1.e1.WasInTree = 1;
+ }
+ }
+ index += 1;
+ Wsle += 1;
+ }
+
+ MmWorkingSetList->HashTable = NULL;
+
+ //
+ // Go through the working set list and remove all pages for user
+ // space addresses.
+ //
+
+ Wsle = &MmWsle[2];
+ index = 2;
+
+ ASSERT (LastEntry >= MmWorkingSetList->LastEntry);
+
+ while (index <= MmWorkingSetList->LastEntry) {
+ if (Wsle->u1.e1.Valid == 1) {
+
+ Va = Wsle->u1.VirtualAddress;
+ if (Wsle->u1.VirtualAddress < (PVOID)MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // This is a user mode address.
+ //
+
+ //
+ // This entry is in the working set list tree.
+ //
+
+ MiReleaseWsle (index, &Process->Vm);
+ LOCK_PFN (OldIrql);
+ MiDeleteValidAddress (Va, Process);
+ UNLOCK_PFN (OldIrql);
+ } else {
+
+ //
+ // If this entry was ripped out of the working set
+ // tree, put it back in.
+ //
+
+ if (Wsle->u1.e1.WasInTree == 1) {
+ Wsle->u1.e1.WasInTree = 0;
+ MiInsertWsle (index, MmWorkingSetList);
+ }
+ ASSERT (MiGetPteAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
+ }
+ }
+ index += 1;
+ Wsle += 1;
+ }
+#if DBG
+ Wsle = &MmWsle[2];
+ LastWsle = &MmWsle[MmWorkingSetList->LastInitializedWsle];
+ while (Wsle <= LastWsle) {
+ if (Wsle->u1.e1.Valid == 1) {
+ ASSERT (MiGetPteAddress(Wsle->u1.VirtualAddress)->u.Hard.Valid == 1);
+ }
+ Wsle += 1;
+ }
+#endif
+ return;
+}
+
+
+VOID
+MiDeleteValidAddress (
+ IN PVOID Va,
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ This routine deletes the specified virtual address.
+
+Arguments:
+
+ Va - Supplies the virtual address to delete.
+
+ CurrentProcess - Supplies the current process.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode. PFN LOCK HELD.
+
+--*/
+
+{
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PMMCLONE_BLOCK CloneBlock;
+ PMMCLONE_DESCRIPTOR CloneDescriptor;
+ ULONG PageFrameIndex;
+
+ PointerPte = MiGetPteAddress (Va);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ CloneDescriptor = NULL;
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+
+ CloneBlock = (PMMCLONE_BLOCK)Pfn1->PteAddress;
+
+ //
+ // Capture the state of the modified bit for this
+ // pte.
+ //
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
+
+ //
+ // Decrement the share and valid counts of the page table
+ // page which maps this PTE.
+ //
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiDecrementShareAndValidCount (PointerPde->u.Hard.PageFrameNumber);
+
+ //
+ // Decrement the share count for the physical page.
+ //
+
+ MiDecrementShareCount (PageFrameIndex);
+
+ //
+ // Check to see if this is a fork prototype PTE and if so
+ // update the clone descriptor address.
+ //
+
+ if (Va <= MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // Locate the clone descriptor within the clone tree.
+ //
+
+ CloneDescriptor = MiLocateCloneAddress ((PVOID)CloneBlock);
+ }
+ } else {
+
+ //
+ // This pte is a NOT a prototype PTE, delete the physical page.
+ //
+
+ //
+ // Decrement the share and valid counts of the page table
+ // page which maps this PTE.
+ //
+
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ //
+ // Decrement the share count for the physical page. As the page
+ // is private it will be put on the free list.
+ //
+
+ MiDecrementShareCountOnly (PageFrameIndex);
+
+ //
+ // Decrement the count for the number of private pages.
+ //
+
+ CurrentProcess->NumberOfPrivatePages -= 1;
+ }
+
+ //
+ // Set the pointer to PTE to be a demand zero PTE. This allows
+ // the page usage count to be kept properly and handles the case
+ // when a page table page has only valid ptes and needs to be
+ // deleted later when the VADs are removed.
+ //
+
+ PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+
+ if (CloneDescriptor != NULL) {
+
+ //
+ // Decrement the reference count for the clone block,
+ // note that this could release and reacquire
+ // the mutexes hence cannot be done until after the
+ // working set index has been removed.
+ //
+
+ if (MiDecrementCloneBlockReference ( CloneDescriptor,
+ CloneBlock,
+ CurrentProcess )) {
+
+ }
+ }
+}
+
+ULONG
+MiMakeOutswappedPageResident (
+ IN PMMPTE ActualPteAddress,
+ IN OUT PMMPTE PointerTempPte,
+ IN ULONG Global,
+ IN ULONG ContainingPage,
+ OUT PULONG ActiveTransition
+ )
+
+/*++
+
+Routine Description:
+
+ This routine makes the specified PTE valid.
+
+Arguments:
+
+ ActualPteAddress - Supplies the actual address that the PTE will
+ reside at. This is used for page coloring.
+
+ PointerTempPte - Supplies the PTE to operate on, returns a valid
+ PTE.
+
+ Global - Supplies 1 if the resulting PTE is global.
+
+    ContainingPage - Supplies the physical page number of the page which
+ contains the resulting PTE. If this value is 0, no
+ operations on the containing page are performed.
+
+ ActiveTransition - Returns 1 if the in page operation was for a
+ transition page in the ActiveAndValid state.
+
+Return Value:
+
+ Returns the physical page number that was allocated for the PTE.
+
+Environment:
+
+ Kernel mode, PFN LOCK HELD!
+
+--*/
+
+{
+ MMPTE TempPte;
+ KIRQL OldIrql = APC_LEVEL;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ ULONG MdlHack[(sizeof(MDL)/4) + 2];
+ PMDL Mdl;
+ LARGE_INTEGER StartingOffset;
+ KEVENT Event;
+ IO_STATUS_BLOCK IoStatus;
+ ULONG PageFileNumber;
+ NTSTATUS Status;
+ PULONG Page;
+ ULONG RefaultCount;
+
+ MM_PFN_LOCK_ASSERT();
+
+ ASSERT (PointerTempPte->u.Hard.Valid == 0);
+
+ *ActiveTransition = 0;
+
+ if (PointerTempPte->u.Long == MM_KERNEL_DEMAND_ZERO_PTE) {
+
+ //
+ // Any page will do.
+ //
+
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (ActualPteAddress));
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ MM_READWRITE,
+ ActualPteAddress );
+ MI_SET_PTE_DIRTY (TempPte);
+ MI_SET_GLOBAL_STATE (TempPte, Global);
+
+ *PointerTempPte = TempPte;
+ MiInitializePfnForOtherProcess (PageFrameIndex,
+ ActualPteAddress,
+ ContainingPage);
+
+ } else if (PointerTempPte->u.Soft.Transition == 1) {
+
+ PageFrameIndex = PointerTempPte->u.Trans.PageFrameNumber;
+ PointerTempPte->u.Trans.Protection = MM_READWRITE;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ //
+ // PTE refers to a transition PTE.
+ //
+
+ if (Pfn1->u3.e1.PageLocation != ActiveAndValid) {
+ MiUnlinkPageFromList (Pfn1);
+ Pfn1->u3.e2.ReferenceCount += 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ } else {
+ *ActiveTransition = 1;
+ }
+
+ //
+ // Update the PFN database, the share count is now 1 and
+ // the reference count is incremented as the share count
+ // just went from zero to 1.
+ //
+
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e1.Modified = 1;
+ if (Pfn1->u3.e1.WriteInProgress == 0) {
+
+ //
+ // Release the page file space for this page.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ Pfn1->OriginalPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+ }
+
+ MI_MAKE_TRANSITION_PTE_VALID (TempPte, PointerTempPte);
+
+ MI_SET_PTE_DIRTY (TempPte);
+ MI_SET_GLOBAL_STATE (TempPte, Global);
+ *PointerTempPte = TempPte;
+
+ } else {
+
+ //
+ // Page resides in a paging file.
+ // Any page will do.
+ //
+
+ PointerTempPte->u.Soft.Protection = MM_READWRITE;
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+ PageFrameIndex = MiRemoveAnyPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (ActualPteAddress));
+
+ //
+ // Initialize the PFN database element, but don't
+ // set read in progress as collided page faults cannot
+ // occur here.
+ //
+
+ MiInitializePfnForOtherProcess (PageFrameIndex,
+ ActualPteAddress,
+ ContainingPage);
+
+ KeInitializeEvent (&Event, NotificationEvent, FALSE);
+
+ //
+ // Calculate the VBN for the in-page operation.
+ //
+
+ TempPte = *PointerTempPte;
+ PageFileNumber = GET_PAGING_FILE_NUMBER (TempPte);
+
+ StartingOffset.QuadPart = (LONGLONG)(GET_PAGING_FILE_OFFSET (TempPte)) <<
+ PAGE_SHIFT;
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ //
+ // Build MDL for request.
+ //
+
+ Mdl = (PMDL)&MdlHack[0];
+ MmInitializeMdl(Mdl,
+ MiGetVirtualAddressMappedByPte (ActualPteAddress),
+ PAGE_SIZE);
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+
+ Page = (PULONG)(Mdl + 1);
+ *Page = PageFrameIndex;
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Issue the read request.
+ //
+
+ RefaultCount = 0;
+
+Refault:
+ Status = IoPageRead ( MmPagingFile[PageFileNumber]->File,
+ Mdl,
+ &StartingOffset,
+ &Event,
+ &IoStatus
+ );
+
+ if (Status == STATUS_PENDING) {
+ KeWaitForSingleObject( &Event,
+ WrPageIn,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ if ((!NT_SUCCESS(Status)) || (!NT_SUCCESS(IoStatus.Status))) {
+ if ((IoStatus.Status == STATUS_INSUFFICIENT_RESOURCES) &&
+ (RefaultCount < 20)) {
+
+ //
+            // Insufficient resources; delay and reissue
+            // the in-page operation.
+ //
+
+ KeDelayExecutionThread (KernelMode,
+ FALSE,
+ &MmHalfSecond);
+ KeClearEvent (&Event);
+ RefaultCount += 1;
+ goto Refault;
+ }
+ KdPrint(("MMINPAGE: status %lx io-status %lx\n",
+ Status, IoStatus.Status));
+ KeBugCheckEx (KERNEL_STACK_INPAGE_ERROR,
+ Status,
+ IoStatus.Status,
+ PageFileNumber,
+ StartingOffset.LowPart);
+ }
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Release the page file space.
+ //
+
+ MiReleasePageFileSpace (TempPte);
+ Pfn1->OriginalPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PageFrameIndex,
+ MM_READWRITE,
+ ActualPteAddress );
+ MI_SET_PTE_DIRTY (TempPte);
+ Pfn1->u3.e1.Modified = 1;
+ MI_SET_GLOBAL_STATE (TempPte, Global);
+
+ *PointerTempPte = TempPte;
+ }
+ return PageFrameIndex;
+}
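+
+// Editor's note: the MdlHack array above is a common Mm idiom - a
+// single-page MDL built on the stack instead of allocated from pool:
+// sizeof(MDL)/4 ULONGs cover the MDL header and the two extra ULONGs
+// leave room for the page frame number array (one page here, stored at
+// (PULONG)(Mdl + 1), with a spare slot).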
+
+
+VOID
+MmSetMemoryPriorityProcess(
+ IN PEPROCESS Process,
+ IN UCHAR MemoryPriority
+ )
+
+/*++
+
+Routine Description:
+
+ Sets the memory priority of a process.
+
+Arguments:
+
+ Process - Supplies the process to update
+
+ MemoryPriority - Supplies the new memory priority of the process
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ PMMSUPPORT VmSupport;
+ ULONG i;
+ ULONG Trim;
+ UCHAR OldPriority;
+
+ if (MmSystemSize == MmSmallSystem && MmNumberOfPhysicalPages < ((15*1024*1024)/PAGE_SIZE)) {
+
+ //
+ // If this is a small system, make every process BACKGROUND.
+ //
+
+ MemoryPriority = MEMORY_PRIORITY_BACKGROUND;
+ }
+
+ LOCK_EXPANSION (OldIrql);
+
+ OldPriority = Process->Vm.MemoryPriority;
+ Process->Vm.MemoryPriority = MemoryPriority;
+
+ UNLOCK_EXPANSION (OldIrql);
+
+ if ((OldPriority > MemoryPriority) &&
+ (MmAvailablePages < MmMoreThanEnoughFreePages)) {
+
+ //
+ // The priority is being lowered, see if the working set
+ // should be trimmed.
+ //
+
+ VmSupport = &Process->Vm;
+ i = VmSupport->WorkingSetSize - VmSupport->MaximumWorkingSetSize;
+ if ((LONG)i > 0) {
+ Trim = i;
+ if (Trim > MmWorkingSetReductionMax) {
+ Trim = MmWorkingSetReductionMax;
+ }
+ if (Process != PsGetCurrentProcess()) {
+ KeAttachProcess (&Process->Pcb);
+ }
+ LOCK_WS (Process);
+
+ Trim = MiTrimWorkingSet (Trim,
+ VmSupport,
+ FALSE);
+
+ MmWorkingSetList->Quota = VmSupport->WorkingSetSize;
+ if (MmWorkingSetList->Quota < VmSupport->MinimumWorkingSetSize) {
+ MmWorkingSetList->Quota = VmSupport->MinimumWorkingSetSize;
+ }
+
+ UNLOCK_WS (Process);
+ KeDetachProcess ();
+ }
+ }
+ return;
+}
+
+
+#if 0
+VOID
+MiVerifyReferenceCounts (
+ IN ULONG PdePage
+ )
+
+ //
+ // Verify the share and valid PTE counts for page directory page.
+ //
+
+{
+ PMMPFN Pfn1;
+ PMMPFN Pfn3;
+ PMMPTE Pte1;
+ ULONG Share = 0;
+ ULONG Valid = 0;
+ ULONG i, ix, iy;
+ PMMPTE PageDirectoryMap;
+ KIRQL OldIrql;
+
+ PageDirectoryMap = (PMMPTE)MiMapPageInHyperSpace (PdePage, &OldIrql);
+ Pfn1 = MI_PFN_ELEMENT (PdePage);
+ Pte1 = (PMMPTE)PageDirectoryMap;
+
+ //
+ // Map in the non paged portion of the system.
+ //
+
+ ix = MiGetPdeOffset(CODE_START);
+
+ for (i=0;i < ix; i++ ) {
+ if (Pte1->u.Hard.Valid == 1) {
+ Valid += 1;
+ } else if ((Pte1->u.Soft.Prototype == 0) &&
+ (Pte1->u.Soft.Transition == 1)) {
+ Pfn3 = MI_PFN_ELEMENT (Pte1->u.Trans.PageFrameNumber);
+ if (Pfn3->u3.e1.PageLocation == ActiveAndValid) {
+ ASSERT (Pfn1->u2.ShareCount > 1);
+ Valid += 1;
+ } else {
+ Share += 1;
+ }
+ }
+ Pte1 += 1;
+ }
+
+ iy = MiGetPdeOffset(PTE_BASE);
+ Pte1 = &PageDirectoryMap[iy];
+ ix = MiGetPdeOffset(HYPER_SPACE_END) + 1;
+
+ for (i = iy; i < ix; i++) {
+ if (Pte1->u.Hard.Valid == 1) {
+ Valid += 1;
+ } else if ((Pte1->u.Soft.Prototype == 0) &&
+ (Pte1->u.Soft.Transition == 1)) {
+ Pfn3 = MI_PFN_ELEMENT (Pte1->u.Trans.PageFrameNumber);
+ if (Pfn3->u3.e1.PageLocation == ActiveAndValid) {
+ ASSERT (Pfn1->u2.ShareCount > 1);
+ Valid += 1;
+ } else {
+ Share += 1;
+ }
+ }
+ Pte1 += 1;
+ }
+
+ if (Pfn1->u2.ShareCount != (Share+Valid+1)) {
+ DbgPrint ("MMPROCSUP - PDE page %lx ShareCount %lx found %lx\n",
+ PdePage, Pfn1->u2.ShareCount, Valid+Share+1);
+ }
+
+ MiUnmapPageInHyperSpace (OldIrql);
+ ASSERT (Pfn1->u2.ShareCount == (Share+Valid+1));
+ return;
+}
+#endif //0
diff --git a/private/ntos/mm/protect.c b/private/ntos/mm/protect.c
new file mode 100644
index 000000000..6ab0eda1c
--- /dev/null
+++ b/private/ntos/mm/protect.c
@@ -0,0 +1,1861 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ protect.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtProtectVirtualMemory service.
+
+Author:
+
+ Lou Perazzoli (loup) 18-Aug-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if DBG
+PEPROCESS MmWatchProcess;
+VOID MmFooBar(VOID);
+#endif // DBG
+
+HARDWARE_PTE
+MiFlushTbAndCapture(
+ IN PMMPTE PtePointer,
+ IN HARDWARE_PTE TempPte,
+ IN PMMPFN Pfn1
+ );
+
+ULONG
+MiSetProtectionOnTransitionPte (
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask
+ );
+
+MMPTE
+MiCaptureSystemPte (
+ IN PMMPTE PointerProtoPte,
+ IN PEPROCESS Process
+ );
+
+
+extern CCHAR MmReadWrite[32];
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtProtectVirtualMemory)
+#pragma alloc_text(PAGE,MiProtectVirtualMemory)
+#pragma alloc_text(PAGE,MiSetProtectionOnSection)
+#pragma alloc_text(PAGE,MiGetPageProtection)
+#pragma alloc_text(PAGE,MiChangeNoAccessForkPte)
+#endif
+
+
+NTSTATUS
+NtProtectVirtualMemory(
+ IN HANDLE ProcessHandle,
+ IN OUT PVOID *BaseAddress,
+ IN OUT PULONG RegionSize,
+ IN ULONG NewProtect,
+ OUT PULONG OldProtect
+ )
+
+/*++
+
+Routine Description:
+
+ This routine changes the protection on a region of committed pages
+ within the virtual address space of the subject process. Setting
+    the protection on a range of pages causes the old protection to be
+ replaced by the specified protection value.
+
+Arguments:
+
+ ProcessHandle - An open handle to a process object.
+
+ BaseAddress - The base address of the region of pages
+ whose protection is to be changed. This value is
+ rounded down to the next host page address
+ boundary.
+
+ RegionSize - A pointer to a variable that will receive
+ the actual size in bytes of the protected region
+ of pages. The initial value of this argument is
+ rounded up to the next host page size boundary.
+
+ NewProtect - The new protection desired for the
+ specified region of pages.
+
+ Protect Values
+
+ PAGE_NOACCESS - No access to the specified region
+ of pages is allowed. An attempt to read,
+ write, or execute the specified region
+ results in an access violation (i.e. a GP
+ fault).
+
+ PAGE_EXECUTE - Execute access to the specified
+ region of pages is allowed. An attempt to
+ read or write the specified region results in
+ an access violation.
+
+ PAGE_READONLY - Read only and execute access to the
+ specified region of pages is allowed. An
+ attempt to write the specified region results
+ in an access violation.
+
+ PAGE_READWRITE - Read, write, and execute access to
+ the specified region of pages is allowed. If
+ write access to the underlying section is
+                      allowed, then a single copy of the pages is
+ shared. Otherwise the pages are shared read
+ only/copy on write.
+
+ PAGE_GUARD - Read, write, and execute access to the
+ specified region of pages is allowed,
+ however, access to the region causes a "guard
+ region entered" condition to be raised in the
+ subject process. If write access to the
+ underlying section is allowed, then a single
+                      copy of the pages is shared.  Otherwise the
+ pages are shared read only/copy on write.
+
+ PAGE_NOCACHE - The page should be treated as uncached.
+ This is only valid for non-shared pages.
+
+
+ OldProtect - A pointer to a variable that will receive
+ the old protection of the first page within the
+ specified region of pages.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+
+{
+ //
+    // note - special treatment for the following cases...
+ //
+ // if a page is locked in the working set (memory?) and the
+ // protection is changed to no access, the page should be
+ // removed from the working set... valid pages can't be no access.
+ //
+ // if page is going to be read only or no access? and is demand
+ // zero, make sure it is changed to a page of zeroes.
+ //
+ // update the vm spec to explain locked pages are unlocked when
+ // freed or protection is changed to no-access (this may be a nasty
+    // problem if we don't want to do this!!)
+ //
+
+ PEPROCESS Process;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+ ULONG Attached = FALSE;
+ PVOID CapturedBase;
+ ULONG CapturedRegionSize;
+ ULONG ProtectionMask;
+
+ ULONG LastProtect;
+
+ PAGED_CODE();
+
+ //
+ // Check the protection field. This could raise an exception.
+ //
+
+ try {
+ ProtectionMask = MiMakeProtectionMask (NewProtect);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ PreviousMode = KeGetPreviousMode();
+
+ if (PreviousMode != KernelMode) {
+
+ //
+ // Capture the region size and base address under an exception handler.
+ //
+
+ try {
+
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (RegionSize);
+ ProbeForWriteUlong (OldProtect);
+
+ //
+ // Capture the region size and base address.
+ //
+
+ CapturedBase = *BaseAddress;
+ CapturedRegionSize = *RegionSize;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+ } else {
+
+ //
+ // Capture the region size and base address.
+ //
+
+ CapturedRegionSize = *RegionSize;
+ CapturedBase = *BaseAddress;
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( !MmWatchProcess ) {
+ DbgPrint("protectvm process handle %lx base address %lx size %lx protect %lx\n",
+ ProcessHandle, CapturedBase, CapturedRegionSize, NewProtect);
+ }
+ }
+#endif
+
+ //
+ // Make sure the specified starting and ending addresses are
+ // within the user part of the virtual address space.
+ //
+
+ if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {
+
+ //
+ // Invalid base address.
+ //
+
+ return STATUS_INVALID_PARAMETER_2;
+ }
+
+ if ((ULONG)MM_HIGHEST_USER_ADDRESS - (ULONG)CapturedBase <
+ CapturedRegionSize) {
+
+ //
+ // Invalid region size;
+ //
+
+ return STATUS_INVALID_PARAMETER_3;
+ }
+
+ if (CapturedRegionSize == 0) {
+ return STATUS_INVALID_PARAMETER_3;
+ }
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ //
+ // If the specified process is not the current process, attach
+ // to the specified process.
+ //
+
+ if (PsGetCurrentProcess() != Process) {
+ KeAttachProcess (&Process->Pcb);
+ Attached = TRUE;
+ }
+
+ Status = MiProtectVirtualMemory (Process,
+ &CapturedBase,
+ &CapturedRegionSize,
+ NewProtect,
+ &LastProtect);
+
+
+ if (Attached) {
+ KeDetachProcess();
+ }
+
+ ObDereferenceObject (Process);
+
+ //
+ // Establish an exception handler and write the size and base
+ // address.
+ //
+
+ try {
+
+ //
+        // Reprobe the addresses as certain architectures (intel 386 for one)
+ // do not trap kernel writes. This is the one service which allows
+ // the protection of the page to change between the initial probe
+ // and the final argument update.
+ //
+
+ if (PreviousMode != KernelMode) {
+
+ ProbeForWriteUlong ((PULONG)BaseAddress);
+ ProbeForWriteUlong (RegionSize);
+ ProbeForWriteUlong (OldProtect);
+ }
+
+ *RegionSize = CapturedRegionSize;
+ *BaseAddress = CapturedBase;
+ *OldProtect = LastProtect;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ NOTHING;
+ }
+
+ return Status;
+}
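+
+// Editor's note: an illustrative sketch (not part of the original
+// module) of a typical call; the variables are hypothetical.
+#if 0
+    PVOID Base = SomeCommittedAddress;
+    ULONG Size = 0x1000;
+    ULONG OldProtect;
+    NTSTATUS Status;
+
+    Status = NtProtectVirtualMemory (NtCurrentProcess (),
+                                     &Base,
+                                     &Size,
+                                     PAGE_READONLY,
+                                     &OldProtect);
+    // On success, Base and Size come back rounded to page boundaries and
+    // OldProtect holds the previous protection of the first page.
+#endif //0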
+
+
+NTSTATUS
+MiProtectVirtualMemory (
+ IN PEPROCESS Process,
+ IN PVOID *BaseAddress,
+ IN PULONG RegionSize,
+ IN ULONG NewProtect,
+ IN PULONG LastProtect)
+
+/*++
+
+Routine Description:
+
+ This routine changes the protection on a region of committed pages
+ within the virtual address space of the subject process. Setting
+    the protection on a range of pages causes the old protection to be
+ replaced by the specified protection value.
+
+Arguments:
+
+ Process - Supplies a pointer to the current process.
+
+ BaseAddress - Supplies the starting address to protect.
+
+    RegionSize - Supplies the size of the region to protect.
+
+ NewProtect - Supplies the new protection to set.
+
+ LastProtect - Supplies the address of a kernel owned pointer to
+ store (without probing) the old protection into.
+
+
+Return Value:
+
+ the status of the protect operation.
+
+Environment:
+
+ Kernel mode
+
+--*/
+
+{
+
+ PMMVAD FoundVad;
+ PVOID StartingAddress;
+ PVOID EndingAddress;
+ PVOID CapturedBase;
+ ULONG CapturedRegionSize;
+ NTSTATUS Status;
+ ULONG Attached = FALSE;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE PointerPde;
+ PMMPTE PointerProtoPte;
+ PMMPTE LastProtoPte;
+ PMMPFN Pfn1;
+ ULONG CapturedOldProtect;
+ ULONG ProtectionMask;
+ MMPTE TempPte;
+ MMPTE PteContents;
+ MMPTE PreviousPte;
+ ULONG Locked = FALSE;
+ PVOID Va;
+ ULONG DoAgain;
+
+ //
+ // Get the address creation mutex to block multiple threads from
+ // creating or deleting address space at the same time.
+ // Get the working set mutex so PTEs can be modified.
+ // Block APCs so an APC which takes a page
+ // fault does not corrupt various structures.
+ //
+
+ CapturedBase = *BaseAddress;
+ CapturedRegionSize = *RegionSize;
+ ProtectionMask = MiMakeProtectionMask (NewProtect);
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (Process->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorFound;
+ }
+
+ EndingAddress = (PVOID)(((ULONG)CapturedBase +
+ CapturedRegionSize - 1L) | (PAGE_SIZE - 1L));
+ StartingAddress = (PVOID)PAGE_ALIGN(CapturedBase);
+ FoundVad = MiCheckForConflictingVad (StartingAddress, EndingAddress);
+
+ if (FoundVad == (PMMVAD)NULL) {
+
+ //
+ // No virtual address is reserved at the specified base address,
+ // return an error.
+ //
+
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto ErrorFound;
+ }
+
+ //
+ // Ensure that the starting and ending addresses are all within
+ // the same virtual address descriptor.
+ //
+
+ if ((StartingAddress < FoundVad->StartingVa) ||
+ (EndingAddress > FoundVad->EndingVa)) {
+
+ //
+        // Not within the section virtual address descriptor,
+ // return an error.
+ //
+
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto ErrorFound;
+ }
+
+ if (FoundVad->u.VadFlags.PhysicalMapping == 1) {
+
+ //
+ // Setting the protection of a physically mapped section is
+ // not allowed as there is no corresponding PFN database element.
+ //
+
+ Status = STATUS_CONFLICTING_ADDRESSES;
+ goto ErrorFound;
+ }
+
+ if (FoundVad->u.VadFlags.NoChange == 1) {
+
+ //
+ // An attempt is made at changing the protection
+ // of a secured VAD, check to see if the address range
+ // to change allows the change.
+ //
+
+ Status = MiCheckSecuredVad (FoundVad,
+ CapturedBase,
+ CapturedRegionSize,
+ ProtectionMask);
+
+ if (!NT_SUCCESS (Status)) {
+ goto ErrorFound;
+ }
+ }
+
+ if (FoundVad->u.VadFlags.PrivateMemory == 0) {
+
+
+ //
+ // For mapped sections, the NO_CACHE attribute is not allowed.
+ //
+
+ if (NewProtect & PAGE_NOCACHE) {
+
+ //
+ // Not allowed.
+ //
+
+ Status = STATUS_INVALID_PARAMETER_4;
+ goto ErrorFound;
+ }
+
+ //
+ // If this is a file mapping, then all pages must be
+ // committed as there can be no sparse file maps. Images
+ // can have non-committed pages if the alignment is greater
+ // than the page size.
+ //
+
+ if ((FoundVad->ControlArea->u.Flags.File == 0) ||
+ (FoundVad->ControlArea->u.Flags.Image == 1)) {
+
+ PointerProtoPte = MiGetProtoPteAddress (FoundVad, StartingAddress);
+ LastProtoPte = MiGetProtoPteAddress (FoundVad, EndingAddress);
+
+ //
+        // Release the working set mutex and acquire the section
+ // commit mutex. Check all the prototype PTEs described by
+ // the virtual address range to ensure they are committed.
+ //
+
+ UNLOCK_WS (Process);
+ ExAcquireFastMutex (&MmSectionCommitMutex);
+
+ while (PointerProtoPte <= LastProtoPte) {
+
+ //
+ // Check to see if the prototype PTE is committed, if
+ // not return an error.
+ //
+
+ if (PointerProtoPte->u.Long == 0) {
+
+ //
+ // Error, this prototype PTE is not committed.
+ //
+
+ ExReleaseFastMutex (&MmSectionCommitMutex);
+ Status = STATUS_NOT_COMMITTED;
+ goto ErrorFoundNoWs;
+ }
+ PointerProtoPte += 1;
+ }
+
+ //
+        // The range is committed, release the section commitment
+        // mutex, acquire the working set mutex and update the local PTEs.
+ //
+
+ ExReleaseFastMutex (&MmSectionCommitMutex);
+
+ //
+ // Set the protection on the section pages. This could
+ // get a quota exceeded exception.
+ //
+
+ LOCK_WS (Process);
+ }
+
+ try {
+ Locked = MiSetProtectionOnSection ( Process,
+ FoundVad,
+ StartingAddress,
+ EndingAddress,
+ NewProtect,
+ &CapturedOldProtect,
+ FALSE );
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ Status = GetExceptionCode();
+ goto ErrorFound;
+ }
+ } else {
+
+ //
+ // Not a section, private.
+ // For private pages, the WRITECOPY attribute is not allowed.
+ //
+
+ if ((NewProtect & PAGE_WRITECOPY) ||
+ (NewProtect & PAGE_EXECUTE_WRITECOPY)) {
+
+ //
+ // Not allowed.
+ //
+
+ Status = STATUS_INVALID_PARAMETER_4;
+ goto ErrorFound;
+ }
+
+ //
+ // Ensure all of the pages are already committed as described
+ // in the virtual address descriptor.
+ //
+
+ if ( !MiIsEntireRangeCommitted(StartingAddress,
+ EndingAddress,
+ FoundVad,
+ Process)) {
+
+ //
+ // Previously reserved pages have been decommitted, or an error
+ // occurred, release mutex and return status.
+ //
+
+ Status = STATUS_NOT_COMMITTED;
+ goto ErrorFound;
+ }
+
+ //
+ // The address range is committed, change the protection.
+ //
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ //
+ // Capture the protection for the first page.
+ //
+
+ if (PointerPte->u.Long != 0) {
+
+ CapturedOldProtect = MiGetPageProtection (PointerPte, Process);
+
+ //
+ // Make sure the Page table page is still resident.
+ //
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ } else {
+
+ //
+ // Get the protection from the VAD.
+ //
+
+ CapturedOldProtect =
+ MI_CONVERT_FROM_PTE_PROTECTION(FoundVad->u.VadFlags.Protection);
+ }
+
+ //
+ // For all the PTEs in the specified address range, set the
+ // protection depending on the state of the PTE.
+ //
+
+ while (PointerPte <= LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+ }
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Long == 0) {
+
+ //
+                // Increment the count of non-zero page table entries
+                // for this page table.  The protection will be set as
+                // if the PTE was demand zero.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries
+ [MiGetPteOffset(PointerPte)] += 1;
+
+ }
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // Set the protection into both the PTE and the original PTE
+ // in the PFN database.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+
+ //
+ // This PTE refers to a fork prototype PTE, make it
+ // private.
+ //
+
+ MiCopyOnWrite (MiGetVirtualAddressMappedByPte (PointerPte),
+ PointerPte);
+
+ //
+ // This may have released the working set mutex and
+ // the page table page may no longer be in memory.
+ //
+
+ (VOID)MiDoesPdeExistAndMakeValid (PointerPde,
+ Process,
+ FALSE);
+
+ //
+ // Do the loop again for the same PTE.
+ //
+
+ continue;
+ } else {
+
+ //
+ // The PTE is a private page which is valid, if the
+ // specified protection is no-access or guard page
+ // remove the PTE from the working set.
+ //
+
+ if ((NewProtect & PAGE_NOACCESS) ||
+ (NewProtect & PAGE_GUARD)) {
+
+ //
+ // Remove the page from the working set.
+ //
+
+ Locked = MiRemovePageFromWorkingSet (PointerPte,
+ Pfn1,
+ &Process->Vm);
+
+
+ continue;
+ } else {
+
+ Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
+ MI_MAKE_VALID_PTE (TempPte,
+ PointerPte->u.Hard.PageFrameNumber,
+ ProtectionMask,
+ PointerPte);
+
+ //
+ // Flush the TB as we have changed the protection
+ // of a valid PTE.
+ //
+
+ PreviousPte.u.Flush = MiFlushTbAndCapture (PointerPte,
+ TempPte.u.Flush,
+ Pfn1);
+ }
+ }
+ } else {
+
+ if (PteContents.u.Soft.Prototype == 1) {
+
+ //
+ // This PTE refers to a fork prototype PTE, make the
+ // page private. This is accomplished by releasing
+ // the working set mutex, reading the page thereby
+ // causing a fault, and re-executing the loop, hopefully,
+ // this time, we'll find the page present and will
+ // turn it into a private page.
+ //
+                    // Note that a TRY is used to catch guard-page
+                    // exceptions and no-access exceptions.
+ //
+
+ Va = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ DoAgain = TRUE;
+
+ while (PteContents.u.Hard.Valid == 0) {
+
+ UNLOCK_WS (Process);
+
+ try {
+
+ *(volatile ULONG *)Va;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ if (GetExceptionCode() ==
+ STATUS_ACCESS_VIOLATION) {
+
+ //
+ // The prototype PTE must be noaccess.
+ //
+
+ DoAgain = MiChangeNoAccessForkPte (PointerPte,
+ ProtectionMask);
+ } else if (GetExceptionCode() ==
+ STATUS_IN_PAGE_ERROR) {
+ //
+ // Ignore this page and go onto the next one.
+ //
+
+ PointerPte += 1;
+ LOCK_WS (Process);
+ continue;
+ }
+ }
+
+ LOCK_WS (Process);
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ PteContents = *(volatile MMPTE *)PointerPte;
+ }
+
+ if (DoAgain) {
+ continue;
+ }
+
+ } else {
+
+ if (PteContents.u.Soft.Transition == 1) {
+
+ if (MiSetProtectionOnTransitionPte (
+ PointerPte,
+ ProtectionMask)) {
+ continue;
+ }
+ } else {
+
+ //
+ // Must be page file space or demand zero.
+ //
+
+ PointerPte->u.Soft.Protection = ProtectionMask;
+ ASSERT (PointerPte->u.Long != 0);
+ }
+ }
+ }
+ PointerPte += 1;
+ } //end while
+ }
+
+ //
+ // Common completion code.
+ //
+
+ *RegionSize = (ULONG)EndingAddress - (ULONG)StartingAddress + 1L;
+ *BaseAddress = StartingAddress;
+ *LastProtect = CapturedOldProtect;
+
+ if (Locked) {
+ Status = STATUS_WAS_UNLOCKED;
+ } else {
+ Status = STATUS_SUCCESS;
+ }
+
+ErrorFound:
+
+ UNLOCK_WS (Process);
+ErrorFoundNoWs:
+
+ UNLOCK_ADDRESS_SPACE (Process);
+ return Status;
+}
+
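+//
+// Illustration (editorial sketch, not part of the original source): how
+// MiProtectVirtualMemory widens the caller's range to page boundaries.
+// Assuming a 4KB page (PAGE_SIZE == 0x1000), a caller passing
+//
+//     *BaseAddress == (PVOID)0x10012 and *RegionSize == 0x10
+//
+// gets back
+//
+//     StartingAddress == PAGE_ALIGN (0x10012)             == 0x10000
+//     EndingAddress   == (0x10012 + 0x10 - 1) | 0xFFF     == 0x10FFF
+//     *RegionSize     == 0x10FFF - 0x10000 + 1            == 0x1000
+//
+// i.e., the protection change always applies to whole pages.  The status
+// is STATUS_WAS_UNLOCKED rather than STATUS_SUCCESS if a locked page had
+// to be removed from the working set along the way.
+//
+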
+ULONG
+MiSetProtectionOnSection (
+ IN PEPROCESS Process,
+ IN PMMVAD FoundVad,
+ IN PVOID StartingAddress,
+ IN PVOID EndingAddress,
+ IN ULONG NewProtect,
+ OUT PULONG CapturedOldProtect,
+ IN ULONG DontCharge
+ )
+
+/*++
+
+Routine Description:
+
+ This routine changes the protection on a region of committed pages
+ within the virtual address space of the subject process. Setting
+    the protection on a range of pages causes the old protection to be
+ replaced by the specified protection value.
+
+Arguments:
+
+ Process - Supplies a pointer to the current process.
+
+ FoundVad - Supplies a pointer to the VAD containing the range to protect.
+
+ StartingAddress - Supplies the starting address to protect.
+
+ EndingAddress - Supplies the ending address to protect.
+
+ NewProtect - Supplies the new protection to set.
+
+ CapturedOldProtect - Supplies the address of a kernel owned pointer to
+ store (without probing) the old protection into.
+
+ DontCharge - Supplies TRUE if no quota or commitment should be charged.
+
+Return Value:
+
+    Returns TRUE if a locked page was removed from the working set (protection
+    was guard page or no-access), FALSE otherwise.
+
+Exceptions raised for page file quota or commitment violations.
+
+Environment:
+
+ Kernel mode, working set mutex held, address creation mutex held
+ APCs disabled.
+
+--*/
+
+{
+
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE PointerPde;
+ PMMPTE PointerProtoPte;
+ PMMPFN Pfn1;
+ MMPTE TempPte;
+ MMPTE PreviousPte;
+ ULONG Locked = FALSE;
+ ULONG ProtectionMask;
+ ULONG ProtectionMaskNotCopy;
+ ULONG NewProtectionMask;
+ MMPTE PteContents;
+ ULONG Index;
+ PULONG Va;
+ ULONG WriteCopy = FALSE;
+ ULONG DoAgain;
+ ULONG QuotaCharge = 0;
+
+ PAGED_CODE();
+
+ //
+ // Make the protection field.
+ //
+
+ if ((FoundVad->u.VadFlags.ImageMap == 1) ||
+ (FoundVad->u.VadFlags.CopyOnWrite == 1)) {
+
+ if (NewProtect & PAGE_READWRITE) {
+ NewProtect &= ~PAGE_READWRITE;
+ NewProtect |= PAGE_WRITECOPY;
+ }
+
+ if (NewProtect & PAGE_EXECUTE_READWRITE) {
+ NewProtect &= ~PAGE_EXECUTE_READWRITE;
+ NewProtect |= PAGE_EXECUTE_WRITECOPY;
+ }
+ }
+
+ ProtectionMask = MiMakeProtectionMask (NewProtect);
+
+ //
+ // Determine if copy on write is being set.
+ //
+
+ ProtectionMaskNotCopy = ProtectionMask;
+ if ((ProtectionMask & MM_COPY_ON_WRITE_MASK) == MM_COPY_ON_WRITE_MASK) {
+ WriteCopy = TRUE;
+ ProtectionMaskNotCopy &= ~MM_PROTECTION_COPY_MASK;
+ }
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ //
+ // Capture the protection for the first page.
+ //
+
+ if (PointerPte->u.Long != 0) {
+
+ *CapturedOldProtect = MiGetPageProtection (PointerPte, Process);
+
+ //
+ // Make sure the Page table page is still resident.
+ //
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE);
+
+ } else {
+
+ //
+ // Get the protection from the VAD, unless image file.
+ //
+
+ if (FoundVad->u.VadFlags.ImageMap == 0) {
+
+ //
+ // This is not an image file, the protection is in the VAD.
+ //
+
+ *CapturedOldProtect =
+ MI_CONVERT_FROM_PTE_PROTECTION(FoundVad->u.VadFlags.Protection);
+ } else {
+
+ //
+ // This is an image file, the protection is in the
+ // prototype PTE.
+ //
+
+ PointerProtoPte = MiGetProtoPteAddress (FoundVad,
+ MiGetVirtualAddressMappedByPte (PointerPte));
+
+ TempPte = MiCaptureSystemPte (PointerProtoPte, Process);
+
+ *CapturedOldProtect = MiGetPageProtection (&TempPte,
+ Process);
+
+ //
+ // Make sure the Page table page is still resident.
+ //
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE);
+ }
+ }
+
+ //
+    // If the page protection is being changed to copy-on-write, the
+ // commitment and page file quota for the potentially dirty private pages
+ // must be calculated and charged. This must be done before any
+ // protections are changed as the changes cannot be undone.
+ //
+
+ if (WriteCopy) {
+
+ //
+ // Calculate the charges. If the page is shared and not write copy
+ // it is counted as a charged page.
+ //
+
+ while (PointerPte <= LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No PDE exists for this address. Therefore
+ // all the PTEs are shared and not copy on write.
+ // go to the next PDE.
+ //
+
+ PointerPde += 1;
+ PointerProtoPte = PointerPte;
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ if (PointerPte > LastPte) {
+ QuotaCharge += 1 + LastPte - PointerProtoPte;
+ goto Done;
+ }
+ QuotaCharge += PointerPte - PointerProtoPte;
+ }
+ }
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Long == 0) {
+
+ //
+                // The PTE has not been evaluated, assume copy on write.
+ //
+
+ QuotaCharge += 1;
+
+ } else if ((PteContents.u.Hard.Valid == 1) &&
+ (PteContents.u.Hard.CopyOnWrite == 0)) {
+
+ //
+ // See if this is a prototype PTE, if so charge it.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+ QuotaCharge += 1;
+ }
+ } else {
+
+ if (PteContents.u.Soft.Prototype == 1) {
+
+ //
+ // This is a prototype PTE. Charge if it is not
+ // in copy on write format.
+ //
+
+ if (PteContents.u.Soft.PageFileHigh == 0xFFFFF) {
+
+ //
+ // Page protection is within the PTE.
+ //
+
+ if (!MI_IS_PTE_PROTECTION_COPY_WRITE(PteContents.u.Soft.Protection)) {
+ QuotaCharge += 1;
+ }
+ } else {
+
+ //
+ // The PTE references the prototype directly, therefore
+ // it can't be copy on write. Charge.
+ //
+
+ QuotaCharge += 1;
+ }
+ }
+ }
+ PointerPte += 1;
+ }
+
+Done:
+ NOTHING;
+
+ //
+ // Charge for the quota.
+ //
+
+ if (!DontCharge) {
+ MiChargePageFileQuota (QuotaCharge, Process);
+
+ try {
+ MiChargeCommitment (QuotaCharge, Process);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ MiReturnPageFileQuota (QuotaCharge, Process);
+ ExRaiseStatus (GetExceptionCode());
+ }
+
+ //
+ // Add the quota into the charge to the VAD.
+ //
+
+ FoundVad->u.VadFlags.CommitCharge += QuotaCharge;
+ Process->CommitCharge += QuotaCharge;
+ }
+ }
+
+ //
+ // For all the PTEs in the specified address range, set the
+ // protection depending on the state of the PTE.
+ //
+
+ //
+ // If the PTE was copy on write (but not written) and the
+ // new protection is NOT copy-on-write, return page file quota
+    // and commitment.
+ //
+
+ PointerPde = MiGetPdeAddress (StartingAddress);
+ PointerPte = MiGetPteAddress (StartingAddress);
+
+ MiDoesPdeExistAndMakeValid (PointerPde, Process, FALSE);
+
+ QuotaCharge = 0;
+
+ while (PointerPte <= LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+ PointerPde = MiGetPteAddress (PointerPte);
+ MiMakePdeExistAndMakeValid (PointerPde, Process, FALSE);
+ }
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Long == 0) {
+
+ //
+ // The PTE is Zero, set it into prototype PTE format
+ // with the protection in the prototype PTE.
+ //
+
+ *PointerPte = PrototypePte;
+ PointerPte->u.Soft.Protection = ProtectionMask;
+
+ //
+            // Increment the count of non-zero page table entries
+            // for this page table.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries
+ [MiGetPteOffset(PointerPte)] += 1;
+
+ } else if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // Set the protection into both the PTE and the original PTE
+ // in the PFN database for private pages only.
+ //
+
+ NewProtectionMask = ProtectionMask;
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ if ((NewProtect & PAGE_NOACCESS) ||
+ (NewProtect & PAGE_GUARD)) {
+
+ Locked = MiRemovePageFromWorkingSet (PointerPte,
+ Pfn1,
+ &Process->Vm );
+ continue;
+
+ } else {
+
+ if (Pfn1->u3.e1.PrototypePte == 1) {
+
+ //
+ // The true protection may be in the WSLE, locate
+ // the WSLE.
+ //
+
+ Va = (PULONG)MiGetVirtualAddressMappedByPte (PointerPte);
+ Index = MiLocateWsle ((PVOID)Va, MmWorkingSetList,
+ Pfn1->u1.WsIndex);
+
+ //
+ // Check to see if this is a prototype PTE. This
+ // is done by comparing the PTE address in the
+ // PFN database to the PTE address indicated by the
+ // VAD. If they are not equal, this is a prototype
+ // PTE.
+ //
+
+ if (Pfn1->PteAddress !=
+ MiGetProtoPteAddress (FoundVad, (PVOID)Va)) {
+
+ //
+ // This PTE refers to a fork prototype PTE, make it
+ // private.
+ //
+
+ MiCopyOnWrite ((PVOID)Va, PointerPte);
+
+ if (WriteCopy) {
+ QuotaCharge += 1;
+ }
+
+ //
+ // This may have released the working set mutex and
+ // the page table page may no longer be in memory.
+ //
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde,
+ Process, FALSE);
+
+ //
+ // Do the loop again.
+ //
+
+ continue;
+
+ } else {
+
+ //
+ // Update the protection field in the WSLE and
+ // the PTE.
+ //
+ //
+ // If the PTE is copy on write uncharge the
+ // previously charged quota.
+ //
+
+ if ((!WriteCopy) && (PteContents.u.Hard.CopyOnWrite == 1)) {
+ QuotaCharge += 1;
+ }
+
+ MmWsle[Index].u1.e1.Protection = ProtectionMask;
+ MmWsle[Index].u1.e1.SameProtectAsProto = 0;
+ }
+
+ } else {
+
+ //
+                    // Page is private (copied on write); the protection
+                    // mask is stored in the original PTE field.
+ //
+
+ Pfn1->OriginalPte.u.Soft.Protection = ProtectionMaskNotCopy;
+ NewProtectionMask = ProtectionMaskNotCopy;
+ }
+
+ MI_MAKE_VALID_PTE (TempPte,
+ PteContents.u.Hard.PageFrameNumber,
+ NewProtectionMask,
+ PointerPte);
+ }
+
+ //
+ // Flush the TB as we have changed the protection
+ // of a valid PTE.
+ //
+
+ PreviousPte.u.Flush = MiFlushTbAndCapture (PointerPte,
+ TempPte.u.Flush,
+ Pfn1);
+ } else {
+
+ if (PteContents.u.Soft.Prototype == 1) {
+
+ //
+ // The PTE is in prototype PTE format.
+ //
+
+ //
+ // Is it a fork prototype PTE?
+ //
+
+ Va = (PULONG)MiGetVirtualAddressMappedByPte (PointerPte);
+
+ if ((PteContents.u.Soft.PageFileHigh != 0xFFFFF) &&
+ (MiPteToProto (PointerPte) !=
+ MiGetProtoPteAddress (FoundVad, (PVOID)Va))) {
+
+ //
+ // This PTE refers to a fork prototype PTE, make the
+ // page private. This is accomplished by releasing
+ // the working set mutex, reading the page thereby
+ // causing a fault, and re-executing the loop, hopefully,
+ // this time, we'll find the page present and will
+ // turn it into a private page.
+ //
+                    // Note that a page with prototype = 1 cannot be
+                    // no-access.
+ //
+
+ DoAgain = TRUE;
+
+ while (PteContents.u.Hard.Valid == 0) {
+
+ UNLOCK_WS (Process);
+
+ try {
+
+ *(volatile ULONG *)Va;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ if (GetExceptionCode() !=
+ STATUS_GUARD_PAGE_VIOLATION) {
+
+ //
+ // The prototype PTE must be noaccess.
+ //
+
+ DoAgain = MiChangeNoAccessForkPte (PointerPte,
+ ProtectionMask);
+ }
+ }
+
+ LOCK_WS (Process);
+
+ (VOID)MiDoesPdeExistAndMakeValid(PointerPde,
+ Process,
+ FALSE);
+
+ PteContents = *(volatile MMPTE *)PointerPte;
+ }
+
+ if (DoAgain) {
+ continue;
+ }
+
+ } else {
+
+ //
+ // If the new protection is not write-copy, the PTE
+ // protection is not in the prototype PTE (can't be
+ // write copy for sections), and the protection in
+ // the PTE is write-copy, release the page file
+ // quota and commitment for this page.
+ //
+
+ if ((!WriteCopy) &&
+                        (PteContents.u.Soft.PageFileHigh == 0xFFFFF)) {
+ if (MI_IS_PTE_PROTECTION_COPY_WRITE(PteContents.u.Soft.Protection)) {
+ QuotaCharge += 1;
+ }
+
+ }
+
+ //
+ // The PTE is a prototype PTE. Make the high part
+ // of the PTE indicate that the protection field
+ // is in the PTE itself.
+ //
+
+ *PointerPte = PrototypePte;
+ PointerPte->u.Soft.Protection = ProtectionMask;
+ }
+
+ } else {
+
+ if (PteContents.u.Soft.Transition == 1) {
+
+ //
+ // This is a transition PTE. (Page is private)
+ //
+
+ if (MiSetProtectionOnTransitionPte (
+ PointerPte,
+ ProtectionMaskNotCopy)) {
+ continue;
+ }
+
+ } else {
+
+ //
+ // Must be page file space or demand zero.
+ //
+
+ PointerPte->u.Soft.Protection = ProtectionMaskNotCopy;
+ }
+ }
+ }
+
+ PointerPte += 1;
+ }
+
+ //
+ // Return the quota charge and the commitment, if any.
+ //
+
+ if ((QuotaCharge > 0) && (!DontCharge)) {
+
+ MiReturnCommitment (QuotaCharge);
+ MiReturnPageFileQuota (QuotaCharge, Process);
+
+ ASSERT (QuotaCharge <= FoundVad->u.VadFlags.CommitCharge);
+
+ FoundVad->u.VadFlags.CommitCharge -= QuotaCharge;
+ Process->CommitCharge -= QuotaCharge;
+ }
+
+ return Locked;
+}
+
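+//
+// Illustration (editorial sketch, not part of the original source): the
+// write-protection conversion made at the top of MiSetProtectionOnSection
+// for image-mapped and copy-on-write VADs.  A caller asking for a
+// writable protection on such a section actually receives copy-on-write
+// semantics:
+//
+//     requested                     applied
+//     PAGE_READWRITE          ->    PAGE_WRITECOPY
+//     PAGE_EXECUTE_READWRITE  ->    PAGE_EXECUTE_WRITECOPY
+//
+// Commitment and page file quota for each potentially dirtied page are
+// charged before any PTE is changed, because a partially applied
+// protection change cannot be rolled back.
+//
+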
+ULONG
+MiGetPageProtection (
+ IN PMMPTE PointerPte,
+ IN PEPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns the page protection of a non-zero PTE.
+ It may release and reacquire the working set mutex.
+
+Arguments:
+
+ PointerPte - Supplies a pointer to a non-zero PTE.
+
+Return Value:
+
+ Returns the protection code.
+
+Environment:
+
+ Kernel mode, working set and address creation mutex held.
+    Note that the address creation mutex does not need to be held
+    if the working set mutex will not need to be released, as it
+    may be in the prototype PTE case.
+
+--*/
+
+{
+
+ MMPTE PteContents;
+ MMPTE ProtoPteContents;
+ PMMPFN Pfn1;
+ PMMPTE ProtoPteAddress;
+ PVOID Va;
+ ULONG Index;
+
+ PAGED_CODE();
+
+ PteContents = *PointerPte;
+
+ if ((PteContents.u.Soft.Valid == 0) && (PteContents.u.Soft.Prototype == 1)) {
+
+ //
+        // This PTE is in prototype format, the protection is
+        // stored in the prototype PTE.
+ //
+
+ if ((PointerPte > (PMMPTE)PDE_TOP) ||
+ (PteContents.u.Soft.PageFileHigh == 0xFFFFF)) {
+
+ //
+ // The protection is within this PTE.
+ //
+
+ return MI_CONVERT_FROM_PTE_PROTECTION (
+ PteContents.u.Soft.Protection);
+ }
+
+ ProtoPteAddress = MiPteToProto (PointerPte);
+
+ //
+        // Capture the prototype PTE contents.
+ //
+
+ ProtoPteContents = MiCaptureSystemPte (ProtoPteAddress, Process);
+
+ //
+ // The working set mutex may have been released and the
+ // page may no longer be in prototype format, get the
+ // new contents of the PTE and obtain the protection mask.
+ //
+
+ PteContents = MiCaptureSystemPte (PointerPte, Process);
+ }
+
+ if ((PteContents.u.Soft.Valid == 0) && (PteContents.u.Soft.Prototype == 1)) {
+
+ //
+        // The PTE is still in prototype format, return the protection
+        // captured from the prototype PTE.
+ //
+
+ if (ProtoPteContents.u.Hard.Valid == 1) {
+
+ //
+ // The prototype PTE is valid, get the protection from
+ // the PFN database.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (ProtoPteContents.u.Hard.PageFrameNumber);
+ return MI_CONVERT_FROM_PTE_PROTECTION(
+ Pfn1->OriginalPte.u.Soft.Protection);
+
+ } else {
+
+ //
+ // The prototype PTE is not valid, return the protection from the
+ // PTE.
+ //
+
+ return MI_CONVERT_FROM_PTE_PROTECTION (
+ ProtoPteContents.u.Soft.Protection);
+ }
+ }
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ //
+    // The page is valid; the protection field is either in the
+    // PFN database original PTE element or in the WSLE.  If
+    // the page is private, get it from the PFN original PTE
+    // element, otherwise use the WSLE.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ if ((Pfn1->u3.e1.PrototypePte == 0) ||
+ (PointerPte > (PMMPTE)PDE_TOP)) {
+
+ //
+ // This is a private PTE or the PTE address is that of a
+ // prototype PTE, hence the protection is in
+        // the original PTE.
+ //
+
+ return MI_CONVERT_FROM_PTE_PROTECTION(
+ Pfn1->OriginalPte.u.Soft.Protection);
+ }
+
+ //
+ // The PTE was a hardware PTE, get the protection
+ // from the WSLE.
+
+ Va = (PULONG)MiGetVirtualAddressMappedByPte (PointerPte);
+ Index = MiLocateWsle ((PVOID)Va, MmWorkingSetList,
+ Pfn1->u1.WsIndex);
+
+ return MI_CONVERT_FROM_PTE_PROTECTION (MmWsle[Index].u1.e1.Protection);
+ }
+
+ //
+ // PTE is either demand zero or transition, in either
+ // case protection is in PTE.
+ //
+
+ return MI_CONVERT_FROM_PTE_PROTECTION (PteContents.u.Soft.Protection);
+
+}
+
+ULONG
+MiChangeNoAccessForkPte (
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask
+ )
+
+/*++
+
+Routine Description:
+
+    This routine is called when an attempted touch of a page mapped by a
+    fork prototype PTE raised an access violation, i.e., the prototype
+    PTE is no-access.  If the new protection is also no-access there is
+    nothing to change; otherwise the prototype PTE is marked read-only
+    so the protection change can proceed.
+
+Arguments:
+
+ PointerPte - Supplies a pointer to the current PTE.
+
+ ProtectionMask - Supplies the protection mask to set.
+
+Return Value:
+
+    FALSE if the loop should be repeated for this PTE, TRUE
+ if protection has been set.
+
+
+Environment:
+
+ Kernel mode, address creation mutex held, APCs disabled.
+
+--*/
+
+{
+ PAGED_CODE();
+
+ if (ProtectionMask == MM_NOACCESS) {
+
+ //
+ // No need to change the page protection.
+ //
+
+ return TRUE;
+ }
+
+ PointerPte->u.Proto.ReadOnly = 1;
+
+ return FALSE;
+}
+
+
+HARDWARE_PTE
+MiFlushTbAndCapture(
+ IN PMMPTE PointerPte,
+ IN HARDWARE_PTE TempPte,
+ IN PMMPFN Pfn1
+ )
+
+// Nonpagable helper routine.
+
+{
+ MMPTE PreviousPte;
+ KIRQL OldIrql;
+
+ //
+ // Flush the TB as we have changed the protection
+ // of a valid PTE.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ PreviousPte.u.Flush = KeFlushSingleTb (
+ MiGetVirtualAddressMappedByPte (PointerPte),
+ FALSE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte);
+
+ ASSERT (PreviousPte.u.Hard.Valid == 1);
+
+ //
+ // A page protection is being changed, on certain
+ // hardware the dirty bit should be ORed into the
+ // modify bit in the PFN element.
+ //
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn1);
+ UNLOCK_PFN (OldIrql);
+ return PreviousPte.u.Flush;
+}
+
+ULONG
+MiSetProtectionOnTransitionPte (
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask
+ )
+
+    // Nonpagable helper routine.
+
+{
+ KIRQL OldIrql;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+
+ //
+ // This is a transition PTE. (Page is private)
+ //
+
+ //
+ // Need pfn mutex to ensure page doesn't become
+ // non-transition.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Make sure the page is still a transition page.
+ //
+
+ PteContents = *(volatile MMPTE *)PointerPte;
+
+ if ((PteContents.u.Soft.Prototype == 0) &&
+ (PointerPte->u.Soft.Transition == 1)) {
+
+ Pfn1 = MI_PFN_ELEMENT (
+ PteContents.u.Trans.PageFrameNumber);
+
+ Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask;
+ PointerPte->u.Soft.Protection = ProtectionMask;
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+ }
+
+ //
+ // Do this loop again for the same PTE.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+}
+
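+//
+// Illustration (editorial sketch, not part of the original source): the
+// retry convention used by the helper above.  Because the page state can
+// change while the PFN lock is not held, callers re-execute their loop
+// for the same PTE whenever the helper reports that the expected state
+// is gone:
+//
+//     if (MiSetProtectionOnTransitionPte (PointerPte, ProtectionMask)) {
+//         continue;       // no longer transition - re-examine this PTE
+//     }
+//
+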
+MMPTE
+MiCaptureSystemPte (
+ IN PMMPTE PointerProtoPte,
+ IN PEPROCESS Process
+ )
+
+// Nonpagable helper routine.
+{
+ MMPTE TempPte;
+ KIRQL OldIrql;
+
+ LOCK_PFN (OldIrql);
+ MiMakeSystemAddressValidPfnWs (PointerProtoPte, Process);
+ TempPte = *PointerProtoPte;
+ UNLOCK_PFN (OldIrql);
+ return TempPte;
+}
+
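+//
+// Illustration (editorial sketch, not part of the original source):
+// MiCaptureSystemPte is the safe way to read a prototype PTE whose
+// containing page may itself be paged out.  A typical use, as in
+// MiGetPageProtection above:
+//
+//     MMPTE ProtoContents;
+//
+//     ProtoContents = MiCaptureSystemPte (ProtoPteAddress, Process);
+//
+// The PFN lock is held across the read and the page is first made
+// valid, so the returned copy is a consistent snapshot.  Note that the
+// working set mutex may have been released and reacquired in the
+// process, so state captured before the call must be revalidated.
+//
+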
+NTSTATUS
+MiCheckSecuredVad (
+ IN PMMVAD Vad,
+ IN PVOID Base,
+ IN ULONG Size,
+ IN ULONG ProtectionMask
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks to see if the specified VAD is secured in such
+ a way as to conflick with the address range and protection mask
+ specified.
+
+Arguments:
+
+ Vad - Supplies a pointer to the VAD containing the address range.
+
+ Base - Supplies the base of the range the protection starts at.
+
+ Size - Supplies the size of the range.
+
+ ProtectionMask - Supplies the protection mask being set.
+
+Return Value:
+
+ Status value.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PVOID End;
+ PLIST_ENTRY Next;
+ PMMSECURE_ENTRY Entry;
+ NTSTATUS Status = STATUS_SUCCESS;
+
+ End = (PVOID)((PCHAR)Base + Size);
+
+ if (ProtectionMask < MM_SECURE_DELETE_CHECK) {
+ if ((Vad->u.VadFlags.NoChange == 1) &&
+ (Vad->u2.VadFlags2.SecNoChange == 1) &&
+ (Vad->u.VadFlags.Protection != ProtectionMask)) {
+
+ //
+ // An attempt is made at changing the protection
+ // of a SEC_NO_CHANGE section - return an error.
+ //
+
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto done;
+ }
+ } else {
+
+ //
+        // Deletion - set to no-access for check. SEC_NO_CHANGE allows
+ // deletion, but does not allow page protection changes.
+ //
+
+ ProtectionMask = 0;
+ }
+
+ if (Vad->u2.VadFlags2.OneSecured) {
+
+ if ((Base <= Vad->u3.Secured.EndVa) && (End >= Vad->u3.Secured.EndVa)) {
+
+ //
+ // This region conflicts, check the protections.
+ //
+
+ if (Vad->u2.VadFlags2.ReadOnly) {
+ if (MmReadWrite[ProtectionMask] < 10) {
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto done;
+ }
+ } else {
+ if (MmReadWrite[ProtectionMask] < 11) {
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto done;
+ }
+ }
+ }
+
+ } else if (Vad->u2.VadFlags2.MultipleSecured) {
+
+ Next = Vad->u3.List.Flink;
+ do {
+ Entry = CONTAINING_RECORD( Next,
+ MMSECURE_ENTRY,
+ List);
+
+ if ((Base <= Entry->EndVa) &&
+ (End >= Entry->EndVa)) {
+
+ //
+ // This region conflicts, check the protections.
+ //
+
+ if (Entry->u2.VadFlags2.ReadOnly) {
+ if (MmReadWrite[ProtectionMask] < 10) {
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto done;
+ }
+ } else {
+ if (MmReadWrite[ProtectionMask] < 11) {
+ Status = STATUS_INVALID_PAGE_PROTECTION;
+ goto done;
+ }
+ }
+ }
+ Next = Entry->List.Flink;
+ } while (Entry->List.Flink != &Vad->u3.List);
+ }
+
+done:
+ return Status;
+}
diff --git a/private/ntos/mm/querysec.c b/private/ntos/mm/querysec.c
new file mode 100644
index 000000000..83bd0ddf3
--- /dev/null
+++ b/private/ntos/mm/querysec.c
@@ -0,0 +1,242 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ querysec.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtQuerySection service.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtQuerySection)
+#endif
+
+
+NTSTATUS
+NtQuerySection(
+ IN HANDLE SectionHandle,
+ IN SECTION_INFORMATION_CLASS SectionInformationClass,
+ OUT PVOID SectionInformation,
+ IN ULONG SectionInformationLength,
+ OUT PULONG ReturnLength OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns information about an opened section object.
+ This function provides the capability to determine the base address,
+ size, granted access, and allocation of an opened section object.
+
+Arguments:
+
+ SectionHandle - Supplies an open handle to a section object.
+
+ SectionInformationClass - The section information class about
+ which to retrieve information.
+
+ SectionInformation - A pointer to a buffer that receives the
+ specified information. The format and content of the buffer
+ depend on the specified section class.
+
+ SectionInformation Format by Information Class:
+
+ SectionBasicInformation - Data type is PSECTION_BASIC_INFORMATION.
+
+ SECTION_BASIC_INFORMATION Structure
+
+ PVOID BaseAddress - The base virtual address of the
+ section if the section is based.
+
+ LARGE_INTEGER MaximumSize - The maximum size of the section in
+ bytes.
+
+ ULONG AllocationAttributes - The allocation attributes
+ flags.
+
+ AllocationAttributes Flags
+
+ SEC_BASED - The section is a based section.
+
+ SEC_TILE - The section must be allocated in the first
+                        512 MB of the virtual address space.
+
+ SEC_FILE - The section is backed by a data file.
+
+ SEC_RESERVE - All pages of the section were initially
+ set to the reserved state.
+
+ SEC_COMMIT - All pages of the section were initially
+                        set to the committed state.
+
+ SEC_IMAGE - The section was mapped as an executable
+ image file.
+
+            SectionImageInformation - Data type is PSECTION_IMAGE_INFORMATION.
+
+ SectionInformationLength - Specifies the length in bytes of the
+ section information buffer.
+
+ ReturnLength - An optional pointer which, if specified, receives
+ the number of bytes placed in the section information buffer.
+
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
+
+{
+ PSECTION Section;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+
+ PAGED_CODE();
+
+ //
+ // Get previous processor mode and probe output argument if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+
+ //
+ // Check arguments.
+ //
+
+ try {
+
+ ProbeForWrite(SectionInformation,
+ SectionInformationLength,
+ sizeof(ULONG));
+
+ if (ARGUMENT_PRESENT (ReturnLength)) {
+ ProbeForWriteUlong(ReturnLength);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+ }
+
+ //
+ // Check argument validity.
+ //
+
+ if ((SectionInformationClass != SectionBasicInformation) &&
+ (SectionInformationClass != SectionImageInformation)) {
+ return STATUS_INVALID_INFO_CLASS;
+ }
+
+ if (SectionInformationClass == SectionBasicInformation) {
+ if (SectionInformationLength < (ULONG)sizeof(SECTION_BASIC_INFORMATION)) {
+ return STATUS_INFO_LENGTH_MISMATCH;
+ }
+ } else {
+ if (SectionInformationLength < (ULONG)sizeof(SECTION_IMAGE_INFORMATION)) {
+ return STATUS_INFO_LENGTH_MISMATCH;
+ }
+ }
+
+ //
+    // Reference the section object by handle for SECTION_QUERY access,
+    // get the information from the section object, fill in the
+    // information structure, dereference the section object, optionally
+    // return the length of the information structure, and return the
+    // service status.
+ //
+
+ Status = ObReferenceObjectByHandle(SectionHandle, SECTION_QUERY,
+ MmSectionObjectType,
+ PreviousMode, (PVOID *)&Section, NULL);
+
+ if (NT_SUCCESS(Status)) {
+
+ try {
+
+ if (SectionInformationClass == SectionBasicInformation) {
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->BaseAddress =
+ Section->Address.StartingVa;
+
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->MaximumSize =
+ Section->SizeOfSection;
+
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->AllocationAttributes =
+ 0;
+
+ if (Section->u.Flags.Image) {
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->AllocationAttributes =
+ SEC_IMAGE;
+ }
+ if (Section->u.Flags.Based) {
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->AllocationAttributes |=
+ SEC_BASED;
+ }
+ if (Section->u.Flags.File) {
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->AllocationAttributes |=
+ SEC_FILE;
+ }
+ if (Section->u.Flags.NoCache) {
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->AllocationAttributes |=
+ SEC_NOCACHE;
+ }
+ if (Section->u.Flags.Reserve) {
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->AllocationAttributes |=
+ SEC_RESERVE;
+ }
+ if (Section->u.Flags.Commit) {
+ ((PSECTION_BASIC_INFORMATION)SectionInformation)->AllocationAttributes |=
+ SEC_COMMIT;
+ }
+
+ if (ARGUMENT_PRESENT(ReturnLength)) {
+ *ReturnLength = sizeof(SECTION_BASIC_INFORMATION);
+ }
+
+ } else {
+
+                if (Section->u.Flags.Image == 0) {
+
+                    //
+                    // Not an image section - drop the reference taken
+                    // above before returning.
+                    //
+
+                    ObDereferenceObject ((PVOID)Section);
+                    return STATUS_SECTION_NOT_IMAGE;
+                }
+ *((PSECTION_IMAGE_INFORMATION)SectionInformation) =
+ Section->Segment->ImageInformation;
+
+ if (ARGUMENT_PRESENT(ReturnLength)) {
+ *ReturnLength = sizeof(SECTION_IMAGE_INFORMATION);
+ }
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ }
+
+ ObDereferenceObject ((PVOID)Section);
+ }
+ return Status;
+}
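+
+//
+// Illustration (editorial sketch, not part of the original source): a
+// minimal caller of NtQuerySection, assuming SectionHandle was opened
+// with SECTION_QUERY access:
+//
+//     SECTION_BASIC_INFORMATION Info;
+//     ULONG ReturnedLength;
+//     NTSTATUS Status;
+//
+//     Status = NtQuerySection (SectionHandle,
+//                              SectionBasicInformation,
+//                              &Info,
+//                              sizeof (Info),
+//                              &ReturnedLength);
+//
+//     if (NT_SUCCESS (Status) &&
+//         (Info.AllocationAttributes & SEC_IMAGE)) {
+//         // The section was mapped from an executable image file.
+//     }
+//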
diff --git a/private/ntos/mm/queryvm.c b/private/ntos/mm/queryvm.c
new file mode 100644
index 000000000..55736bf9f
--- /dev/null
+++ b/private/ntos/mm/queryvm.c
@@ -0,0 +1,920 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ queryvm.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtQueryVirtualMemory service.
+
+Author:
+
+ Lou Perazzoli (loup) 21-Aug-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+extern POBJECT_TYPE IoFileObjectType;
+
+NTSTATUS
+MiGetWorkingSetInfo (
+ IN PMEMORY_WORKING_SET_INFORMATION WorkingSetInfo,
+ IN ULONG Length,
+ IN PEPROCESS Process
+ );
+
+MMPTE
+MiCaptureSystemPte (
+ IN PMMPTE PointerProtoPte,
+ IN PEPROCESS Process
+ );
+
+#if DBG
+PEPROCESS MmWatchProcess;
+VOID MmFooBar(VOID);
+#endif // DBG
+
+ULONG
+MiQueryAddressState (
+ IN PVOID Va,
+ IN PMMVAD Vad,
+ IN PEPROCESS TargetProcess,
+ OUT PULONG ReturnedProtect
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtQueryVirtualMemory)
+#pragma alloc_text(PAGE,MiQueryAddressState)
+#pragma alloc_text(PAGELK,MiGetWorkingSetInfo)
+#endif
+
+
+NTSTATUS
+NtQueryVirtualMemory (
+ IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress,
+ IN MEMORY_INFORMATION_CLASS MemoryInformationClass,
+ OUT PVOID MemoryInformation,
+ IN ULONG MemoryInformationLength,
+ OUT PULONG ReturnLength OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function provides the capability to determine the state,
+ protection, and type of a region of pages within the virtual address
+ space of the subject process.
+
+ The state of the first page within the region is determined and then
+ subsequent entries in the process address map are scanned from the
+ base address upward until either the entire range of pages has been
+ scanned or until a page with a nonmatching set of attributes is
+ encountered. The region attributes, the length of the region of pages
+ with matching attributes, and an appropriate status value are
+ returned.
+
+ If the entire region of pages does not have a matching set of
+ attributes, then the returned length parameter value can be used to
+ calculate the address and length of the region of pages that was not
+ scanned.
+
+Arguments:
+
+
+ ProcessHandle - An open handle to a process object.
+
+ BaseAddress - The base address of the region of pages to be
+ queried. This value is rounded down to the next host-page-
+ address boundary.
+
+ MemoryInformationClass - The memory information class about which
+ to retrieve information.
+
+ MemoryInformation - A pointer to a buffer that receives the
+ specified information. The format and content of the buffer
+ depend on the specified information class.
+
+
+ MemoryBasicInformation - Data type is PMEMORY_BASIC_INFORMATION.
+
+ MEMORY_BASIC_INFORMATION Structure
+
+
+ ULONG RegionSize - The size of the region in bytes
+ beginning at the base address in which all pages have
+ identical attributes.
+
+ ULONG State - The state of the pages within the region.
+
+                    State Values
+
+ MEM_COMMIT - The state of the pages within the region
+ is committed.
+
+ MEM_FREE - The state of the pages within the region
+ is free.
+
+ MEM_RESERVE - The state of the pages within the
+ region is reserved.
+
+ ULONG Protect - The protection of the pages within the
+ region.
+
+
+                Protect Values
+
+ PAGE_NOACCESS - No access to the region of pages is
+ allowed. An attempt to read, write, or execute
+ within the region results in an access violation
+ (i.e., a GP fault).
+
+ PAGE_EXECUTE - Execute access to the region of pages
+ is allowed. An attempt to read or write within
+ the region results in an access violation.
+
+ PAGE_READONLY - Read-only and execute access to the
+ region of pages is allowed. An attempt to write
+ within the region results in an access violation.
+
+ PAGE_READWRITE - Read, write, and execute access to
+ the region of pages is allowed. If write access
+ to the underlying section is allowed, then a
+ single copy of the pages are shared. Otherwise,
+ the pages are shared read-only/copy-on-write.
+
+ PAGE_GUARD - Read, write, and execute access to the
+ region of pages is allowed; however, access to
+ the region causes a "guard region entered"
+ condition to be raised in the subject process.
+
+ PAGE_NOCACHE - Disable the placement of committed
+ pages into the data cache.
+
+ ULONG Type - The type of pages within the region.
+
+
+ Type Values
+
+ MEM_PRIVATE - The pages within the region are
+ private.
+
+ MEM_MAPPED - The pages within the region are mapped
+ into the view of a section.
+
+ MEM_IMAGE - The pages within the region are mapped
+ into the view of an image section.
+
+ MemoryInformationLength - Specifies the length in bytes of
+ the memory information buffer.
+
+ ReturnLength - An optional pointer which, if specified,
+ receives the number of bytes placed in the process
+ information buffer.
+
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ KPROCESSOR_MODE PreviousMode;
+ PEPROCESS TargetProcess;
+ NTSTATUS Status;
+ PMMVAD Vad;
+ BOOLEAN PteIsZero = FALSE;
+ PVOID Va;
+ BOOLEAN Found = FALSE;
+ ULONG TheRegionSize;
+ ULONG NewProtect;
+ ULONG NewState;
+ PVOID FilePointer;
+
+ MEMORY_BASIC_INFORMATION Info;
+
+    //
+    // Check argument validity.  For MemoryBasicInformation, also make
+    // sure the user's buffer is large enough for the returned data.
+    //
+ switch (MemoryInformationClass) {
+ case MemoryBasicInformation:
+ if (MemoryInformationLength < sizeof(MEMORY_BASIC_INFORMATION)) {
+ return STATUS_INFO_LENGTH_MISMATCH;
+ }
+ break;
+
+ case MemoryWorkingSetInformation:
+ break;
+
+ case MemoryMappedFilenameInformation:
+ FilePointer = NULL;
+ break;
+ default:
+ return STATUS_INVALID_INFO_CLASS;
+ }
+
+ PreviousMode = KeGetPreviousMode();
+
+ if (PreviousMode != KernelMode) {
+
+ //
+ // Check arguments.
+ //
+
+ try {
+
+ ProbeForWrite(MemoryInformation,
+ MemoryInformationLength,
+ sizeof(ULONG));
+
+ if (ARGUMENT_PRESENT(ReturnLength)) {
+ ProbeForWriteUlong(ReturnLength);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+ }
+ if (BaseAddress > MM_HIGHEST_USER_ADDRESS) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ if (BaseAddress >= MM_HIGHEST_VAD_ADDRESS) {
+
+ //
+ // Indicate a reserved area from this point on.
+ //
+
+ if ( MemoryInformationClass == MemoryBasicInformation ) {
+
+ try {
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->AllocationBase =
+ (PVOID)((ULONG)MM_HIGHEST_VAD_ADDRESS + 1);
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->AllocationProtect =
+ PAGE_READONLY;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->BaseAddress =
+ PAGE_ALIGN(BaseAddress);
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->RegionSize =
+ ((ULONG)MM_HIGHEST_USER_ADDRESS + 1) -
+ (ULONG)PAGE_ALIGN(BaseAddress);
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->State = MEM_RESERVE;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->Protect = PAGE_NOACCESS;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->Type = MEM_PRIVATE;
+
+ if (ARGUMENT_PRESENT(ReturnLength)) {
+ *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+
+#if defined(MM_SHARED_USER_DATA_VA)
+ if (PAGE_ALIGN(BaseAddress) == (PVOID)MM_SHARED_USER_DATA_VA) {
+
+ //
+ // This is the page that is double mapped between
+ // user mode and kernel mode.
+ //
+
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->Protect =
+ PAGE_READONLY;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->RegionSize =
+ PAGE_SIZE;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->State =
+ MEM_COMMIT;
+ }
+#endif
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // Just return success.
+ //
+ }
+
+ return STATUS_SUCCESS;
+ } else {
+ return STATUS_INVALID_ADDRESS;
+ }
+ }
+
+ if ( ProcessHandle == NtCurrentProcess() ) {
+ TargetProcess = PsGetCurrentProcess();
+ } else {
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_QUERY_INFORMATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&TargetProcess,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+ }
+
+ if (MemoryInformationClass == MemoryWorkingSetInformation) {
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
+ Status = MiGetWorkingSetInfo (MemoryInformation,
+ MemoryInformationLength,
+ TargetProcess);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (TargetProcess);
+ }
+ try {
+
+ if (ARGUMENT_PRESENT(ReturnLength)) {
+ *ReturnLength = ((((PMEMORY_WORKING_SET_INFORMATION)
+ MemoryInformation)->NumberOfEntries - 1) *
+ sizeof(ULONG)) +
+ sizeof(MEMORY_WORKING_SET_INFORMATION);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ }
+
+ return STATUS_SUCCESS;
+ }
+
+ //
+ // If the specified process is not the current process, attach
+ // to the specified process.
+ //
+
+ KeAttachProcess (&TargetProcess->Pcb);
+
+ //
+ // Get working set mutex and block APCs.
+ //
+
+ LOCK_WS_AND_ADDRESS_SPACE (TargetProcess);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (TargetProcess->AddressSpaceDeleted != 0) {
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (TargetProcess);
+ }
+ return STATUS_PROCESS_IS_TERMINATING;
+ }
+
+ //
+    // Locate the VAD that contains the base address or the VAD
+ // which follows the base address.
+ //
+
+ Vad = TargetProcess->VadRoot;
+
+ for (;;) {
+
+ if (Vad == (PMMVAD)NULL) {
+ break;
+ }
+
+ if ((BaseAddress >= Vad->StartingVa) &&
+ (BaseAddress <= Vad->EndingVa)) {
+ Found = TRUE;
+ break;
+ }
+
+ if (BaseAddress < Vad->StartingVa) {
+ if (Vad->LeftChild == (PMMVAD)NULL) {
+ break;
+ }
+ Vad = Vad->LeftChild;
+
+ } else {
+ if (BaseAddress < Vad->EndingVa) {
+ break;
+ }
+ if (Vad->RightChild == (PMMVAD)NULL) {
+ break;
+ }
+ Vad = Vad->RightChild;
+ }
+ }
+
+ if (!Found) {
+
+ //
+ // There is no virtual address allocated at the base
+ // address. Return the size of the hole starting at
+ // the base address.
+ //
+
+ if (Vad == NULL) {
+ TheRegionSize = ((ULONG)MM_HIGHEST_VAD_ADDRESS + 1) -
+ (ULONG)PAGE_ALIGN(BaseAddress);
+ } else {
+ if (Vad->StartingVa < BaseAddress) {
+
+ //
+ // We are looking at the Vad which occupies the range
+ // just before the desired range. Get the next Vad.
+ //
+
+ Vad = MiGetNextVad (Vad);
+ if (Vad == NULL) {
+ TheRegionSize = ((ULONG)MM_HIGHEST_VAD_ADDRESS + 1) -
+ (ULONG)PAGE_ALIGN(BaseAddress);
+ } else {
+ TheRegionSize = (ULONG)Vad->StartingVa -
+ (ULONG)PAGE_ALIGN(BaseAddress);
+ }
+ } else {
+ TheRegionSize = (ULONG)Vad->StartingVa -
+ (ULONG)PAGE_ALIGN(BaseAddress);
+ }
+ }
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (TargetProcess);
+ }
+
+ //
+ // Establish an exception handler and write the information and
+ // returned length.
+ //
+
+ if ( MemoryInformationClass == MemoryBasicInformation ) {
+ try {
+
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->AllocationBase =
+ NULL;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->AllocationProtect =
+ 0;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->BaseAddress =
+ PAGE_ALIGN(BaseAddress);
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->RegionSize =
+ TheRegionSize;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->State = MEM_FREE;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->Protect = PAGE_NOACCESS;
+ ((PMEMORY_BASIC_INFORMATION)MemoryInformation)->Type = 0;
+
+ if (ARGUMENT_PRESENT(ReturnLength)) {
+ *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // Just return success.
+ //
+ }
+
+ return STATUS_SUCCESS;
+ }
+ return STATUS_INVALID_ADDRESS;
+ }
+
+ //
+ // Found a vad.
+ //
+
+ Va = PAGE_ALIGN(BaseAddress);
+ Info.BaseAddress = Va;
+
+ //
+ // There is a page mapped at the base address.
+ //
+
+ if (Vad->u.VadFlags.PrivateMemory) {
+ Info.Type = MEM_PRIVATE;
+ } else if (Vad->u.VadFlags.ImageMap == 0) {
+ Info.Type = MEM_MAPPED;
+
+ if ( MemoryInformationClass == MemoryMappedFilenameInformation ) {
+ if (Vad->ControlArea) {
+ FilePointer = Vad->ControlArea->FilePointer;
+ }
+ if ( !FilePointer ) {
+ FilePointer = (PVOID)1;
+ } else {
+ ObReferenceObject(FilePointer);
+ }
+ }
+
+ } else {
+ Info.Type = MEM_IMAGE;
+ }
+
+ Info.State = MiQueryAddressState (Va, Vad, TargetProcess, &Info.Protect);
+
+ Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
+
+ while (Va <= Vad->EndingVa) {
+
+ NewState = MiQueryAddressState (Va,
+ Vad,
+ TargetProcess,
+ &NewProtect);
+
+ if ((NewState != Info.State) || (NewProtect != Info.Protect)) {
+
+ //
+ // The state for this address does not match, calculate
+ // size and return.
+ //
+
+ break;
+ }
+ Va = (PVOID)((ULONG)Va + PAGE_SIZE);
+ } // end while
+
+ Info.RegionSize = ((ULONG)Va - (ULONG)Info.BaseAddress);
+ Info.AllocationBase = Vad->StartingVa;
+ Info.AllocationProtect = MI_CONVERT_FROM_PTE_PROTECTION (
+ Vad->u.VadFlags.Protection);
+
+ //
+    // A range has been found, release the mutexes, detach from the
+ // target process and return the information.
+ //
+
+ UNLOCK_WS (TargetProcess);
+ UNLOCK_ADDRESS_SPACE (TargetProcess);
+ KeDetachProcess();
+
+ if ( ProcessHandle != NtCurrentProcess() ) {
+ ObDereferenceObject (TargetProcess);
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_SHOW_NT_CALLS) {
+ if ( !MmWatchProcess ) {
+ DbgPrint("queryvm base %lx allocbase %lx protect %lx size %lx\n",
+ Info.BaseAddress, Info.AllocationBase, Info.AllocationProtect,
+ Info.RegionSize);
+ DbgPrint(" state %lx protect %lx type %lx\n",
+ Info.State, Info.Protect, Info.Type);
+ }
+ }
+#endif //DBG
+
+ if ( MemoryInformationClass == MemoryBasicInformation ) {
+ try {
+
+ *(PMEMORY_BASIC_INFORMATION)MemoryInformation = Info;
+
+ if (ARGUMENT_PRESENT(ReturnLength)) {
+ *ReturnLength = sizeof(MEMORY_BASIC_INFORMATION);
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ }
+ return STATUS_SUCCESS;
+ }
+
+ //
+ // Try to return the name of the file that is mapped.
+ //
+
+ if ( !FilePointer ) {
+ return STATUS_INVALID_ADDRESS;
+ } else if ( FilePointer == (PVOID)1 ) {
+ return STATUS_FILE_INVALID;
+ }
+
+ //
+ // We have a referenced pointer to the file. Call ObQueryNameString
+ // and get the file name
+ //
+
+ Status = ObQueryNameString(
+ FilePointer,
+ MemoryInformation,
+ MemoryInformationLength,
+ ReturnLength
+ );
+ ObDereferenceObject(FilePointer);
+ return Status;
+}
+
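+//
+// Illustration (editorial sketch, not part of the original source): the
+// scan pattern the routine above is designed for.  Because each call
+// returns the size of the run of pages with identical attributes, a
+// caller can walk an entire address space by advancing the base address
+// by RegionSize on every iteration:
+//
+//     MEMORY_BASIC_INFORMATION Info;
+//     PVOID Va = NULL;
+//
+//     while (NT_SUCCESS (NtQueryVirtualMemory (ProcessHandle,
+//                                              Va,
+//                                              MemoryBasicInformation,
+//                                              &Info,
+//                                              sizeof (Info),
+//                                              NULL))) {
+//         Va = (PVOID)((ULONG)Info.BaseAddress + Info.RegionSize);
+//     }
+//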
+
+ULONG
+MiQueryAddressState (
+ IN PVOID Va,
+ IN PMMVAD Vad,
+ IN PEPROCESS TargetProcess,
+ OUT PULONG ReturnedProtect
+ )
+
+/*++
+
+Routine Description:
+
+    This routine examines the PTE (and, for sections, the prototype PTE)
+    for the specified virtual address and determines the commit state
+    and page protection of that address.
+
+Arguments:
+
+    Va - Supplies the virtual address to examine.
+
+    Vad - Supplies the VAD which describes the virtual address.
+
+    TargetProcess - Supplies the process which contains the address.
+
+    ReturnedProtect - Receives the page protection of the address.
+
+Return Value:
+
+    Returns the state (MEM_COMMIT or MEM_RESERVE).
+
+Environment:
+
+ Kernel mode. Working set lock and address creation lock held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ MMPTE CapturedProtoPte;
+ PMMPTE ProtoPte;
+ ULONG PteIsZero;
+ ULONG State;
+ ULONG Protect;
+
+#ifdef LARGE_PAGES
+ if (Vad->u.VadFlags.LargePages) {
+ *ReturnedProtect = MI_CONVERT_FROM_PTE_PROTECTION (
+ Vad->u.VadFlags.Protection);
+ return MEM_COMMIT;
+ }
+#endif //LARGE_PAGES
+
+ PointerPde = MiGetPdeAddress (Va);
+ PointerPte = MiGetPteAddress (Va);
+
+ ASSERT ((Vad->StartingVa <= Va) && (Vad->EndingVa >= Va));
+
+ PteIsZero = TRUE;
+
+ if (MiDoesPdeExistAndMakeValid(PointerPde, TargetProcess, FALSE)) {
+
+ //
+ // A PTE exists at this address, see if it is zero.
+ //
+
+ if (PointerPte->u.Long != 0) {
+
+ PteIsZero = FALSE;
+
+ //
+ // There is a non-zero PTE at this address, use
+ // it to build the information block.
+ //
+
+ if (MiIsPteDecommittedPage (PointerPte)) {
+ Protect = 0;
+ State = MEM_RESERVE;
+ } else {
+
+ State = MEM_COMMIT;
+ if (Vad->u.VadFlags.PhysicalMapping == 1) {
+
+ //
+ // Physical mapping, there is no corresponding
+ // PFN element to get the page protection from.
+ //
+
+ Protect = MI_CONVERT_FROM_PTE_PROTECTION (
+ Vad->u.VadFlags.Protection);
+ } else {
+ Protect = MiGetPageProtection (PointerPte,
+ TargetProcess);
+
+ if ((PointerPte->u.Soft.Valid == 0) &&
+ (PointerPte->u.Soft.Prototype == 1) &&
+ (Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea != (PCONTROL_AREA)NULL)) {
+
+ //
+ // Make sure protoPTE is committed.
+ //
+
+ ProtoPte = MiGetProtoPteAddress(Vad,Va);
+
+ CapturedProtoPte = MiCaptureSystemPte (ProtoPte,
+ TargetProcess);
+ if (CapturedProtoPte.u.Long == 0) {
+ State = MEM_RESERVE;
+ Protect = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (PteIsZero) {
+
+ //
+ // There is no PDE at this address, the template from
+ // the VAD supplies the information unless the VAD is
+ // for an image file. For image files the individual
+ // protection is on the prototype PTE.
+ //
+
+ //
+ // Get the default protection information.
+ //
+
+ State = MEM_RESERVE;
+ Protect = 0;
+
+ if (Vad->u.VadFlags.PhysicalMapping == 1) {
+
+ //
+ // Must be banked memory, just return reserved.
+ //
+
+ NOTHING;
+
+ } else if ((Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea != (PCONTROL_AREA)NULL)) {
+
+ //
+ // This VAD refers to a section. Even though the PTE is
+ // zero, the actual page may be committed in the section.
+ //
+
+ ProtoPte = MiGetProtoPteAddress(Vad,Va);
+
+ CapturedProtoPte = MiCaptureSystemPte (ProtoPte,
+ TargetProcess);
+
+ if (CapturedProtoPte.u.Long != 0) {
+ State = MEM_COMMIT;
+
+ if (Vad->u.VadFlags.ImageMap == 0) {
+ Protect = MI_CONVERT_FROM_PTE_PROTECTION (
+ Vad->u.VadFlags.Protection);
+ } else {
+
+ //
+ // This is an image file, the protection is in the
+ // prototype PTE.
+ //
+
+ Protect = MiGetPageProtection (&CapturedProtoPte,
+ TargetProcess);
+ }
+ }
+
+ } else {
+
+ //
+ // Get the protection from the corresponding VAD.
+ //
+
+ if (Vad->u.VadFlags.MemCommit) {
+ State = MEM_COMMIT;
+ Protect = MI_CONVERT_FROM_PTE_PROTECTION (
+ Vad->u.VadFlags.Protection);
+ }
+ }
+ }
+
+ *ReturnedProtect = Protect;
+ return State;
+}
+
+
+
+NTSTATUS
+MiGetWorkingSetInfo (
+ IN PMEMORY_WORKING_SET_INFORMATION WorkingSetInfo,
+ IN ULONG Length,
+ IN PEPROCESS Process
+ )
+
+{
+ PMDL Mdl;
+ PMEMORY_WORKING_SET_INFORMATION Info;
+ PMEMORY_WORKING_SET_BLOCK Entry;
+ PMEMORY_WORKING_SET_BLOCK LastEntry;
+ PMMWSLE Wsle;
+ PMMWSLE LastWsle;
+ ULONG WsSize;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ NTSTATUS status;
+
+ //
+ // Allocate an MDL to map the request.
+ //
+
+ Mdl = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(MDL) + sizeof(ULONG) +
+ BYTES_TO_PAGES (Length) * sizeof(ULONG),
+ ' mM');
+
+ if (Mdl == NULL) {
+ return(STATUS_INSUFFICIENT_RESOURCES);
+ }
+
+ //
+ // Initialize MDL for request.
+ //
+
+ MmInitializeMdl(Mdl, WorkingSetInfo, Length);
+
+ try {
+ MmProbeAndLockPages (Mdl, KeGetPreviousMode(), IoWriteAccess);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ ExFreePool (Mdl);
+ return GetExceptionCode();
+ }
+
+ Info = MmGetSystemAddressForMdl (Mdl);
+
+ if (PsGetCurrentProcess() != Process) {
+ KeAttachProcess (&Process->Pcb);
+ }
+
+ LOCK_WS (Process);
+
+ status = STATUS_SUCCESS;
+
+ if (Process->AddressSpaceDeleted != 0) {
+ status = STATUS_PROCESS_IS_TERMINATING;
+ }
+
+ WsSize = Process->Vm.WorkingSetSize;
+ Info->NumberOfEntries = WsSize;
+
+ if ((WsSize * sizeof(ULONG)) >= Length) {
+ status = STATUS_INFO_LENGTH_MISMATCH;
+ }
+
+ if (status != STATUS_SUCCESS) {
+ UNLOCK_WS (Process);
+ KeDetachProcess ();
+ MmUnlockPages (Mdl);
+ ExFreePool (Mdl);
+ return status;
+ }
+
+ Wsle = MmWsle;
+ LastWsle = &MmWsle[MmWorkingSetList->LastEntry];
+ Entry = &Info->WorkingSetInfo[0];
+ LastEntry = (PMEMORY_WORKING_SET_BLOCK)(
+ (PCHAR)Info + (Length & (~(sizeof(ULONG) - 1))));
+
+ do {
+ if (Wsle->u1.e1.Valid == 1) {
+ Entry->VirtualPage = Wsle->u1.e1.VirtualPageNumber;
+ PointerPte = MiGetPteAddress (Wsle->u1.VirtualAddress);
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+ Entry->Shared = Pfn1->u3.e1.PrototypePte;
+ if (Pfn1->u3.e1.PrototypePte == 0) {
+ Entry->Protection = Pfn1->OriginalPte.u.Soft.Protection;
+ } else {
+ if (Wsle->u1.e1.SameProtectAsProto == 1) {
+ Entry->Protection = Pfn1->OriginalPte.u.Soft.Protection;
+ } else {
+ Entry->Protection = Wsle->u1.e1.Protection;
+ }
+ }
+ Entry += 1;
+ }
+ Wsle += 1;
+ }while ((Entry < LastEntry) && (Wsle <= LastWsle));
+
+ UNLOCK_WS (Process);
+ KeDetachProcess ();
+ MmUnlockPages (Mdl);
+ ExFreePool (Mdl);
+ return STATUS_SUCCESS;
+}
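+
+//
+// Illustration (editorial sketch, not part of the original source):
+// sizing the caller's buffer for MemoryWorkingSetInformation.  The
+// routine above fails with STATUS_INFO_LENGTH_MISMATCH when the working
+// set does not fit, but NumberOfEntries has already been stored, so a
+// caller can compute the size needed for a retry:
+//
+//     Needed = sizeof (MEMORY_WORKING_SET_INFORMATION) +
+//                  (WorkingSetInfo->NumberOfEntries - 1) * sizeof (ULONG);
+//
+// This matches the ReturnLength calculation NtQueryVirtualMemory makes
+// for this information class.
+//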
diff --git a/private/ntos/mm/readwrt.c b/private/ntos/mm/readwrt.c
new file mode 100644
index 000000000..31b02996a
--- /dev/null
+++ b/private/ntos/mm/readwrt.c
@@ -0,0 +1,971 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1992 Microsoft Corporation
+
+Module Name:
+
+ readwrt.c
+
+Abstract:
+
+ This module contains the routines which implement the capability
+ to read and write the virtual memory of a target process.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//
+// The maximum amount to try to Probe and Lock is 14 pages; this
+// way it always fits in a 16-page allocation.
+//
+
+#define MAX_LOCK_SIZE ((ULONG)(14 * PAGE_SIZE))
+
+//
+// The maximum to move in a single block is 64k bytes.
+//
+
+#define MAX_MOVE_SIZE (LONG)0x10000
+
+//
+// The minimum to move in a single block is 128 bytes.
+//
+
+#define MINIMUM_ALLOCATION (LONG)128
+
+//
+// Define the pool move threshold value.
+//
+
+#define POOL_MOVE_THRESHOLD 511
+
+//
+// Define forward referenced procedure prototypes.
+//
+
+ULONG
+MiGetExceptionInfo (
+ IN PEXCEPTION_POINTERS ExceptionPointers,
+ IN PULONG BadVa
+ );
+
+NTSTATUS
+MiDoMappedCopy (
+ IN PEPROCESS FromProcess,
+ IN PVOID FromAddress,
+ IN PEPROCESS ToProcess,
+ OUT PVOID ToAddress,
+ IN ULONG BufferSize,
+ IN KPROCESSOR_MODE PreviousMode,
+ OUT PULONG NumberOfBytesRead
+ );
+
+NTSTATUS
+MiDoPoolCopy (
+ IN PEPROCESS FromProcess,
+ IN PVOID FromAddress,
+ IN PEPROCESS ToProcess,
+ OUT PVOID ToAddress,
+ IN ULONG BufferSize,
+ OUT PULONG NumberOfBytesRead
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,MiGetExceptionInfo)
+#pragma alloc_text(PAGE,NtReadVirtualMemory)
+#pragma alloc_text(PAGE,NtWriteVirtualMemory)
+#pragma alloc_text(PAGE,MiDoMappedCopy)
+#pragma alloc_text(PAGE,MiDoPoolCopy)
+#endif
+
+#define COPY_STACK_SIZE 64
+
+
+NTSTATUS
+NtReadVirtualMemory(
+ IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress,
+ OUT PVOID Buffer,
+ IN ULONG BufferSize,
+ OUT PULONG NumberOfBytesRead OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function copies the specified address range from the specified
+ process into the specified address range of the current process.
+
+Arguments:
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - Supplies the base address in the specified process
+ to be read.
+
+ Buffer - Supplies the address of a buffer which receives the
+ contents from the specified process address space.
+
+ BufferSize - Supplies the requested number of bytes to read from
+ the specified process.
+
+ NumberOfBytesRead - Receives the actual number of bytes
+ transferred into the specified buffer.
+
+Return Value:
+
+ Returns the status of the copy operation; STATUS_PARTIAL_COPY
+ indicates that only part of the requested range could be read.
+
+--*/
+
+{
+
+ ULONG BytesCopied;
+ KPROCESSOR_MODE PreviousMode;
+ PEPROCESS Process;
+ NTSTATUS Status;
+
+ PAGED_CODE();
+
+ //
+ // Get the previous mode and probe output argument if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+
+#ifdef MIPS
+
+ //
+ // Handle the PCR case for mips.
+ //
+
+ if (((ULONG)BaseAddress >= KSEG0_BASE) ||
+ (((ULONG)BaseAddress + BufferSize) > (ULONG)KSEG0_BASE) ||
+ (((ULONG)BaseAddress + BufferSize) < (ULONG)BaseAddress)) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+ if (((ULONG)Buffer >= KSEG0_BASE) ||
+ (((ULONG)Buffer + BufferSize) > (ULONG)KSEG0_BASE) ||
+ (((ULONG)Buffer + BufferSize) < (ULONG)Buffer)) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+#elif defined(_PPC_)
+
+ //
+ // Handle the PCR case for PPC.
+ //
+
+ if (((ULONG)BaseAddress >= KIPCR) &&
+ ((ULONG)BaseAddress < (KIPCR2 + PAGE_SIZE)) &&
+ (((ULONG)BaseAddress + BufferSize) < (KIPCR2 + PAGE_SIZE)) &&
+ (((ULONG)BaseAddress + BufferSize) >= (ULONG)BaseAddress)) {
+ ;
+ } else if (BaseAddress > MM_HIGHEST_USER_ADDRESS) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+ if (Buffer > MM_HIGHEST_USER_ADDRESS) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+#else
+
+ if ((BaseAddress > MM_HIGHEST_USER_ADDRESS) ||
+ (Buffer > MM_HIGHEST_USER_ADDRESS)) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+#endif
+
+ if (ARGUMENT_PRESENT(NumberOfBytesRead)) {
+ try {
+ ProbeForWriteUlong(NumberOfBytesRead);
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+ }
+ }
+
+ //
+ // If the buffer size is not zero, then attempt to read data from the
+ // specified process address space into the current process address
+ // space.
+ //
+
+ BytesCopied = 0;
+ Status = STATUS_SUCCESS;
+ if (BufferSize != 0) {
+
+ //
+ // Reference the target process.
+ //
+
+ Status = ObReferenceObjectByHandle(ProcessHandle,
+ PROCESS_VM_READ,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL);
+
+ //
+ // If the process was successfully referenced, then attempt to
+ // read the specified memory either by direct mapping or copying
+ // through nonpaged pool.
+ //
+
+ if (Status == STATUS_SUCCESS) {
+
+ Status = MmCopyVirtualMemory(Process,
+ BaseAddress,
+ PsGetCurrentProcess(),
+ Buffer,
+ BufferSize,
+ PreviousMode,
+ &BytesCopied);
+
+ //
+ // Dereference the target process.
+ //
+
+ ObDereferenceObject(Process);
+ }
+ }
+
+ //
+ // If requested, return the number of bytes read.
+ //
+
+ if (ARGUMENT_PRESENT(NumberOfBytesRead)) {
+ try {
+ *NumberOfBytesRead = BytesCopied;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ NOTHING;
+ }
+ }
+
+ return Status;
+}
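+
+//
+// Editor's sketch - a hypothetical kernel-mode caller (not part of the
+// original source); the write path below is symmetric. ProcessHandle
+// must have been opened for PROCESS_VM_READ:
+//
+//     UCHAR Buffer[128];
+//     ULONG BytesRead;
+//     NTSTATUS Status;
+//
+//     Status = NtReadVirtualMemory (ProcessHandle,
+//                                   TargetAddress,
+//                                   Buffer,
+//                                   sizeof (Buffer),
+//                                   &BytesRead);
+//
+//     if (Status == STATUS_PARTIAL_COPY) {
+//         // BytesRead holds the count copied before the fault.
+//     }
+//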
+
+NTSTATUS
+NtWriteVirtualMemory(
+ IN HANDLE ProcessHandle,
+ OUT PVOID BaseAddress,
+ IN PVOID Buffer,
+ IN ULONG BufferSize,
+ OUT PULONG NumberOfBytesWritten OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function copies the specified address range from the current
+ process into the specified address range of the specified process.
+
+Arguments:
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - Supplies the base address to be written to in the
+ specified process.
+
+ Buffer - Supplies the address of a buffer which contains the
+ contents to be written into the specified process
+ address space.
+
+ BufferSize - Supplies the requested number of bytes to write
+ into the specified process.
+
+ NumberOfBytesWritten - Receives the actual number of
+ bytes transferred into the specified address
+ space.
+
+Return Value:
+
+ Returns the status of the copy operation; STATUS_PARTIAL_COPY
+ indicates that only part of the requested range could be written.
+
+--*/
+
+{
+ ULONG BytesCopied;
+ KPROCESSOR_MODE PreviousMode;
+ PEPROCESS Process;
+ NTSTATUS Status;
+
+ PAGED_CODE();
+
+ //
+ // Get the previous mode and probe output argument if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+
+ if ((BaseAddress > MM_HIGHEST_USER_ADDRESS) ||
+ (Buffer > MM_HIGHEST_USER_ADDRESS)) {
+ return STATUS_ACCESS_VIOLATION;
+ }
+
+ if (ARGUMENT_PRESENT(NumberOfBytesWritten)) {
+ try {
+ ProbeForWriteUlong(NumberOfBytesWritten);
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+ }
+ }
+
+ //
+ // If the buffer size is not zero, then attempt to write data from the
+ // current process address space into the target process address space.
+ //
+
+ BytesCopied = 0;
+ Status = STATUS_SUCCESS;
+ if (BufferSize != 0) {
+
+ //
+ // Reference the target process.
+ //
+
+ Status = ObReferenceObjectByHandle(ProcessHandle,
+ PROCESS_VM_WRITE,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL);
+
+ //
+ // If the process was successfully referenced, then attempt to
+ // write the specified memory either by direct mapping or copying
+ // through nonpaged pool.
+ //
+
+ if (Status == STATUS_SUCCESS) {
+
+ Status = MmCopyVirtualMemory(PsGetCurrentProcess(),
+ Buffer,
+ Process,
+ BaseAddress,
+ BufferSize,
+ PreviousMode,
+ &BytesCopied);
+
+ //
+ // Dereference the target process.
+ //
+
+ ObDereferenceObject(Process);
+ }
+ }
+
+ //
+ // If requested, return the number of bytes written.
+ //
+
+ if (ARGUMENT_PRESENT(NumberOfBytesWritten)) {
+ try {
+ *NumberOfBytesWritten = BytesCopied;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ NOTHING;
+ }
+ }
+
+ return Status;
+}
+
+
+
+NTSTATUS
+MmCopyVirtualMemory(
+ IN PEPROCESS FromProcess,
+ IN PVOID FromAddress,
+ IN PEPROCESS ToProcess,
+ OUT PVOID ToAddress,
+ IN ULONG BufferSize,
+ IN KPROCESSOR_MODE PreviousMode,
+ OUT PULONG NumberOfBytesCopied
+ )
+{
+ NTSTATUS Status;
+ KIRQL OldIrql;
+ PEPROCESS ProcessToLock;
+
+
+ ProcessToLock = FromProcess;
+ if (FromProcess == PsGetCurrentProcess()) {
+ ProcessToLock = ToProcess;
+ }
+
+ //
+ // Make sure the process still has an address space.
+ //
+
+ ExAcquireSpinLock (&MmSystemSpaceLock, &OldIrql);
+ if (ProcessToLock->AddressSpaceDeleted != 0) {
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+ return STATUS_PROCESS_IS_TERMINATING;
+ }
+ ProcessToLock->VmOperation += 1;
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+
+ //
+ // If the buffer size is greater than the pool move threshold,
+ // then attempt to write the memory via direct mapping.
+ //
+
+ if (BufferSize > POOL_MOVE_THRESHOLD) {
+ Status = MiDoMappedCopy(FromProcess,
+ FromAddress,
+ ToProcess,
+ ToAddress,
+ BufferSize,
+ PreviousMode,
+ NumberOfBytesCopied);
+
+ //
+ // If the completion status is not a working set quota problem,
+ // then finish the service. Otherwise, attempt to write the
+ // memory through nonpaged pool.
+ //
+
+ if (Status != STATUS_WORKING_SET_QUOTA) {
+ goto CompleteService;
+ }
+
+ *NumberOfBytesCopied = 0;
+ }
+
+ //
+ // There was not enough working set quota to write the memory via
+ // direct mapping or the size of the write was below the pool move
+ // threshold. Attempt to write the specified memory through nonpaged
+ // pool.
+ //
+
+ Status = MiDoPoolCopy(FromProcess,
+ FromAddress,
+ ToProcess,
+ ToAddress,
+ BufferSize,
+ NumberOfBytesCopied);
+
+CompleteService:
+
+ //
+ // Indicate that the vm operation is complete.
+ //
+
+ ExAcquireSpinLock (&MmSystemSpaceLock, &OldIrql);
+ ProcessToLock->VmOperation -= 1;
+ if ((ProcessToLock->VmOperation == 0) &&
+ (ProcessToLock->VmOperationEvent != NULL)) {
+ KeSetEvent (ProcessToLock->VmOperationEvent, 0, FALSE);
+ }
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+ return Status;
+}
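+
+//
+// Editor's note - the copy strategy above, condensed (a restatement,
+// not original code): requests larger than POOL_MOVE_THRESHOLD (511
+// bytes) first try the direct MDL double-map path and fall back to
+// the pool path only on STATUS_WORKING_SET_QUOTA (with the byte count
+// reset to zero first); smaller requests go straight through pool:
+//
+//     if (BufferSize > POOL_MOVE_THRESHOLD) {
+//         Status = MiDoMappedCopy (...);
+//         if (Status != STATUS_WORKING_SET_QUOTA) {
+//             return Status;
+//         }
+//     }
+//     return MiDoPoolCopy (...);
+//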
+
+
+ULONG
+MiGetExceptionInfo (
+ IN PEXCEPTION_POINTERS ExceptionPointers,
+ IN PULONG BadVa
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines an exception record and extracts the virtual
+ address of an access violation, guard page violation, or in-page error.
+
+Arguments:
+
+ ExceptionPointers - Supplies a pointer to the exception record.
+
+ BadVa - Receives the virtual address which caused the access violation.
+
+Return Value:
+
+ EXCEPTION_EXECUTE_HANDLER
+
+--*/
+
+{
+ PEXCEPTION_RECORD ExceptionRecord;
+
+ PAGED_CODE();
+
+ //
+ // If the exception code is an access violation, guard page violation,
+ // or an in-page read error, then return the faulting address. Otherwise,
+ // return a special address value.
+ //
+
+ ExceptionRecord = ExceptionPointers->ExceptionRecord;
+ if ((ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) ||
+ (ExceptionRecord->ExceptionCode == STATUS_GUARD_PAGE_VIOLATION) ||
+ (ExceptionRecord->ExceptionCode == STATUS_IN_PAGE_ERROR)) {
+
+ //
+ // The virtual address which caused the exception is the 2nd
+ // parameter in the exception information array.
+ //
+
+ *BadVa = ExceptionRecord->ExceptionInformation[1];
+
+ } else {
+
+ //
+ // Unexpected exception - set the bad virtual address to a
+ // distinguished value to indicate the faulting address is unknown.
+ //
+
+ *BadVa = 0xFFFFFFFF;
+ }
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
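+
+//
+// Editor's note (not part of the original source): this routine is
+// meant to be used as a SEH filter expression, as the copy loops
+// below do:
+//
+//     } except (MiGetExceptionInfo (GetExceptionInformation(), &BadVa)) {
+//
+// capturing the faulting virtual address as a side effect while
+// always selecting the exception handler.
+//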
+
+NTSTATUS
+MiDoMappedCopy (
+ IN PEPROCESS FromProcess,
+ IN PVOID FromAddress,
+ IN PEPROCESS ToProcess,
+ OUT PVOID ToAddress,
+ IN ULONG BufferSize,
+ IN KPROCESSOR_MODE PreviousMode,
+ OUT PULONG NumberOfBytesRead
+ )
+
+/*++
+
+Routine Description:
+
+ This function copies the specified address range from the source
+ process into the specified address range of the target process by
+ locking the source pages and mapping them into system space.
+
+Arguments:
+
+ FromProcess - Supplies a pointer to the source process object.
+
+ FromAddress - Supplies the base address in the source process
+ to be read.
+
+ ToProcess - Supplies a pointer to the target process object.
+
+ ToAddress - Supplies the address of the buffer in the target process
+ which receives the contents from the source process address space.
+
+ BufferSize - Supplies the requested number of bytes to read from
+ the specified process.
+
+ PreviousMode - Supplies the previous processor mode.
+
+ NumberOfBytesRead - Receives the actual number of bytes
+ transferred into the specified buffer.
+
+Return Value:
+
+ STATUS_SUCCESS if the copy completed, STATUS_WORKING_SET_QUOTA if
+ there was not enough working set quota to lock the source pages,
+ STATUS_PARTIAL_COPY if a fault interrupted the copy, or the probe
+ exception status.
+
+--*/
+
+{
+
+ ULONG AmountToMove;
+ ULONG BadVa;
+ PEPROCESS CurrentProcess;
+ BOOLEAN FailedMove;
+ BOOLEAN FailedProbe;
+ PULONG InVa;
+ ULONG LeftToMove;
+ PULONG MappedAddress;
+ ULONG MaximumMoved;
+ PMDL Mdl;
+ ULONG MdlHack[(sizeof(MDL)/4) + (MAX_LOCK_SIZE >> PAGE_SHIFT) + 1];
+ PULONG OutVa;
+
+ PAGED_CODE();
+
+ //
+ // Get the address of the current process object and initialize copy
+ // parameters.
+ //
+
+ CurrentProcess = PsGetCurrentProcess();
+
+ InVa = FromAddress;
+ OutVa = ToAddress;
+
+ MaximumMoved = MAX_LOCK_SIZE;
+ if (BufferSize <= MAX_LOCK_SIZE) {
+ MaximumMoved = BufferSize;
+ }
+
+ Mdl = (PMDL)&MdlHack[0];
+
+ //
+ // Map the data into the system part of the address space, then copy it.
+ //
+
+ LeftToMove = BufferSize;
+ AmountToMove = MaximumMoved;
+ while (LeftToMove > 0) {
+
+ if (LeftToMove < AmountToMove) {
+
+ //
+ // Set to move the remaining bytes.
+ //
+
+ AmountToMove = LeftToMove;
+ }
+
+ KeDetachProcess();
+ KeAttachProcess (&FromProcess->Pcb);
+
+ //
+ // We may be touching a user's memory which could be invalid,
+ // declare an exception handler.
+ //
+
+ try {
+
+ //
+ // Probe to make sure that the specified buffer is accessible in
+ // the target process.
+ //
+
+ MappedAddress = NULL;
+
+ if (((PVOID)InVa == FromAddress) &&
+ ((PVOID)InVa <= MM_HIGHEST_USER_ADDRESS)) {
+ FailedProbe = TRUE;
+ ProbeForRead (FromAddress, BufferSize, sizeof(CHAR));
+ }
+
+ //
+ // Initialize MDL for request.
+ //
+
+ MmInitializeMdl(Mdl,
+ InVa,
+ AmountToMove);
+
+ FailedMove = TRUE;
+ MmProbeAndLockPages (Mdl, PreviousMode, IoReadAccess);
+ FailedMove = FALSE;
+
+ MappedAddress = MmMapLockedPages (Mdl, KernelMode);
+
+ //
+ // Detach from the FromProcess and attach to the ToProcess.
+ //
+
+ KeDetachProcess();
+ KeAttachProcess (&ToProcess->Pcb);
+
+ //
+ // Now operating in the context of the ToProcess.
+ //
+
+ if (((PVOID)InVa == FromAddress)
+ && (ToAddress <= MM_HIGHEST_USER_ADDRESS)) {
+ ProbeForWrite (ToAddress, BufferSize, sizeof(CHAR));
+ FailedProbe = FALSE;
+ }
+
+ RtlCopyMemory (OutVa, MappedAddress, AmountToMove);
+ } except (MiGetExceptionInfo (GetExceptionInformation(), &BadVa)) {
+
+
+ //
+ // If an exception occurs during the move operation or probe,
+ // return the exception code as the status value.
+ //
+
+ KeDetachProcess();
+ if (MappedAddress != NULL) {
+ MmUnmapLockedPages (MappedAddress, Mdl);
+ MmUnlockPages (Mdl);
+ }
+
+ if (GetExceptionCode() == STATUS_WORKING_SET_QUOTA) {
+ return STATUS_WORKING_SET_QUOTA;
+ }
+
+ if (FailedProbe) {
+ return GetExceptionCode();
+
+ } else {
+
+ //
+ // The failure occurred during the move operation, determine
+ // which move failed, and calculate the number of bytes
+ // actually moved.
+ //
+
+ if (FailedMove) {
+ if (BadVa != 0xFFFFFFFF) {
+ *NumberOfBytesRead = BadVa - (ULONG)FromAddress;
+ }
+
+ } else {
+ *NumberOfBytesRead = BadVa - (ULONG)ToAddress;
+ }
+ }
+
+ return STATUS_PARTIAL_COPY;
+ }
+ MmUnmapLockedPages (MappedAddress, Mdl);
+ MmUnlockPages (Mdl);
+
+ LeftToMove -= AmountToMove;
+ InVa = (PVOID)((ULONG)InVa + AmountToMove);
+ OutVa = (PVOID)((ULONG)OutVa + AmountToMove);
+ }
+
+ KeDetachProcess();
+
+ //
+ // Set number of bytes moved.
+ //
+
+ *NumberOfBytesRead = BufferSize;
+ return STATUS_SUCCESS;
+}
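+
+//
+// Editor's note (not part of the original source): the
+// (PVOID)InVa == FromAddress tests above are true only on the first
+// pass through the loop, so each user buffer is probed once, for the
+// full BufferSize, before any data moves; faults on later passes are
+// still caught by the same exception filter and reported as a
+// partial copy.
+//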
+
+NTSTATUS
+MiDoPoolCopy (
+ IN PEPROCESS FromProcess,
+ IN PVOID FromAddress,
+ IN PEPROCESS ToProcess,
+ OUT PVOID ToAddress,
+ IN ULONG BufferSize,
+ OUT PULONG NumberOfBytesRead
+ )
+
+/*++
+
+Routine Description:
+
+ This function copies the specified address range from the source
+ process into the specified address range of the target process by
+ staging the data through nonpaged pool (or a small stack buffer).
+
+Arguments:
+
+ FromProcess - Supplies a pointer to the source process object.
+
+ FromAddress - Supplies the base address in the source process
+ to be read.
+
+ ToProcess - Supplies a pointer to the target process object.
+
+ ToAddress - Supplies the address of the buffer in the target process
+ which receives the contents from the source process address space.
+
+ BufferSize - Supplies the requested number of bytes to copy.
+
+ NumberOfBytesRead - Receives the actual number of bytes transferred.
+
+Return Value:
+
+ STATUS_SUCCESS if the copy completed, STATUS_PARTIAL_COPY if a fault
+ interrupted the copy, or the probe exception status.
+
+--*/
+
+{
+
+ ULONG AmountToMove;
+ ULONG BadVa;
+ PEPROCESS CurrentProcess;
+ BOOLEAN FailedMove;
+ BOOLEAN FailedProbe;
+ PULONG InVa;
+ ULONG LeftToMove;
+ ULONG MaximumMoved;
+ PULONG OutVa;
+ PULONG PoolArea;
+ LONGLONG StackArray[COPY_STACK_SIZE];
+ ULONG FreePool;
+
+ PAGED_CODE();
+
+ //
+ // Get the address of the current process object and initialize copy
+ // parameters.
+ //
+
+ CurrentProcess = PsGetCurrentProcess();
+
+ InVa = FromAddress;
+ OutVa = ToAddress;
+
+ //
+ // Allocate non-paged memory to copy in and out of.
+ //
+
+ MaximumMoved = MAX_MOVE_SIZE;
+ if (BufferSize <= MAX_MOVE_SIZE) {
+ MaximumMoved = BufferSize;
+ }
+
+ if (BufferSize <= (COPY_STACK_SIZE * sizeof(LONGLONG))) {
+ PoolArea = (PULONG)&StackArray[0];
+ FreePool = FALSE;
+ } else {
+ PoolArea = ExAllocatePoolWithTag (NonPagedPool, MaximumMoved, 'wRmM');
+
+ while (PoolArea == NULL) {
+ if (MaximumMoved <= MINIMUM_ALLOCATION) {
+ PoolArea = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ MaximumMoved, 'wRmM');
+
+ } else {
+ MaximumMoved = MaximumMoved >> 1;
+ PoolArea = ExAllocatePoolWithTag (NonPagedPool, MaximumMoved, 'wRmM');
+ }
+ }
+ FreePool = TRUE;
+ }
+
+ //
+ // Copy the data into pool, then copy back into the ToProcess.
+ //
+
+ LeftToMove = BufferSize;
+ AmountToMove = MaximumMoved;
+ while (LeftToMove > 0) {
+
+ if (LeftToMove < AmountToMove) {
+
+ //
+ // Set to move the remaining bytes.
+ //
+
+ AmountToMove = LeftToMove;
+ }
+
+ KeDetachProcess();
+ KeAttachProcess (&FromProcess->Pcb);
+
+ //
+ // We may be touching a user's memory which could be invalid,
+ // declare an exception handler.
+ //
+
+ try {
+
+ //
+ // Probe to make sure that the specified buffer is accessible in
+ // the target process.
+ //
+
+ if (((PVOID)InVa == FromAddress) &&
+ ((PVOID)InVa <= MM_HIGHEST_USER_ADDRESS)) {
+ FailedProbe = TRUE;
+ ProbeForRead (FromAddress, BufferSize, sizeof(CHAR));
+ }
+
+ FailedMove = TRUE;
+ RtlCopyMemory (PoolArea, InVa, AmountToMove);
+ FailedMove = FALSE;
+
+ KeDetachProcess();
+ KeAttachProcess (&ToProcess->Pcb);
+
+ //
+ // Now operating in the context of the ToProcess.
+ //
+
+ if (((PVOID)InVa == FromAddress)
+ && (ToAddress <= MM_HIGHEST_USER_ADDRESS)) {
+ ProbeForWrite (ToAddress, BufferSize, sizeof(CHAR));
+ FailedProbe = FALSE;
+ }
+
+ RtlCopyMemory (OutVa, PoolArea, AmountToMove);
+
+ } except (MiGetExceptionInfo (GetExceptionInformation(), &BadVa)) {
+
+ //
+ // If an exception occurs during the move operation or probe,
+ // return the exception code as the status value.
+ //
+
+ KeDetachProcess();
+
+ if (FreePool) {
+ ExFreePool (PoolArea);
+ }
+ if (FailedProbe) {
+ return GetExceptionCode();
+
+ } else {
+
+ //
+ // The failure occurred during the move operation, determine
+ // which move failed, and calculate the number of bytes
+ // actually moved.
+ //
+
+ if (FailedMove) {
+
+ //
+ // The failure occurred getting the data.
+ //
+
+ if (BadVa != 0xFFFFFFFF) {
+ *NumberOfBytesRead = BadVa - (ULONG)FromAddress;
+ }
+
+ } else {
+
+ //
+ // The failure occurred writing the data.
+ //
+
+ *NumberOfBytesRead = BadVa - (ULONG)ToAddress;
+ }
+ }
+
+ return STATUS_PARTIAL_COPY;
+ }
+
+ LeftToMove -= AmountToMove;
+ InVa = (PVOID)((ULONG)InVa + AmountToMove);
+ OutVa = (PVOID)((ULONG)OutVa + AmountToMove);
+ }
+
+ if (FreePool) {
+ ExFreePool (PoolArea);
+ }
+ KeDetachProcess();
+
+ //
+ // Set number of bytes moved.
+ //
+
+ *NumberOfBytesRead = BufferSize;
+ return STATUS_SUCCESS;
+}
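+
+//
+// Editor's note (not part of the original source): the staging buffer
+// above is chosen as follows - requests of at most
+// COPY_STACK_SIZE * sizeof(LONGLONG) (512) bytes use the on-stack
+// StackArray; larger requests start at MAX_MOVE_SIZE (64k) and halve
+// until nonpaged pool satisfies the allocation, bottoming out in
+// NonPagedPoolMustSucceed once the request reaches MINIMUM_ALLOCATION
+// (128) bytes.
+//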
diff --git a/private/ntos/mm/sectsup.c b/private/ntos/mm/sectsup.c
new file mode 100644
index 000000000..65856beb3
--- /dev/null
+++ b/private/ntos/mm/sectsup.c
@@ -0,0 +1,2796 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sectsup.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ section object.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MiSectionInitialization)
+#endif
+
+MMEVENT_COUNT_LIST MmEventCountList;
+
+NTSTATUS
+MiFlushSectionInternal (
+ IN PMMPTE StartingPte,
+ IN PMMPTE FinalPte,
+ IN PSUBSECTION FirstSubsection,
+ IN PSUBSECTION LastSubsection,
+ IN ULONG Synchronize,
+ OUT PIO_STATUS_BLOCK IoStatus
+ );
+
+ULONG
+FASTCALL
+MiCheckProtoPtePageState (
+ IN PMMPTE PrototypePte,
+ IN ULONG PfnLockHeld
+ );
+
+ULONG MmSharedCommit = 0;
+extern ULONG MMCONTROL;
+
+//
+// Define segment dereference thread wait object types.
+//
+
+typedef enum _SEGMENT_DEREFERENCE_OBJECT {
+ SegmentDereference,
+ UsedSegmentCleanup,
+ SegMaximumObject
+ } BALANCE_OBJECT;
+
+extern POBJECT_TYPE IoFileObjectType;
+
+GENERIC_MAPPING MiSectionMapping = {
+ STANDARD_RIGHTS_READ |
+ SECTION_QUERY | SECTION_MAP_READ,
+ STANDARD_RIGHTS_WRITE |
+ SECTION_MAP_WRITE,
+ STANDARD_RIGHTS_EXECUTE |
+ SECTION_MAP_EXECUTE,
+ SECTION_ALL_ACCESS
+};
+
+VOID
+VadTreeWalk (
+ PMMVAD Start
+ );
+
+VOID
+MiRemoveUnusedSegments(
+ VOID
+ );
+
+
+VOID
+FASTCALL
+MiInsertBasedSection (
+ IN PSECTION Section
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a based section's address node into the section
+ based tree and reorders the splay tree as appropriate.
+
+Arguments:
+
+ Section - Supplies a pointer to a based section.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Must be holding the section based mutex.
+
+--*/
+
+{
+ PMMADDRESS_NODE *Root;
+
+ ASSERT (Section->Address.EndingVa > Section->Address.StartingVa);
+
+ Root = &MmSectionBasedRoot;
+
+ MiInsertNode ( &Section->Address, Root);
+ return;
+}
+
+
+VOID
+FASTCALL
+MiRemoveBasedSection (
+ IN PSECTION Section
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes a based section from the tree.
+
+Arguments:
+
+ Section - pointer to the based section object to remove.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Must be holding the section based mutex.
+
+--*/
+
+{
+ PMMADDRESS_NODE *Root;
+
+ Root = &MmSectionBasedRoot;
+
+ MiRemoveNode ( &Section->Address, Root);
+
+ return;
+}
+
+
+PVOID
+MiFindEmptySectionBaseDown (
+ IN ULONG SizeOfRange,
+ IN PVOID HighestAddressToEndAt
+ )
+
+/*++
+
+Routine Description:
+
+ The function examines the virtual address descriptors to locate
+ an unused range of the specified size and returns the starting
+ address of the range. This routine looks from the top down.
+
+Arguments:
+
+ SizeOfRange - Supplies the size in bytes of the range to locate.
+
+ HighestAddressToEndAt - Supplies the highest virtual address at which
+ the located range may end.
+
+Return Value:
+
+ Returns the starting address of a suitable range.
+
+--*/
+
+{
+ return MiFindEmptyAddressRangeDownTree ( SizeOfRange,
+ HighestAddressToEndAt,
+ X64K,
+ MmSectionBasedRoot);
+}
+
+
+VOID
+MiSegmentDelete (
+ PSEGMENT Segment
+ )
+
+/*++
+
+Routine Description:
+
+ This routine is called by the object management procedures whenever
+ the last reference to a segment object has been removed. This routine
+ releases the pool allocated for the prototype PTEs and performs
+ consistency checks on those PTEs.
+
+ For segments which map files, the file object is dereferenced.
+
+ Note, that for a segment which maps a file, no PTEs may be valid
+ or transition, while a segment which is backed by a paging file
+ may have transition pages, but no valid pages (there can be no
+ PTEs which refer to the segment).
+
+
+Arguments:
+
+ Segment - a pointer to the segment structure.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+ KIRQL OldIrql2;
+ volatile PFILE_OBJECT File;
+ volatile PCONTROL_AREA ControlArea;
+ PEVENT_COUNTER Event;
+ MMPTE PteContents;
+ PSUBSECTION Subsection;
+ PSUBSECTION NextSubsection;
+
+ PointerPte = Segment->PrototypePte;
+ LastPte = PointerPte + Segment->NonExtendedPtes;
+
+#if DBG
+ if (MmDebug & MM_DBG_SECTIONS) {
+ DbgPrint("MM:deleting segment %lx control %lx\n",Segment, Segment->ControlArea);
+ }
+#endif
+
+ ControlArea = Segment->ControlArea;
+ LOCK_PFN (OldIrql2);
+ if (ControlArea->DereferenceList.Flink != NULL) {
+
+ //
+ // Remove this from the list of unused segments.
+ //
+
+ ExAcquireSpinLock (&MmDereferenceSegmentHeader.Lock, &OldIrql);
+ RemoveEntryList (&ControlArea->DereferenceList);
+ MmUnusedSegmentCount -= 1;
+ ExReleaseSpinLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+ }
+ UNLOCK_PFN (OldIrql2);
+
+ if (ControlArea->u.Flags.Image ||
+ ControlArea->u.Flags.File ) {
+
+ //
+ // If there have been committed pages in this segment, adjust
+ // the total commit count.
+ //
+
+
+ //
+ // Unload kernel debugger symbols if any were loaded.
+ //
+
+ if (ControlArea->u.Flags.DebugSymbolsLoaded != 0) {
+
+ //
+ // TEMP TEMP TEMP rip out when debugger converted
+ //
+
+ ANSI_STRING AnsiName;
+ NTSTATUS Status;
+
+ Status = RtlUnicodeStringToAnsiString( &AnsiName,
+ (PUNICODE_STRING)&Segment->ControlArea->FilePointer->FileName,
+ TRUE );
+
+ if (NT_SUCCESS( Status)) {
+ DbgUnLoadImageSymbols( &AnsiName,
+ Segment->BasedAddress,
+ (ULONG)PsGetCurrentProcess());
+ RtlFreeAnsiString( &AnsiName );
+ }
+ LOCK_PFN (OldIrql);
+ ControlArea->u.Flags.DebugSymbolsLoaded = 0;
+ UNLOCK_PFN (OldIrql);
+ }
+
+ //
+ // If the segment was deleted due to a name collision at insertion
+ // we don't want to dereference the file pointer.
+ //
+
+ if (ControlArea->u.Flags.BeingCreated == FALSE) {
+
+ //
+ // Clear the segment context and dereference the file object
+ // for this Segment.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ MiMakeSystemAddressValidPfn (Segment);
+ File = (volatile PFILE_OBJECT)Segment->ControlArea->FilePointer;
+ ControlArea = (volatile PCONTROL_AREA)Segment->ControlArea;
+
+ Event = ControlArea->WaitingForDeletion;
+ ControlArea->WaitingForDeletion = NULL;
+
+ UNLOCK_PFN (OldIrql);
+
+ if (Event != NULL) {
+ KeSetEvent (&Event->Event, 0, FALSE);
+ }
+
+#if DBG
+ if (ControlArea->u.Flags.Image == 1) {
+ ASSERT (ControlArea->FilePointer->SectionObjectPointer->ImageSectionObject != (PVOID)ControlArea);
+ } else {
+ ASSERT (ControlArea->FilePointer->SectionObjectPointer->DataSectionObject != (PVOID)ControlArea);
+ }
+#endif //DBG
+
+ ObDereferenceObject (ControlArea->FilePointer);
+ }
+
+ if (ControlArea->u.Flags.Image == 0) {
+
+ //
+ // This is a mapped data file. None of the prototype
+ // PTEs may be referencing a physical page (valid or transition).
+ //
+
+#if DBG
+ while (PointerPte < LastPte) {
+
+ //
+ // Prototype PTEs for a mapped data file are either in
+ // mapped file (prototype) format or zero.
+ //
+
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+ ASSERT ((PointerPte->u.Soft.Prototype == 1) ||
+ (PointerPte->u.Long == 0));
+ PointerPte += 1;
+ }
+#endif //DBG
+
+ //
+ // Deallocate the control area and subsections.
+ //
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ Subsection = Subsection->NextSubsection;
+
+ while (Subsection != NULL) {
+ ExFreePool (Subsection->SubsectionBase);
+ NextSubsection = Subsection->NextSubsection;
+ ExFreePool (Subsection);
+ Subsection = NextSubsection;
+ }
+
+ if (Segment->NumberOfCommittedPages != 0) {
+ MiReturnCommitment (Segment->NumberOfCommittedPages);
+ MmSharedCommit -= Segment->NumberOfCommittedPages;
+ }
+
+ RtlZeroMemory (Segment->ControlArea, sizeof (CONTROL_AREA)); //fixfix remove
+ ExFreePool (Segment->ControlArea);
+ RtlZeroMemory (Segment, sizeof (SEGMENT)); //fixfix remove
+ ExFreePool (Segment);
+
+ //
+ // The file mapped Segment object is now deleted.
+ //
+
+ return;
+ }
+ }
+
+ //
+ // This is a page file backed or image Segment. The Segment is being
+ // deleted, remove all references to the paging file and physical memory.
+ //
+
+ //
+ // The PFN mutex is required for deallocating pages from a paging
+ // file and for deleting transition PTEs.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ MiMakeSystemAddressValidPfn (PointerPte);
+
+ while (PointerPte < LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ //
+ // We are on a page boundary, make sure this PTE is resident.
+ //
+
+ if (MmIsAddressValid (PointerPte) == FALSE) {
+
+ MiMakeSystemAddressValidPfn (PointerPte);
+ }
+ }
+
+ PteContents = *PointerPte;
+
+ //
+ // Prototype PTEs for Segments backed by paging file
+ // are either in demand zero, page file format, or transition.
+ //
+
+ ASSERT (PteContents.u.Hard.Valid == 0);
+
+ if (PteContents.u.Soft.Prototype == 0) {
+
+ if (PteContents.u.Soft.Transition == 1) {
+
+ //
+ // Prototype PTE in transition, put the page on the free list.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+ // Check the reference count for the page. If the reference
+ // count is zero and the page is not already on the free list,
+ // move the page to the free list; if the reference count is
+ // not zero, ignore this page - it will be placed on the free
+ // list when the reference count reaches zero.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ MiUnlinkPageFromList (Pfn1);
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PteContents.u.Trans.PageFrameNumber);
+ }
+
+ } else {
+
+ //
+ // This is not a prototype PTE, if any paging file
+ // space has been allocated, release it.
+ //
+
+ if (IS_PTE_NOT_DEMAND_ZERO (PteContents)) {
+ MiReleasePageFileSpace (PteContents);
+ }
+ }
+ }
+#if DBG
+ *PointerPte = ZeroPte;
+#endif
+ PointerPte += 1;
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // If there have been committed pages in this segment, adjust
+ // the total commit count.
+ //
+
+ if (Segment->NumberOfCommittedPages != 0) {
+ MiReturnCommitment (Segment->NumberOfCommittedPages);
+ MmSharedCommit -= Segment->NumberOfCommittedPages;
+ }
+
+ ExFreePool (Segment->ControlArea);
+ ExFreePool (Segment);
+
+ return;
+}
+
+VOID
+MiSectionDelete (
+ PVOID Object
+ )
+
+/*++
+
+Routine Description:
+
+
+ This routine is called by the object management procedures whenever
+ the last reference to a section object has been removed. This routine
+ dereferences the associated segment object and checks to see if
+ the segment object should be deleted by queueing the segment to the
+ segment deletion thread.
+
+Arguments:
+
+ Object - a pointer to the body of the section object.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PSECTION Section;
+ volatile PCONTROL_AREA ControlArea;
+ ULONG DereferenceSegment = FALSE;
+ KIRQL OldIrql;
+ ULONG UserRef;
+
+ Section = (PSECTION)Object;
+
+ if (Section->Segment == (PSEGMENT)NULL) {
+
+ //
+ // The section was never initialized, no need to remove
+ // any structures.
+ //
+ return;
+ }
+
+ UserRef = Section->u.Flags.UserReference;
+ ControlArea = (volatile PCONTROL_AREA)Section->Segment->ControlArea;
+
+#if DBG
+ if (MmDebug & MM_DBG_SECTIONS) {
+ DbgPrint("MM:deleting section %lx control %lx\n",Section, ControlArea);
+ }
+#endif
+
+ if (Section->Address.StartingVa != NULL) {
+
+ //
+ // This section is based, remove the base address from the
+ // treee.
+ //
+
+ //
+ // Get the allocation base mutex.
+ //
+
+ ExAcquireFastMutex (&MmSectionBasedMutex);
+
+ MiRemoveBasedSection (Section);
+
+ ExReleaseFastMutex (&MmSectionBasedMutex);
+
+ }
+
+ //
+ // Decrement the number of section references to the segment for this
+ // section. This requires APCs to be blocked and the PfnMutex to
+ // synchronize upon.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ ControlArea->NumberOfSectionReferences -= 1;
+ ControlArea->NumberOfUserReferences -= UserRef;
+
+ //
+ // This routine returns with the PFN lock released.
+ //
+
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+
+ return;
+}
+
+
+VOID
+MiDereferenceSegmentThread (
+ IN PVOID StartContext
+ )
+
+/*++
+
+Routine Description:
+
+ This routine is the thread for dereferencing segments which have
+ no references from any sections or mapped views AND there are
+ no prototype PTEs within the segment which are in the transition
+ state (i.e., no PFN database references to the segment).
+
+ It also does double duty and is used for expansion of paging files.
+
+Arguments:
+
+ StartContext - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PCONTROL_AREA ControlArea;
+ PMMPAGE_FILE_EXPANSION PageExpand;
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+ static KWAIT_BLOCK WaitBlockArray[SegMaximumObject];
+ PVOID WaitObjects[SegMaximumObject];
+ NTSTATUS Status;
+
+ StartContext; //avoid compiler warning.
+
+ //
+ // Make this a real time thread.
+ //
+
+ (VOID) KeSetPriorityThread (&PsGetCurrentThread()->Tcb,
+ LOW_REALTIME_PRIORITY + 2);
+
+ WaitObjects[SegmentDereference] = (PVOID)&MmDereferenceSegmentHeader.Semaphore;
+ WaitObjects[UsedSegmentCleanup] = (PVOID)&MmUnusedSegmentCleanup;
+
+ for (;;) {
+
+ Status = KeWaitForMultipleObjects(SegMaximumObject,
+ &WaitObjects[0],
+ WaitAny,
+ WrVirtualMemory,
+ UserMode,
+ FALSE,
+ NULL,
+ &WaitBlockArray[0]);
+
+ //
+ // Switch on the wait status.
+ //
+
+ switch (Status) {
+
+ case SegmentDereference:
+
+ //
+ // An entry is available to dereference, acquire the spinlock
+ // and remove the entry.
+ //
+
+ ExAcquireSpinLock (&MmDereferenceSegmentHeader.Lock, &OldIrql);
+
+ if (IsListEmpty(&MmDereferenceSegmentHeader.ListHead)) {
+
+ //
+ // There is nothing in the list, rewait.
+ //
+
+ ExReleaseSpinLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+ break;
+ }
+
+ NextEntry = RemoveHeadList(&MmDereferenceSegmentHeader.ListHead);
+
+ ExReleaseSpinLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+
+ ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
+
+ ControlArea = CONTAINING_RECORD( NextEntry,
+ CONTROL_AREA,
+ DereferenceList );
+
+ if (ControlArea->Segment != NULL) {
+
+ //
+ // This is a control area, delete it.
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_SECTIONS) {
+ DbgPrint("MM:dereferencing segment %lx control %lx\n",
+ ControlArea->Segment, ControlArea);
+ }
+#endif
+
+ //
+ // Indicate this entry is not on any list.
+ //
+
+ ControlArea->DereferenceList.Flink = NULL;
+
+ ASSERT (ControlArea->u.Flags.FilePointerNull == 1);
+ MiSegmentDelete (ControlArea->Segment);
+
+ } else {
+
+ //
+ // This is a request to expand or reduce the paging files.
+ //
+
+ PageExpand = (PMMPAGE_FILE_EXPANSION)ControlArea;
+
+ if (PageExpand->RequestedExpansionSize == 0xFFFFFFFF) {
+
+ //
+ // Attempt to reduce the size of the paging files.
+ //
+
+ ExFreePool (PageExpand);
+
+ MiAttemptPageFileReduction ();
+ } else {
+
+ //
+ // Attempt to expand the size of the paging files.
+ //
+
+ PageExpand->ActualExpansion = MiExtendPagingFiles (
+ PageExpand->RequestedExpansionSize);
+
+ KeSetEvent (&PageExpand->Event, 0, FALSE);
+ MiRemoveUnusedSegments();
+ }
+ }
+ break;
+
+ case UsedSegmentCleanup:
+
+ MiRemoveUnusedSegments();
+
+ KeClearEvent (&MmUnusedSegmentCleanup);
+
+ break;
+
+ default:
+
+ KdPrint(("MMSegmentderef: Illegal wait status, %lx =\n", Status));
+ break;
+ } // end switch
+
+ } //end for
+
+ return;
+}
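+
+//
+// Editor's note (not part of the original source): because the wait
+// above is a WaitAny on SegMaximumObject objects,
+// KeWaitForMultipleObjects returns STATUS_WAIT_0 plus the index of
+// the signaled object, which is why the switch can compare Status
+// directly against the SegmentDereference and UsedSegmentCleanup
+// enumerators.
+//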
+
+
+ULONG
+MiSectionInitialization (
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates the section object type descriptor at system
+ initialization and stores the address of the object type descriptor
+ in global storage.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ TRUE - Initialization was successful.
+
+ FALSE - Initialization failed.
+
+--*/
+
+{
+ OBJECT_TYPE_INITIALIZER ObjectTypeInitializer;
+ UNICODE_STRING TypeName;
+ HANDLE ThreadHandle;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ UNICODE_STRING SectionName;
+ PSECTION Section;
+ HANDLE Handle;
+ PSEGMENT Segment;
+ PCONTROL_AREA ControlArea;
+ NTSTATUS Status;
+
+ MmSectionBasedRoot = (PMMADDRESS_NODE)NULL;
+
+ //
+ // Initialize the common fields of the Object Type Initializer record
+ //
+
+ RtlZeroMemory( &ObjectTypeInitializer, sizeof( ObjectTypeInitializer ) );
+ ObjectTypeInitializer.Length = sizeof( ObjectTypeInitializer );
+ ObjectTypeInitializer.InvalidAttributes = OBJ_OPENLINK;
+ ObjectTypeInitializer.GenericMapping = MiSectionMapping;
+ ObjectTypeInitializer.PoolType = PagedPool;
+ ObjectTypeInitializer.DefaultPagedPoolCharge = sizeof(SECTION);
+
+ //
+ // Initialize string descriptor.
+ //
+
+ RtlInitUnicodeString (&TypeName, L"Section");
+
+ //
+ // Create the section object type descriptor
+ //
+
+ ObjectTypeInitializer.ValidAccessMask = SECTION_ALL_ACCESS;
+ ObjectTypeInitializer.DeleteProcedure = MiSectionDelete;
+ ObjectTypeInitializer.GenericMapping = MiSectionMapping;
+ ObjectTypeInitializer.UseDefaultObject = TRUE;
+ if ( !NT_SUCCESS(ObCreateObjectType(&TypeName,
+ &ObjectTypeInitializer,
+ (PSECURITY_DESCRIPTOR) NULL,
+ &MmSectionObjectType
+ )) ) {
+ return FALSE;
+ }
+
+ //
+ // Initialize listhead, spinlock and semaphore for
+ // segment dereferencing thread.
+ //
+
+ KeInitializeSpinLock (&MmDereferenceSegmentHeader.Lock);
+ InitializeListHead (&MmDereferenceSegmentHeader.ListHead);
+ KeInitializeSemaphore (&MmDereferenceSegmentHeader.Semaphore, 0, MAXLONG);
+
+ InitializeListHead (&MmUnusedSegmentList);
+ KeInitializeEvent (&MmUnusedSegmentCleanup, NotificationEvent, FALSE);
+
+ //
+ // Create the Segment dereferencing thread.
+ //
+
+ InitializeObjectAttributes( &ObjectAttributes,
+ NULL,
+ 0,
+ NULL,
+ NULL );
+
+ if ( !NT_SUCCESS(PsCreateSystemThread(
+ &ThreadHandle,
+ THREAD_ALL_ACCESS,
+ &ObjectAttributes,
+ 0,
+ NULL,
+ MiDereferenceSegmentThread,
+ NULL
+ )) ) {
+ return FALSE;
+ }
+ ZwClose (ThreadHandle);
+
+ //
+ // Create the permanent section which maps physical memory.
+ //
+
+ Segment = (PSEGMENT)ExAllocatePoolWithTag (PagedPool,
+ sizeof(SEGMENT),
+ 'gSmM');
+ if (Segment == NULL) {
+ return FALSE;
+ }
+
+ ControlArea = ExAllocatePoolWithTag (NonPagedPool,
+ (ULONG)sizeof(CONTROL_AREA),
+ MMCONTROL);
+ if (ControlArea == NULL) {
+ return FALSE;
+ }
+
+ RtlZeroMemory (Segment, sizeof(SEGMENT));
+ RtlZeroMemory (ControlArea, sizeof(CONTROL_AREA));
+
+ ControlArea->Segment = Segment;
+ ControlArea->NumberOfSectionReferences = 1;
+ ControlArea->u.Flags.PhysicalMemory = 1;
+
+ Segment->ControlArea = ControlArea;
+ Segment->SegmentPteTemplate = ZeroPte;
+
+ //
+ // Now that the segment object is created, create a section object
+ // which refers to the segment object.
+ //
+
+ RtlInitUnicodeString (&SectionName, L"\\Device\\PhysicalMemory");
+
+ InitializeObjectAttributes( &ObjectAttributes,
+ &SectionName,
+ OBJ_PERMANENT,
+ NULL,
+ NULL
+ );
+
+ Status = ObCreateObject (KernelMode,
+ MmSectionObjectType,
+ &ObjectAttributes,
+ KernelMode,
+ NULL,
+ sizeof(SECTION),
+ sizeof(SECTION),
+ 0,
+ (PVOID *)&Section);
+ if (!NT_SUCCESS(Status)) {
+ return FALSE;
+ }
+
+ Section->Segment = Segment;
+ Section->SizeOfSection.QuadPart = ((LONGLONG)1 << PHYSICAL_ADDRESS_BITS) - 1;
+ Section->u.LongFlags = 0;
+ Section->InitialPageProtection = PAGE_READWRITE;
+
+ Status = ObInsertObject ((PVOID)Section,
+ NULL,
+ SECTION_MAP_READ,
+ 0,
+ (PVOID *)NULL,
+ &Handle);
+
+ if (!NT_SUCCESS( Status )) {
+ return FALSE;
+ }
+
+ if ( !NT_SUCCESS (NtClose ( Handle))) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
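+
+//
+// Editor's sketch - a hypothetical kernel-mode caller (not part of
+// the original source) opening the permanent section created above:
+//
+//     UNICODE_STRING Name;
+//     OBJECT_ATTRIBUTES Oa;
+//     HANDLE SectionHandle;
+//     NTSTATUS Status;
+//
+//     RtlInitUnicodeString (&Name, L"\\Device\\PhysicalMemory");
+//     InitializeObjectAttributes (&Oa, &Name, OBJ_CASE_INSENSITIVE,
+//                                 NULL, NULL);
+//     Status = ZwOpenSection (&SectionHandle, SECTION_MAP_READ, &Oa);
+//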
+
+BOOLEAN
+MmForceSectionClosed (
+ IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
+ IN BOOLEAN DelayClose
+ )
+
+/*++
+
+Routine Description:
+
+ This function examines the Section object pointers. If they are NULL,
+ no further action is taken and the value TRUE is returned.
+
+ If the Section object pointer is not NULL, the section reference count
+ and the map view count are checked. If both counts are zero, the
+ segment associated with the file is deleted and the file closed.
+ If one of the counts is non-zero, no action is taken and the
+ value FALSE is returned.
+
+Arguments:
+
+ SectionObjectPointer - Supplies a pointer to a section object.
+
+ DelayClose - Supplies the value TRUE if the close operation should
+ occur as soon as possible in the event this section
+ cannot be closed now due to outstanding references.
+
+Return Value:
+
+ TRUE - the segment was deleted and the file closed or no segment was
+ located.
+
+ FALSE - the segment was not deleted and no action was performed OR
+ an I/O error occurred trying to write the pages.
+
+--*/
+
+{
+ PCONTROL_AREA ControlArea;
+ KIRQL OldIrql;
+ ULONG state;
+
+ //
+ // Check the status of the control area, if the control area is in use
+ // or the control area is being deleted, this operation cannot continue.
+ //
+
+ state = MiCheckControlAreaStatus (CheckBothSection,
+ SectionObjectPointer,
+ DelayClose,
+ &ControlArea,
+ &OldIrql);
+
+ if (ControlArea == NULL) {
+ return (BOOLEAN)state;
+ }
+
+ //
+ // PFN LOCK IS NOW HELD!
+ //
+
+ //
+ // Set the being deleted flag and up the number of mapped views
+ // for the segment. Upping the number of mapped views prevents
+ // the segment from being deleted and passed to the deletion thread
+ // while we are forcing a delete.
+ //
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+ ASSERT (ControlArea->NumberOfMappedViews == 0);
+ ControlArea->NumberOfMappedViews = 1;
+
+ //
+ // This is a page file backed or image Segment. The Segment is being
+ // deleted, remove all references to the paging file and physical memory.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Delete the section by flushing all modified pages back to the section
+ // if it is a file and freeing up the pages such that the PfnReferenceCount
+ // goes to zero.
+ //
+
+ MiCleanSection (ControlArea);
+ return TRUE;
+}
+
+VOID
+MiCleanSection (
+ IN PCONTROL_AREA ControlArea
+ )
+
+/*++
+
+Routine Description:
+
+ This function examines each prototype PTE in the section and
+ takes the appropriate action to "delete" the prototype PTE.
+
+ If the PTE is dirty and is backed by a file (not a paging file),
+ the corresponding page is written to the file.
+
+ At the completion of this service, the section which was
+ operated upon is no longer usable.
+
+ NOTE - ALL I/O ERRORS ARE IGNORED. IF ANY WRITES FAIL, THE
+ DIRTY PAGES ARE MARKED CLEAN AND THE SECTION IS DELETED.
+
+Arguments:
+
+ ControlArea - Supplies a pointer to the control area for the section.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE LastWritten;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ PMMPTE WrittenPte;
+ MMPTE WrittenContents;
+ KIRQL OldIrql;
+ PMDL Mdl;
+ PKEVENT IoEvent;
+ PSUBSECTION Subsection;
+ PULONG Page;
+ PULONG LastPage;
+ PULONG EndingPage;
+ LARGE_INTEGER StartingOffset;
+ LARGE_INTEGER TempOffset;
+ NTSTATUS Status;
+ IO_STATUS_BLOCK IoStatus;
+ ULONG WriteNow = FALSE;
+ ULONG ImageSection = FALSE;
+ ULONG DelayCount = 0;
+ ULONG First;
+
+ if (ControlArea->u.Flags.Image) {
+ ImageSection = TRUE;
+ }
+ ASSERT (ControlArea->FilePointer);
+
+ PointerPte = ControlArea->Segment->PrototypePte;
+ LastPte = PointerPte + ControlArea->Segment->NonExtendedPtes;
+
+ IoEvent = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ MmSizeOfMdl(NULL, PAGE_SIZE *
+ MmModifiedWriteClusterSize)
+ + sizeof(KEVENT),
+ 'ldmM');
+
+ Mdl = (PMDL)(IoEvent + 1);
+
+ KeInitializeEvent (IoEvent, NotificationEvent, FALSE);
+
+ LastWritten = NULL;
+ EndingPage = (PULONG)(Mdl + 1) + MmModifiedWriteClusterSize;
+ LastPage = NULL;
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ //
+ // The PFN mutex is required for deallocating pages from a paging
+ // file and for deleting transition PTEs.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Stop the modified page writer from writing pages to this
+ // file, and if any paging I/O is in progress, wait for it
+ // to complete.
+ //
+
+ ControlArea->u.Flags.NoModifiedWriting = 1;
+
+ while (ControlArea->ModifiedWriteCount != 0) {
+
+ //
+ // There is a modified page write in progress. Set the
+ // flag in the control area indicating the modified page
+ // writer should signal when a write to this control area
+ // is complete. Release the PFN LOCK and wait in an
+ // atomic operation. Once the wait is satisfied, recheck
+ // to make sure it was this file's I/O that was written.
+ //
+
+ ControlArea->u.Flags.SetMappedFileIoComplete = 1;
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(&MmMappedFileIoComplete,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+ LOCK_PFN (OldIrql);
+ }
+
+ for (;;) {
+
+ First = TRUE;
+ while (PointerPte < LastPte) {
+
+ if ((((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) || First) {
+ First = FALSE;
+
+ if ((ImageSection) ||
+ (MiCheckProtoPtePageState(PointerPte, FALSE))) {
+ MiMakeSystemAddressValidPfn (PointerPte);
+ } else {
+
+ //
+ // Paged pool page is not resident, hence no transition or valid
+ // prototype PTEs can be present in it. Skip it.
+ //
+
+ PointerPte = (PMMPTE)(((ULONG)PointerPte | (PAGE_SIZE - 1)) + 1);
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ }
+ goto WriteItOut;
+ }
+ }
+
+ PteContents = *PointerPte;
+
+ //
+ // Prototype PTEs for Segments backed by paging file
+ // are either in demand zero, page file format, or transition.
+ //
+
+ ASSERT (PteContents.u.Hard.Valid == 0);
+
+ if (PteContents.u.Soft.Prototype == 0) {
+
+ if (PteContents.u.Soft.Transition == 1) {
+
+ //
+ // Prototype PTE in transition, there are 3 possible cases:
+ // 1. The page is part of an image which is shareable and
+ // refers to the paging file - dereference page file
+ // space and free the physical page.
+ // 2. The page refers to the segment but is not modified -
+ // free the physical page.
+ // 3. The page refers to the segment and is modified -
+ // write the page to the file and free the physical page.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+ if (DelayCount < 20) {
+
+ //
+ // There must be an I/O in progress on this
+ // page. Wait for the I/O operation to complete.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime);
+
+ DelayCount += 1;
+
+ //
+ // Redo the loop; if the delay count reaches 20, assume
+ // that this thread is deadlocked and don't purge this
+ // page. The file system can deal with the write
+ // operation in progress.
+ //
+
+ LOCK_PFN (OldIrql);
+ MiMakeSystemAddressValidPfn (PointerPte);
+ continue;
+#if DBG
+ } else {
+
+ //
+ // The I/O still has not completed, just ignore the fact
+ // that the i/o is in progress and delete the page.
+ //
+
+ KdPrint(("MM:CLEAN - page number %lx has i/o outstanding\n",
+ PteContents.u.Trans.PageFrameNumber));
+#endif //DBG
+ }
+ }
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 0) {
+
+ //
+ // Paging file reference (case 1).
+ //
+
+ MI_SET_PFN_DELETED (Pfn1);
+ if (!ImageSection) {
+
+ //
+ // This is not an image section, it must be a
+ // page file backed section, therefore decrement
+ // the PFN reference count for the control area.
+ //
+
+ ControlArea->NumberOfPfnReferences -= 1;
+ ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
+ }
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+ // Check the reference count for the page. If the reference
+ // count is zero and the page is not already on the free list,
+ // move the page to the free list; if the reference count is
+ // not zero, ignore this page - it will be placed on the free
+ // list when the reference count reaches zero.
+ //
+
+ if ((Pfn1->u3.e2.ReferenceCount == 0) &&
+ (Pfn1->u3.e1.PageLocation != FreePageList)) {
+
+ MiUnlinkPageFromList (Pfn1);
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PteContents.u.Trans.PageFrameNumber);
+
+ }
+ PointerPte->u.Long = 0;
+
+ //
+ // If a cluster of pages to write has been completed,
+ // set the WriteNow flag.
+ //
+
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ }
+
+ } else {
+
+ if ((Pfn1->u3.e1.Modified == 0) || (ImageSection)) {
+
+ //
+ // Non modified or image file page (case 2).
+ //
+
+ MI_SET_PFN_DELETED (Pfn1);
+ ControlArea->NumberOfPfnReferences -= 1;
+ ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+ // Check the reference count for the page. If the reference
+ // count is zero and the page is not already on the free list,
+ // move the page to the free list; if the reference count is
+ // not zero, ignore this page - it will be placed on the free
+ // list when the reference count reaches zero.
+ //
+
+ if ((Pfn1->u3.e2.ReferenceCount == 0) &&
+ (Pfn1->u3.e1.PageLocation != FreePageList)) {
+
+ MiUnlinkPageFromList (Pfn1);
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PteContents.u.Trans.PageFrameNumber);
+ }
+
+ PointerPte->u.Long = 0;
+
+ //
+ // If a cluster of pages to write has been completed,
+ // set the WriteNow flag.
+ //
+
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ }
+
+ } else {
+
+ //
+ // Check to see if this is the first page of a cluster.
+ //
+
+ if (LastWritten == NULL) {
+ LastPage = (PULONG)(Mdl + 1);
+ ASSERT (MiGetSubsectionAddress(&Pfn1->OriginalPte) ==
+ Subsection);
+
+ //
+ // Calculate the offset within the file to write to:
+ // offset = base + ((thispte - basepte) << PAGE_SHIFT)
+ //
+
+ StartingOffset.QuadPart = MI_STARTING_OFFSET (
+ Subsection,
+ Pfn1->PteAddress);
+
+ MI_INITIALIZE_ZERO_MDL (Mdl);
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+
+ Mdl->StartVa =
+ (PVOID)(Pfn1->u3.e1.PageColor << PAGE_SHIFT);
+ Mdl->Size = (CSHORT)(sizeof(MDL) +
+ (sizeof(ULONG) * MmModifiedWriteClusterSize));
+ }
+
+ LastWritten = PointerPte;
+ Mdl->ByteCount += PAGE_SIZE;
+
+ //
+ // If the cluster is now full, set the write now flag.
+ //
+
+ if (Mdl->ByteCount == (PAGE_SIZE * MmModifiedWriteClusterSize)) {
+ WriteNow = TRUE;
+ }
+
+ MiUnlinkPageFromList (Pfn1);
+ Pfn1->u3.e1.Modified = 0;
+
+ //
+ // Up the reference count for the physical page as there
+ // is I/O in progress.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ //
+ // Record the page frame number in the MDL's page frame
+ // array.
+ //
+
+ *LastPage = PteContents.u.Trans.PageFrameNumber;
+
+ LastPage += 1;
+ }
+ }
+ } else {
+
+ if (IS_PTE_NOT_DEMAND_ZERO (PteContents)) {
+ MiReleasePageFileSpace (PteContents);
+ }
+ PointerPte->u.Long = 0;
+
+ //
+ // If a cluster of pages to write has been completed,
+ // set the WriteNow flag.
+ //
+
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ }
+ }
+ } else {
+
+ //
+ // This is a normal prototype PTE in mapped file format.
+ //
+
+ if (LastWritten != NULL) {
+ WriteNow = TRUE;
+ }
+ }
+
+ //
+ // Write the current cluster if it is complete,
+ // full, or the loop is now complete.
+ //
+
+ PointerPte += 1;
+ DelayCount = 0;
+
+WriteItOut:
+
+ if ((WriteNow) ||
+ ((PointerPte == LastPte) && (LastWritten != NULL))) {
+
+ //
+ // Issue the write request.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ WriteNow = FALSE;
+
+ KeClearEvent (IoEvent);
+
+ //
+ // Make sure the write does not go past the
+ // end of the file (the segment size).
+ //
+
+ TempOffset.QuadPart = ((LONGLONG)Subsection->EndingSector <<
+ MMSECTOR_SHIFT) +
+ Subsection->u.SubsectionFlags.SectorEndOffset;
+
+ if ((StartingOffset.QuadPart + Mdl->ByteCount) >
+ TempOffset.QuadPart) {
+
+ ASSERT ((ULONG)(TempOffset.QuadPart -
+ StartingOffset.QuadPart) >
+ (Mdl->ByteCount - PAGE_SIZE));
+
+ Mdl->ByteCount = (ULONG)(TempOffset.QuadPart -
+ StartingOffset.QuadPart);
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_FLUSH_SECTION) {
+ DbgPrint("MM:flush page write begun %lx\n",
+ Mdl->ByteCount);
+ }
+#endif //DBG
+
+ Status = IoSynchronousPageWrite (
+ ControlArea->FilePointer,
+ Mdl,
+ &StartingOffset,
+ IoEvent,
+ &IoStatus );
+
+ if (NT_SUCCESS(Status)) {
+
+ KeWaitForSingleObject( IoEvent,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ Page = (PULONG)(Mdl + 1);
+
+ LOCK_PFN (OldIrql);
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) != 0) {
+
+ //
+ // The next PTE is within the same page as the last one;
+ // make sure that page did not leave memory while the
+ // I/O was in progress.
+ //
+
+ MiMakeSystemAddressValidPfn (PointerPte);
+ }
+
+ //
+ // The I/O is complete - unlock the pages.
+ //
+ // NOTE that the error status is ignored.
+ //
+
+ while (Page < LastPage) {
+
+ Pfn2 = MI_PFN_ELEMENT (*Page);
+
+ //
+ // Make sure the page is still transition.
+ //
+
+ WrittenPte = Pfn2->PteAddress;
+
+ MiDecrementReferenceCount (*Page);
+
+ if (!MI_IS_PFN_DELETED (Pfn2)) {
+
+ //
+ // Make sure the prototype PTE is
+ // still in the working set.
+ //
+
+ MiMakeSystemAddressValidPfn (WrittenPte);
+
+ if (Pfn2->PteAddress != WrittenPte) {
+
+ //
+ // The PFN mutex was released to make the
+ // page table page valid, and while it
+ // was released, the physical page
+ // was reused. Go onto the next one.
+ //
+
+ Page += 1;
+ continue;
+ }
+
+ WrittenContents = *WrittenPte;
+
+ if ((WrittenContents.u.Soft.Prototype == 0) &&
+ (WrittenContents.u.Soft.Transition == 1)) {
+
+ MI_SET_PFN_DELETED (Pfn2);
+ ControlArea->NumberOfPfnReferences -= 1;
+ ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
+
+ MiDecrementShareCount (Pfn2->PteFrame);
+
+ //
+ // Check the reference count for the page. If the
+ // reference count is zero and the page is not
+ // already on the free list, move the page to the
+ // free list; if the reference count is not zero,
+ // ignore this page - it will be placed on the free
+ // list when the reference count reaches zero.
+ //
+
+ if ((Pfn2->u3.e2.ReferenceCount == 0) &&
+ (Pfn2->u3.e1.PageLocation != FreePageList)) {
+
+ MiUnlinkPageFromList (Pfn2);
+ MiReleasePageFileSpace (Pfn2->OriginalPte);
+ MiInsertPageInList (
+ MmPageLocationList[FreePageList],
+ *Page);
+ }
+ }
+ WrittenPte->u.Long = 0;
+ }
+ Page += 1;
+ }
+
+ //
+ // Indicate that there is no current cluster being built.
+ //
+
+ LastWritten = NULL;
+ }
+
+ } // end while
+
+ //
+ // Get the next subsection if any.
+ //
+
+ if (Subsection->NextSubsection == (PSUBSECTION)NULL) {
+ break;
+ }
+ Subsection = Subsection->NextSubsection;
+ PointerPte = Subsection->SubsectionBase;
+ LastPte = PointerPte + Subsection->PtesInSubsection;
+
+
+ } // end for
+
+ ControlArea->NumberOfMappedViews = 0;
+
+ ASSERT (ControlArea->NumberOfPfnReferences == 0);
+
+ if (ControlArea->u.Flags.FilePointerNull == 0) {
+ ControlArea->u.Flags.FilePointerNull = 1;
+ if (ControlArea->u.Flags.Image) {
+ ((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->ImageSectionObject)) =
+ NULL;
+ } else {
+ ASSERT (((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->DataSectionObject)) != NULL);
+ ((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->DataSectionObject)) =
+ NULL;
+ }
+ }
+ UNLOCK_PFN (OldIrql);
+
+ ExFreePool (IoEvent);
+
+ //
+ // Delete the segment structure.
+ //
+
+ MiSegmentDelete (ControlArea->Segment);
+
+ return;
+}
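+
+//
+// Editor's note - a worked example of the cluster offset computed
+// above (not part of the original source): dirty transition pages are
+// batched into one MDL of up to MmModifiedWriteClusterSize pages, and
+// the file offset of a cluster follows
+//
+//     offset = base + ((thispte - basepte) << PAGE_SHIFT)
+//
+// so with 4K pages the 5th prototype PTE of a subsection based at
+// file offset 0x3000 would be written at 0x3000 + (5 << 12) = 0x8000.
+//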
+
+NTSTATUS
+MmGetFileNameForSection (
+ IN HANDLE Section,
+ OUT PSTRING FileName
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the file name for the corresponding section.
+
+Arguments:
+
+ Section - Supplies the handle of the section to get the name of.
+
+ FileName - Returns the name of the corresponding section.
+
+Return Value:
+
+ STATUS_SUCCESS and the file name on success, STATUS_SECTION_NOT_IMAGE
+ if the section does not map an image, or an error status.
+
+Environment:
+
+ Kernel mode, APC_LEVEL or below, no mutexes held.
+
+--*/
+
+{
+
+ PSECTION SectionObject;
+ POBJECT_NAME_INFORMATION FileNameInfo;
+ ULONG whocares;
+ NTSTATUS Status;
+ ULONG Dereference;
+
+ Dereference = TRUE;
+#define xMAX_NAME 1024
+
+ if ( (ULONG)Section & 1 ) {
+ SectionObject = (PSECTION)((ULONG)Section & 0xfffffffe);
+ Dereference = FALSE;
+ } else {
+ Status = ObReferenceObjectByHandle ( Section,
+ 0,
+ MmSectionObjectType,
+ KernelMode,
+ (PVOID *)&SectionObject,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+ }
+ if (SectionObject->u.Flags.Image == 0) {
+ if ( Dereference ) ObDereferenceObject (SectionObject);
+ return STATUS_SECTION_NOT_IMAGE;
+ }
+
+ FileNameInfo = ExAllocatePoolWithTag (PagedPool, xMAX_NAME, ' mM');
+ if ( !FileNameInfo ) {
+ if ( Dereference ) ObDereferenceObject (SectionObject);
+ return STATUS_NO_MEMORY;
+ }
+
+ Status = ObQueryNameString(
+ SectionObject->Segment->ControlArea->FilePointer,
+ FileNameInfo,
+ xMAX_NAME,
+ &whocares
+ );
+
+ if ( Dereference ) ObDereferenceObject (SectionObject);
+ if ( !NT_SUCCESS(Status) ) {
+ ExFreePool(FileNameInfo);
+ return Status;
+ }
+
+ FileName->Length = 0;
+ FileName->MaximumLength = (FileNameInfo->Name.Length/sizeof(WCHAR)) + 1;
+ FileName->Buffer = ExAllocatePoolWithTag (PagedPool,
+ FileName->MaximumLength,
+ ' mM');
+ if ( !FileName->Buffer ) {
+ ExFreePool(FileNameInfo);
+ return STATUS_NO_MEMORY;
+ }
+ RtlUnicodeStringToAnsiString((PANSI_STRING)FileName,&FileNameInfo->Name,FALSE);
+ FileName->Buffer[FileName->Length] = '\0';
+ ExFreePool(FileNameInfo);
+
+ return STATUS_SUCCESS;
+}
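+
+//
+// Editor's note (not part of the original source): the
+// ((ULONG)Section & 1) test above lets a trusted caller pass a
+// PSECTION pointer directly, tagged by setting the low bit (which an
+// aligned pointer never uses); the bit is masked off before use and
+// no object reference is taken or released in that case.
+//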
+
+VOID
+MiCheckControlArea (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS CurrentProcess,
+ IN KIRQL PreviousIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks the reference counts for the specified
+ control area, and if the counts are all zero, it marks the
+ control area for deletion and queues it to the deletion thread.
+
+
+ *********************** NOTE ********************************
+ This routine returns with the PFN LOCK RELEASED!!!!!
+
+Arguments:
+
+ ControlArea - Supplies a pointer to the control area to check.
+
+ CurrentProcess - Supplies a pointer to the current process if and ONLY
+ IF the working set lock is held.
+
+ PreviousIrql - Supplies the previous IRQL.
+
+Return Value:
+
+ NONE.
+
+Environment:
+
+ Kernel mode, PFN lock held, PFN lock released upon return!!!
+
+--*/
+
+{
+ PEVENT_COUNTER PurgeEvent = NULL;
+ ULONG DeleteOnClose = FALSE;
+ ULONG DereferenceSegment = FALSE;
+
+
+ MM_PFN_LOCK_ASSERT();
+ if ((ControlArea->NumberOfMappedViews == 0) &&
+ (ControlArea->NumberOfSectionReferences == 0)) {
+
+ ASSERT (ControlArea->NumberOfUserReferences == 0);
+
+ if (ControlArea->FilePointer != (PFILE_OBJECT)NULL) {
+
+ if (ControlArea->NumberOfPfnReferences == 0) {
+
+ //
+ // There are no views and no physical pages referenced
+ // by the Segment; dereference the Segment object.
+ //
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+ DereferenceSegment = TRUE;
+
+ ASSERT (ControlArea->u.Flags.FilePointerNull == 0);
+ ControlArea->u.Flags.FilePointerNull = 1;
+ if (ControlArea->u.Flags.Image) {
+ ((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->ImageSectionObject)) =
+ NULL;
+ } else {
+ ASSERT (((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->DataSectionObject)) != NULL);
+ ((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->DataSectionObject)) =
+ NULL;
+ }
+ } else {
+
+ //
+ // Insert this segment into the unused segment list (unless
+ // it is already on the list).
+ //
+
+ if (ControlArea->DereferenceList.Flink == NULL) {
+ InsertTailList ( &MmUnusedSegmentList,
+ &ControlArea->DereferenceList);
+ MmUnusedSegmentCount += 1;
+ }
+
+ //
+ // Indicate if this section should be deleted now that
+ // the reference counts are zero.
+ //
+
+ DeleteOnClose = ControlArea->u.Flags.DeleteOnClose;
+
+ //
+ // The number of mapped views is zero and the number of
+ // section references is zero, but some pages of the
+ // file are still resident. If this is an image with
+ // global memory, "purge" the subsections which contain
+ // the global memory and reset them to point back to
+ // the file.
+ //
+
+ if (ControlArea->u.Flags.GlobalMemory == 1) {
+ ASSERT (ControlArea->u.Flags.Image == 1);
+
+ ControlArea->u.Flags.BeingPurged = 1;
+ ControlArea->NumberOfMappedViews = 1;
+
+ MiPurgeImageSection (ControlArea, CurrentProcess);
+
+ ControlArea->u.Flags.BeingPurged = 0;
+ ControlArea->NumberOfMappedViews -= 1;
+ if ((ControlArea->NumberOfMappedViews == 0) &&
+ (ControlArea->NumberOfSectionReferences == 0) &&
+ (ControlArea->NumberOfPfnReferences == 0)) {
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+ DereferenceSegment = TRUE;
+ ControlArea->u.Flags.FilePointerNull = 1;
+ ((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->ImageSectionObject)) =
+ NULL;
+ } else {
+
+ PurgeEvent = ControlArea->WaitingForDeletion;
+ ControlArea->WaitingForDeletion = NULL;
+ }
+ }
+
+ //
+ // If delete on close is set and the segment was
+ // not deleted, up the count of mapped views so the
+ // control area will not be deleted when the PFN lock
+ // is released.
+ //
+
+ if (DeleteOnClose && !DereferenceSegment) {
+ ControlArea->NumberOfMappedViews = 1;
+ ControlArea->u.Flags.BeingDeleted = 1;
+ }
+ }
+
+ } else {
+
+ //
+ // This Segment is backed by a paging file, dereference the
+ // Segment object when the number of views goes from 1 to 0
+ // without regard to the number of PFN references.
+ //
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+ DereferenceSegment = TRUE;
+ }
+ }
+
+ UNLOCK_PFN (PreviousIrql);
+
+ if (DereferenceSegment || DeleteOnClose) {
+
+ //
+ // Release the working set mutex if it is held, as the object
+ // management routines may page fault, etc.
+ //
+
+ if (CurrentProcess) {
+ UNLOCK_WS (CurrentProcess);
+ }
+
+ if (DereferenceSegment) {
+
+ //
+ // Delete the segment.
+ //
+
+ MiSegmentDelete (ControlArea->Segment);
+
+ } else {
+
+ //
+ // The segment should be forced closed now.
+ //
+
+ MiCleanSection (ControlArea);
+ }
+
+ ASSERT (PurgeEvent == NULL);
+
+ //
+ // Reacquire the working set lock, if a process was specified.
+ //
+
+ if (CurrentProcess) {
+ LOCK_WS (CurrentProcess);
+ }
+
+ } else {
+
+ //
+ // If any threads are waiting for the segment, indicate that
+ // the purge operation has completed.
+ //
+
+ if (PurgeEvent != NULL) {
+ KeSetEvent (&PurgeEvent->Event, 0, FALSE);
+ }
+
+ if (MmUnusedSegmentCount > (MmUnusedSegmentCountMaximum << 2)) {
+ KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
+ }
+ }
+
+ return;
+}
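+
+#if 0
+
+//
+// A minimal illustrative sketch (hypothetical caller): the expected
+// calling pattern for MiCheckControlArea. The caller holds the PFN
+// lock, drops its reference, and relies on MiCheckControlArea to
+// release the lock on every path.
+//
+
+VOID
+MiSketchDropViewReference (
+    IN PCONTROL_AREA ControlArea
+    )
+{
+    KIRQL OldIrql;
+
+    LOCK_PFN (OldIrql);
+    ControlArea->NumberOfMappedViews -= 1;
+
+    //
+    // Returns with the PFN lock released.
+    //
+
+    MiCheckControlArea (ControlArea, NULL, OldIrql);
+}
+
+#endif //0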
+
+VOID
+MiCheckForControlAreaDeletion (
+ IN PCONTROL_AREA ControlArea
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks the reference counts for the specified
+ control area, and if the counts are all zero, it marks the
+ control area for deletion and queues it to the deletion thread.
+
+Arguments:
+
+ ControlArea - Supplies a pointer to the control area to check.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ KIRQL OldIrql;
+
+ MM_PFN_LOCK_ASSERT();
+ if ((ControlArea->NumberOfPfnReferences == 0) &&
+ (ControlArea->NumberOfMappedViews == 0) &&
+ (ControlArea->NumberOfSectionReferences == 0 )) {
+
+ //
+ // This segment is no longer mapped in any address space
+ // nor are there any prototype PTEs within the segment
+ // which are valid or in a transition state. Queue
+ // the segment to the segment-dereferencer thread
+ // which will dereference the segment object, potentially
+ // causing the segment to be deleted.
+ //
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+ ASSERT (ControlArea->u.Flags.FilePointerNull == 0);
+ ControlArea->u.Flags.FilePointerNull = 1;
+
+ if (ControlArea->u.Flags.Image) {
+ ((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->ImageSectionObject)) =
+ NULL;
+ } else {
+ ((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->DataSectionObject)) =
+ NULL;
+ }
+
+ ExAcquireSpinLock (&MmDereferenceSegmentHeader.Lock, &OldIrql);
+
+ ASSERT (ControlArea->DereferenceList.Flink != NULL);
+
+ //
+ // Remove the entry from the unused segment list and put
+ // on the dereference list.
+ //
+
+ RemoveEntryList (&ControlArea->DereferenceList);
+ MmUnusedSegmentCount -= 1;
+ InsertTailList (&MmDereferenceSegmentHeader.ListHead,
+ &ControlArea->DereferenceList);
+ ExReleaseSpinLock (&MmDereferenceSegmentHeader.Lock, OldIrql);
+
+ KeReleaseSemaphore (&MmDereferenceSegmentHeader.Semaphore,
+ 0L,
+ 1L,
+ FALSE);
+ }
+ return;
+}
+
+
+ULONG
+MiCheckControlAreaStatus (
+ IN SECTION_CHECK_TYPE SectionCheckType,
+ IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
+ IN ULONG DelayClose,
+ OUT PCONTROL_AREA *ControlAreaOut,
+ OUT PKIRQL PreviousIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This routine checks the status of the control area for the specified
+ SectionObjectPointers. If the control area is in use, that is, the
+ number of section references and the number of mapped views are not
+ both zero, no action is taken and the function returns FALSE.
+
+ If there is no control area associated with the specified
+ SectionObjectPointers or the control area is in the process of being
+ created or deleted, no action is taken and the value TRUE is returned.
+
+ If there are no references and the control area is not being
+ created or deleted, the address of the control area is returned
+ in the ControlAreaOut argument and the PFN lock is still held
+ at the return.
+
+Arguments:
+
+ SectionCheckType - Supplies the type of section to check on, one of
+ CheckImageSection, CheckDataSection, CheckUserDataSection,
+ or CheckBothSection.
+
+ SectionObjectPointers - Supplies the section object pointers through
+ which the control area can be located.
+
+ DelayClose - Supplies a boolean which if TRUE and the control area
+ is being used, the delay on close field should be set
+ in the control area.
+
+ *ControlAreaOut - Returns the address of the control area.
+
+ PreviousIrql - Returns, in the case the PFN_LOCK is held, the previous
+ IRQL so the lock can be released properly.
+
+Return Value:
+
+ FALSE if the control area is in use or was returned with the PFN
+ lock held, TRUE if the control area is gone or in the process of
+ being created or deleted.
+
+Environment:
+
+ Kernel mode, PFN lock NOT held.
+
+--*/
+
+
+{
+ PEVENT_COUNTER IoEvent;
+ PEVENT_COUNTER SegmentEvent;
+ ULONG DeallocateSegmentEvent = TRUE;
+ PCONTROL_AREA ControlArea;
+ ULONG SectRef;
+ KIRQL OldIrql;
+
+ //
+ // Allocate an event to wait on in case the segment is in the
+ // process of being deleted. This event cannot be allocated
+ // with the PFN database locked as pool expansion would deadlock.
+ //
+
+ *ControlAreaOut = NULL;
+
+ //
+ // Acquire the PFN mutex and examine the section object pointer
+ // value within the file object.
+ //
+
+ //
+ // File control blocks live in non-paged pool.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ SegmentEvent = MiGetEventCounter ();
+
+ if (SectionCheckType != CheckImageSection) {
+ ControlArea = ((PCONTROL_AREA)(SectionObjectPointers->DataSectionObject));
+ } else {
+ ControlArea = ((PCONTROL_AREA)(SectionObjectPointers->ImageSectionObject));
+ }
+
+ if (ControlArea == NULL) {
+
+ if (SectionCheckType != CheckBothSection) {
+
+ //
+ // This file no longer has an associated segment.
+ //
+
+ MiFreeEventCounter (SegmentEvent, TRUE);
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+ } else {
+ ControlArea = ((PCONTROL_AREA)(SectionObjectPointers->ImageSectionObject));
+ if (ControlArea == NULL) {
+
+ //
+ // This file no longer has an associated segment.
+ //
+
+ MiFreeEventCounter (SegmentEvent, TRUE);
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+ }
+ }
+ }
+
+ //
+ // Depending on the type of section, check for the pertinent
+ // reference count being non-zero.
+ //
+
+ if (SectionCheckType != CheckUserDataSection) {
+ SectRef = ControlArea->NumberOfSectionReferences;
+ } else {
+ SectRef = ControlArea->NumberOfUserReferences;
+ }
+
+ if ((SectRef != 0) ||
+ (ControlArea->NumberOfMappedViews != 0) ||
+ (ControlArea->u.Flags.BeingCreated)) {
+
+
+ //
+ // The segment is currently in use or being created.
+ //
+
+ if (DelayClose) {
+
+ //
+ // The section should be deleted when the reference
+ // counts are zero, set the delete on close flag.
+ //
+
+ ControlArea->u.Flags.DeleteOnClose = 1;
+ }
+
+ MiFreeEventCounter (SegmentEvent, TRUE);
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+ }
+
+ //
+ // The segment has no references, delete it. If the segment
+ // is already being deleted, set the event field in the control
+ // area and wait on the event.
+ //
+
+ if (ControlArea->u.Flags.BeingDeleted) {
+
+ //
+ // The segment object is in the process of being deleted.
+ // Check to see if another thread is waiting for the deletion,
+ // otherwise create an event object to wait upon.
+ //
+
+ if (ControlArea->WaitingForDeletion == NULL) {
+
+ //
+ // Create an event and put its address in the control area.
+ //
+
+ DeallocateSegmentEvent = FALSE;
+ ControlArea->WaitingForDeletion = SegmentEvent;
+ IoEvent = SegmentEvent;
+ } else {
+ IoEvent = ControlArea->WaitingForDeletion;
+ IoEvent->RefCount += 1;
+ }
+
+ //
+ // Release the mutex and wait for the event.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(&IoEvent->Event,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ LOCK_PFN (OldIrql);
+ MiFreeEventCounter (IoEvent, TRUE);
+ if (DeallocateSegmentEvent) {
+ MiFreeEventCounter (SegmentEvent, TRUE);
+ }
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+ }
+
+ //
+ // Return with the PFN database locked.
+ //
+
+ MiFreeEventCounter (SegmentEvent, FALSE);
+ *ControlAreaOut = ControlArea;
+ *PreviousIrql = OldIrql;
+ return FALSE;
+}
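+
+#if 0
+
+//
+// A minimal illustrative sketch (hypothetical caller): consuming the
+// three-way contract of MiCheckControlAreaStatus. FALSE with a
+// non-NULL control area means the PFN lock is still held.
+//
+
+VOID
+MiSketchCheckDataSection (
+    IN PSECTION_OBJECT_POINTERS SectionObjectPointer
+    )
+{
+    PCONTROL_AREA ControlArea;
+    KIRQL OldIrql;
+
+    if (MiCheckControlAreaStatus (CheckDataSection,
+                                  SectionObjectPointer,
+                                  FALSE,
+                                  &ControlArea,
+                                  &OldIrql)) {
+
+        //
+        // No control area, or it is being created or deleted.
+        //
+
+        return;
+    }
+
+    if (ControlArea == NULL) {
+
+        //
+        // The control area is still in use; the PFN lock is not held.
+        //
+
+        return;
+    }
+
+    //
+    // The control area was returned with the PFN lock held; the
+    // caller must release it when done.
+    //
+
+    UNLOCK_PFN (OldIrql);
+}
+
+#endif //0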
+
+
+PEVENT_COUNTER
+MiGetEventCounter (
+ )
+
+/*++
+
+Routine Description:
+
+ This function maintains a list of "events" to allow waiting
+ on segment operations (deletion, creation, purging).
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ Event to be used for waiting (stored into the control area).
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ PEVENT_COUNTER Support;
+ PLIST_ENTRY NextEntry;
+
+ MM_PFN_LOCK_ASSERT();
+
+ if (MmEventCountList.Count == 0) {
+ ASSERT (IsListEmpty(&MmEventCountList.ListHead));
+ OldIrql = APC_LEVEL;
+ UNLOCK_PFN (OldIrql);
+ Support = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ sizeof(EVENT_COUNTER),
+ 'xEmM');
+ KeInitializeEvent (&Support->Event, NotificationEvent, FALSE);
+ LOCK_PFN (OldIrql);
+ } else {
+ ASSERT (!IsListEmpty(&MmEventCountList.ListHead));
+ MmEventCountList.Count -= 1;
+ NextEntry = RemoveHeadList (&MmEventCountList.ListHead);
+ Support = CONTAINING_RECORD (NextEntry,
+ EVENT_COUNTER,
+ ListEntry );
+ //ASSERT (Support->RefCount == 0);
+ KeClearEvent (&Support->Event);
+ }
+ Support->RefCount = 1;
+ Support->ListEntry.Flink = NULL;
+ return Support;
+}
+
+
+VOID
+MiFreeEventCounter (
+ IN PEVENT_COUNTER Support,
+ IN ULONG Flush
+ )
+
+/*++
+
+Routine Description:
+
+ This routine frees an event counter back to the free list.
+
+Arguments:
+
+ Support - Supplies a pointer to the event counter.
+
+ Flush - Supplies TRUE if the PFN lock can be released and the event
+ counter pool block actually freed. The PFN lock will be
+ reacquired before returning.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+{
+
+ MM_PFN_LOCK_ASSERT();
+
+ ASSERT (Support->RefCount != 0);
+ ASSERT (Support->ListEntry.Flink == NULL);
+ Support->RefCount -= 1;
+ if (Support->RefCount == 0) {
+ InsertTailList (&MmEventCountList.ListHead,
+ &Support->ListEntry);
+ MmEventCountList.Count += 1;
+ }
+ if ((Flush) && (MmEventCountList.Count > 4)) {
+ MiFlushEventCounter();
+ }
+ return;
+}
+
+
+VOID
+MiFlushEventCounter (
+ )
+
+/*++
+
+Routine Description:
+
+ This routine examines the list of event counters and attempts
+ to free up to 10 (if there are more than 4).
+
+ It will release and reacquire the PFN lock when it frees the
+ event counters!
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, PFN lock held.
+
+--*/
+
+
+{
+ KIRQL OldIrql;
+ PEVENT_COUNTER Support[10];
+ ULONG i = 0;
+ PLIST_ENTRY NextEntry;
+
+ MM_PFN_LOCK_ASSERT();
+
+ while ((MmEventCountList.Count > 4) && (i < 10)) {
+ NextEntry = RemoveHeadList (&MmEventCountList.ListHead);
+ Support[i] = CONTAINING_RECORD (NextEntry,
+ EVENT_COUNTER,
+ ListEntry );
+ Support[i]->ListEntry.Flink = NULL;
+ i += 1;
+ MmEventCountList.Count -= 1;
+ }
+
+ if (i == 0) {
+ return;
+ }
+
+ OldIrql = APC_LEVEL;
+ UNLOCK_PFN (OldIrql);
+
+ do {
+ i -= 1;
+ ExFreePool(Support[i]);
+ } while (i > 0);
+
+ LOCK_PFN (OldIrql);
+
+ return;
+}
+
+
+BOOLEAN
+MmCanFileBeTruncated (
+ IN PSECTION_OBJECT_POINTERS SectionPointer,
+ IN PLARGE_INTEGER NewFileSize
+ )
+
+/*++
+
+Routine Description:
+
+ This routine does the following:
+
+ 1. Checks to see if an image section is in use for the file,
+ if so it returns FALSE.
+
+ 2. Checks to see if a user section exists for the file, if
+ it does, it checks to make sure the new file size is greater
+ than the size of the file, if not it returns FALSE.
+
+ 3. If no image section exists, and no user-created data section
+ exists or the file's size is greater, then TRUE is returned.
+
+Arguments:
+
+ SectionPointer - Supplies a pointer to the section object pointers
+ from the file object.
+
+ NewFileSize - Supplies a pointer to the size the file is getting set to.
+
+Return Value:
+
+ TRUE if the file can be truncated, FALSE if it cannot be.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ LARGE_INTEGER LocalOffset;
+ KIRQL OldIrql;
+
+ //
+ // Capture caller's file size, since we may modify it.
+ //
+
+ if (ARGUMENT_PRESENT(NewFileSize)) {
+
+ LocalOffset = *NewFileSize;
+ NewFileSize = &LocalOffset;
+ }
+
+ if (MmCanFileBeTruncatedInternal( SectionPointer, NewFileSize, &OldIrql )) {
+
+ UNLOCK_PFN (OldIrql);
+ return TRUE;
+ }
+
+ return FALSE;
+}
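+
+#if 0
+
+//
+// A minimal illustrative sketch (hypothetical file system caller): the
+// truncation check is typically made while processing a set-end-of-file
+// request that shrinks the file.
+//
+
+BOOLEAN
+FsdSketchCanTruncate (
+    IN PFILE_OBJECT FileObject,
+    IN PLARGE_INTEGER NewFileSize
+    )
+{
+    return MmCanFileBeTruncated (FileObject->SectionObjectPointer,
+                                 NewFileSize);
+}
+
+#endif //0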
+
+ULONG
+MmCanFileBeTruncatedInternal (
+ IN PSECTION_OBJECT_POINTERS SectionPointer,
+ IN PLARGE_INTEGER NewFileSize OPTIONAL,
+ OUT PKIRQL PreviousIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This routine does the following:
+
+ 1. Checks to see if an image section is in use for the file,
+ if so it returns FALSE.
+
+ 2. Checks to see if a user section exists for the file, if
+ it does, it checks to make sure the new file size is greater
+ than the size of the file, if not it returns FALSE.
+
+ 3. If no image section exists, and no user-created data section
+ exists or the file's size is greater, then TRUE is returned.
+
+Arguments:
+
+ SectionPointer - Supplies a pointer to the section object pointers
+ from the file object.
+
+ NewFileSize - Supplies a pointer to the size the file is getting set to.
+
+ PreviousIrql - If returning TRUE, returns Irql to use when unlocking
+ Pfn database.
+
+Return Value:
+
+ TRUE if the file can be truncated (PFN locked).
+ FALSE if it cannot be truncated (PFN not locked).
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ LARGE_INTEGER SegmentSize;
+ PCONTROL_AREA ControlArea;
+ PSUBSECTION Subsection;
+
+ if (!MmFlushImageSection (SectionPointer, MmFlushForWrite)) {
+ return FALSE;
+ }
+
+ LOCK_PFN (OldIrql);
+
+ ControlArea = (PCONTROL_AREA)(SectionPointer->DataSectionObject);
+
+ if (ControlArea != NULL) {
+
+ if (ControlArea->u.Flags.BeingCreated) {
+ goto UnlockAndReturn;
+ }
+
+ //
+ // If there are user references and the size is less than the
+ // size of the user view, don't allow the truncation.
+ //
+
+ if (ControlArea->NumberOfUserReferences != 0) {
+
+ //
+ // You cannot purge the entire section if there is a user
+ // reference.
+ //
+
+ if (!ARGUMENT_PRESENT(NewFileSize)) {
+ goto UnlockAndReturn;
+ }
+
+ //
+ // Locate last subsection and get total size.
+ //
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+ while (Subsection->NextSubsection != NULL) {
+ Subsection = Subsection->NextSubsection;
+ }
+
+ SegmentSize.QuadPart =
+ ((LONGLONG)Subsection->EndingSector << MMSECTOR_SHIFT) +
+ Subsection->u.SubsectionFlags.SectorEndOffset;
+
+ if (NewFileSize->QuadPart < SegmentSize.QuadPart) {
+ goto UnlockAndReturn;
+ }
+
+ //
+ // If there are mapped views, we will skip the last page
+ // of the section if the size passed in falls in that page.
+ // The caller (like Cc) may want to clear this fractional page.
+ //
+
+ SegmentSize.QuadPart += PAGE_SIZE - 1;
+ SegmentSize.LowPart &= ~(PAGE_SIZE - 1);
+ if (NewFileSize->QuadPart < SegmentSize.QuadPart) {
+ *NewFileSize = SegmentSize;
+ }
+ }
+ }
+
+ *PreviousIrql = OldIrql;
+ return TRUE;
+
+UnlockAndReturn:
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+}
+
+
+VOID
+MiRemoveUnusedSegments (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine removes unused segments (no section references,
+ no mapped views, only PFN references to pages in transition).
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ PLIST_ENTRY NextEntry;
+ PCONTROL_AREA ControlArea;
+ NTSTATUS Status;
+
+ while (MmUnusedSegmentCount > MmUnusedSegmentCountGoal) {
+
+ //
+ // Eliminate some of the unused segments which are only
+ // kept in memory because they contain transition pages.
+ //
+
+ Status = STATUS_SUCCESS;
+
+ LOCK_PFN (OldIrql);
+
+ if (IsListEmpty(&MmUnusedSegmentList)) {
+
+ //
+ // There is nothing in the list, rewait.
+ //
+
+ ASSERT (MmUnusedSegmentCount == 0);
+ UNLOCK_PFN (OldIrql);
+ break;
+ }
+
+ NextEntry = RemoveHeadList(&MmUnusedSegmentList);
+ MmUnusedSegmentCount -= 1;
+
+ ControlArea = CONTAINING_RECORD( NextEntry,
+ CONTROL_AREA,
+ DereferenceList );
+#if DBG
+ if (MmDebug & MM_DBG_SECTIONS) {
+ DbgPrint("MM: cleaning segment %lx control %lx\n",
+ ControlArea->Segment, ControlArea);
+ }
+#endif
+
+ //
+ // Indicate this entry is not on any list.
+ //
+
+#if DBG
+ if (ControlArea->u.Flags.BeingDeleted == 0) {
+ if (ControlArea->u.Flags.Image) {
+ ASSERT (((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->ImageSectionObject)) != NULL);
+ } else {
+ ASSERT (((PCONTROL_AREA)(ControlArea->FilePointer->SectionObjectPointer->DataSectionObject)) != NULL);
+ }
+ }
+#endif //DBG
+
+ //
+ // Set the flink to NULL indicating this control area
+ // is not on any lists.
+ //
+
+ ControlArea->DereferenceList.Flink = NULL;
+
+ if ((ControlArea->NumberOfMappedViews == 0) &&
+ (ControlArea->NumberOfSectionReferences == 0) &&
+ (ControlArea->u.Flags.BeingDeleted == 0)) {
+
+ //
+ // If there is paging I/O in progress on this
+ // segment, just put this at the tail of the list, as
+ // the call to MiCleanSection would block waiting
+ // for the I/O to complete. As this could tie up
+ // the thread, don't do it.
+ //
+
+ if (ControlArea->ModifiedWriteCount > 0) {
+ InsertTailList ( &MmUnusedSegmentList,
+ &ControlArea->DereferenceList);
+ MmUnusedSegmentCount += 1;
+ UNLOCK_PFN (OldIrql);
+ continue;
+ }
+
+ //
+ // Up the number of mapped views to prevent other threads
+ // from freeing this.
+ //
+
+ ControlArea->NumberOfMappedViews = 1;
+ UNLOCK_PFN (OldIrql);
+ {
+ PSUBSECTION Subsection;
+ PSUBSECTION LastSubsection;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ IO_STATUS_BLOCK IoStatus;
+
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+ PointerPte = &Subsection->SubsectionBase[0];
+ LastSubsection = Subsection;
+ while (LastSubsection->NextSubsection != NULL) {
+ LastSubsection = LastSubsection->NextSubsection;
+ }
+ LastPte = &LastSubsection->SubsectionBase
+ [LastSubsection->PtesInSubsection - 1];
+
+ //
+ // Preacquire the file to prevent deadlocks with other flushers
+ //
+
+ FsRtlAcquireFileForCcFlush (ControlArea->FilePointer);
+
+ Status = MiFlushSectionInternal (PointerPte,
+ LastPte,
+ Subsection,
+ LastSubsection,
+ FALSE,
+ &IoStatus);
+ //
+ // Now release the file
+ //
+
+ FsRtlReleaseFileForCcFlush (ControlArea->FilePointer);
+ }
+
+ LOCK_PFN (OldIrql);
+
+ if (!NT_SUCCESS(Status)) {
+ if ((Status == STATUS_FILE_LOCK_CONFLICT) ||
+ (ControlArea->u.Flags.Networked == 0)) {
+
+ //
+ // If an error occurred, don't delete this section unless
+ // it's a networked file and the status is not
+ // FILE_LOCK_CONFLICT.
+ //
+
+ ControlArea->NumberOfMappedViews -= 1;
+ UNLOCK_PFN (OldIrql);
+ continue;
+ }
+ }
+
+ if (!((ControlArea->NumberOfMappedViews == 1) &&
+ (ControlArea->NumberOfSectionReferences == 0) &&
+ (ControlArea->u.Flags.BeingDeleted == 0))) {
+ ControlArea->NumberOfMappedViews -= 1;
+ UNLOCK_PFN (OldIrql);
+ continue;
+ }
+
+ ControlArea->u.Flags.BeingDeleted = 1;
+
+ //
+ // Don't let any pages be written by the modified
+ // page writer from this point on.
+ //
+
+ ControlArea->u.Flags.NoModifiedWriting = 1;
+ ASSERT (ControlArea->u.Flags.FilePointerNull == 0);
+ UNLOCK_PFN (OldIrql);
+ MiCleanSection (ControlArea);
+ } else {
+
+ //
+ // The segment was not eligible for deletion. Just
+ // leave it off the unused segment list and continue the
+ // loop.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ }
+
+ } //end while
+ return;
+}
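+
+#if 0
+
+//
+// A minimal illustrative sketch (hypothetical worker loop):
+// MiRemoveUnusedSegments is driven by the MmUnusedSegmentCleanup
+// event, which MiCheckControlArea sets when the unused segment list
+// grows well past its maximum.
+//
+
+VOID
+MiSketchUnusedSegmentWorker (
+    VOID
+    )
+{
+    for (;;) {
+        KeWaitForSingleObject (&MmUnusedSegmentCleanup,
+                               WrVirtualMemory,
+                               KernelMode,
+                               FALSE,
+                               (PLARGE_INTEGER)NULL);
+
+        MiRemoveUnusedSegments ();
+    }
+}
+
+#endif //0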
diff --git a/private/ntos/mm/shutdown.c b/private/ntos/mm/shutdown.c
new file mode 100644
index 000000000..333d16e4f
--- /dev/null
+++ b/private/ntos/mm/shutdown.c
@@ -0,0 +1,366 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ shutdown.c
+
+Abstract:
+
+ This module contains the shutdown code for the memory management
+ system.
+
+Author:
+
+ Lou Perazzoli (loup) 21-Aug-1991
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+extern ULONG MmSystemShutdown;
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGELK,MmShutdownSystem)
+#endif
+
+ULONG MmZeroPageFile;
+
+
+
+BOOLEAN
+MmShutdownSystem (
+ IN BOOLEAN RebootPending
+ )
+
+/*++
+
+Routine Description:
+
+ This function performs the shutdown of memory management. This
+ is accomplished by writing out all modified pages which are
+ destined for files other than the paging file.
+
+Arguments:
+
+ RebootPending - Indicates whether or not a reboot is to be performed after the system
+ has been shut down. This parameter is ignored by this routine.
+
+Return Value:
+
+ TRUE if the pages were successfully written, FALSE otherwise.
+
+--*/
+
+{
+ ULONG ModifiedPage;
+ PMMPFN Pfn1;
+ PSUBSECTION Subsection;
+ PCONTROL_AREA ControlArea;
+ PULONG Page;
+ ULONG MdlHack[(sizeof(MDL)/4) + MM_MAXIMUM_WRITE_CLUSTER];
+ PMDL Mdl;
+ NTSTATUS Status;
+ KEVENT IoEvent;
+ IO_STATUS_BLOCK IoStatus;
+ KIRQL OldIrql;
+ LARGE_INTEGER StartingOffset;
+ ULONG count;
+ ULONG j, k;
+ ULONG first;
+ ULONG write;
+ PMMPAGING_FILE PagingFile;
+
+ UNREFERENCED_PARAMETER( RebootPending );
+
+ //
+ // Don't do this more than once.
+ //
+
+ if (!MmSystemShutdown) {
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
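+        //
+        // Build a dummy MDL on the stack that is large enough to describe
+        // a full write cluster; the page frame number array immediately
+        // follows the MDL header.
+        //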
+ Mdl = (PMDL)&MdlHack;
+ Page = (PULONG)(Mdl + 1);
+
+ KeInitializeEvent (&IoEvent, NotificationEvent, FALSE);
+
+ MmInitializeMdl(Mdl,
+ NULL,
+ PAGE_SIZE);
+
+ Mdl->MdlFlags |= MDL_PAGES_LOCKED;
+
+ LOCK_PFN (OldIrql);
+
+ ModifiedPage = MmModifiedPageListHead.Flink;
+ while (ModifiedPage != MM_EMPTY_LIST) {
+
+ //
+ // There are modified pages.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (ModifiedPage);
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 1) {
+
+ //
+ // This page is destined for a file.
+ //
+
+ Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte);
+ ControlArea = Subsection->ControlArea;
+ if ((!ControlArea->u.Flags.Image) &&
+ (!ControlArea->u.Flags.NoModifiedWriting)) {
+
+ MiUnlinkPageFromList (Pfn1);
+
+ //
+ // Issue the write.
+ //
+
+ Pfn1->u3.e1.Modified = 0;
+
+ //
+ // Up the reference count for the physical page as there
+ // is I/O in progress.
+ //
+
+ Pfn1->u3.e2.ReferenceCount += 1;
+
+ *Page = ModifiedPage;
+ ControlArea->NumberOfMappedViews += 1;
+ ControlArea->NumberOfPfnReferences += 1;
+
+ UNLOCK_PFN (OldIrql);
+
+ StartingOffset.QuadPart = MI_STARTING_OFFSET (Subsection,
+ Pfn1->PteAddress);
+
+ Mdl->StartVa = (PVOID)(Pfn1->u3.e1.PageColor << PAGE_SHIFT);
+ KeClearEvent (&IoEvent);
+ Status = IoSynchronousPageWrite (
+ ControlArea->FilePointer,
+ Mdl,
+ &StartingOffset,
+ &IoEvent,
+ &IoStatus );
+
+ //
+ // Ignore all I/O failures - there is nothing that can be
+ // done at this point.
+ //
+
+ if (!NT_SUCCESS(Status)) {
+ KeSetEvent (&IoEvent, 0, FALSE);
+ }
+
+ Status = KeWaitForSingleObject (&IoEvent,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ &MmTwentySeconds);
+
+ if (Status == STATUS_TIMEOUT) {
+
+ //
+ // The write did not complete in 20 seconds, assume
+ // that the file systems are hung and return an
+ // error.
+ //
+
+ Pfn1->u3.e1.Modified = 1;
+ return(FALSE);
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ LOCK_PFN (OldIrql);
+ MiDecrementReferenceCount (ModifiedPage);
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfPfnReferences -= 1;
+ if (ControlArea->NumberOfPfnReferences == 0) {
+
+ //
+ // This routine returns with the PFN lock released!
+ //
+
+ MiCheckControlArea (ControlArea, NULL, OldIrql);
+ LOCK_PFN (OldIrql);
+ }
+
+ //
+ // Restart scan at the front of the list.
+ //
+
+ ModifiedPage = MmModifiedPageListHead.Flink;
+ continue;
+ }
+ }
+ ModifiedPage = Pfn1->u1.Flink;
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // If a high number of modified pages still exist, start the
+ // modified page writer and wait for 5 seconds.
+ //
+
+ if (MmAvailablePages < (MmFreeGoal * 2)) {
+ LARGE_INTEGER FiveSeconds = {(ULONG)(-5 * 1000 * 1000 * 10), -1};
+
+ KeSetEvent (&MmModifiedPageWriterEvent, 0, FALSE);
+ KeDelayExecutionThread (KernelMode,
+ FALSE,
+ &FiveSeconds);
+ }
+
+ //
+ // Indicate to the modified page writer that the system has
+ // shutdown.
+ //
+
+ MmSystemShutdown = 1;
+
+ //
+ // Check to see if the paging file should be overwritten.
+ // Only free blocks are written.
+ //
+
+ if (MmZeroPageFile) {
+
+ //
+ // Get pages to complete the write request.
+ //
+
+ Mdl->StartVa = NULL;
+ j = 0;
+ Page = (PULONG)(Mdl + 1);
+
+ LOCK_PFN (OldIrql);
+
+ if (MmAvailablePages < (MmModifiedWriteClusterSize + 20)) {
+ UNLOCK_PFN(OldIrql);
+ return TRUE;
+ }
+
+ do {
+ *Page = MiRemoveZeroPage (j);
+ Pfn1 = MI_PFN_ELEMENT (*Page);
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->OriginalPte.u.Long = 0;
+ MI_SET_PFN_DELETED (Pfn1);
+ Page += 1;
+ j += 1;
+ } while (j < MmModifiedWriteClusterSize);
+
+ k = 0;
+
+ while (k < MmNumberOfPagingFiles) {
+
+ PagingFile = MmPagingFile[k];
+
+ count = 0;
+ write = FALSE;
+
+ for (j = 1; j < PagingFile->Size; j++) {
+
+ if (RtlCheckBit (PagingFile->Bitmap, j) == 0) {
+
+ if (count == 0) {
+ first = j;
+ }
+ count += 1;
+ if (count == MmModifiedWriteClusterSize) {
+ write = TRUE;
+ }
+ } else {
+ if (count != 0) {
+
+ //
+ // Issue a write.
+ //
+
+ write = TRUE;
+ }
+ }
+
+ if ((j == (PagingFile->Size - 1)) &&
+ (count != 0)) {
+ write = TRUE;
+ }
+
+ if (write) {
+
+ UNLOCK_PFN (OldIrql);
+
+ StartingOffset.QuadPart = (LONGLONG)first << PAGE_SHIFT;
+ Mdl->ByteCount = count << PAGE_SHIFT;
+ KeClearEvent (&IoEvent);
+
+ Status = IoSynchronousPageWrite (
+ PagingFile->File,
+ Mdl,
+ &StartingOffset,
+ &IoEvent,
+ &IoStatus);
+
+ //
+ // Ignore all I/O failures - there is nothing that can be
+ // done at this point.
+ //
+
+ if (!NT_SUCCESS(Status)) {
+ KeSetEvent (&IoEvent, 0, FALSE);
+ }
+
+ Status = KeWaitForSingleObject (&IoEvent,
+ WrPageOut,
+ KernelMode,
+ FALSE,
+ &MmTwentySeconds);
+
+ if (Status == STATUS_TIMEOUT) {
+
+ //
+ // The write did not complete in 20 seconds, assume
+ // that the file systems are hung and return an
+ // error.
+ //
+
+ return(FALSE);
+ }
+
+ if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
+ MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
+ }
+
+ LOCK_PFN (OldIrql);
+ count = 0;
+ write = FALSE;
+ }
+ }
+ k += 1;
+ }
+ j = 0;
+ Page = (PULONG)(Mdl + 1);
+ do {
+ MiDecrementReferenceCount (*Page);
+ Page += 1;
+ j += 1;
+ } while (j < MmModifiedWriteClusterSize);
+ UNLOCK_PFN (OldIrql);
+ }
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ }
+ return TRUE;
+}
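+
+#if 0
+
+//
+// A minimal illustrative sketch (hypothetical caller): the executive
+// shutdown path calls MmShutdownSystem once, before the file systems
+// are taken offline.
+//
+
+VOID
+SketchShutdownPhase (
+    IN BOOLEAN Reboot
+    )
+{
+    if (MmShutdownSystem (Reboot) == FALSE) {
+
+        //
+        // A modified page write timed out; the file systems are
+        // presumed hung and the flush was abandoned.
+        //
+
+        KdPrint (("MM: modified page flush failed at shutdown\n"));
+    }
+}
+
+#endif //0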
+
diff --git a/private/ntos/mm/sources.inc b/private/ntos/mm/sources.inc
new file mode 100644
index 000000000..db0eb6f07
--- /dev/null
+++ b/private/ntos/mm/sources.inc
@@ -0,0 +1,87 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+MAJORCOMP=ntos
+MINORCOMP=mm
+
+TARGETNAME=mm
+TARGETTYPE=LIBRARY
+
+INCLUDES=..;..\..\inc;..\..\ke
+MIPS_OPTIONS=-nodwalign
+GPSIZE=32
+
+MSC_WARNING_LEVEL=/W3 /WX
+
+C_DEFINES=$(C_DEFINES) -D_NTSYSTEM_
+
+SOURCES=..\acceschk.c \
+ ..\addrsup.c \
+ ..\allocpag.c \
+ ..\allocvm.c \
+ ..\checkpfn.c \
+ ..\checkpte.c \
+ ..\creasect.c \
+ ..\deleteva.c \
+ ..\dmpaddr.c \
+ ..\extsect.c \
+ ..\flushbuf.c \
+ ..\flushsec.c \
+ ..\forksup.c \
+ ..\freevm.c \
+ ..\iosup.c \
+ ..\lockvm.c \
+ ..\mapcache.c \
+ ..\mapview.c \
+ ..\miglobal.c \
+ ..\mmfault.c \
+ ..\mminit.c \
+ ..\mmsup.c \
+ ..\mmquota.c \
+ ..\modwrite.c \
+ ..\pagfault.c \
+ ..\pfndec.c \
+ ..\pfnlist.c \
+ ..\procsup.c \
+ ..\protect.c \
+ ..\querysec.c \
+ ..\queryvm.c \
+ ..\readwrt.c \
+ ..\sectsup.c \
+ ..\shutdown.c \
+ ..\sysload.c \
+ ..\sysptes.c \
+ ..\umapview.c \
+ ..\vadtree.c \
+ ..\wslist.c \
+ ..\wsmanage.c \
+ ..\wstree.c \
+ ..\wrtfault.c \
+ ..\zeropage.c
+
+PRECOMPILED_INCLUDE=..\mi.h
+PRECOMPILED_PCH=mi.pch
+PRECOMPILED_OBJ=mi.obj
+
+SOURCES_USED=..\sources.inc
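+
+!IF 0
+
+A minimal illustrative sketch (assumed build convention, not part of
+this file): each per-architecture sources file sets its own variables
+and then pulls in this shared list, roughly:
+
+ !include ..\sources.inc
+
+!ENDIF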
diff --git a/private/ntos/mm/super.c b/private/ntos/mm/super.c
new file mode 100644
index 000000000..958b5211f
--- /dev/null
+++ b/private/ntos/mm/super.c
@@ -0,0 +1,651 @@
+/*++
+
+Copyright (c) 1992 Microsoft Corporation
+
+Module Name:
+
+ super.c
+
+Abstract:
+
+ This module contains the routines which implement the SuperSection
+ object.
+
+Author:
+
+ Lou Perazzoli (loup) 4-Apr-92
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+#include "zwapi.h"
+
+
+VOID
+MiSuperSectionDelete (
+ PVOID Object
+ );
+
+BOOLEAN
+MiSuperSectionInitialization (
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MiSuperSectionInitialization)
+#endif
+
+#define MM_MAX_SUPERSECTION_COUNT (32)
+
+POBJECT_TYPE MmSuperSectionObjectType;
+
+#define STATUS_TOO_MANY_SECTIONS ((NTSTATUS)0xC0033333)
+#define STATUS_INCOMPLETE_MAP ((NTSTATUS)0xC0033334)
+
+
+extern GENERIC_MAPPING MiSectionMapping;
+
+typedef struct _MMSUPER_SECTION {
+ ULONG NumberOfSections;
+ PSECTION SectionPointers[1];
+} MMSUPER_SECTION, *PMMSUPER_SECTION;
+
+
+NTSTATUS
+NtCreateSuperSection (
+ OUT PHANDLE SuperSectionHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL,
+ IN ULONG Count,
+ IN HANDLE SectionHandles[]
+ )
+
+/*++
+
+Routine Description:
+
+ This routine creates a supersection object. A supersection
+ object consists of a group of sections that are mapped as images.
+
+Arguments:
+
+ SuperSectionHandle - Returns a handle to the created supersection.
+
+ DesiredAccess - Supplies the desired access for the super section.
+
+ ObjectAttributes - Supplies the object attributes for the super
+ section.
+
+ Count - Supplies the number of sections contained in the section
+ handle array.
+
+ SectionHandles[] - Supplies the section handles to place into
+ the supersection.
+
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+--*/
+
+{
+ NTSTATUS Status;
+ PSECTION Section;
+ PCONTROL_AREA ControlArea;
+ HANDLE CapturedHandle;
+ HANDLE CapturedHandles[MM_MAX_SUPERSECTION_COUNT];
+ PMMSUPER_SECTION SuperSection;
+ KPROCESSOR_MODE PreviousMode;
+ ULONG RefCount;
+ ULONG i;
+ KIRQL OldIrql;
+
+ if (Count > MM_MAX_SUPERSECTION_COUNT) {
+ return STATUS_TOO_MANY_SECTIONS;
+ }
+
+ PreviousMode = KeGetPreviousMode();
+
+ try {
+
+ if (PreviousMode != KernelMode) {
+ ProbeForWriteHandle (SuperSectionHandle);
+ }
+
+ i= 0;
+ do {
+ CapturedHandles[i] = SectionHandles[i];
+ i += 1;
+ } while (i < Count);
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+ Status = ObCreateObject (PreviousMode,
+ MmSuperSectionObjectType,
+ ObjectAttributes,
+ PreviousMode,
+ NULL,
+ sizeof(MMSUPER_SECTION) +
+ (sizeof(PSECTION) * (Count - 1)),
+ sizeof(MMSUPER_SECTION) +
+ (sizeof(PSECTION) * (Count - 1)),
+ 0,
+ (PVOID *)&SuperSection);
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ SuperSection->NumberOfSections = Count;
+
+ i = 0;
+ RefCount = 0;
+ do {
+
+ //
+ // Get a referenced pointer to the specified objects with
+ // the desired access.
+ //
+
+ Status = ObReferenceObjectByHandle(CapturedHandles[i],
+ DesiredAccess,
+ MmSectionObjectType,
+ PreviousMode,
+ (PVOID *)&Section,
+ NULL);
+
+ if (NT_SUCCESS(Status) != FALSE) {
+ if (Section->u.Flags.Image == 0) {
+
+ //
+ // This is not an image section, return an error.
+ //
+
+ Status = STATUS_SECTION_NOT_IMAGE;
+ goto ServiceFailed;
+ }
+ RefCount += 1;
+ SuperSection->SectionPointers[i] = Section;
+ } else {
+ goto ServiceFailed;
+ }
+
+ i += 1;
+ } while (i < Count);
+
+ i= 0;
+ do {
+
+ //
+ // For each section, increment its control area's section
+ // reference counts.
+ //
+
+ ControlArea = SuperSection->SectionPointers[i]->Segment->ControlArea;
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfSectionReferences += 1;
+ ControlArea->NumberOfUserReferences += 1;
+ UNLOCK_PFN (OldIrql);
+ i++;
+
+ } while (i < Count);
+
+
+ Status = ObInsertObject (SuperSection,
+ NULL,
+ DesiredAccess,
+ 0,
+ (PVOID *)NULL,
+ &CapturedHandle);
+
+ try {
+ *SuperSectionHandle = CapturedHandle;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return Status;
+ }
+ return Status;
+
+ServiceFailed:
+ while (RefCount > 0) {
+ RefCount -= 1;
+ ObDereferenceObject(SuperSection->SectionPointers[RefCount]);
+ }
+
+ //
+ // Delete the supersection object as it was never inserted into
+ // a handle table.
+ //
+
+ ObDereferenceObject (SuperSection);
+ return Status;
+}
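+
+#if 0
+
+//
+// A minimal illustrative sketch (hypothetical caller): creating a
+// supersection from a set of image section handles and mapping all of
+// its views with a single call.
+//
+
+NTSTATUS
+SketchMapImageGroup (
+    IN HANDLE ProcessHandle,
+    IN ULONG Count,
+    IN HANDLE SectionHandles[]
+    )
+{
+    HANDLE SuperSectionHandle;
+    PVOID Bases[MM_MAX_SUPERSECTION_COUNT];
+    ULONG Sizes[MM_MAX_SUPERSECTION_COUNT];
+    ULONG Mapped;
+    NTSTATUS Status;
+
+    Status = NtCreateSuperSection (&SuperSectionHandle,
+                                   SECTION_MAP_EXECUTE,
+                                   NULL,
+                                   Count,
+                                   SectionHandles);
+    if (!NT_SUCCESS (Status)) {
+        return Status;
+    }
+
+    //
+    // Mapped supplies the capacity of the output arrays on input and
+    // returns the number of views actually mapped.
+    //
+
+    Mapped = Count;
+    Status = NtMapViewOfSuperSection (SuperSectionHandle,
+                                      ProcessHandle,
+                                      &Mapped,
+                                      Bases,
+                                      Sizes,
+                                      ViewUnmap);
+
+    ZwClose (SuperSectionHandle);
+    return Status;
+}
+
+#endif //0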
+
+
+NTSTATUS
+NtOpenSuperSection (
+ OUT PHANDLE SuperSectionHandle,
+ IN ACCESS_MASK DesiredAccess,
+ IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This routine opens a supersection object. A supersection
+ object consists of a group of sections that are mapped as images.
+
+Arguments:
+
+ SuperSectionHandle - Returns a handle to the created supersection.
+
+ DesiredAccess - Supplies the desired access for the super section.
+
+ ObjectAttributes - Supplies the object attributes for the super
+ section.
+
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+--*/
+
+{
+ HANDLE Handle;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+
+ //
+ // Get previous processor mode and probe output arguments if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ try {
+ ProbeForWriteHandle(SuperSectionHandle);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+ }
+
+ //
+ // Open handle to the super section object with the specified desired
+ // access.
+ //
+
+ Status = ObOpenObjectByName (ObjectAttributes,
+ MmSuperSectionObjectType,
+ PreviousMode,
+ NULL,
+ DesiredAccess,
+ NULL,
+ &Handle
+ );
+
+ try {
+ *SuperSectionHandle = Handle;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return Status;
+ }
+
+ return Status;
+}
+
+
+NTSTATUS
+NtMapViewOfSuperSection (
+ IN HANDLE SuperSectionHandle,
+ IN HANDLE ProcessHandle,
+ IN OUT PULONG Count,
+ OUT PVOID BaseAddress[],
+ OUT ULONG ViewSize[],
+ IN SECTION_INHERIT InheritDisposition
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps into the specified process a view of each
+ section contained within the supersection.
+
+Arguments:
+
+ SuperSectionHandle - Supplies a handle to the supersection.
+
+ ProcessHandle - Supplies a handle to the process in which to
+ map the supersection's sections.
+
+ Count - Supplies the number of elements in the BaseAddress and
+ ViewSize arrays, returns the number of views actually
+ mapped.
+
+
+ BaseAddresses[] - Returns the base address of each view that was mapped.
+
+ ViewSize[] - Returns the view size of each view that was mapped.
+
+ InheritDisposition - Supplies the inherit disposition to be applied
+ to each section which is contained in the
+ super section.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+--*/
+
+{
+ NTSTATUS Status;
+ PVOID CapturedBases[MM_MAX_SUPERSECTION_COUNT];
+ ULONG CapturedViews[MM_MAX_SUPERSECTION_COUNT];
+ PMMSUPER_SECTION SuperSection;
+ KPROCESSOR_MODE PreviousMode;
+ ULONG i;
+ ULONG CapturedCount;
+ ULONG NumberMapped;
+ PEPROCESS Process;
+ LARGE_INTEGER LargeZero = {0,0};
+
+
+ PreviousMode = KeGetPreviousMode();
+
+ try {
+ ProbeForWriteUlong (Count);
+ CapturedCount = *Count;
+
+ if (PreviousMode != KernelMode) {
+ ProbeForWrite (BaseAddress,
+ sizeof(PVOID) * CapturedCount,
+ sizeof(PVOID));
+ ProbeForWrite (ViewSize,
+ sizeof(ULONG) * CapturedCount,
+ sizeof(ULONG));
+ }
+
+ } except (ExSystemExceptionFilter()) {
+
+ //
+ // If an exception occurs during the probe or capture
+ // of the initial values, then handle the exception and
+ // return the exception code as the status value.
+ //
+
+ return GetExceptionCode();
+ }
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL );
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ //
+ // Reference the supersection object.
+ //
+
+ Status = ObReferenceObjectByHandle ( SuperSectionHandle,
+ SECTION_MAP_EXECUTE,
+ MmSuperSectionObjectType,
+ PreviousMode,
+ (PVOID *)&SuperSection,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ ObDereferenceObject (Process);
+ return Status;
+ }
+
+ if (CapturedCount < SuperSection->NumberOfSections) {
+ ObDereferenceObject (Process);
+ ObDereferenceObject (SuperSection);
+ return STATUS_BUFFER_TOO_SMALL;
+ }
+
+ NumberMapped = 0;
+ i = 0;
+ do {
+
+ //
+ // For each section within the supersection, map a view in
+ // the specified process.
+ //
+
+ Status = MmMapViewOfSection (SuperSection->SectionPointers[i],
+ Process,
+ &CapturedBases[i],
+ 0,
+ 0,
+ &LargeZero,
+ &CapturedViews[i],
+ InheritDisposition,
+ 0,
+ PAGE_EXECUTE);
+
+ if (NT_SUCCESS (Status) == FALSE) {
+ Status = STATUS_INCOMPLETE_MAP;
+ break;
+ }
+ NumberMapped++;
+ i++;
+ } while (NumberMapped < SuperSection->NumberOfSections);
+
+ //
+ // Dereference the supersection and the process.
+ //
+
+ ObDereferenceObject (SuperSection);
+ ObDereferenceObject (Process);
+
+ try {
+ *Count = NumberMapped;
+ i = 0;
+
+ while (i < NumberMapped) {
+
+ //
+ // Store the captured view base and sizes for each section
+ // that was mapped.
+ //
+
+ BaseAddress[i] = CapturedBases[i];
+ ViewSize[i] = CapturedViews[i];
+
+ i++;
+ }
+
+ } except (ExSystemExceptionFilter()) {
+ NOTHING;
+ }
+
+ return(Status);
+}
+
+
+
+#if 0
+
+NTSTATUS
+NtQuerySuperSection (
+ IN HANDLE SuperSectionHandle,
+ IN SUPERSECTION_INFORMATION_CLASS SectionInformationClass,
+ OUT PVOID SuperSectionInformation,
+ IN ULONG SuperSectionInformationLength,
+ OUT PULONG ReturnLength OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns information about an opened supersection object.
+ This function provides the capability to determine the basic section
+ information about each section in the supersection or the image
+ information about each section in the supersection.
+
+Arguments:
+
+ SuperSectionHandle - Supplies an open handle to a section object.
+
+ SectionInformationClass - The section information class about
+ which to retrieve information.
+
+ SuperSectionInformation - A pointer to a buffer that receives the
+ specified information. The format and content of the buffer
+ depend on the specified section class.
+
+
+ SuperSectionInformationLength - Specifies the length in bytes of the
+ section information buffer.
+
+ ReturnLength - An optional pointer which, if specified, receives
+ the number of bytes placed in the section information buffer.
+
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
+
+
+#endif //0
+
+VOID
+MiSuperSectionDelete (
+ PVOID Object
+ )
+
+/*++
+
+Routine Description:
+
+
+ This routine is called by the object management procedures whenever
+ the last reference to a super section object has been removed.
+ This routine dereferences the associated segment objects.
+
+Arguments:
+
+ Object - a pointer to the body of the supersection object.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMSUPER_SECTION SuperSection;
+ PCONTROL_AREA ControlArea;
+ KIRQL OldIrql;
+ ULONG i = 0;
+
+ SuperSection = (PMMSUPER_SECTION)Object;
+
+ do {
+
+ //
+ // For each section, decrement the section reference counts
+ // and dereference the section object.
+ //
+
+ ControlArea = SuperSection->SectionPointers[i]->Segment->ControlArea;
+
+ LOCK_PFN (OldIrql);
+ ControlArea->NumberOfSectionReferences -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+ UNLOCK_PFN (OldIrql);
+ ObDereferenceObject (SuperSection->SectionPointers[i]);
+ i++;
+
+ } while (i < SuperSection->NumberOfSections);
+
+ return;
+}
+
+BOOLEAN
+MiSuperSectionInitialization (
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates the supersection object type descriptor at system
+ initialization and stores the address of the object type descriptor
+ in global storage.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ TRUE - Initialization was successful.
+
+ FALSE - Initialization Failed.
+
+
+
+--*/
+
+{
+ OBJECT_TYPE_INITIALIZER ObjectTypeInitializer;
+ UNICODE_STRING TypeName;
+
+ //
+ // Initialize the common fields of the Object Type Initializer record
+ //
+
+ RtlZeroMemory( &ObjectTypeInitializer, sizeof( ObjectTypeInitializer ) );
+ ObjectTypeInitializer.Length = sizeof( ObjectTypeInitializer );
+ ObjectTypeInitializer.InvalidAttributes = OBJ_OPENLINK;
+ ObjectTypeInitializer.GenericMapping = MiSectionMapping;
+ ObjectTypeInitializer.PoolType = PagedPool;
+
+ //
+ // Initialize string descriptor.
+ //
+
+ RtlInitUnicodeString (&TypeName, L"SuperSection");
+
+ //
+ // Create the supersection object type descriptor
+ //
+
+ ObjectTypeInitializer.ValidAccessMask = SECTION_ALL_ACCESS;
+ ObjectTypeInitializer.DeleteProcedure = MiSuperSectionDelete;
+ ObjectTypeInitializer.GenericMapping = MiSectionMapping;
+ ObjectTypeInitializer.UseDefaultObject = TRUE;
+ if ( !NT_SUCCESS(ObCreateObjectType(&TypeName,
+ &ObjectTypeInitializer,
+ (PSECURITY_DESCRIPTOR) NULL,
+ &MmSuperSectionObjectType
+ )) ) {
+ return FALSE;
+ }
+
+ return TRUE;
+
+}
diff --git a/private/ntos/mm/sysload.c b/private/ntos/mm/sysload.c
new file mode 100644
index 000000000..d06fe29fa
--- /dev/null
+++ b/private/ntos/mm/sysload.c
@@ -0,0 +1,2533 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ sysload.c
+
+Abstract:
+
+ This module contains the code to load DLLs into the system
+ portion of the address space and calls the DLL at it's
+ initialization entry point.
+
+Author:
+
+ Lou Perazzoli 21-May-1991
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+extern ULONG MmPagedPoolCommit;
+
+KMUTANT MmSystemLoadLock;
+
+ULONG MmTotalSystemDriverPages;
+
+ULONG MmDriverCommit;
+
+//
+// ****** temporary ******
+//
+// Define reference to external spin lock.
+//
+// ****** temporary ******
+//
+
+extern KSPIN_LOCK PsLoadedModuleSpinLock;
+
+#if DBG
+ULONG MiPagesConsumed;
+#endif
+
+ULONG
+CacheImageSymbols(
+ IN PVOID ImageBase
+ );
+
+NTSTATUS
+MiResolveImageReferences(
+ PVOID ImageBase,
+ IN PUNICODE_STRING ImageFileDirectory,
+ OUT PCHAR *MissingProcedureName,
+ OUT PWSTR *MissingDriverName
+ );
+
+NTSTATUS
+MiSnapThunk(
+ IN PVOID DllBase,
+ IN PVOID ImageBase,
+ IN OUT PIMAGE_THUNK_DATA Thunk,
+ IN PIMAGE_EXPORT_DIRECTORY ExportDirectory,
+ IN ULONG ExportSize,
+ IN BOOLEAN SnapForwarder,
+ OUT PCHAR *MissingProcedureName
+ );
+
+NTSTATUS
+MiLoadImageSection (
+ IN PSECTION SectionPointer,
+ OUT PVOID *ImageBase
+ );
+
+VOID
+MiEnablePagingOfDriver (
+ IN PVOID ImageHandle
+ );
+
+VOID
+MiSetPagingOfDriver (
+ IN PMMPTE PointerPte,
+ IN PMMPTE LastPte
+ );
+
+PVOID
+MiLookupImageSectionByName (
+ IN PVOID Base,
+ IN BOOLEAN MappedAsImage,
+ IN PCHAR SectionName,
+ OUT PULONG SectionSize
+ );
+
+NTSTATUS
+MiUnloadSystemImageByForce (
+ IN ULONG NumberOfPtes,
+ IN PVOID ImageBase
+ );
+
+
+NTSTATUS
+MmCheckSystemImage(
+ IN HANDLE ImageFileHandle
+ );
+
+LONG
+MiMapCacheExceptionFilter (
+ OUT PNTSTATUS Status,
+ IN PEXCEPTION_POINTERS ExceptionPointer
+ );
+
+VOID
+MiSetImageProtectWrite (
+ IN PSEGMENT Segment
+ );
+
+ULONG
+MiSetProtectionOnTransitionPte (
+ IN PMMPTE PointerPte,
+ IN ULONG ProtectionMask
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,MmCheckSystemImage)
+#pragma alloc_text(PAGE,MmLoadSystemImage)
+#pragma alloc_text(PAGE,MiResolveImageReferences)
+#pragma alloc_text(PAGE,MiSnapThunk)
+#pragma alloc_text(PAGE,MiUnloadSystemImageByForce)
+#pragma alloc_text(PAGE,MiEnablePagingOfDriver)
+#pragma alloc_text(PAGE,MmPageEntireDriver)
+#pragma alloc_text(PAGE,MiSetImageProtectWrite)
+
+#if !defined(NT_UP)
+#pragma alloc_text(PAGE,MmVerifyImageIsOkForMpUse)
+#endif // NT_UP
+
+#pragma alloc_text(PAGELK,MiLoadImageSection)
+#pragma alloc_text(PAGELK,MmFreeDriverInitialization)
+#pragma alloc_text(PAGELK,MmUnloadSystemImage)
+#pragma alloc_text(PAGELK,MiSetPagingOfDriver)
+#pragma alloc_text(PAGELK,MmResetDriverPaging)
+#endif
+
+
+
+NTSTATUS
+MmLoadSystemImage (
+ IN PUNICODE_STRING ImageFileName,
+ OUT PVOID *ImageHandle,
+ OUT PVOID *ImageBaseAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This routine reads the image pages from the specified section into
+ the system and returns the address of the DLL's header.
+
+ At successful completion, the Section is referenced so it remains
+ until the system image is unloaded.
+
+Arguments:
+
+ ImageFileName - Supplies the full path name (including the image name)
+ of the image to load.
+
+ ImageHandle - Returns an opaque handle (the loader data table entry)
+ for the image that was loaded.
+
+ ImageBaseAddress - Returns the image base within the system.
+
+Return Value:
+
+ Status of the load operation.
+
+--*/
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ NTSTATUS Status;
+ PSECTION SectionPointer;
+ PIMAGE_NT_HEADERS NtHeaders;
+ UNICODE_STRING BaseName;
+ UNICODE_STRING BaseDirectory;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ HANDLE FileHandle = (HANDLE)0;
+ HANDLE SectionHandle;
+ IO_STATUS_BLOCK IoStatus;
+ CHAR NameBuffer[ MAXIMUM_FILENAME_LENGTH ];
+ PLIST_ENTRY NextEntry;
+ ULONG NumberOfPtes;
+ PCHAR MissingProcedureName;
+ PWSTR MissingDriverName;
+
+ PAGED_CODE();
+
+ KeWaitForSingleObject (&MmSystemLoadLock,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+#if DBG
+ if ( NtGlobalFlag & FLG_SHOW_LDR_SNAPS ) {
+ DbgPrint( "MM:SYSLDR Loading %wZ\n", ImageFileName );
+ }
+#endif
+ MissingProcedureName = NULL;
+ MissingDriverName = NULL;
+
+ //
+ // Attempt to open the driver image itself. If this fails, then the
+ // driver image cannot be located, so nothing else matters.
+ //
+
+ InitializeObjectAttributes( &ObjectAttributes,
+ ImageFileName,
+ OBJ_CASE_INSENSITIVE,
+ NULL,
+ NULL );
+
+ Status = ZwOpenFile( &FileHandle,
+ FILE_EXECUTE,
+ &ObjectAttributes,
+ &IoStatus,
+ FILE_SHARE_READ | FILE_SHARE_DELETE,
+ 0 );
+
+ if (!NT_SUCCESS( Status )) {
+
+ //
+ // Don't raise hard error status for file not found.
+ //
+
+ goto return2;
+ }
+
+ Status = MmCheckSystemImage(FileHandle);
+ if ( Status == STATUS_IMAGE_CHECKSUM_MISMATCH || Status == STATUS_IMAGE_MP_UP_MISMATCH) {
+ goto return1;
+ }
+
+ if (ImageFileName->Buffer[0] == OBJ_NAME_PATH_SEPARATOR) {
+ PWCHAR p;
+ ULONG l;
+
+ p = &ImageFileName->Buffer[ImageFileName->Length>>1];
+ while (*(p-1) != OBJ_NAME_PATH_SEPARATOR) {
+ p--;
+ }
+ l = &ImageFileName->Buffer[ImageFileName->Length>>1] - p;
+ l *= sizeof(WCHAR);
+ BaseName.Length = (USHORT)l;
+ BaseName.Buffer = p;
+ } else {
+ BaseName.Length = ImageFileName->Length;
+ BaseName.Buffer = ImageFileName->Buffer;
+ }
+
+ BaseName.MaximumLength = BaseName.Length;
+ BaseDirectory = *ImageFileName;
+ BaseDirectory.Length -= BaseName.Length;
+ BaseDirectory.MaximumLength = BaseDirectory.Length;
+
+ //
+ // Check to see if this name already exists in the loader database.
+ //
+
+ NextEntry = PsLoadedModuleList.Flink;
+ while (NextEntry != &PsLoadedModuleList) {
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+ if (RtlEqualString((PSTRING)ImageFileName,
+ (PSTRING)&DataTableEntry->FullDllName,
+ TRUE)) {
+
+ *ImageHandle = DataTableEntry;
+ *ImageBaseAddress = DataTableEntry->DllBase;
+ DataTableEntry->LoadCount += 1;
+ Status = STATUS_IMAGE_ALREADY_LOADED;
+ goto return2;
+ }
+
+ NextEntry = NextEntry->Flink;
+ }
+
+ //
+ // Now attempt to create an image section for the file. If this fails,
+ // then the driver file is not an image.
+ //
+
+ Status = ZwCreateSection (&SectionHandle,
+ SECTION_ALL_ACCESS,
+ (POBJECT_ATTRIBUTES) NULL,
+ (PLARGE_INTEGER) NULL,
+ PAGE_EXECUTE,
+ SEC_IMAGE,
+ FileHandle );
+ if (!NT_SUCCESS( Status )) {
+ goto return1;
+ }
+
+ //
+ // Now reference the section handle.
+ //
+
+ Status = ObReferenceObjectByHandle (SectionHandle,
+ SECTION_MAP_EXECUTE,
+ MmSectionObjectType,
+ KernelMode,
+ (PVOID *) &SectionPointer,
+ (POBJECT_HANDLE_INFORMATION) NULL );
+
+ ZwClose (SectionHandle);
+ if (!NT_SUCCESS (Status)) {
+ goto return1;
+ }
+
+ if ((SectionPointer->Segment->BasedAddress == (PVOID)MmSystemSpaceViewStart) &&
+ (SectionPointer->Segment->ControlArea->NumberOfMappedViews == 0)) {
+ NumberOfPtes = 0;
+ Status = MmMapViewInSystemSpace (SectionPointer,
+ ImageBaseAddress,
+ &NumberOfPtes);
+ if ((NT_SUCCESS( Status ) &&
+ (*ImageBaseAddress == SectionPointer->Segment->BasedAddress))) {
+ SectionPointer->Segment->ControlArea->u.Flags.ImageMappedInSystemSpace = 1;
+ NumberOfPtes = (NumberOfPtes + 1) >> PAGE_SHIFT;
+ MiSetImageProtectWrite (SectionPointer->Segment);
+ goto BindImage;
+ }
+ }
+ MmLockPagableSectionByHandle (ExPageLockHandle);
+ Status = MiLoadImageSection (SectionPointer, ImageBaseAddress);
+
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ NumberOfPtes = SectionPointer->Segment->TotalNumberOfPtes;
+ ObDereferenceObject (SectionPointer);
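+
+    //
+    // Mark the section pointer with a distinguished value so the code
+    // below knows the image was copied into system space (and can
+    // therefore be paged) rather than mapped directly from the section.
+    //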
+ SectionPointer = (PVOID)0xFFFFFFFF;
+
+ if (!NT_SUCCESS( Status )) {
+ goto return1;
+ }
+
+ //
+ // Apply the fixups to the section and resolve its image references.
+ //
+
+ try {
+ Status = (NTSTATUS)LdrRelocateImage(*ImageBaseAddress,
+ "SYSLDR",
+ (ULONG)STATUS_SUCCESS,
+ (ULONG)STATUS_CONFLICTING_ADDRESSES,
+ (ULONG)STATUS_INVALID_IMAGE_FORMAT
+ );
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Status = GetExceptionCode();
+ KdPrint(("MM:sysload - LdrRelocateImage failed status %lx\n",
+ Status));
+ }
+ if ( !NT_SUCCESS(Status) ) {
+
+ //
+ // Unload the system image and dereference the section.
+ //
+
+ MiUnloadSystemImageByForce (NumberOfPtes, *ImageBaseAddress);
+ goto return1;
+ }
+
+BindImage:
+
+ try {
+ MissingProcedureName = NameBuffer;
+ Status = MiResolveImageReferences(*ImageBaseAddress,
+ &BaseDirectory,
+ &MissingProcedureName,
+ &MissingDriverName
+ );
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Status = GetExceptionCode();
+ KdPrint(("MM:sysload - ResolveImageReferences failed status %lx\n",
+ Status));
+ }
+ if ( !NT_SUCCESS(Status) ) {
+ MiUnloadSystemImageByForce (NumberOfPtes, *ImageBaseAddress);
+ goto return1;
+ }
+
+#if DBG
+ if (NtGlobalFlag & FLG_SHOW_LDR_SNAPS) {
+ KdPrint (("MM:loaded driver - consumed %ld. pages\n",MiPagesConsumed));
+ }
+#endif
+
+ //
+ // Allocate a data table entry for structured exception handling.
+ //
+
+ DataTableEntry = ExAllocatePoolWithTag (NonPagedPool,
+ sizeof(LDR_DATA_TABLE_ENTRY) +
+ BaseName.Length + sizeof(UNICODE_NULL),
+ 'dLmM');
+ if (DataTableEntry == NULL) {
+ MiUnloadSystemImageByForce (NumberOfPtes, *ImageBaseAddress);
+ Status = STATUS_INSUFFICIENT_RESOURCES;
+ goto return1;
+ }
+
+ //
+ // Initialize the address of the DLL image file header and the entry
+ // point address.
+ //
+
+ NtHeaders = RtlImageNtHeader(*ImageBaseAddress);
+
+ DataTableEntry->DllBase = *ImageBaseAddress;
+ DataTableEntry->EntryPoint =
+ (PVOID)((ULONG)*ImageBaseAddress + NtHeaders->OptionalHeader.AddressOfEntryPoint);
+ DataTableEntry->SizeOfImage = NumberOfPtes << PAGE_SHIFT;
+ DataTableEntry->CheckSum = NtHeaders->OptionalHeader.CheckSum;
+ DataTableEntry->SectionPointer = (PVOID)SectionPointer;
+
+ //
+ // Store the DLL name.
+ //
+
+ DataTableEntry->BaseDllName.Buffer = (PWSTR)(DataTableEntry + 1);
+ DataTableEntry->BaseDllName.Length = BaseName.Length;
+ DataTableEntry->BaseDllName.MaximumLength = BaseName.Length;
+ RtlMoveMemory (DataTableEntry->BaseDllName.Buffer,
+ BaseName.Buffer,
+ BaseName.Length );
+ DataTableEntry->BaseDllName.Buffer[BaseName.Length/sizeof(WCHAR)] = UNICODE_NULL;
+
+ DataTableEntry->FullDllName.Buffer = ExAllocatePoolWithTag (PagedPool,
+ ImageFileName->Length + sizeof(UNICODE_NULL),
+ 'TDmM');
+ if (DataTableEntry->FullDllName.Buffer == NULL) {
+
+ //
+ // Pool could not be allocated, just set the length to 0.
+ //
+
+ DataTableEntry->FullDllName.Length = 0;
+ DataTableEntry->FullDllName.MaximumLength = 0;
+ } else {
+ DataTableEntry->FullDllName.Length = ImageFileName->Length;
+ DataTableEntry->FullDllName.MaximumLength = ImageFileName->Length;
+ RtlMoveMemory (DataTableEntry->FullDllName.Buffer,
+ ImageFileName->Buffer,
+ ImageFileName->Length);
+ DataTableEntry->FullDllName.Buffer[ImageFileName->Length/sizeof(WCHAR)] = UNICODE_NULL;
+ }
+
+ //
+ // Initialize the flags, load count, and insert the data table entry
+ // in the loaded module list.
+ //
+
+ DataTableEntry->Flags = LDRP_ENTRY_PROCESSED;
+ DataTableEntry->LoadCount = 1;
+
+ if (CacheImageSymbols (*ImageBaseAddress)) {
+
+ //
+ // TEMP TEMP TEMP rip out when debugger converted
+ //
+
+ ANSI_STRING AnsiName;
+ UNICODE_STRING UnicodeName;
+
+ //
+ // \SystemRoot is 11 characters in length
+ //
+ if (ImageFileName->Length > (11 * sizeof( WCHAR )) &&
+ !_wcsnicmp( ImageFileName->Buffer, L"\\SystemRoot", 11 )
+ ) {
+ UnicodeName = *ImageFileName;
+ UnicodeName.Buffer += 11;
+ UnicodeName.Length -= (11 * sizeof( WCHAR ));
+ sprintf( NameBuffer, "%ws%wZ", &SharedUserData->NtSystemRoot[2], &UnicodeName );
+ } else {
+ sprintf( NameBuffer, "%wZ", &BaseName );
+ }
+ RtlInitString( &AnsiName, NameBuffer );
+ DbgLoadImageSymbols( &AnsiName,
+ *ImageBaseAddress,
+ (ULONG)-1
+ );
+ DataTableEntry->Flags |= LDRP_DEBUG_SYMBOLS_LOADED;
+ }
+
+ //
+ // Acquire the loaded module list resource and insert this entry
+ // into the list.
+ //
+
+ KeEnterCriticalRegion();
+ ExAcquireResourceExclusive (&PsLoadedModuleResource, TRUE);
+
+ ExInterlockedInsertTailList(&PsLoadedModuleList,
+ &DataTableEntry->InLoadOrderLinks,
+ &PsLoadedModuleSpinLock);
+
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+
+ //
+ // Flush the instruction cache on all processors in the configuration.
+ //
+
+ KeSweepIcache (TRUE);
+ *ImageHandle = DataTableEntry;
+ Status = STATUS_SUCCESS;
+
+ if (SectionPointer == (PVOID)0xFFFFFFFF) {
+ MiEnablePagingOfDriver (DataTableEntry);
+ }
+
+return1:
+
+ if (FileHandle) {
+ ZwClose (FileHandle);
+ }
+ if (!NT_SUCCESS(Status)) {
+ ULONG ErrorParameters[ 3 ];
+ ULONG NumberOfParameters;
+ ULONG UnicodeStringParameterMask;
+ ULONG ErrorResponse;
+ ANSI_STRING AnsiString;
+ UNICODE_STRING ProcedureName;
+ UNICODE_STRING DriverName;
+
+ //
+ // Hard error time. A driver could not be loaded.
+ //
+
+ KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE);
+ ErrorParameters[ 0 ] = (ULONG)ImageFileName;
+ NumberOfParameters = 1;
+ UnicodeStringParameterMask = 1;
+
+ RtlInitUnicodeString( &ProcedureName, NULL );
+ if (Status == STATUS_DRIVER_ORDINAL_NOT_FOUND ||
+ Status == STATUS_DRIVER_ENTRYPOINT_NOT_FOUND ||
+ Status == STATUS_PROCEDURE_NOT_FOUND
+ ) {
+ NumberOfParameters = 3;
+ UnicodeStringParameterMask = 0x5;
+ RtlInitUnicodeString( &DriverName, MissingDriverName );
+ ErrorParameters[ 2 ] = (ULONG)&DriverName;
+ if ((ULONG)MissingProcedureName & 0xFFFF0000) {
+ //
+ // If not an ordinal, pass as unicode string
+ //
+
+ RtlInitAnsiString( &AnsiString, MissingProcedureName );
+ RtlAnsiStringToUnicodeString( &ProcedureName, &AnsiString, TRUE );
+ ErrorParameters[ 1 ] = (ULONG)&ProcedureName;
+ UnicodeStringParameterMask |= 0x2;
+ } else {
+ //
+ // Just pass ordinal values as is.
+ //
+
+ ErrorParameters[ 1 ] = (ULONG)MissingProcedureName;
+ }
+ } else {
+ NumberOfParameters = 2;
+ ErrorParameters[ 1 ] = (ULONG)Status;
+ Status = STATUS_DRIVER_UNABLE_TO_LOAD;
+ }
+
+ ZwRaiseHardError (Status,
+ NumberOfParameters,
+ UnicodeStringParameterMask,
+ ErrorParameters,
+ OptionOk,
+ &ErrorResponse);
+
+ if (ProcedureName.Buffer != NULL) {
+ RtlFreeUnicodeString( &ProcedureName );
+ }
+ return Status;
+ }
+
+return2:
+
+ KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE);
+ return Status;
+}
+
+
+NTSTATUS
+MiLoadImageSection (
+ IN PSECTION SectionPointer,
+ OUT PVOID *ImageBaseAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This routine loads the specified image into the kernel part of the
+ address space.
+
+Arguments:
+
+    SectionPointer - Supplies the section object for the image.
+
+    ImageBaseAddress - Returns the address at which the image header resides.
+
+Return Value:
+
+ Status of the operation.
+
+--*/
+
+{
+ ULONG PagesRequired = 0;
+ PMMPTE ProtoPte;
+ PMMPTE FirstPte;
+ PMMPTE LastPte;
+ PMMPTE PointerPte;
+ PEPROCESS Process;
+ ULONG NumberOfPtes;
+ MMPTE PteContents;
+ MMPTE TempPte;
+ PMMPFN Pfn1;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+ PVOID UserVa;
+ PVOID SystemVa;
+ NTSTATUS Status;
+ NTSTATUS ExceptionStatus;
+ PVOID Base;
+ ULONG ViewSize;
+ LARGE_INTEGER SectionOffset;
+ BOOLEAN LoadSymbols;
+
+ //
+ // Calculate the number of pages required to load this image.
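+    // Only prototype PTEs that are valid or whose protection is not
+    // no-access consume pages; no-access PTEs mark holes in the image.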
+ //
+
+ ProtoPte = SectionPointer->Segment->PrototypePte;
+ NumberOfPtes = SectionPointer->Segment->TotalNumberOfPtes;
+
+ while (NumberOfPtes != 0) {
+ PteContents = *ProtoPte;
+
+ if ((PteContents.u.Hard.Valid == 1) ||
+ (PteContents.u.Soft.Protection != MM_NOACCESS)) {
+ PagesRequired += 1;
+ }
+ NumberOfPtes -= 1;
+ ProtoPte += 1;
+ }
+
+ //
+ // See if ample pages exist to load this image.
+ //
+
+#if DBG
+ MiPagesConsumed = PagesRequired;
+#endif
+
+ LOCK_PFN (OldIrql);
+
+ if (MmResidentAvailablePages <= (LONG)PagesRequired) {
+ UNLOCK_PFN (OldIrql);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+ MmResidentAvailablePages -= PagesRequired;
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Reserve the necessary system address space.
+ //
+
+ FirstPte = MiReserveSystemPtes (SectionPointer->Segment->TotalNumberOfPtes,
+ SystemPteSpace,
+ 0,
+ 0,
+ FALSE );
+
+ if (FirstPte == NULL) {
+ LOCK_PFN (OldIrql);
+ MmResidentAvailablePages += PagesRequired;
+ UNLOCK_PFN (OldIrql);
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ //
+ // Map a view into the user portion of the address space.
+ //
+
+ Process = PsGetCurrentProcess();
+
+ ZERO_LARGE (SectionOffset);
+ Base = NULL;
+ ViewSize = 0;
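+
+    //
+    // Temporarily clear the kernel debugger symbol load flag so that no
+    // symbols are loaded for this transient user space mapping of the
+    // image; the flag is restored after the view is mapped.
+    //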
+ if ( NtGlobalFlag & FLG_ENABLE_KDEBUG_SYMBOL_LOAD ) {
+ LoadSymbols = TRUE;
+ NtGlobalFlag &= ~FLG_ENABLE_KDEBUG_SYMBOL_LOAD;
+ } else {
+ LoadSymbols = FALSE;
+ }
+ Status = MmMapViewOfSection ( SectionPointer,
+ Process,
+ &Base,
+ 0,
+ 0,
+ &SectionOffset,
+ &ViewSize,
+ ViewUnmap,
+ 0,
+ PAGE_EXECUTE);
+
+ if ( LoadSymbols ) {
+ NtGlobalFlag |= FLG_ENABLE_KDEBUG_SYMBOL_LOAD;
+ }
+ if (Status == STATUS_IMAGE_MACHINE_TYPE_MISMATCH) {
+ Status = STATUS_INVALID_IMAGE_FORMAT;
+ }
+
+ if (!NT_SUCCESS(Status)) {
+ LOCK_PFN (OldIrql);
+ MmResidentAvailablePages += PagesRequired;
+ UNLOCK_PFN (OldIrql);
+ MiReleaseSystemPtes (FirstPte,
+ SectionPointer->Segment->TotalNumberOfPtes,
+ SystemPteSpace);
+
+ return Status;
+ }
+
+ //
+    // Allocate physical pages and copy the image data.
+ //
+
+ ProtoPte = SectionPointer->Segment->PrototypePte;
+ NumberOfPtes = SectionPointer->Segment->TotalNumberOfPtes;
+ PointerPte = FirstPte;
+ SystemVa = MiGetVirtualAddressMappedByPte (PointerPte);
+ *ImageBaseAddress = SystemVa;
+ UserVa = Base;
+ TempPte = ValidKernelPte;
+
+ while (NumberOfPtes != 0) {
+ PteContents = *ProtoPte;
+ if ((PteContents.u.Hard.Valid == 1) ||
+ (PteContents.u.Soft.Protection != MM_NOACCESS)) {
+
+ LOCK_PFN (OldIrql);
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+ PageFrameIndex = MiRemoveAnyPage(
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+ PointerPte->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+ MiInitializePfn (PageFrameIndex, PointerPte, 1);
+ UNLOCK_PFN (OldIrql);
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+ LastPte = PointerPte;
+#if DBG
+
+ {
+ PMMPFN Pfn;
+ Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn->u1.WsIndex == 0);
+ }
+#endif //DBG
+
+ try {
+
+ RtlMoveMemory (SystemVa, UserVa, PAGE_SIZE);
+
+ } except (MiMapCacheExceptionFilter (&ExceptionStatus,
+ GetExceptionInformation())) {
+
+ //
+ // An exception occurred, unmap the view and
+ // return the error to the caller.
+ //
+
+ ProtoPte = FirstPte;
+ LOCK_PFN (OldIrql);
+ while (ProtoPte <= PointerPte) {
+ if (ProtoPte->u.Hard.Valid == 1) {
+
+ //
+ // Delete the page.
+ //
+
+ PageFrameIndex = ProtoPte->u.Hard.PageFrameNumber;
+
+ //
+ // Set the pointer to PTE as empty so the page
+ // is deleted when the reference count goes to zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+ MI_SET_PFN_DELETED (Pfn1);
+ MiDecrementShareCountOnly (PageFrameIndex);
+
+ *ProtoPte = ZeroPte;
+ }
+ ProtoPte += 1;
+ }
+
+ MmResidentAvailablePages += PagesRequired;
+ UNLOCK_PFN (OldIrql);
+ MiReleaseSystemPtes (FirstPte,
+ SectionPointer->Segment->TotalNumberOfPtes,
+ SystemPteSpace);
+ Status = MmUnmapViewOfSection (Process, Base);
+ ASSERT (NT_SUCCESS (Status));
+
+ return ExceptionStatus;
+ }
+
+ } else {
+
+ //
+ // PTE is no access.
+ //
+
+ *PointerPte = ZeroKernelPte;
+ }
+
+ NumberOfPtes -= 1;
+ ProtoPte += 1;
+ PointerPte += 1;
+ SystemVa = (PVOID)((ULONG)SystemVa + PAGE_SIZE);
+ UserVa = (PVOID)((ULONG)UserVa + PAGE_SIZE);
+ }
+
+ Status = MmUnmapViewOfSection (Process, Base);
+ ASSERT (NT_SUCCESS (Status));
+
+ //
+ // Indicate that this section has been loaded into the system.
+ //
+
+ SectionPointer->Segment->SystemImageBase = *ImageBaseAddress;
+
+ //
+ // Charge commitment for the number of pages that were used by
+ // the driver.
+ //
+
+ MiChargeCommitmentCantExpand (PagesRequired, TRUE);
+ MmDriverCommit += PagesRequired;
+ return Status;
+}
+
+VOID
+MmFreeDriverInitialization (
+ IN PVOID ImageHandle
+ )
+
+/*++
+
+Routine Description:
+
+    This routine removes the pages that contain relocation and debug
+    information from the address space of the driver.
+
+    NOTE: This routine looks at the last section defined in the image
+          header and, if that section is marked as DISCARDABLE in its
+          characteristics, removes it from the image.  This means
+          that all discardable sections at the end of the driver are
+          deleted.
+
+Arguments:
+
+    ImageHandle - Supplies the image handle (the loader data table entry)
+                  for the driver.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ KIRQL OldIrql;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ ULONG NumberOfPtes;
+ PVOID Base;
+ ULONG i;
+ PIMAGE_NT_HEADERS NtHeaders;
+ PIMAGE_SECTION_HEADER NtSection;
+ PIMAGE_SECTION_HEADER FoundSection;
+ ULONG PagesDeleted;
+ ULONG ResidentPages;
+
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+ DataTableEntry = (PLDR_DATA_TABLE_ENTRY)ImageHandle;
+ Base = DataTableEntry->DllBase;
+
+ NumberOfPtes = DataTableEntry->SizeOfImage >> PAGE_SHIFT;
+ LastPte = MiGetPteAddress (Base) + NumberOfPtes;
+
+ NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(Base);
+
+ NtSection = (PIMAGE_SECTION_HEADER)((ULONG)NtHeaders +
+ sizeof(ULONG) +
+ sizeof(IMAGE_FILE_HEADER) +
+ NtHeaders->FileHeader.SizeOfOptionalHeader
+ );
+
+ NtSection += NtHeaders->FileHeader.NumberOfSections;
+
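+    //
+    // Walk the section headers backwards from the last section,
+    // collecting the run of trailing discardable sections.
+    //
+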
+ FoundSection = NULL;
+ for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i++) {
+ NtSection -= 1;
+ if ((NtSection->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) != 0) {
+ FoundSection = NtSection;
+ } else {
+
+ //
+            // There was a non discardable section between this
+            // section and the last non discardable section; don't
+            // discard this section and don't look any further.
+ //
+
+ break;
+ }
+ }
+
+ if (FoundSection != NULL) {
+
+ PointerPte = MiGetPteAddress (ROUND_TO_PAGES (
+ (ULONG)Base + FoundSection->VirtualAddress));
+ NumberOfPtes = LastPte - PointerPte;
+
+ PagesDeleted = MiDeleteSystemPagableVm (PointerPte,
+ NumberOfPtes,
+ ZeroKernelPte.u.Long,
+ &ResidentPages);
+
+ MmResidentAvailablePages += PagesDeleted;
+ MiReturnCommitment (PagesDeleted);
+ MmDriverCommit -= PagesDeleted;
+#if DBG
+ MiPagesConsumed -= PagesDeleted;
+#endif
+ }
+
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return;
+}
+VOID
+MiEnablePagingOfDriver (
+ IN PVOID ImageHandle
+ )
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PMMPTE LastPte;
+ PMMPTE PointerPte;
+ PVOID Base;
+ ULONG i;
+ PIMAGE_NT_HEADERS NtHeaders;
+ PIMAGE_SECTION_HEADER FoundSection;
+
+ //
+    // Don't page kernel mode code if the customer does not want it paged.
+ //
+
+ if (MmDisablePagingExecutive) {
+ return;
+ }
+
+ //
+ // If the driver has pagable code, make it paged.
+ //
+
+ DataTableEntry = (PLDR_DATA_TABLE_ENTRY)ImageHandle;
+ Base = DataTableEntry->DllBase;
+
+ NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(Base);
+
+ FoundSection = (PIMAGE_SECTION_HEADER)((ULONG)NtHeaders +
+ sizeof(ULONG) +
+ sizeof(IMAGE_FILE_HEADER) +
+ NtHeaders->FileHeader.SizeOfOptionalHeader
+ );
+
+ i = NtHeaders->FileHeader.NumberOfSections;
+ PointerPte = NULL;
+
+ while (i > 0) {
+#if DBG
+ if ((*(PULONG)FoundSection->Name == 'tini') ||
+ (*(PULONG)FoundSection->Name == 'egap')) {
+ DbgPrint("driver %wZ has lower case sections (init or pagexxx)\n",
+ &DataTableEntry->FullDllName);
+ }
+#endif //DBG
+
+ //
+        // Mark as pagable any section whose name starts with the
+        // 4 characters PAGE or .eda (for the .edata section).
+ //
+
+ if ((*(PULONG)FoundSection->Name == 'EGAP') ||
+ (*(PULONG)FoundSection->Name == 'ade.')) {
+
+ //
+ // This section is pagable, save away the start and end.
+ //
+
+ if (PointerPte == NULL) {
+
+ //
+ // Previous section was NOT pagable, get the start address.
+ //
+
+ PointerPte = MiGetPteAddress (ROUND_TO_PAGES (
+ (ULONG)Base + FoundSection->VirtualAddress));
+ }
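+
+            //
+            // Compute the PTE that maps the last page of this section,
+            // allowing for the section's raw size being rounded up to
+            // the section alignment.
+            //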
+ LastPte = MiGetPteAddress ((ULONG)Base +
+ FoundSection->VirtualAddress +
+ (NtHeaders->OptionalHeader.SectionAlignment - 1) +
+ (FoundSection->SizeOfRawData - PAGE_SIZE));
+
+ } else {
+
+ //
+ // This section is not pagable, if the previous section was
+ // pagable, enable it.
+ //
+
+ if (PointerPte != NULL) {
+ MiSetPagingOfDriver (PointerPte, LastPte);
+ PointerPte = NULL;
+ }
+ }
+ i -= 1;
+ FoundSection += 1;
+ }
+ if (PointerPte != NULL) {
+ MiSetPagingOfDriver (PointerPte, LastPte);
+ }
+ return;
+}
+
+VOID
+MiSetPagingOfDriver (
+ IN PMMPTE PointerPte,
+ IN PMMPTE LastPte
+ )
+
+/*++
+
+Routine Description:
+
+ This routine marks the specified range of PTEs as pagable.
+
+Arguments:
+
+ PointerPte - Supplies the starting PTE.
+
+ LastPte - Supplies the ending PTE.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PVOID Base;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn;
+ MMPTE TempPte;
+ MMPTE PreviousPte;
+ KIRQL OldIrql1;
+ KIRQL OldIrql;
+
+ PAGED_CODE ();
+
+ if (MI_IS_PHYSICAL_ADDRESS(MiGetVirtualAddressMappedByPte(PointerPte))) {
+
+ //
+ // No need to lock physical addresses.
+ //
+
+ return;
+ }
+
+ //
+ // Lock this routine into memory.
+ //
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
+ LOCK_SYSTEM_WS (OldIrql1);
+ LOCK_PFN (OldIrql);
+
+ Base = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ while (PointerPte <= LastPte) {
+
+ //
+ // Check to make sure this PTE has not already been
+ // made pagable (or deleted). It is pagable if it
+ // is not valid, or if the PFN database wsindex element
+ // is non zero.
+ //
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn->u2.ShareCount == 1);
+
+ //
+ // Original PTE may need to be set for drivers loaded
+ // via osldr.
+ //
+
+ if (Pfn->OriginalPte.u.Long == 0) {
+ Pfn->OriginalPte.u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
+ }
+
+ if (Pfn->u1.WsIndex == 0) {
+
+ TempPte = *PointerPte;
+
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn->OriginalPte.u.Soft.Protection);
+
+ PreviousPte.u.Flush = KeFlushSingleTb (Base,
+ TRUE,
+ TRUE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Flush);
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn);
+
+ //
+ // Flush the translation buffer and decrement the number of valid
+ // PTEs within the containing page table page. Note that for a
+ // private page, the page table page is still needed because the
+            // page is in transition.
+ //
+
+ MiDecrementShareCount (PageFrameIndex);
+ MmResidentAvailablePages += 1;
+ MmTotalSystemDriverPages += 1;
+ }
+ }
+ Base = (PVOID)((PCHAR)Base + PAGE_SIZE);
+ PointerPte += 1;
+ }
+
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_SYSTEM_WS (OldIrql1);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return;
+}
+
+
+VOID
+MiLockPagesOfDriver (
+ IN PMMPTE PointerPte,
+ IN PMMPTE LastPte
+ )
+
+/*++
+
+Routine Description:
+
+ This routine marks the specified range of PTEs as NONpagable.
+
+Arguments:
+
+ PointerPte - Supplies the starting PTE.
+
+ LastPte - Supplies the ending PTE.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PVOID Base;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn;
+ MMPTE TempPte;
+ KIRQL OldIrql;
+ KIRQL OldIrql1;
+
+ PAGED_CODE ();
+
+ //
+ // Lock this routine in memory.
+ //
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
+ LOCK_SYSTEM_WS (OldIrql1);
+ LOCK_PFN (OldIrql);
+
+ Base = MiGetVirtualAddressMappedByPte (PointerPte);
+
+ while (PointerPte <= LastPte) {
+
+ //
+ // Check to make sure this PTE has not already been
+ // made pagable (or deleted). It is pagable if it
+ // is not valid, or if the PFN database wsindex element
+ // is non zero.
+ //
+
+ if (PointerPte->u.Hard.Valid == 1) {
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT (Pfn->u2.ShareCount == 1);
+
+ if (Pfn->u1.WsIndex == 0) {
+
+ TempPte = *PointerPte;
+
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn->OriginalPte.u.Soft.Protection);
+
+ KeFlushSingleTb (Base,
+ TRUE,
+ TRUE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Flush);
+
+ //
+ // Flush the translation buffer and decrement the number of valid
+ // PTEs within the containing page table page. Note that for a
+ // private page, the page table page is still needed because the
+                // page is in transition.
+ //
+
+                MiDecrementShareCount (PageFrameIndex);
+                MmResidentAvailablePages += 1;
+                MmTotalSystemDriverPages += 1;
+            }
+        }
+        Base = (PVOID)((PCHAR)Base + PAGE_SIZE);
+        PointerPte += 1;
+    }
+
+ UNLOCK_PFN (OldIrql);
+ UNLOCK_SYSTEM_WS (OldIrql1);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return;
+}
+
+
+PVOID
+MmPageEntireDriver (
+ IN PVOID AddressWithinSection
+ )
+
+/*++
+
+Routine Description:
+
+ This routine allows a driver to page out all of its code and
+ data regardless of the attributes of the various image sections.
+
+ Note, this routine can be called multiple times with no
+ intervening calls to MmResetDriverPaging.
+
+Arguments:
+
+ AddressWithinSection - Supplies an address within the driver, e.g.
+ DriverEntry.
+
+Return Value:
+
+ Base address of driver.
+
+Environment:
+
+ Kernel mode, APC_LEVEL or below.
+
+--*/
+
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PMMPTE FirstPte;
+ PMMPTE LastPte;
+ PVOID BaseAddress;
+
+ //
+ // Don't page kernel mode code if disabled via registry.
+ //
+
+ DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, FALSE);
+ if ((DataTableEntry->SectionPointer != (PVOID)0xffffffff) ||
+ (MmDisablePagingExecutive)) {
+
+ //
+ // Driver is mapped as image, always pagable.
+ //
+
+ return DataTableEntry->DllBase;
+ }
+ BaseAddress = DataTableEntry->DllBase;
+ FirstPte = MiGetPteAddress (DataTableEntry->DllBase);
+ LastPte = (FirstPte - 1) + (DataTableEntry->SizeOfImage >> PAGE_SHIFT);
+ MiSetPagingOfDriver (FirstPte, LastPte);
+
+ return BaseAddress;
+}
+
+
+VOID
+MmResetDriverPaging (
+ IN PVOID AddressWithinSection
+ )
+
+/*++
+
+Routine Description:
+
+    This routine resets the driver paging to what the image specified.
+ Hence image sections such as the IAT, .text, .data will be locked
+ down in memory.
+
+ Note, there is no requirement that MmPageEntireDriver was called.
+
+Arguments:
+
+ AddressWithinSection - Supplies an address within the driver, e.g.
+ DriverEntry.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC_LEVEL or below.
+
+--*/
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PMMPTE LastPte;
+ PMMPTE PointerPte;
+ PVOID Base;
+ ULONG i;
+ PIMAGE_NT_HEADERS NtHeaders;
+ PIMAGE_SECTION_HEADER FoundSection;
+ KIRQL OldIrql;
+
+ PAGED_CODE();
+
+ //
+ // Don't page kernel mode code if disabled via registry.
+ //
+
+ if (MmDisablePagingExecutive) {
+ return;
+ }
+
+ if (MI_IS_PHYSICAL_ADDRESS(AddressWithinSection)) {
+ return;
+ }
+
+ //
+ // If the driver has pagable code, make it paged.
+ //
+
+ DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, FALSE);
+
+ if (DataTableEntry->SectionPointer != (PVOID)0xFFFFFFFF) {
+
+ //
+ // Driver is mapped by image hence already paged.
+ //
+
+ return;
+ }
+
+ Base = DataTableEntry->DllBase;
+
+ NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(Base);
+
+ FoundSection = (PIMAGE_SECTION_HEADER)((ULONG)NtHeaders +
+ sizeof(ULONG) +
+ sizeof(IMAGE_FILE_HEADER) +
+ NtHeaders->FileHeader.SizeOfOptionalHeader
+ );
+
+ i = NtHeaders->FileHeader.NumberOfSections;
+ PointerPte = NULL;
+
+ while (i > 0) {
+#if DBG
+ if ((*(PULONG)FoundSection->Name == 'tini') ||
+ (*(PULONG)FoundSection->Name == 'egap')) {
+ DbgPrint("driver %wZ has lower case sections (init or pagexxx)\n",
+ &DataTableEntry->FullDllName);
+ }
+#endif //DBG
+
+ //
+ // Don't lock down code for sections marked as discardable or
+ // sections marked with the first 4 characters PAGE or .eda
+ // (for the .edata section) or INIT.
+ //
+
+ if (((FoundSection->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) != 0) ||
+ (*(PULONG)FoundSection->Name == 'EGAP') ||
+ (*(PULONG)FoundSection->Name == 'ade.') ||
+ (*(PULONG)FoundSection->Name == 'TINI')) {
+
+ NOTHING;
+
+ } else {
+
+ //
+ // This section is nonpagable.
+ //
+
+ PointerPte = MiGetPteAddress (
+ (ULONG)Base + FoundSection->VirtualAddress);
+ LastPte = MiGetPteAddress ((ULONG)Base +
+ FoundSection->VirtualAddress +
+ (FoundSection->SizeOfRawData - 1));
+ ASSERT (PointerPte <= LastPte);
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+ LOCK_PFN (OldIrql);
+ MiLockCode (PointerPte, LastPte, MM_LOCK_BY_NONPAGE);
+ UNLOCK_PFN (OldIrql);
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ }
+ i -= 1;
+ FoundSection += 1;
+ }
+ return;
+}
+
+NTSTATUS
+MiUnloadSystemImageByForce (
+ IN ULONG NumberOfPtes,
+ IN PVOID ImageBase
+ )
+
+{
+ LDR_DATA_TABLE_ENTRY DataTableEntry;
+
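+    //
+    // Build a zeroed stack-local data table entry describing only the
+    // address range; since InLoadOrderLinks.Flink is left NULL,
+    // MmUnloadSystemImage skips the loaded module list removal.
+    //
+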
+ RtlZeroMemory (&DataTableEntry, sizeof(LDR_DATA_TABLE_ENTRY));
+
+ DataTableEntry.DllBase = ImageBase;
+ DataTableEntry.SizeOfImage = NumberOfPtes << PAGE_SHIFT;
+
+ return MmUnloadSystemImage ((PVOID)&DataTableEntry);
+}
+
+
+NTSTATUS
+MmUnloadSystemImage (
+ IN PVOID ImageHandle
+ )
+
+/*++
+
+Routine Description:
+
+ This routine unloads a previously loaded system image and returns
+ the allocated resources.
+
+Arguments:
+
+    ImageHandle - Supplies the image handle (the loader data table entry)
+                  of the image to unload.
+
+Return Value:
+
+    Status of the operation.
+
+--*/
+
+{
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PMMPTE FirstPte;
+ ULONG PagesRequired;
+ ULONG ResidentPages;
+ PMMPTE PointerPte;
+ ULONG NumberOfPtes;
+ KIRQL OldIrql;
+ PVOID BasedAddress;
+
+ KeWaitForSingleObject (&MmSystemLoadLock,
+ WrVirtualMemory,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
+ DataTableEntry = (PLDR_DATA_TABLE_ENTRY)ImageHandle;
+ BasedAddress = DataTableEntry->DllBase;
+
+ //
+ // Unload symbols from debugger.
+ //
+
+ if (DataTableEntry->Flags & LDRP_DEBUG_SYMBOLS_LOADED) {
+
+ //
+ // TEMP TEMP TEMP rip out when debugger converted
+ //
+
+ ANSI_STRING AnsiName;
+ NTSTATUS Status;
+
+ Status = RtlUnicodeStringToAnsiString( &AnsiName,
+ &DataTableEntry->BaseDllName,
+ TRUE );
+
+ if (NT_SUCCESS( Status)) {
+ DbgUnLoadImageSymbols( &AnsiName,
+ BasedAddress,
+ (ULONG)-1);
+ RtlFreeAnsiString( &AnsiName );
+ }
+ }
+
+ FirstPte = MiGetPteAddress (BasedAddress);
+ PointerPte = FirstPte;
+ NumberOfPtes = DataTableEntry->SizeOfImage >> PAGE_SHIFT;
+
+ PagesRequired = MiDeleteSystemPagableVm (PointerPte,
+ NumberOfPtes,
+ ZeroKernelPte.u.Long,
+ &ResidentPages);
+
+ LOCK_PFN (OldIrql);
+ MmResidentAvailablePages += ResidentPages;
+ UNLOCK_PFN (OldIrql);
+ MiReleaseSystemPtes (FirstPte,
+ NumberOfPtes,
+ SystemPteSpace);
+ MiReturnCommitment (PagesRequired);
+ MmDriverCommit -= PagesRequired;
+
+ //
+ // Search the loaded module list for the data table entry that describes
+ // the DLL that was just unloaded. It is possible an entry is not in the
+    // list if a failure occurred at a point in loading the DLL just before
+ // the data table entry was generated.
+ //
+
+ if (DataTableEntry->InLoadOrderLinks.Flink != NULL) {
+ KeEnterCriticalRegion();
+ ExAcquireResourceExclusive (&PsLoadedModuleResource, TRUE);
+
+ ExAcquireSpinLock (&PsLoadedModuleSpinLock, &OldIrql);
+
+ RemoveEntryList(&DataTableEntry->InLoadOrderLinks);
+ ExReleaseSpinLock (&PsLoadedModuleSpinLock, OldIrql);
+ if (DataTableEntry->FullDllName.Buffer != NULL) {
+ ExFreePool (DataTableEntry->FullDllName.Buffer);
+ }
+ ExFreePool((PVOID)DataTableEntry);
+
+ ExReleaseResource (&PsLoadedModuleResource);
+ KeLeaveCriticalRegion();
+ }
+ MmUnlockPagableImageSection(ExPageLockHandle);
+
+ KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE);
+ return STATUS_SUCCESS;
+}
+
+
+NTSTATUS
+MiResolveImageReferences (
+ PVOID ImageBase,
+ IN PUNICODE_STRING ImageFileDirectory,
+ OUT PCHAR *MissingProcedureName,
+ OUT PWSTR *MissingDriverName
+ )
+
+/*++
+
+Routine Description:
+
+ This routine resolves the references from the newly loaded driver
+ to the kernel, hal and other drivers.
+
+Arguments:
+
+    ImageBase - Supplies the address at which the image header resides.
+
+    ImageFileDirectory - Supplies the directory from which to load
+                         referenced DLLs.
+
+Return Value:
+
+    Status of the operation.
+
+--*/
+
+{
+
+ PVOID ImportBase;
+ ULONG ImportSize;
+ PIMAGE_IMPORT_DESCRIPTOR ImportDescriptor;
+ NTSTATUS st;
+ ULONG ExportSize;
+ PIMAGE_EXPORT_DIRECTORY ExportDirectory;
+ PIMAGE_THUNK_DATA Thunk;
+ PSZ ImportName;
+ PLIST_ENTRY NextEntry;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ ANSI_STRING AnsiString;
+ UNICODE_STRING ImportDescriptorName_U;
+ UNICODE_STRING DllToLoad;
+ PVOID Section;
+ PVOID BaseAddress;
+ ULONG LinkWin32k = 0;
+ ULONG LinkNonWin32k = 0;
+
+ PAGED_CODE();
+
+ ImportDescriptor = (PIMAGE_IMPORT_DESCRIPTOR)RtlImageDirectoryEntryToData(
+ ImageBase,
+ TRUE,
+ IMAGE_DIRECTORY_ENTRY_IMPORT,
+ &ImportSize);
+
+ if (ImportDescriptor) {
+
+ //
+ // We do not support bound images in the kernel
+ //
+
+ if (ImportDescriptor->TimeDateStamp == (ULONG)-1) {
+#if DBG
+ KeBugCheckEx (BOUND_IMAGE_UNSUPPORTED,
+ (ULONG)ImportDescriptor,
+ (ULONG)ImageBase,
+ (ULONG)ImageFileDirectory,
+ (ULONG)ImportSize);
+#else
+ return (STATUS_PROCEDURE_NOT_FOUND);
+#endif
+ }
+
+ while (ImportDescriptor->Name && ImportDescriptor->FirstThunk) {
+
+ ImportName = (PSZ)((ULONG)ImageBase + ImportDescriptor->Name);
+
+ //
+ // A driver can link with win32k.sys if and only if it is a GDI
+ // driver.
+ // Also display drivers can only link to win32k.sys (and lego ...).
+ //
+            // So if we get a driver that links to win32k.sys and also
+            // links to any other non-exempt image, we will fail to load it.
+ //
+
+ LinkWin32k = LinkWin32k |
+ (!_strnicmp(ImportName, "win32k", sizeof("win32k") - 1));
+
+ //
+ // We don't want to count coverage, win32k and irt (lego) since
+ // display drivers CAN link against these.
+ //
+
+ LinkNonWin32k = LinkNonWin32k |
+ ((_strnicmp(ImportName, "win32k", sizeof("win32k") - 1)) &&
+ (_strnicmp(ImportName, "coverage", sizeof("coverage") - 1)) &&
+ (_strnicmp(ImportName, "irt", sizeof("irt") - 1)));
+
+
+ if (LinkNonWin32k && LinkWin32k) {
+ return (STATUS_PROCEDURE_NOT_FOUND);
+ }
+
+ if ((!_strnicmp(ImportName, "ntdll", sizeof("ntdll") - 1)) ||
+ (!_strnicmp(ImportName, "winsrv", sizeof("winsrv") - 1)) ||
+ (!_strnicmp(ImportName, "advapi32", sizeof("advapi32") - 1)) ||
+ (!_strnicmp(ImportName, "kernel32", sizeof("kernel32") - 1)) ||
+ (!_strnicmp(ImportName, "user32", sizeof("user32") - 1)) ||
+ (!_strnicmp(ImportName, "gdi32", sizeof("gdi32") - 1)) ) {
+
+ return (STATUS_PROCEDURE_NOT_FOUND);
+ }
+
+ReCheck:
+ RtlInitAnsiString(&AnsiString, ImportName);
+ st = RtlAnsiStringToUnicodeString(&ImportDescriptorName_U,
+ &AnsiString,
+ TRUE);
+ if (!NT_SUCCESS(st)) {
+ return st;
+ }
+
+ NextEntry = PsLoadedModuleList.Flink;
+ ImportBase = NULL;
+ while (NextEntry != &PsLoadedModuleList) {
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+ if (RtlEqualString((PSTRING)&ImportDescriptorName_U,
+ (PSTRING)&DataTableEntry->BaseDllName,
+ TRUE
+ )) {
+ ImportBase = DataTableEntry->DllBase;
+ break;
+ }
+ NextEntry = NextEntry->Flink;
+ }
+
+ if (!ImportBase) {
+
+ //
+ // The DLL name was not located, attempt to load this dll.
+ //
+
+ DllToLoad.MaximumLength = ImportDescriptorName_U.Length +
+ ImageFileDirectory->Length +
+ (USHORT)sizeof(WCHAR);
+
+ DllToLoad.Buffer = ExAllocatePoolWithTag (NonPagedPool,
+ DllToLoad.MaximumLength,
+ 'TDmM');
+
+ if (DllToLoad.Buffer == NULL) {
+ RtlFreeUnicodeString( &ImportDescriptorName_U );
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ DllToLoad.Length = ImageFileDirectory->Length;
+ RtlMoveMemory (DllToLoad.Buffer,
+ ImageFileDirectory->Buffer,
+ ImageFileDirectory->Length);
+
+ RtlAppendStringToString ((PSTRING)&DllToLoad,
+ (PSTRING)&ImportDescriptorName_U);
+
+ st = MmLoadSystemImage (&DllToLoad,
+ &Section,
+ &BaseAddress);
+
+ ExFreePool (DllToLoad.Buffer);
+ if (!NT_SUCCESS(st)) {
+ RtlFreeUnicodeString( &ImportDescriptorName_U );
+ return st;
+ }
+ goto ReCheck;
+ }
+
+ RtlFreeUnicodeString( &ImportDescriptorName_U );
+ *MissingDriverName = DataTableEntry->BaseDllName.Buffer;
+
+ ExportDirectory = (PIMAGE_EXPORT_DIRECTORY)RtlImageDirectoryEntryToData(
+ ImportBase,
+ TRUE,
+ IMAGE_DIRECTORY_ENTRY_EXPORT,
+ &ExportSize
+ );
+
+ if (!ExportDirectory) {
+ return STATUS_DRIVER_ENTRYPOINT_NOT_FOUND;
+ }
+
+ //
+ // Walk through the IAT and snap all the thunks.
+ //
+
+ if ( (Thunk = ImportDescriptor->FirstThunk) ) {
+ Thunk = (PIMAGE_THUNK_DATA)((ULONG)ImageBase + (ULONG)Thunk);
+ while (Thunk->u1.AddressOfData) {
+ st = MiSnapThunk(ImportBase,
+ ImageBase,
+ Thunk++,
+ ExportDirectory,
+ ExportSize,
+ FALSE,
+ MissingProcedureName
+ );
+ if (!NT_SUCCESS(st) ) {
+ return st;
+ }
+ }
+ }
+
+ ImportDescriptor++;
+ }
+ }
+ return STATUS_SUCCESS;
+}
+
+
+NTSTATUS
+MiSnapThunk(
+ IN PVOID DllBase,
+ IN PVOID ImageBase,
+ IN OUT PIMAGE_THUNK_DATA Thunk,
+ IN PIMAGE_EXPORT_DIRECTORY ExportDirectory,
+ IN ULONG ExportSize,
+ IN BOOLEAN SnapForwarder,
+ OUT PCHAR *MissingProcedureName
+ )
+
+/*++
+
+Routine Description:
+
+ This function snaps a thunk using the specified Export Section data.
+ If the section data does not support the thunk, then the thunk is
+ partially snapped (Dll field is still non-null, but snap address is
+ set).
+
+Arguments:
+
+    DllBase - Supplies the base of the DLL being snapped to.
+
+ ImageBase - Base of image that contains the thunks to snap.
+
+ Thunk - On input, supplies the thunk to snap. When successfully
+ snapped, the function field is set to point to the address in
+ the DLL, and the DLL field is set to NULL.
+
+    ExportDirectory - Supplies the Export Section data from a DLL.
+
+    ExportSize - Supplies the size, in bytes, of the export section data.
+
+    SnapForwarder - Supplies TRUE if the snap is for a forwarder, in which
+                    case AddressOfData is already set up.
+
+Return Value:
+
+
+ STATUS_SUCCESS or STATUS_DRIVER_ENTRYPOINT_NOT_FOUND or
+ STATUS_DRIVER_ORDINAL_NOT_FOUND
+
+--*/
+
+{
+
+ BOOLEAN Ordinal;
+ USHORT OrdinalNumber;
+ PULONG NameTableBase;
+ PUSHORT NameOrdinalTableBase;
+ PULONG Addr;
+ USHORT HintIndex;
+ ULONG High;
+ ULONG Low;
+ ULONG Middle;
+ LONG Result;
+ NTSTATUS Status;
+
+ PAGED_CODE();
+
+ //
+ // Determine if snap is by name, or by ordinal
+ //
+
+ Ordinal = (BOOLEAN)IMAGE_SNAP_BY_ORDINAL(Thunk->u1.Ordinal);
+
+ if (Ordinal && !SnapForwarder) {
+
+ OrdinalNumber = (USHORT)(IMAGE_ORDINAL(Thunk->u1.Ordinal) -
+ ExportDirectory->Base);
+
+ *MissingProcedureName = (PCHAR)(ULONG)OrdinalNumber;
+
+ } else {
+
+ //
+ // Change AddressOfData from an RVA to a VA.
+ //
+
+ if (!SnapForwarder) {
+ Thunk->u1.AddressOfData = (PIMAGE_IMPORT_BY_NAME)((ULONG)ImageBase +
+ (ULONG)Thunk->u1.AddressOfData);
+ }
+
+ strncpy( *MissingProcedureName,
+ &Thunk->u1.AddressOfData->Name[0],
+ MAXIMUM_FILENAME_LENGTH - 1
+ );
+
+ //
+ // Lookup Name in NameTable
+ //
+
+ NameTableBase = (PULONG)((ULONG)DllBase + (ULONG)ExportDirectory->AddressOfNames);
+ NameOrdinalTableBase = (PUSHORT)((ULONG)DllBase + (ULONG)ExportDirectory->AddressOfNameOrdinals);
+
+ //
+ // Before dropping into binary search, see if
+ // the hint index results in a successful
+ // match. If the hint index is zero, then
+ // drop into binary search.
+ //
+
+ HintIndex = Thunk->u1.AddressOfData->Hint;
+ if ((ULONG)HintIndex < ExportDirectory->NumberOfNames &&
+ !strcmp((PSZ)Thunk->u1.AddressOfData->Name,
+ (PSZ)((ULONG)DllBase + NameTableBase[HintIndex]))) {
+ OrdinalNumber = NameOrdinalTableBase[HintIndex];
+
+ } else {
+
+ //
+ // Lookup the import name in the name table using a binary search.
+ //
+
+ Low = 0;
+ High = ExportDirectory->NumberOfNames - 1;
+
+ while (High >= Low) {
+
+ //
+ // Compute the next probe index and compare the import name
+ // with the export name entry.
+ //
+
+ Middle = (Low + High) >> 1;
+ Result = strcmp(&Thunk->u1.AddressOfData->Name[0],
+ (PCHAR)((ULONG)DllBase + NameTableBase[Middle]));
+
+ if (Result < 0) {
+ High = Middle - 1;
+
+ } else if (Result > 0) {
+ Low = Middle + 1;
+
+ } else {
+ break;
+ }
+ }
+
+ //
+ // If the high index is less than the low index, then a matching
+ // table entry was not found. Otherwise, get the ordinal number
+ // from the ordinal table.
+ //
+
+ if (High < Low) {
+ return (STATUS_DRIVER_ENTRYPOINT_NOT_FOUND);
+ } else {
+ OrdinalNumber = NameOrdinalTableBase[Middle];
+ }
+ }
+ }
+
+ //
+ // If OrdinalNumber is not within the Export Address Table,
+    // then the DLL does not implement the function and an error
+    // status is returned.
+ //
+
+ if ((ULONG)OrdinalNumber >= ExportDirectory->NumberOfFunctions) {
+ Status = STATUS_DRIVER_ORDINAL_NOT_FOUND;
+
+ } else {
+
+ Addr = (PULONG)((ULONG)DllBase + (ULONG)ExportDirectory->AddressOfFunctions);
+ Thunk->u1.Function = (PULONG)((ULONG)DllBase + Addr[OrdinalNumber]);
+
+ Status = STATUS_SUCCESS;
+
+ if ( ((ULONG)Thunk->u1.Function > (ULONG)ExportDirectory) &&
+ ((ULONG)Thunk->u1.Function < ((ULONG)ExportDirectory + ExportSize)) ) {
+
+ UNICODE_STRING UnicodeString;
+ ANSI_STRING ForwardDllName;
+
+ PLIST_ENTRY NextEntry;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ ULONG ExportSize;
+ PIMAGE_EXPORT_DIRECTORY ExportDirectory;
+
+ Status = STATUS_DRIVER_ENTRYPOINT_NOT_FOUND;
+
+ //
+ // Include the dot in the length so we can do prefix later on.
+ //
+
+ ForwardDllName.Buffer = (PCHAR)Thunk->u1.Function;
+ ForwardDllName.Length = strchr(ForwardDllName.Buffer, '.') -
+ ForwardDllName.Buffer + 1;
+ ForwardDllName.MaximumLength = ForwardDllName.Length;
+
+ if (NT_SUCCESS(RtlAnsiStringToUnicodeString(&UnicodeString,
+ &ForwardDllName,
+ TRUE))) {
+
+ NextEntry = PsLoadedModuleList.Flink;
+
+ while (NextEntry != &PsLoadedModuleList) {
+
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ //
+ // We have to do a case INSENSITIVE comparison for
+ // forwarder because the linker just took what is in the
+ // def file, as opposed to looking in the exporting
+ // image for the name.
+                    // We also use the prefix function to ignore the .exe,
+                    // .sys, or .dll at the end.
+ //
+
+ if (RtlPrefixString((PSTRING)&UnicodeString,
+ (PSTRING)&DataTableEntry->BaseDllName,
+ TRUE)) {
+
+ ExportDirectory = (PIMAGE_EXPORT_DIRECTORY)
+ RtlImageDirectoryEntryToData(DataTableEntry->DllBase,
+ TRUE,
+ IMAGE_DIRECTORY_ENTRY_EXPORT,
+ &ExportSize);
+
+ if (ExportDirectory) {
+
+ IMAGE_THUNK_DATA thunkData;
+ PIMAGE_IMPORT_BY_NAME addressOfData;
+ ULONG length;
+
+                            // One extra byte for the terminating NULL.
+
+ length = strlen(ForwardDllName.Buffer +
+ ForwardDllName.Length) + 1;
+
+ addressOfData = (PIMAGE_IMPORT_BY_NAME)
+ ExAllocatePoolWithTag (PagedPool,
+ length +
+ sizeof(IMAGE_IMPORT_BY_NAME),
+ ' mM');
+
+ if (addressOfData) {
+
+ RtlCopyMemory(&(addressOfData->Name[0]),
+ ForwardDllName.Buffer +
+ ForwardDllName.Length,
+ length);
+
+ addressOfData->Hint = 0;
+
+ thunkData.u1.AddressOfData = addressOfData;
+
+ Status = MiSnapThunk(DataTableEntry->DllBase,
+ ImageBase,
+ &thunkData,
+ ExportDirectory,
+ ExportSize,
+ TRUE,
+ MissingProcedureName
+ );
+
+ ExFreePool(addressOfData);
+
+ Thunk->u1 = thunkData.u1;
+ }
+ }
+
+ break;
+ }
+
+ NextEntry = NextEntry->Flink;
+ }
+
+ RtlFreeUnicodeString(&UnicodeString);
+ }
+
+ }
+
+ }
+ return Status;
+}
+#if 0
+PVOID
+MiLookupImageSectionByName (
+ IN PVOID Base,
+ IN BOOLEAN MappedAsImage,
+ IN PCHAR SectionName,
+ OUT PULONG SectionSize
+ )
+
+/*++
+
+Routine Description:
+
+ This function locates a Directory Entry within the image header
+ and returns either the virtual address or seek address of the
+ data the Directory describes.
+
+Arguments:
+
+ Base - Supplies the base of the image or data file.
+
+ MappedAsImage - FALSE if the file is mapped as a data file.
+ - TRUE if the file is mapped as an image.
+
+ SectionName - Supplies the name of the section to lookup.
+
+ SectionSize - Return the size of the section.
+
+Return Value:
+
+ NULL - The file does not contain data for the specified section.
+
+ NON-NULL - Returns the address where the section is mapped in memory.
+
+--*/
+
+{
+ ULONG i, j, Match;
+ PIMAGE_NT_HEADERS NtHeaders;
+ PIMAGE_SECTION_HEADER NtSection;
+
+ NtHeaders = RtlImageNtHeader(Base);
+ NtSection = IMAGE_FIRST_SECTION( NtHeaders );
+ for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i++) {
+ Match = TRUE;
+ for (j = 0; j < IMAGE_SIZEOF_SHORT_NAME; j++) {
+ if (SectionName[j] != NtSection->Name[j]) {
+ Match = FALSE;
+ break;
+ }
+ if (SectionName[j] == '\0') {
+ break;
+ }
+ }
+ if (Match) {
+ break;
+ }
+ NtSection += 1;
+ }
+ if (Match) {
+ *SectionSize = NtSection->SizeOfRawData;
+ if (MappedAsImage) {
+ return( (PVOID)((ULONG)Base + NtSection->VirtualAddress));
+ } else {
+ return( (PVOID)((ULONG)Base + NtSection->PointerToRawData));
+ }
+ }
+ return( NULL );
+}
+#endif //0
+
+NTSTATUS
+MmCheckSystemImage(
+ IN HANDLE ImageFileHandle
+ )
+
+/*++
+
+Routine Description:
+
+ This function ensures the checksum for a system image is correct.
+
+Arguments:
+
+    ImageFileHandle - Supplies the file handle of the image.
+
+Return Value:
+
+ Status value.
+
+--*/
+
+{
+
+ NTSTATUS Status;
+ HANDLE Section;
+ PVOID ViewBase;
+ ULONG ViewSize;
+ IO_STATUS_BLOCK IoStatusBlock;
+ FILE_STANDARD_INFORMATION StandardInfo;
+
+ PAGED_CODE();
+
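+    //
+    // Map the file with SEC_COMMIT rather than SEC_IMAGE so that the
+    // raw on-disk image, headers included, can be checksummed.
+    //
+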
+ Status = NtCreateSection(
+ &Section,
+ SECTION_MAP_EXECUTE,
+ NULL,
+ NULL,
+ PAGE_EXECUTE,
+ SEC_COMMIT,
+ ImageFileHandle
+ );
+
+ if ( !NT_SUCCESS(Status) ) {
+ return Status;
+ }
+
+ ViewBase = NULL;
+ ViewSize = 0;
+
+ Status = NtMapViewOfSection(
+ Section,
+ NtCurrentProcess(),
+ (PVOID *)&ViewBase,
+ 0L,
+ 0L,
+ NULL,
+ &ViewSize,
+ ViewShare,
+ 0L,
+ PAGE_EXECUTE
+ );
+
+ if ( !NT_SUCCESS(Status) ) {
+ NtClose(Section);
+ return Status;
+ }
+
+ //
+    // Now the image is mapped as a data file.  Calculate its size and
+    // then check its checksum.
+ //
+
+ Status = NtQueryInformationFile(
+ ImageFileHandle,
+ &IoStatusBlock,
+ &StandardInfo,
+ sizeof(StandardInfo),
+ FileStandardInformation
+ );
+
+ if ( NT_SUCCESS(Status) ) {
+
+ try {
+ if (!LdrVerifyMappedImageMatchesChecksum(ViewBase,StandardInfo.EndOfFile.LowPart)) {
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
+ }
+#if !defined(NT_UP)
+ if ( !MmVerifyImageIsOkForMpUse(ViewBase) ) {
+ Status = STATUS_IMAGE_MP_UP_MISMATCH;
+ }
+#endif // NT_UP
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
+ }
+ }
+
+ NtUnmapViewOfSection(NtCurrentProcess(),ViewBase);
+ NtClose(Section);
+ return Status;
+}
+
+#if !defined(NT_UP)
+BOOLEAN
+MmVerifyImageIsOkForMpUse(
+ IN PVOID BaseAddress
+ )
+{
+ PIMAGE_NT_HEADERS NtHeaders;
+
+ PAGED_CODE();
+
+ //
+    // An image linked as UP-system-only (IMAGE_FILE_UP_SYSTEM_ONLY)
+    // cannot be used on a multiprocessor system.
+ //
+
+ NtHeaders = RtlImageNtHeader(BaseAddress);
+ if (NtHeaders != NULL) {
+ if ( KeNumberProcessors > 1 &&
+ (NtHeaders->FileHeader.Characteristics & IMAGE_FILE_UP_SYSTEM_ONLY) ) {
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+#endif // NT_UP
+
+
+ULONG
+MiDeleteSystemPagableVm (
+ IN PMMPTE PointerPte,
+ IN ULONG NumberOfPtes,
+ IN ULONG NewPteValue,
+ OUT PULONG ResidentPages
+ )
+
+/*++
+
+Routine Description:
+
+ This function deletes pageable system address space (paged pool
+ or driver pagable sections).
+
+Arguments:
+
+ PointerPte - Supplies the start of the PTE range to delete.
+
+ NumberOfPtes - Supplies the number of PTEs in the range.
+
+ NewPteValue - Supplies the new value for the PTE.
+
+ ResidentPages - Returns the number of resident pages freed.
+
+Return Value:
+
+ Returns the number of pages actually freed.
+
+--*/
+
+{
+ ULONG PageFrameIndex;
+ MMPTE PteContents;
+ PMMPFN Pfn1;
+ ULONG ValidPages = 0;
+ ULONG PagesRequired = 0;
+ MMPTE NewContents;
+ ULONG WsIndex;
+ KIRQL OldIrql;
+ MMPTE_FLUSH_LIST PteFlushList;
+
+ ASSERT (KeGetCurrentIrql() <= APC_LEVEL);
+
+ PteFlushList.Count = 0;
+ NewContents.u.Long = NewPteValue;
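+
+    //
+    // TB flushes for valid PTEs are batched in PteFlushList (up to
+    // MM_MAXIMUM_FLUSH_COUNT entries) and done with a single
+    // MiFlushPteList call when the scan completes.
+    //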
+ while (NumberOfPtes != 0) {
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Long != ZeroKernelPte.u.Long) {
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ LOCK_SYSTEM_WS (OldIrql)
+
+ PteContents = *(volatile MMPTE *)PointerPte;
+ if (PteContents.u.Hard.Valid == 0) {
+ UNLOCK_SYSTEM_WS (OldIrql);
+ continue;
+ }
+
+ //
+ // Delete the page.
+ //
+
+ PageFrameIndex = PteContents.u.Hard.PageFrameNumber;
+
+ //
+ // Set the pointer to PTE as empty so the page
+ // is deleted when the reference count goes to zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ //
+ // Check to see if this is a pagable page in which
+ // case it needs to be removed from the working set list.
+ //
+
+ WsIndex = Pfn1->u1.WsIndex;
+ if (WsIndex == 0) {
+ ValidPages += 1;
+ } else {
+ MiRemoveWsle (WsIndex,
+ MmSystemCacheWorkingSetList );
+ MiReleaseWsle (WsIndex, &MmSystemCacheWs);
+ }
+ UNLOCK_SYSTEM_WS (OldIrql);
+
+ LOCK_PFN (OldIrql);
+#if DBG
+ if ((Pfn1->u3.e2.ReferenceCount > 1) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ DbgPrint ("MM:SYSLOAD - deleting pool locked for I/O %lx\n",
+ PageFrameIndex);
+ ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
+ }
+#endif //DBG
+ MiDecrementShareAndValidCount (Pfn1->PteFrame);
+ MI_SET_PFN_DELETED (Pfn1);
+ MiDecrementShareCountOnly (PageFrameIndex);
+ *PointerPte = NewContents;
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Flush the TB for this page.
+ //
+
+ if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) {
+ PteFlushList.FlushPte[PteFlushList.Count] = PointerPte;
+ PteFlushList.FlushVa[PteFlushList.Count] =
+ MiGetVirtualAddressMappedByPte (PointerPte);
+ PteFlushList.Count += 1;
+ }
+
+ } else if (PteContents.u.Soft.Transition == 1) {
+
+ LOCK_PFN (OldIrql);
+
+ PteContents = *(volatile MMPTE *)PointerPte;
+
+ if (PteContents.u.Soft.Transition == 0) {
+ UNLOCK_PFN (OldIrql);
+ continue;
+ }
+
+ //
+ // Transition, release page.
+ //
+
+ PageFrameIndex = PteContents.u.Trans.PageFrameNumber;
+
+ //
+ // Set the pointer to PTE as empty so the page
+ // is deleted when the reference count goes to zero.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+ // Check the reference count for the page, if the reference
+ // count is zero, move the page to the free list, if the
+ // reference count is not zero, ignore this page. When the
+            // reference count goes to zero, it will be placed on the
+ // free list.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ MiUnlinkPageFromList (Pfn1);
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PageFrameIndex);
+ }
+#if DBG
+ if ((Pfn1->u3.e2.ReferenceCount > 1) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+ DbgPrint ("MM:SYSLOAD - deleting pool locked for I/O %lx\n",
+ PageFrameIndex);
+ DbgBreakPoint();
+ }
+#endif //DBG
+
+ *PointerPte = NewContents;
+ UNLOCK_PFN (OldIrql);
+ } else {
+
+ //
+ // Demand zero, release page file space.
+ //
+ if (PteContents.u.Soft.PageFileHigh != 0) {
+ LOCK_PFN (OldIrql);
+ MiReleasePageFileSpace (PteContents);
+ UNLOCK_PFN (OldIrql);
+ }
+
+ *PointerPte = NewContents;
+ }
+
+ PagesRequired += 1;
+ }
+
+ NumberOfPtes -= 1;
+ PointerPte += 1;
+ }
+ LOCK_PFN (OldIrql);
+ MiFlushPteList (&PteFlushList, TRUE, NewContents);
+ UNLOCK_PFN (OldIrql);
+
+ *ResidentPages = ValidPages;
+ return PagesRequired;
+}
+
+VOID
+MiSetImageProtectWrite (
+ IN PSEGMENT Segment
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the protection of all prototype PTEs to writable.
+
+Arguments:
+
+ Segment - a pointer to the segment to protect.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ MMPTE PteContents;
+
+ PointerPte = Segment->PrototypePte;
+ LastPte = PointerPte + Segment->NonExtendedPtes;
+
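+    //
+    // Walk the prototype PTEs.  A transition PTE must have its protection
+    // changed via MiSetProtectionOnTransitionPte, which returns TRUE when
+    // the same PTE must be re-examined; in that case the loop retries
+    // without advancing.
+    //
+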
+ do {
+ PteContents = *PointerPte;
+ ASSERT (PteContents.u.Hard.Valid == 0);
+ if (PteContents.u.Long != ZeroPte.u.Long) {
+ if ((PteContents.u.Soft.Prototype == 0) &&
+ (PteContents.u.Soft.Transition == 1)) {
+ if (MiSetProtectionOnTransitionPte (PointerPte,
+ MM_EXECUTE_READWRITE)) {
+ continue;
+ }
+ } else {
+ PointerPte->u.Soft.Protection = MM_EXECUTE_READWRITE;
+ }
+ }
+ PointerPte += 1;
+ } while (PointerPte < LastPte );
+ return;
+}
diff --git a/private/ntos/mm/sysptes.c b/private/ntos/mm/sysptes.c
new file mode 100644
index 000000000..d117890ae
--- /dev/null
+++ b/private/ntos/mm/sysptes.c
@@ -0,0 +1,1376 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sysptes.c
+
+Abstract:
+
+ This module contains the routines which reserve and release
+ system wide PTEs reserved within the non paged portion of the
+ system space. These PTEs are used for mapping I/O devices
+ and mapping kernel stacks for threads.
+
+Author:
+
+ Lou Perazzoli (loup) 6-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,MiInitializeSystemPtes)
+#endif
+
+
+ULONG MmTotalFreeSystemPtes[MaximumPtePoolTypes];
+ULONG MmSystemPtesStart[MaximumPtePoolTypes];
+ULONG MmSystemPtesEnd[MaximumPtePoolTypes];
+
+#define MM_MIN_SYSPTE_FREE 500
+#define MM_MAX_SYSPTE_FREE 3000
+
+PMMPTE MmFlushPte1;
+
+MMPTE MmFlushCounter;
+
+//
+// PTEs are binned at sizes 1, 2, 4, 8, and 16.
+//
+
+#ifdef _ALPHA_
+
+//
+// The Alpha has an 8k page size and kernel stacks consume 9 pages
+// (including the guard page).
+//
+
+ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,9,16};
+
+UCHAR MmSysPteTables[17] = {0,0,1,2,2,3,3,3,3,3,4,4,4,4,4,4,4};
+
+#else
+
+ULONG MmSysPteIndex[MM_SYS_PTE_TABLES_MAX] = {1,2,4,8,16};
+
+UCHAR MmSysPteTables[17] = {0,0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};
+#endif
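+
+//
+// MmSysPteIndex gives the allocation size of each bin; MmSysPteTables
+// maps a request of 0 through 16 PTEs to the index of the smallest bin
+// that can satisfy it.
+//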
+
+MMPTE MmFreeSysPteListBySize [MM_SYS_PTE_TABLES_MAX];
+PMMPTE MmLastSysPteListBySize [MM_SYS_PTE_TABLES_MAX];
+ULONG MmSysPteListBySizeCount [MM_SYS_PTE_TABLES_MAX];
+ULONG MmSysPteMinimumFree [MM_SYS_PTE_TABLES_MAX] = {100,50,30,20,20};
+
+//
+// Initial sizes for PTE lists.
+//
+
+#define MM_PTE_LIST_1 400
+#define MM_PTE_LIST_2 100
+#define MM_PTE_LIST_4 60
+#define MM_PTE_LIST_8 50
+#define MM_PTE_LIST_16 40
+
+#define MM_PTE_TABLE_LIMIT 16
+
+PMMPTE
+MiReserveSystemPtes2 (
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
+ IN ULONG Alignment,
+ IN ULONG Offset,
+ IN ULONG BugCheckOnFailure
+ );
+
+VOID
+MiFeedSysPtePool (
+ IN ULONG Index
+ );
+
+VOID
+MiDumpSystemPtes (
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
+ );
+
+ULONG
+MiCountFreeSystemPtes (
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
+ );
+
+
+PMMPTE
+MiReserveSystemPtes (
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
+ IN ULONG Alignment,
+ IN ULONG Offset,
+ IN ULONG BugCheckOnFailure
+ )
+
+/*++
+
+Routine Description:
+
+    This function locates the specified number of unused PTEs within
+    the non paged portion of system space.
+
+Arguments:
+
+ NumberOfPtes - Supplies the number of PTEs to locate.
+
+ SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
+ SystemPteSpace or NonPagedPoolExpansion.
+
+ Alignment - Supplies the virtual address alignment for the address
+ the returned PTE maps. For example, if the value is 64K,
+ the returned PTE will map an address on a 64K boundary.
+ An alignment of zero means to align on a page boundary.
+
+ Offset - Supplies the offset into the alignment for the virtual address.
+ For example, if the Alignment is 64k and the Offset is 4k,
+ the returned address will be 4k above a 64k boundary.
+
+ BugCheckOnFailure - Supplies FALSE if NULL should be returned if
+ the request cannot be satisfied, TRUE if
+ a bugcheck should be issued.
+
+Return Value:
+
+ Returns the address of the first PTE located.
+ NULL if no system PTEs can be located and BugCheckOnFailure is FALSE.
+
+Environment:
+
+ Kernel mode, DISPATCH_LEVEL or below.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE Previous;
+ KIRQL OldIrql;
+ ULONG PteMask;
+ ULONG MaskSize;
+ ULONG Index;
+
+ if (SystemPtePoolType == SystemPteSpace) {
+
+ MaskSize = (Alignment - 1) >> (PAGE_SHIFT - PTE_SHIFT);
+ PteMask = MaskSize & (Offset >> (PAGE_SHIFT - PTE_SHIFT));
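+
+        //
+        // Consecutive PTEs map consecutive pages, so a virtual address
+        // alignment of N corresponds to an alignment of
+        // N >> (PAGE_SHIFT - PTE_SHIFT) on the address of the PTE itself;
+        // the mask and offset are converted to PTE address terms above.
+        //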
+
+ //
+ // Acquire the system space lock to synchronize access to this
+ // routine.
+ //
+
+ ExAcquireSpinLock ( &MmSystemSpaceLock, &OldIrql );
+
+ if (NumberOfPtes <= MM_PTE_TABLE_LIMIT) {
+ Index = MmSysPteTables [NumberOfPtes];
+ ASSERT (NumberOfPtes <= MmSysPteIndex[Index]);
+ PointerPte = &MmFreeSysPteListBySize[Index];
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ PMMPTE PointerPte1;
+ PointerPte1 = &MmFreeSysPteListBySize[Index];
+ while (PointerPte1->u.List.NextEntry != MM_EMPTY_PTE_LIST) {
+ PMMPTE PointerFreedPte;
+ ULONG j;
+
+ PointerPte1 = MmSystemPteBase + PointerPte1->u.List.NextEntry;
+ PointerFreedPte = PointerPte1;
+ for (j = 0; j < MmSysPteIndex[Index]; j++) {
+ ASSERT (PointerFreedPte->u.Hard.Valid == 0);
+ PointerFreedPte++;
+ }
+ }
+ }
+#endif //DBG
+
+ Previous = PointerPte;
+
+ while (PointerPte->u.List.NextEntry != MM_EMPTY_PTE_LIST) {
+
+ //
+ // Try to find suitable PTEs with the proper alignment.
+ //
+
+ Previous = PointerPte;
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+ if (PointerPte == MmFlushPte1) {
+ KeFlushEntireTb (TRUE, TRUE);
+ MmFlushCounter.u.List.NextEntry += 1;
+ MmFlushPte1 = NULL;
+ }
+ if ((Alignment == 0) ||
+ (((ULONG)PointerPte & MaskSize) == PteMask)) {
+
+ //
+ // Proper alignment and offset, update list index.
+ //
+
+ ASSERT ((ULONG)(PointerPte->u.List.NextEntry + MmSystemPteBase) >=
+ MmSystemPtesStart[SystemPtePoolType] ||
+ PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
+ ASSERT ((ULONG)(PointerPte->u.List.NextEntry + MmSystemPteBase) <=
+ MmSystemPtesEnd[SystemPtePoolType] ||
+ PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
+
+ Previous->u.List.NextEntry = PointerPte->u.List.NextEntry;
+ MmSysPteListBySizeCount [Index] -= 1;
+
+ if (NumberOfPtes != 1) {
+
+ //
+ // Check to see if the TB should be flushed.
+ //
+
+ if ((PointerPte + 1)->u.List.NextEntry == MmFlushCounter.u.List.NextEntry) {
+ KeFlushEntireTb (TRUE, TRUE);
+ MmFlushCounter.u.List.NextEntry += 1;
+ MmFlushPte1 = NULL;
+ }
+ }
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+ MmLastSysPteListBySize[Index] = Previous;
+ }
+#if DBG
+
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ PMMPTE PointerPte1;
+ PointerPte1 = &MmFreeSysPteListBySize[Index];
+ while (PointerPte1->u.List.NextEntry != MM_EMPTY_PTE_LIST) {
+ PMMPTE PointerFreedPte;
+ ULONG j;
+
+ PointerPte1 = MmSystemPteBase + PointerPte1->u.List.NextEntry;
+ PointerFreedPte = PointerPte1;
+ for (j = 0; j < MmSysPteIndex[Index]; j++) {
+ ASSERT (PointerFreedPte->u.Hard.Valid == 0);
+ PointerFreedPte++;
+ }
+ }
+ }
+#endif //DBG
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql);
+
+#if DBG
+ PointerPte->u.List.NextEntry = 0xABCDE;
+ if (MmDebug & MM_DBG_SYS_PTES) {
+
+ PMMPTE PointerFreedPte;
+ ULONG j;
+
+ PointerFreedPte = PointerPte;
+ for (j = 0; j < MmSysPteIndex[Index]; j++) {
+ ASSERT (PointerFreedPte->u.Hard.Valid == 0);
+ PointerFreedPte++;
+ }
+ }
+ if (!((ULONG)PointerPte >= MmSystemPtesStart[SystemPtePoolType])) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x652,(ULONG)PointerPte,
+ NumberOfPtes,
+ SystemPtePoolType);
+ }
+ if (!((ULONG)PointerPte <= MmSystemPtesEnd[SystemPtePoolType])) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x653,(ULONG)PointerPte,
+ NumberOfPtes,
+ SystemPtePoolType); //fixfix make assert
+ }
+#endif //DBG
+
+ if (MmSysPteListBySizeCount[Index] <
+ MmSysPteMinimumFree[Index]) {
+ MiFeedSysPtePool (Index);
+ }
+ return PointerPte;
+ }
+ }
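+
+            //
+            // No suitably aligned entry was found on the binned list;
+            // round the request up to the bin size and fall through to
+            // allocate a full bin-sized block from the general pool.
+            //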
+ NumberOfPtes = MmSysPteIndex [Index];
+ }
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql);
+ }
+ PointerPte = MiReserveSystemPtes2 (NumberOfPtes,
+ SystemPtePoolType,
+ Alignment,
+ Offset,
+ BugCheckOnFailure);
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+
+ PMMPTE PointerFreedPte;
+ ULONG j;
+
+ PointerFreedPte = PointerPte;
+ for (j = 0; j < NumberOfPtes; j++) {
+ ASSERT (PointerFreedPte->u.Hard.Valid == 0);
+ PointerFreedPte++;
+ }
+ }
+#endif //DBG
+ return PointerPte;
+}
+
+VOID
+MiFeedSysPtePool (
+ IN ULONG Index
+ )
+
+/*++
+
+Routine Description:
+
+ This routine adds PTEs to the look aside lists.
+
+Arguments:
+
+ Index - Supplies the index for the look aside list to fill.
+
+Return Value:
+
+ None.
+
+
+Environment:
+
+ Kernel mode, internal to SysPtes.
+
+--*/
+
+{
+ ULONG i;
+ PMMPTE PointerPte;
+
+ if (MmTotalFreeSystemPtes[SystemPteSpace] < MM_MIN_SYSPTE_FREE) {
+ return;
+ }
+
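+    //
+    // Carve up to 10 bin-sized blocks from the general PTE pool and
+    // release them again to replenish the per-size look aside list.
+    //
+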
+ for (i = 0; i < 10 ; i++ ) {
+ PointerPte = MiReserveSystemPtes2 (MmSysPteIndex [Index],
+ SystemPteSpace,
+ 0,
+ 0,
+ FALSE);
+ if (PointerPte == NULL) {
+ return;
+ }
+ MiReleaseSystemPtes (PointerPte,
+ MmSysPteIndex [Index],
+ SystemPteSpace);
+ }
+ return;
+}
+
+
+PMMPTE
+MiReserveSystemPtes2 (
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
+ IN ULONG Alignment,
+ IN ULONG Offset,
+ IN ULONG BugCheckOnFailure
+ )
+
+/*++
+
+Routine Description:
+
+    This function locates the specified number of unused PTEs within
+    the non paged portion of system space.
+
+Arguments:
+
+ NumberOfPtes - Supplies the number of PTEs to locate.
+
+ SystemPtePoolType - Supplies the PTE type of the pool to expand, one of
+ SystemPteSpace or NonPagedPoolExpansion.
+
+ Alignment - Supplies the virtual address alignment for the address
+ the returned PTE maps. For example, if the value is 64K,
+ the returned PTE will map an address on a 64K boundary.
+ An alignment of zero means to align on a page boundary.
+
+ Offset - Supplies the offset into the alignment for the virtual address.
+ For example, if the Alignment is 64k and the Offset is 4k,
+ the returned address will be 4k above a 64k boundary.
+
+ BugCheckOnFailure - Supplies FALSE if NULL should be returned if
+ the request cannot be satisfied, TRUE if
+ a bugcheck should be issued.
+
+Return Value:
+
+ Returns the address of the first PTE located.
+ NULL if no system PTEs can be located and BugCheckOnFailure is FALSE.
+
+Environment:
+
+ Kernel mode, DISPATCH_LEVEL or below.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE PointerFollowingPte;
+ PMMPTE Previous;
+ ULONG SizeInSet;
+ KIRQL OldIrql;
+ ULONG MaskSize;
+ ULONG NumberOfRequiredPtes;
+ ULONG OffsetSum;
+ ULONG PtesToObtainAlignment;
+ PMMPTE NextSetPointer;
+ ULONG LeftInSet;
+ ULONG PteOffset;
+ MMPTE_FLUSH_LIST PteFlushList;
+
+ MaskSize = (Alignment - 1) >> (PAGE_SHIFT - PTE_SHIFT);
+
+ OffsetSum = (Offset >> (PAGE_SHIFT - PTE_SHIFT)) |
+ (Alignment >> (PAGE_SHIFT - PTE_SHIFT));
+
+ ExAcquireSpinLock ( &MmSystemSpaceLock, &OldIrql );
+
+ //
+    // The nonpaged PTE pool uses the invalid PTEs to define the pool
+    // structure.   A global pointer points to the first free set
+    // in the list; each free set contains the number free and a pointer
+    // to the next free set.  The free sets are kept in an ordered list
+    // such that the pointer to the next free set is always greater
+    // than the address of the current free set.
+    //
+    // So as not to limit the size of this pool, two PTEs are used
+    // to define a free region.  If the region is a single PTE, the
+    // Prototype field within the PTE is set indicating that the set
+    // consists of a single PTE.
+ //
+ // The page frame number field is used to define the next set
+ // and the number free. The two flavors are:
+ //
+ // o V
+ // n l
+ // e d
+ // +-----------------------+-+----------+
+ // | next set |0|0 0|
+ // +-----------------------+-+----------+
+ // | number in this set |0|0 0|
+ // +-----------------------+-+----------+
+ //
+ //
+ // +-----------------------+-+----------+
+ // | next set |1|0 0|
+ // +-----------------------+-+----------+
+ // ...
+ //
+
+ PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
+ Previous = PointerPte;
+
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+
+ //
+ // End of list and none found, return NULL or bugcheck.
+ //
+
+ if (BugCheckOnFailure) {
+ KeBugCheckEx (NO_MORE_SYSTEM_PTES,
+ (ULONG)SystemPtePoolType,
+ NumberOfPtes,
+ MmTotalFreeSystemPtes[SystemPtePoolType],
+ MmNumberOfSystemPtes);
+ }
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+ return NULL;
+ }
+
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+
+ if (Alignment <= PAGE_SIZE) {
+
+ //
+        // No alignment issues to deal with.
+ //
+
+ while (TRUE) {
+
+ if (PointerPte->u.List.OneEntry) {
+ SizeInSet = 1;
+
+ } else {
+
+ PointerFollowingPte = PointerPte + 1;
+ SizeInSet = PointerFollowingPte->u.List.NextEntry;
+ }
+
+ if (NumberOfPtes < SizeInSet) {
+
+ //
+ // Get the PTEs from this set and reduce the size of the
+ // set. Note that the size of the current set cannot be 1.
+ //
+
+ if ((SizeInSet - NumberOfPtes) == 1) {
+
+ //
+ // Collapse to the single PTE format.
+ //
+
+ PointerPte->u.List.OneEntry = 1;
+
+ } else {
+
+ PointerFollowingPte->u.List.NextEntry = SizeInSet - NumberOfPtes;
+
+ //
+ // Get the required PTEs from the end of the set.
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ MiDumpSystemPtes(SystemPtePoolType);
+ PointerFollowingPte = PointerPte + (SizeInSet - NumberOfPtes);
+ DbgPrint("allocated 0x%lx Ptes at %lx\n",NumberOfPtes,PointerFollowingPte);
+ }
+#endif //DBG
+ }
+
+ MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
+ MiCountFreeSystemPtes (SystemPtePoolType));
+ }
+#endif //DBG
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+ PointerPte = PointerPte + (SizeInSet - NumberOfPtes);
+ goto Flush;
+ }
+
+ if (NumberOfPtes == SizeInSet) {
+
+ //
+ // Satisfy the request with this complete set and change
+ // the list to reflect the fact that this set is gone.
+ //
+
+ Previous->u.List.NextEntry = PointerPte->u.List.NextEntry;
+
+ //
+ // Release the system PTE lock.
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ MiDumpSystemPtes(SystemPtePoolType);
+ PointerFollowingPte = PointerPte + (SizeInSet - NumberOfPtes);
+ DbgPrint("allocated 0x%lx Ptes at %lx\n",NumberOfPtes,PointerFollowingPte);
+ }
+#endif
+
+ MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
+ MiCountFreeSystemPtes (SystemPtePoolType));
+ }
+#endif //DBG
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+ goto Flush;
+ }
+
+ //
+ // Point to the next set and try again
+ //
+
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+
+ //
+ // End of list and none found, return NULL or bugcheck.
+ //
+
+ if (BugCheckOnFailure) {
+ KeBugCheckEx (NO_MORE_SYSTEM_PTES,
+ (ULONG)SystemPtePoolType,
+ NumberOfPtes,
+ MmTotalFreeSystemPtes[SystemPtePoolType],
+ MmNumberOfSystemPtes);
+ }
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+ return NULL;
+ }
+ Previous = PointerPte;
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+ ASSERT (PointerPte > Previous);
+ }
+
+ } else {
+
+ //
+ // Deal with the alignment issues.
+ //
+
+ while (TRUE) {
+
+ if (PointerPte->u.List.OneEntry) {
+ SizeInSet = 1;
+
+ } else {
+
+ PointerFollowingPte = PointerPte + 1;
+ SizeInSet = PointerFollowingPte->u.List.NextEntry;
+ }
+
+ PtesToObtainAlignment =
+ (((OffsetSum - ((ULONG)PointerPte & MaskSize)) & MaskSize) >>
+ PTE_SHIFT);
+
+ NumberOfRequiredPtes = NumberOfPtes + PtesToObtainAlignment;
+
+ if (NumberOfRequiredPtes < SizeInSet) {
+
+ //
+ // Get the PTEs from this set and reduce the size of the
+ // set. Note that the size of the current set cannot be 1.
+ //
+                // This current block will be split into 2 blocks if
+                // the PointerPte does not match the alignment.
+ //
+
+ //
+ // Check to see if the first PTE is on the proper
+ // alignment, if so, eliminate this block.
+ //
+
+ LeftInSet = SizeInSet - NumberOfRequiredPtes;
+
+ //
+ // Set up the new set at the end of this block.
+ //
+
+ NextSetPointer = PointerPte + NumberOfRequiredPtes;
+ NextSetPointer->u.List.NextEntry =
+ PointerPte->u.List.NextEntry;
+
+ PteOffset = NextSetPointer - MmSystemPteBase;
+
+ if (PtesToObtainAlignment == 0) {
+
+ Previous->u.List.NextEntry += NumberOfRequiredPtes;
+
+ } else {
+
+ //
+ // Point to the new set at the end of the block
+ // we are giving away.
+ //
+
+ PointerPte->u.List.NextEntry = PteOffset;
+
+ //
+ // Update the size of the current set.
+ //
+
+ if (PtesToObtainAlignment == 1) {
+
+ //
+ // Collapse to the single PTE format.
+ //
+
+ PointerPte->u.List.OneEntry = 1;
+
+ } else {
+
+ //
+ // Set the set size in the next PTE.
+ //
+
+ PointerFollowingPte->u.List.NextEntry =
+ PtesToObtainAlignment;
+ }
+ }
+
+ //
+ // Set up the new set at the end of the block.
+ //
+
+ if (LeftInSet == 1) {
+ NextSetPointer->u.List.OneEntry = 1;
+ } else {
+ NextSetPointer->u.List.OneEntry = 0;
+ NextSetPointer += 1;
+ NextSetPointer->u.List.NextEntry = LeftInSet;
+ }
+ MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
+ MiCountFreeSystemPtes (SystemPtePoolType));
+ }
+#endif //DBG
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+ PointerPte = PointerPte + PtesToObtainAlignment;
+ goto Flush;
+ }
+
+ if (NumberOfRequiredPtes == SizeInSet) {
+
+ //
+ // Satisfy the request with this complete set and change
+ // the list to reflect the fact that this set is gone.
+ //
+
+ if (PtesToObtainAlignment == 0) {
+
+ //
+                    // This block exactly satisfies the request.
+ //
+
+ Previous->u.List.NextEntry =
+ PointerPte->u.List.NextEntry;
+
+ } else {
+
+ //
+ // A portion at the start of this block remains.
+ //
+
+ if (PtesToObtainAlignment == 1) {
+
+ //
+ // Collapse to the single PTE format.
+ //
+
+ PointerPte->u.List.OneEntry = 1;
+
+ } else {
+ PointerFollowingPte->u.List.NextEntry =
+ PtesToObtainAlignment;
+
+ }
+ }
+
+ MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
+ MiCountFreeSystemPtes (SystemPtePoolType));
+ }
+#endif //DBG
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+
+ PointerPte = PointerPte + PtesToObtainAlignment;
+ goto Flush;
+ }
+
+ //
+ // Point to the next set and try again
+ //
+
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+
+ //
+ // End of list and none found, return NULL or bugcheck.
+ //
+
+ if (BugCheckOnFailure) {
+ KeBugCheckEx (NO_MORE_SYSTEM_PTES,
+ (ULONG)SystemPtePoolType,
+ NumberOfPtes,
+ MmTotalFreeSystemPtes[SystemPtePoolType],
+ MmNumberOfSystemPtes);
+ }
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+ return NULL;
+ }
+ Previous = PointerPte;
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+ ASSERT (PointerPte > Previous);
+ }
+ }
+Flush:
+
+ if (SystemPtePoolType == SystemPteSpace) {
+ PVOID BaseAddress;
+ ULONG j;
+
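+        //
+        // Zero each PTE being handed out and flush any stale TB
+        // entries for the virtual addresses they map, so the caller
+        // receives clean, invalid PTEs.
+        //
+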
+ PteFlushList.Count = 0;
+ Previous = PointerPte;
+ BaseAddress = MiGetVirtualAddressMappedByPte (Previous);
+
+ for (j = 0; j < NumberOfPtes ; j++) {
+ if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) {
+ PteFlushList.FlushPte[PteFlushList.Count] = Previous;
+ PteFlushList.FlushVa[PteFlushList.Count] = BaseAddress;
+ PteFlushList.Count += 1;
+ }
+ *Previous = ZeroKernelPte;
+ BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);
+ Previous++;
+ }
+
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ MiFlushPteList (&PteFlushList, TRUE, ZeroKernelPte);
+ KeLowerIrql (OldIrql);
+ }
+ return PointerPte;
+}
+
+VOID
+MiReleaseSystemPtes (
+ IN PMMPTE StartingPte,
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases the specified number of PTEs
+ within the non paged portion of system space.
+
+ Note that the PTEs must be invalid and the page frame number
+ must have been set to zero.
+
+Arguments:
+
+ StartingPte - Supplies the address of the first PTE to release.
+
+ NumberOfPtes - Supplies the number of PTEs to release.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ ULONG Size;
+ ULONG i;
+ ULONG PteOffset;
+ PMMPTE PointerPte;
+ PMMPTE PointerFollowingPte;
+ PMMPTE NextPte;
+ KIRQL OldIrql;
+ ULONG Index;
+ MMPTE TempPte;
+
+ //
+ // Check to make sure the PTEs don't map anything.
+ //
+
+ ASSERT (NumberOfPtes != 0);
+#if DBG
+ if (!((ULONG)StartingPte >= MmSystemPtesStart[SystemPtePoolType])) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x656,(ULONG)StartingPte,
+ NumberOfPtes,
+ SystemPtePoolType);
+ }
+
+ if (!((ULONG)StartingPte <= MmSystemPtesEnd[SystemPtePoolType])) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x657,(ULONG)StartingPte,
+ NumberOfPtes,
+ SystemPtePoolType);
+ }
+#endif //DBG
+
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ DbgPrint("releasing 0x%lx system PTEs at location %lx\n",NumberOfPtes,StartingPte);
+ }
+#endif
+
+ //
+ // Zero PTEs.
+ //
+
+ RtlFillMemoryUlong (StartingPte,
+ NumberOfPtes * sizeof (MMPTE),
+ ZeroKernelPte.u.Long);
+
+ //
+ // Acquire system space spin lock to synchronize access.
+ //
+
+ PteOffset = StartingPte - MmSystemPteBase;
+
+ ExAcquireSpinLock ( &MmSystemSpaceLock, &OldIrql );
+
+ if ((SystemPtePoolType == SystemPteSpace) &&
+ (NumberOfPtes <= MM_PTE_TABLE_LIMIT)) {
+
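+        //
+        // Small releases to the SystemPteSpace pool are cached on
+        // per-size lists rather than returned directly to the free
+        // list; map the requested count to its size bin.
+        //
+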
+ Index = MmSysPteTables [NumberOfPtes];
+ NumberOfPtes = MmSysPteIndex [Index];
+
+ if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MIN_SYSPTE_FREE) {
+
+ //
+            // Don't add to the by-size list if it already holds more
+            // than the minimum free target plus 15 entries.
+ //
+
+ i = MmSysPteMinimumFree[Index];
+ if (MmTotalFreeSystemPtes[SystemPteSpace] >= MM_MAX_SYSPTE_FREE) {
+
+ //
+                // Lots of free PTEs, quadruple the limit.
+ //
+
+ i = i * 4;
+ }
+ i += 15;
+ if (MmSysPteListBySizeCount[Index] <= i) {
+
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ PMMPTE PointerPte1;
+
+ PointerPte1 = &MmFreeSysPteListBySize[Index];
+ while (PointerPte1->u.List.NextEntry != MM_EMPTY_PTE_LIST) {
+ PMMPTE PointerFreedPte;
+ ULONG j;
+
+ PointerPte1 = MmSystemPteBase + PointerPte1->u.List.NextEntry;
+ PointerFreedPte = PointerPte1;
+ for (j = 0; j < MmSysPteIndex[Index]; j++) {
+ ASSERT (PointerFreedPte->u.Hard.Valid == 0);
+ PointerFreedPte++;
+ }
+ }
+ }
+#endif //DBG
+ MmSysPteListBySizeCount [Index] += 1;
+ PointerPte = MmLastSysPteListBySize[Index];
+ ASSERT (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);
+ PointerPte->u.List.NextEntry = PteOffset;
+ MmLastSysPteListBySize[Index] = StartingPte;
+ StartingPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
+
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ PMMPTE PointerPte1;
+ PointerPte1 = &MmFreeSysPteListBySize[Index];
+ while (PointerPte1->u.List.NextEntry != MM_EMPTY_PTE_LIST) {
+ PMMPTE PointerFreedPte;
+ ULONG j;
+
+ PointerPte1 = MmSystemPteBase + PointerPte1->u.List.NextEntry;
+ PointerFreedPte = PointerPte1;
+ for (j = 0; j < MmSysPteIndex[Index]; j++) {
+ ASSERT (PointerFreedPte->u.Hard.Valid == 0);
+ PointerFreedPte++;
+ }
+ }
+ }
+#endif //DBG
+ if (NumberOfPtes == 1) {
+ if (MmFlushPte1 == NULL) {
+ MmFlushPte1 = StartingPte;
+ }
+ } else {
+ (StartingPte + 1)->u.List.NextEntry = MmFlushCounter.u.List.NextEntry;
+ }
+
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql);
+ return;
+ }
+ }
+ }
+
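+    //
+    // Return the PTEs to the general free list, which is kept
+    // sorted by address so adjacent free sets can be coalesced
+    // below.
+    //
+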
+ MmTotalFreeSystemPtes[SystemPtePoolType] += NumberOfPtes;
+
+ PteOffset = StartingPte - MmSystemPteBase;
+ PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
+
+ while (TRUE) {
+ NextPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+ if (PteOffset < PointerPte->u.List.NextEntry) {
+
+ //
+ // Insert in the list at this point. The
+ // previous one should point to the new freed set and
+ // the new freed set should point to the place
+ // the previous set points to.
+ //
+ // Attempt to combine the clusters before we
+ // insert.
+ //
+ // Locate the end of the current structure.
+ //
+
+ ASSERT ((StartingPte + NumberOfPtes) <= NextPte);
+
+ PointerFollowingPte = PointerPte + 1;
+ if (PointerPte->u.List.OneEntry) {
+ Size = 1;
+ } else {
+ Size = PointerFollowingPte->u.List.NextEntry;
+ }
+ if ((PointerPte + Size) == StartingPte) {
+
+ //
+ // We can combine the clusters.
+ //
+
+ NumberOfPtes = Size + NumberOfPtes;
+ PointerFollowingPte->u.List.NextEntry = NumberOfPtes;
+ PointerPte->u.List.OneEntry = 0;
+
+ //
+ // Point the starting PTE to the beginning of
+ // the new free set and try to combine with the
+ // following free cluster.
+ //
+
+ StartingPte = PointerPte;
+
+ } else {
+
+ //
+ // Can't combine with previous. Make this Pte the
+ // start of a cluster.
+ //
+
+ //
+ // Point this cluster to the next cluster.
+ //
+
+ StartingPte->u.List.NextEntry = PointerPte->u.List.NextEntry;
+
+ //
+ // Point the current cluster to this cluster.
+ //
+
+ PointerPte->u.List.NextEntry = PteOffset;
+
+ //
+ // Set the size of this cluster.
+ //
+
+ if (NumberOfPtes == 1) {
+ StartingPte->u.List.OneEntry = 1;
+
+ } else {
+ StartingPte->u.List.OneEntry = 0;
+ PointerFollowingPte = StartingPte + 1;
+ PointerFollowingPte->u.List.NextEntry = NumberOfPtes;
+ }
+ }
+
+ //
+ // Attempt to combine the newly created cluster with
+ // the following cluster.
+ //
+
+ if ((StartingPte + NumberOfPtes) == NextPte) {
+
+ //
+ // Combine with following cluster.
+ //
+
+ //
+ // Set the next cluster to the value contained in the
+ // cluster we are merging into this one.
+ //
+
+ StartingPte->u.List.NextEntry = NextPte->u.List.NextEntry;
+ StartingPte->u.List.OneEntry = 0;
+ PointerFollowingPte = StartingPte + 1;
+
+ if (NextPte->u.List.OneEntry) {
+ Size = 1;
+
+ } else {
+ NextPte++;
+ Size = NextPte->u.List.NextEntry;
+ }
+ PointerFollowingPte->u.List.NextEntry = NumberOfPtes + Size;
+ }
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ MiDumpSystemPtes(SystemPtePoolType);
+ }
+#endif
+
+#if DBG
+ if (MmDebug & MM_DBG_SYS_PTES) {
+ ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
+ MiCountFreeSystemPtes (SystemPtePoolType));
+ }
+#endif //DBG
+ ExReleaseSpinLock ( &MmSystemSpaceLock, OldIrql );
+ return;
+ }
+
+ //
+ // Point to next freed cluster.
+ //
+
+ PointerPte = NextPte;
+ }
+}
+
+VOID
+MiInitializeSystemPtes (
+ IN PMMPTE StartingPte,
+ IN ULONG NumberOfPtes,
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
+ )
+
+/*++
+
+Routine Description:
+
+ This routine initializes the system PTE pool.
+
+Arguments:
+
+ StartingPte - Supplies the address of the first PTE to put in the pool.
+
+ NumberOfPtes - Supplies the number of PTEs to put in the pool.
+
+Return Value:
+
+ none.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ LONG i;
+ LONG j;
+
+ //
+ // Set the base of the system PTE pool to this PTE.
+ //
+
+ MmSystemPteBase = MiGetPteAddress (0xC0000000);
+ MmSystemPtesStart[SystemPtePoolType] = (ULONG)StartingPte;
+ MmSystemPtesEnd[SystemPtePoolType] = (ULONG)((StartingPte + NumberOfPtes -1));
+
+ if (NumberOfPtes <= 1) {
+
+ //
+        // Not enough PTEs to make a valid chain, just indicate that
+        // no PTEs are free.
+ //
+
+ MmFirstFreeSystemPte[SystemPtePoolType] = ZeroKernelPte;
+ MmFirstFreeSystemPte[SystemPtePoolType].u.List.NextEntry =
+ MM_EMPTY_LIST;
+ return;
+
+ }
+
+ //
+    // Zero the system PTE pool.
+ //
+
+ RtlFillMemoryUlong (StartingPte,
+ NumberOfPtes * sizeof (MMPTE),
+ ZeroKernelPte.u.Long);
+
+ //
+ // The page frame field points to the next cluster. As we only
+ // have one cluster at initialization time, mark it as the last
+ // cluster.
+ //
+
+ StartingPte->u.List.NextEntry = MM_EMPTY_LIST;
+
+ MmFirstFreeSystemPte[SystemPtePoolType] = ZeroKernelPte;
+ MmFirstFreeSystemPte[SystemPtePoolType].u.List.NextEntry =
+ StartingPte - MmSystemPteBase;
+
+ //
+ // Point to the next PTE to fill in the size of this cluster.
+ //
+
+ StartingPte++;
+ *StartingPte = ZeroKernelPte;
+ StartingPte->u.List.NextEntry = NumberOfPtes;
+
+ MmTotalFreeSystemPtes[SystemPtePoolType] = NumberOfPtes;
+ ASSERT (MmTotalFreeSystemPtes[SystemPtePoolType] ==
+ MiCountFreeSystemPtes (SystemPtePoolType));
+
+ if (SystemPtePoolType == SystemPteSpace) {
+
+ ULONG Lists[MM_SYS_PTE_TABLES_MAX] = {MM_PTE_LIST_1, MM_PTE_LIST_2, MM_PTE_LIST_4, MM_PTE_LIST_8, MM_PTE_LIST_16};
+ PMMPTE PointerPte;
+ ULONG total;
+
+ for (j = 0; j < MM_SYS_PTE_TABLES_MAX ; j++) {
+ MmFreeSysPteListBySize [j].u.List.NextEntry = MM_EMPTY_PTE_LIST;
+ MmLastSysPteListBySize [j] = &MmFreeSysPteListBySize [j];
+ }
+ MmFlushCounter.u.List.NextEntry += 1;
+
+ //
+ // Initialize the by size lists.
+ //
+
+ total = MM_PTE_LIST_1 * MmSysPteIndex[0] +
+ MM_PTE_LIST_2 * MmSysPteIndex[1] +
+ MM_PTE_LIST_4 * MmSysPteIndex[2] +
+ MM_PTE_LIST_8 * MmSysPteIndex[3] +
+ MM_PTE_LIST_16 * MmSysPteIndex[4];
+
+ PointerPte = MiReserveSystemPtes (total,
+ SystemPteSpace,
+ 64*1024,
+ 0,
+ TRUE);
+
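+        //
+        // Carve the reservation back up and release the pieces so
+        // each by-size list starts out populated with its target
+        // number of entries.
+        //
+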
+#ifdef MIPS
+ {
+ ULONG inserted;
+
+ //
+        // For MIPS, make sure buffers exist at all alignments.
+ //
+
+ do {
+ inserted = FALSE;
+ for (i = 0; i < MM_SYS_PTE_TABLES_MAX; i++) {
+ if (Lists[i]) {
+ Lists[i] -= 1;
+ MiReleaseSystemPtes (PointerPte,
+ MmSysPteIndex[i],
+ SystemPteSpace);
+ inserted = TRUE;
+ PointerPte += MmSysPteIndex[i];
+ }
+ }
+ } while (inserted);
+ }
+
+#else
+ for (i = (MM_SYS_PTE_TABLES_MAX - 1); i >= 0; i--) {
+ do {
+ Lists[i] -= 1;
+ MiReleaseSystemPtes (PointerPte,
+ MmSysPteIndex[i],
+ SystemPteSpace);
+ PointerPte += MmSysPteIndex[i];
+ } while (Lists[i] != 0 );
+ }
+#endif //MIPS
+ MmFlushCounter.u.List.NextEntry += 1;
+ MmFlushPte1 = NULL;
+ }
+
+ return;
+}
+
+
+#if DBG
+
+VOID
+MiDumpSystemPtes (
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
+ )
+
+
+{
+ PMMPTE PointerPte;
+ PMMPTE PointerNextPte;
+ ULONG ClusterSize;
+ PMMPTE EndOfCluster;
+
+ PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+ return;
+ }
+
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+
+ for (;;) {
+ if (PointerPte->u.List.OneEntry) {
+ ClusterSize = 1;
+ } else {
+ PointerNextPte = PointerPte + 1;
+ ClusterSize = PointerNextPte->u.List.NextEntry;
+ }
+
+ EndOfCluster = PointerPte + (ClusterSize - 1);
+
+ DbgPrint("System Pte at %lx for %lx entries (%lx)\n",PointerPte,
+ ClusterSize, EndOfCluster);
+
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+ break;
+ }
+
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+ }
+ return;
+}
+
+ULONG
+MiCountFreeSystemPtes (
+ IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType
+ )
+
+{
+ PMMPTE PointerPte;
+ PMMPTE PointerNextPte;
+ ULONG ClusterSize;
+ PMMPTE EndOfCluster;
+ ULONG FreeCount = 0;
+
+ PointerPte = &MmFirstFreeSystemPte[SystemPtePoolType];
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+ return 0;
+ }
+
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+
+ for (;;) {
+ if (PointerPte->u.List.OneEntry) {
+ ClusterSize = 1;
+ } else {
+ PointerNextPte = PointerPte + 1;
+ ClusterSize = PointerNextPte->u.List.NextEntry;
+ }
+
+ FreeCount += ClusterSize;
+
+ EndOfCluster = PointerPte + (ClusterSize - 1);
+
+ if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) {
+ break;
+ }
+
+ PointerPte = MmSystemPteBase + PointerPte->u.List.NextEntry;
+ }
+ return FreeCount;
+}
+
+#endif //DBG
diff --git a/private/ntos/mm/umapview.c b/private/ntos/mm/umapview.c
new file mode 100644
index 000000000..4798121a2
--- /dev/null
+++ b/private/ntos/mm/umapview.c
@@ -0,0 +1,660 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ Umapview.c
+
+Abstract:
+
+ This module contains the routines which implement the
+ NtUnmapViewOfSection service.
+
+Author:
+
+ Lou Perazzoli (loup) 22-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,NtUnmapViewOfSection)
+#pragma alloc_text(PAGE,MmUnmapViewOfSection)
+#endif
+
+
+NTSTATUS
+NtUnmapViewOfSection(
+ IN HANDLE ProcessHandle,
+ IN PVOID BaseAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function unmaps a previously created view to a section.
+
+Arguments:
+
+ ProcessHandle - Supplies an open handle to a process object.
+
+ BaseAddress - Supplies the base address of the view.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
+
+{
+ PEPROCESS Process;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+
+ PAGED_CODE();
+
+ PreviousMode = KeGetPreviousMode();
+
+ if ((PreviousMode == UserMode) && (BaseAddress > MM_HIGHEST_USER_ADDRESS)) {
+ return STATUS_NOT_MAPPED_VIEW;
+ }
+
+ Status = ObReferenceObjectByHandle ( ProcessHandle,
+ PROCESS_VM_OPERATION,
+ PsProcessType,
+ PreviousMode,
+ (PVOID *)&Process,
+ NULL );
+
+ if (!NT_SUCCESS(Status)) {
+ return Status;
+ }
+
+ Status = MmUnmapViewOfSection ( Process, BaseAddress );
+ ObDereferenceObject (Process);
+
+ return Status;
+}
+
+NTSTATUS
+MmUnmapViewOfSection(
+ IN PEPROCESS Process,
+ IN PVOID BaseAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function unmaps a previously created view to a section.
+
+Arguments:
+
+ Process - Supplies a referenced pointer to a process object.
+
+ BaseAddress - Supplies the base address of the view.
+
+Return Value:
+
+ Returns the status
+
+ TBS
+
+
+--*/
+
+{
+ PMMVAD Vad;
+ PMMVAD PreviousVad;
+ PMMVAD NextVad;
+ ULONG RegionSize;
+ PVOID UnMapImageBase;
+ NTSTATUS status;
+
+ PAGED_CODE();
+
+ UnMapImageBase = NULL;
+
+ //
+ // If the specified process is not the current process, attach
+ // to the specified process.
+ //
+
+
+ KeAttachProcess (&Process->Pcb);
+
+ //
+ // Get the address creation mutex to block multiple threads from
+ // creating or deleting address space at the same time and
+ // get the working set mutex so virtual address descriptors can
+ // be inserted and walked.
+ // Raise IRQL to block APCs.
+ //
+ // Get the working set mutex, no page faults allowed for now until
+ // working set mutex released.
+ //
+
+
+ LOCK_WS_AND_ADDRESS_SPACE (Process);
+
+ //
+ // Make sure the address space was not deleted, if so, return an error.
+ //
+
+ if (Process->AddressSpaceDeleted != 0) {
+ status = STATUS_PROCESS_IS_TERMINATING;
+ goto ErrorReturn;
+ }
+
+ //
+ // Find the associated vad.
+ //
+
+ Vad = MiLocateAddress (BaseAddress);
+
+ if ((Vad == (PMMVAD)NULL) || (Vad->u.VadFlags.PrivateMemory)) {
+
+ //
+ // No Virtual Address Descriptor located for Base Address.
+ //
+
+ status = STATUS_NOT_MAPPED_VIEW;
+ goto ErrorReturn;
+ }
+
+ if (Vad->u.VadFlags.NoChange == 1) {
+
+ //
+ // An attempt is being made to delete a secured VAD, check
+ // to see if this deletion is allowed.
+ //
+
+ status = MiCheckSecuredVad ((PMMVAD)Vad,
+ Vad->StartingVa,
+ 1,
+ MM_SECURE_DELETE_CHECK);
+
+ if (!NT_SUCCESS (status)) {
+ goto ErrorReturn;
+ }
+ }
+
+ //
+ // If this Vad is for an image section, then
+ // get the base address of the section
+ //
+
+ if ((Vad->u.VadFlags.ImageMap == 1) && (Process == PsGetCurrentProcess())) {
+ UnMapImageBase = Vad->StartingVa;
+ }
+
+ RegionSize = 1 + (ULONG)Vad->EndingVa - (ULONG)Vad->StartingVa;
+
+ PreviousVad = MiGetPreviousVad (Vad);
+ NextVad = MiGetNextVad (Vad);
+
+ MiRemoveVad (Vad);
+
+ //
+    // Return commitment for page table pages if possible.
+ //
+
+ MiReturnPageTablePageCommitment (Vad->StartingVa,
+ Vad->EndingVa,
+ Process,
+ PreviousVad,
+ NextVad);
+
+ MiRemoveMappedView (Process, Vad);
+
+ ExFreePool (Vad);
+
+ //
+ // Update the current virtual size in the process header.
+ //
+
+ Process->VirtualSize -= RegionSize;
+ status = STATUS_SUCCESS;
+
+ErrorReturn:
+
+ UNLOCK_WS (Process);
+ UNLOCK_ADDRESS_SPACE (Process);
+
+ if ( UnMapImageBase ) {
+ DbgkUnMapViewOfSection(UnMapImageBase);
+ }
+ KeDetachProcess();
+
+ return status;
+}
+
+VOID
+MiRemoveMappedView (
+ IN PEPROCESS CurrentProcess,
+ IN PMMVAD Vad
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes the mapping from the current process's
+ address space.
+
+Arguments:
+
+    CurrentProcess - Supplies a referenced pointer to the current process object.
+
+ Vad - Supplies the VAD which maps the view.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ APC level, working set mutex and address creation mutex held.
+
+    NOTE: THE WORKING SET MUTEXES MAY BE RELEASED THEN REACQUIRED!!!!
+
+--*/
+
+{
+ KIRQL OldIrql;
+ BOOLEAN DereferenceSegment = FALSE;
+ PCONTROL_AREA ControlArea;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE LastPte;
+ ULONG PageTableOffset;
+ ULONG PdePage;
+ PKEVENT PurgeEvent = NULL;
+ PVOID TempVa;
+ BOOLEAN DeleteOnClose = FALSE;
+ MMPTE_FLUSH_LIST PteFlushList;
+
+ ControlArea = Vad->ControlArea;
+
+ if (Vad->u.VadFlags.PhysicalMapping == 1) {
+
+ if (Vad->Banked) {
+ ExFreePool (Vad->Banked);
+ }
+
+#ifdef LARGE_PAGES
+ if (Vad->u.VadFlags.LargePages == 1) {
+
+ //
+ // Delete the subsection allocated to hold the large pages.
+ //
+
+ ExFreePool (Vad->FirstPrototypePte);
+ Vad->FirstPrototypePte = NULL;
+ KeFlushEntireTb (TRUE, FALSE);
+ LOCK_PFN (OldIrql);
+ } else {
+
+#endif //LARGE_PAGES
+
+ //
+ // This is a physical memory view. The pages map physical memory
+ // and are not accounted for in the working set list or in the PFN
+ // database.
+ //
+
+ //
+ // Set count so only flush entire TB operations are performed.
+ //
+
+ PteFlushList.Count = MM_MAXIMUM_FLUSH_COUNT;
+
+ LOCK_PFN (OldIrql);
+
+ //
+        // Remove the PTEs from the address space.
+ //
+
+ PointerPde = MiGetPdeAddress (Vad->StartingVa);
+ PdePage = PointerPde->u.Hard.PageFrameNumber;
+ PointerPte = MiGetPteAddress (Vad->StartingVa);
+ LastPte = MiGetPteAddress (Vad->EndingVa);
+ PageTableOffset = MiGetPteOffset( PointerPte );
+
+ while (PointerPte <= LastPte) {
+
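+            //
+            // When the PTE pointer crosses onto a new page of PTEs,
+            // recompute the page directory entry and page table
+            // frame for the new page table page.
+            //
+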
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+ PageTableOffset = MiGetPteOffset( PointerPte );
+ PdePage = PointerPde->u.Hard.PageFrameNumber;
+ }
+
+ *PointerPte = ZeroPte;
+ MiDecrementShareAndValidCount (PdePage);
+
+ //
+            // Decrement the count of non-zero page table entries for this
+ // page table.
+ //
+
+ MmWorkingSetList->UsedPageTableEntries[PageTableOffset] -= 1;
+ ASSERT (MmWorkingSetList->UsedPageTableEntries[PageTableOffset]
+ < PTE_PER_PAGE);
+
+ //
+ // If all the entries have been eliminated from the previous
+ // page table page, delete the page table page itself.
+ //
+
+ if (MmWorkingSetList->UsedPageTableEntries[PageTableOffset] ==
+ 0) {
+
+ TempVa = MiGetVirtualAddressMappedByPte(PointerPde);
+
+ PteFlushList.Count = MM_MAXIMUM_FLUSH_COUNT;
+
+ MiDeletePte (PointerPde,
+ TempVa,
+ FALSE,
+ CurrentProcess,
+ (PMMPTE)NULL,
+ &PteFlushList);
+
+ }
+ PointerPte += 1;
+ }
+ KeFlushEntireTb (TRUE, FALSE);
+#ifdef LARGE_PAGES
+ }
+#endif //LARGE_PAGES
+ } else {
+
+ LOCK_PFN (OldIrql);
+ MiDeleteVirtualAddresses (Vad->StartingVa,
+ Vad->EndingVa,
+ FALSE,
+ Vad);
+ }
+
+ //
+ // Decrement the count of the number of views for the
+ // Segment object. This requires the PFN mutex to be held (it is already).
+ //
+
+ ControlArea->NumberOfMappedViews -= 1;
+ ControlArea->NumberOfUserReferences -= 1;
+
+ //
+ // Check to see if the control area (segment) should be deleted.
+ // This routine releases the PFN lock.
+ //
+
+ MiCheckControlArea (ControlArea, CurrentProcess, OldIrql);
+
+ return;
+}
+
+VOID
+MiPurgeImageSection (
+ IN PCONTROL_AREA ControlArea,
+ IN PEPROCESS Process OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function locates subsections within an image section that
+ contain global memory and resets the global memory back to
+ the initial subsection contents.
+
+ Note, that for this routine to be called the section is not
+ referenced nor is it mapped in any process.
+
+Arguments:
+
+ ControlArea - Supplies a pointer to the control area for the section.
+
+ Process - Supplies a pointer to the process IFF the working set mutex
+ is held, else NULL is supplied.
+
+Return Value:
+
+ None.
+
+Environment:
+ PFN LOCK held.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPFN Pfn1;
+ MMPTE PteContents;
+ MMPTE NewContents;
+ MMPTE NewContentsDemandZero;
+ KIRQL OldIrql = APC_LEVEL;
+ ULONG i;
+ ULONG SizeOfRawData;
+ ULONG OffsetIntoSubsection;
+ PSUBSECTION Subsection;
+#if DBG
+ ULONG DelayCount = 0;
+#endif //DBG
+
+
+ i = ControlArea->NumberOfSubsections;
+ Subsection = (PSUBSECTION)(ControlArea + 1);
+
+ //
+    // Loop through all the subsections.
+    //
+
+ while (i > 0) {
+
+ if (Subsection->u.SubsectionFlags.GlobalMemory == 1) {
+
+ NewContents.u.Long = 0;
+ NewContentsDemandZero.u.Long = 0;
+ SizeOfRawData = 0;
+ OffsetIntoSubsection = 0;
+
+ //
+ // Purge this section.
+ //
+
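+            //
+            // NewContents is what each prototype PTE will be reset
+            // to: a subsection-format PTE for pages backed by the
+            // image file, or a demand-zero PTE past the end of the
+            // raw data.
+            //
+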
+ if (Subsection->StartingSector != 0) {
+
+ //
+ // This is a not a demand zero section.
+ //
+
+ NewContents.u.Long =
+ (ULONG)MiGetSubsectionAddressForPte(Subsection);
+ NewContents.u.Soft.Prototype = 1;
+ SizeOfRawData = ((Subsection->EndingSector << MMSECTOR_SHIFT) |
+ Subsection->u.SubsectionFlags.SectorEndOffset) -
+ (Subsection->StartingSector << MMSECTOR_SHIFT);
+ }
+
+ NewContents.u.Soft.Protection =
+ Subsection->u.SubsectionFlags.Protection;
+ NewContentsDemandZero.u.Soft.Protection =
+ NewContents.u.Soft.Protection;
+
+ PointerPte = Subsection->SubsectionBase;
+ LastPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
+ ControlArea = Subsection->ControlArea;
+
+ MiMakeSystemAddressValidPfnWs (PointerPte, Process);
+
+ while (PointerPte < LastPte) {
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ //
+ // We are on a page boundary, make sure this PTE is resident.
+ //
+
+ MiMakeSystemAddressValidPfnWs (PointerPte, Process);
+ }
+
+ PteContents = *PointerPte;
+ if (PteContents.u.Long == 0) {
+
+ //
+ // No more valid PTEs to deal with.
+ //
+
+ break;
+ }
+
+ ASSERT (PteContents.u.Hard.Valid == 0);
+
+ if ((PteContents.u.Soft.Prototype == 0) &&
+ (PteContents.u.Soft.Transition == 1)) {
+
+ //
+ // The prototype PTE is in transition format.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);
+
+ //
+ // If the prototype PTE is no longer pointing to
+ // the original image page (not in protopte format),
+ // or has been modified, remove it from memory.
+ //
+
+ if ((Pfn1->u3.e1.Modified == 1) ||
+ (Pfn1->OriginalPte.u.Soft.Prototype == 0)) {
+ ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
+
+ //
+ // This is a transition PTE which has been
+ // modified or is no longer in protopte format.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount != 0) {
+
+ //
+ // There must be an I/O in progress on this
+ // page. Wait for the I/O operation to complete.
+ //
+
+ UNLOCK_PFN (OldIrql);
+
+ KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime);
+
+ //
+ // Redo the loop.
+ //
+#if DBG
+ if ((DelayCount % 1024) == 0) {
+ DbgPrint("MMFLUSHSEC: waiting for i/o to complete PFN %lx\n",
+ Pfn1);
+ }
+ DelayCount += 1;
+#endif //DBG
+
+ LOCK_PFN (OldIrql);
+
+ MiMakeSystemAddressValidPfnWs (PointerPte, Process);
+ continue;
+ }
+
+ ASSERT (!((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->OriginalPte.u.Soft.Transition == 1)));
+
+ *PointerPte = Pfn1->OriginalPte;
+ ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0);
+
+ //
+ // Only reduce the number of PFN references if
+ // the original PTE is still in prototype PTE
+ // format.
+ //
+
+ if (Pfn1->OriginalPte.u.Soft.Prototype == 1) {
+ ControlArea->NumberOfPfnReferences -= 1;
+ ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0);
+ }
+ MiUnlinkPageFromList (Pfn1);
+
+ MI_SET_PFN_DELETED (Pfn1);
+
+ MiDecrementShareCount (Pfn1->PteFrame);
+
+ //
+ // If the reference count for the page is zero, insert
+                    // it into the free page list; otherwise leave it alone,
+                    // and when the reference count is decremented to zero
+                    // the page will go to the free list.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ PteContents.u.Trans.PageFrameNumber);
+ }
+
+ *PointerPte = NewContents;
+ }
+ } else {
+
+ //
+ // Prototype PTE is not in transition format.
+ //
+
+ if (PteContents.u.Soft.Prototype == 0) {
+
+ //
+ // This refers to a page in the paging file,
+ // as it no longer references the image,
+ // restore the PTE contents to what they were
+ // at the initial image creation.
+ //
+
+ if (PteContents.u.Long != NoAccessPte.u.Long) {
+ MiReleasePageFileSpace (PteContents);
+ *PointerPte = NewContents;
+ }
+ }
+ }
+ PointerPte += 1;
+ OffsetIntoSubsection += PAGE_SIZE;
+
+ if (OffsetIntoSubsection >= SizeOfRawData) {
+
+ //
+ // There are trailing demand zero pages in this
+ // subsection, set the PTE contents to be demand
+ // zero for the remainder of the PTEs in this
+ // subsection.
+ //
+
+ NewContents = NewContentsDemandZero;
+ }
+
+#if DBG
+ DelayCount = 0;
+#endif //DBG
+
+ } //end while
+ }
+
+ i -=1;
+ Subsection += 1;
+ }
+
+ return;
+}
diff --git a/private/ntos/mm/up/makefile b/private/ntos/mm/up/makefile
new file mode 100644
index 000000000..6ee4f43fa
--- /dev/null
+++ b/private/ntos/mm/up/makefile
@@ -0,0 +1,6 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the components of NT OS/2
+#
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/private/ntos/mm/up/sources b/private/ntos/mm/up/sources
new file mode 100644
index 000000000..6dca9c583
--- /dev/null
+++ b/private/ntos/mm/up/sources
@@ -0,0 +1,27 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+TARGETPATH=..\..\obj
+
+!include ..\sources.inc
diff --git a/private/ntos/mm/vadtree.c b/private/ntos/mm/vadtree.c
new file mode 100644
index 000000000..b5d214534
--- /dev/null
+++ b/private/ntos/mm/vadtree.c
@@ -0,0 +1,466 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ vadtree.c
+
+Abstract:
+
+ This module contains the routine to manipulate the virtual address
+ descriptor tree.
+
+Author:
+
+ Lou Perazzoli (loup) 19-May-1989
+
+Environment:
+
+ Kernel mode only, working set mutex held, APC's disabled.
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+VOID
+MiInsertVad (
+ IN PMMVAD Vad
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a virtual address descriptor into the tree and
+ reorders the splay tree as appropriate.
+
+Arguments:
+
+ Vad - Supplies a pointer to a virtual address descriptor
+
+
+Return Value:
+
+ None - An exception is raised if quota is exceeded.
+
+--*/
+
+{
+ PMMADDRESS_NODE *Root;
+ PEPROCESS CurrentProcess;
+ ULONG RealCharge;
+ ULONG PageCharge = 0;
+ ULONG PagedQuotaCharged = 0;
+ ULONG FirstPage;
+ ULONG LastPage;
+ ULONG PagedPoolCharge;
+ ULONG ChargedPageFileQuota = FALSE;
+
+ ASSERT (Vad->EndingVa > Vad->StartingVa);
+
+ CurrentProcess = PsGetCurrentProcess();
+ //ASSERT (KeReadStateMutant (&CurrentProcess->WorkingSetLock) == 0);
+
+ //
+ // Commit charge of MAX_COMMIT means don't charge quota.
+ //
+
+ if (Vad->u.VadFlags.CommitCharge != MM_MAX_COMMIT) {
+
+ //
+ // Charge quota for the nonpaged pool for the VAD. This is
+ // done here rather than by using ExAllocatePoolWithQuota
+ // so the process object is not referenced by the quota charge.
+ //
+
+ PsChargePoolQuota (CurrentProcess, NonPagedPool, sizeof(MMVAD));
+
+ try {
+
+ //
+ // Charge quota for the prototype PTEs if this is a mapped view.
+ //
+
+ if ((Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea != NULL)) {
+ PagedPoolCharge =
+ ((ULONG)Vad->EndingVa - (ULONG)Vad->StartingVa) >>
+ (PAGE_SHIFT - PTE_SHIFT);
+ PsChargePoolQuota (CurrentProcess, PagedPool, PagedPoolCharge);
+ PagedQuotaCharged = PagedPoolCharge;
+ }
+
+ //
+ // Add in the charge for page table pages.
+ //
+
+ FirstPage = MiGetPdeOffset (Vad->StartingVa);
+ LastPage = MiGetPdeOffset (Vad->EndingVa);
+
+ while (FirstPage <= LastPage) {
+
+ if (!MI_CHECK_BIT (MmWorkingSetList->CommittedPageTables,
+ FirstPage)) {
+ PageCharge += 1;
+ }
+ FirstPage += 1;
+ }
+
+ RealCharge = Vad->u.VadFlags.CommitCharge + PageCharge;
+
+ if (RealCharge != 0) {
+
+ MiChargePageFileQuota (RealCharge, CurrentProcess);
+ ChargedPageFileQuota = TRUE;
+
+#if 0 //commented out so page file quota is meaningful.
+ if (Vad->u.VadFlags.PrivateMemory == 0) {
+
+ if ((Vad->ControlArea->FilePointer == NULL) &&
+ (Vad->u.VadFlags.PhysicalMapping == 0)) {
+
+ //
+ // Don't charge commitment for the page file space
+ // occupied by a page file section. This will be
+ // charged as the shared memory is committed.
+ //
+
+ RealCharge -= BYTES_TO_PAGES ((ULONG)Vad->EndingVa -
+ (ULONG)Vad->StartingVa);
+ }
+ }
+#endif //0
+ MiChargeCommitment (RealCharge, CurrentProcess);
+ CurrentProcess->CommitCharge += RealCharge;
+ }
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // Return any quotas charged thus far.
+ //
+
+ PsReturnPoolQuota (CurrentProcess, NonPagedPool, sizeof(MMVAD));
+
+ if (PagedQuotaCharged != 0) {
+ PsReturnPoolQuota (CurrentProcess, PagedPool, PagedPoolCharge);
+ }
+
+ if (ChargedPageFileQuota) {
+
+ MiReturnPageFileQuota (RealCharge,
+ CurrentProcess);
+ }
+
+ ExRaiseStatus (GetExceptionCode());
+ }
+
+ if (PageCharge != 0) {
+
+ //
+        // Since the commitment was successful, charge the page
+ // table pages.
+ //
+
+ FirstPage = MiGetPdeOffset (Vad->StartingVa);
+
+ while (FirstPage <= LastPage) {
+
+ if (!MI_CHECK_BIT (MmWorkingSetList->CommittedPageTables,
+ FirstPage)) {
+ MI_SET_BIT (MmWorkingSetList->CommittedPageTables,
+ FirstPage);
+ MmWorkingSetList->NumberOfCommittedPageTables += 1;
+ ASSERT (MmWorkingSetList->NumberOfCommittedPageTables <
+ PDE_PER_PAGE);
+ }
+ FirstPage += 1;
+ }
+ }
+ }
+
+ Root = (PMMADDRESS_NODE *)&CurrentProcess->VadRoot;
+
+ //
+ // Set the hint field in the process to this Vad.
+ //
+
+ CurrentProcess->VadHint = Vad;
+
+ if (CurrentProcess->VadFreeHint != NULL) {
+ if (((ULONG)((PMMVAD)CurrentProcess->VadFreeHint)->EndingVa + X64K) >=
+ (ULONG)Vad->StartingVa) {
+ CurrentProcess->VadFreeHint = Vad;
+ }
+ }
+
+ MiInsertNode ( (PMMADDRESS_NODE)Vad, Root);
+ return;
+}
+
+VOID
+MiRemoveVad (
+ IN PMMVAD Vad
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes a virtual address descriptor from the tree and
+ reorders the splay tree as appropriate. If any quota or commitment
+ was charged by the VAD (as indicated by the CommitCharge field) it
+ is released.
+
+Arguments:
+
+ Vad - Supplies a pointer to a virtual address descriptor.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PMMADDRESS_NODE *Root;
+ PEPROCESS CurrentProcess;
+ ULONG RealCharge;
+ PLIST_ENTRY Next;
+ PMMSECURE_ENTRY Entry;
+
+ CurrentProcess = PsGetCurrentProcess();
+
+
+ //
+ // Commit charge of MAX_COMMIT means don't charge quota.
+ //
+
+ if (Vad->u.VadFlags.CommitCharge != MM_MAX_COMMIT) {
+
+ //
+ // Return the quota charge to the process.
+ //
+
+ PsReturnPoolQuota (CurrentProcess, NonPagedPool, sizeof(MMVAD));
+
+ if ((Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea != NULL)) {
+ PsReturnPoolQuota (CurrentProcess,
+ PagedPool,
+ ((ULONG)Vad->EndingVa - (ULONG)Vad->StartingVa) >> (PAGE_SHIFT - PTE_SHIFT));
+ }
+
+ RealCharge = Vad->u.VadFlags.CommitCharge;
+
+ if (RealCharge != 0) {
+
+ MiReturnPageFileQuota (RealCharge, CurrentProcess);
+
+ if ((Vad->u.VadFlags.PrivateMemory == 0) &&
+ (Vad->ControlArea != NULL)) {
+
+#if 0 //commented out so page file quota is meaningful.
+ if (Vad->ControlArea->FilePointer == NULL) {
+
+ //
+ // Don't release commitment for the page file space
+ // occupied by a page file section. This will be charged
+ // as the shared memory is committed.
+ //
+
+ RealCharge -= BYTES_TO_PAGES ((ULONG)Vad->EndingVa -
+ (ULONG)Vad->StartingVa);
+ }
+#endif
+ }
+
+ MiReturnCommitment (RealCharge);
+ CurrentProcess->CommitCharge -= RealCharge;
+ }
+ }
+
+ if (Vad == CurrentProcess->VadFreeHint) {
+ CurrentProcess->VadFreeHint = MiGetPreviousVad (Vad);
+ }
+
+ Root = (PMMADDRESS_NODE *)&CurrentProcess->VadRoot;
+
+ MiRemoveNode ( (PMMADDRESS_NODE)Vad, Root);
+
+ if (Vad->u.VadFlags.NoChange) {
+ if (Vad->u2.VadFlags2.MultipleSecured) {
+
+ //
+            // Free the outstanding pool allocations.
+ //
+
+ Next = Vad->u3.List.Flink;
+ do {
+ Entry = CONTAINING_RECORD( Next,
+ MMSECURE_ENTRY,
+ List);
+
+ Next = Entry->List.Flink;
+ ExFreePool (Entry);
+ } while (Next != &Vad->u3.List);
+ }
+ }
+
+ //
+    // If the VadHint was the removed Vad, change the Hint.
+    //
+
+ if (CurrentProcess->VadHint == Vad) {
+ CurrentProcess->VadHint = CurrentProcess->VadRoot;
+ }
+
+ return;
+}
+
+PMMVAD
+FASTCALL
+MiLocateAddress (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ The function locates the virtual address descriptor which describes
+ a given address.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to locate a descriptor
+ for.
+
+Return Value:
+
+ Returns a pointer to the virtual address descriptor which contains
+ the supplied virtual address or NULL if none was located.
+
+--*/
+
+{
+ PMMVAD FoundVad;
+ PEPROCESS CurrentProcess;
+
+ CurrentProcess = PsGetCurrentProcess();
+
+ //ASSERT (KeReadStateMutant (&CurrentProcess->WorkingSetLock) == 0);
+
+ if (CurrentProcess->VadHint == NULL) {
+ return NULL;
+ }
+
+ if ((VirtualAddress >= ((PMMADDRESS_NODE)CurrentProcess->VadHint)->StartingVa) &&
+ (VirtualAddress <= ((PMMADDRESS_NODE)CurrentProcess->VadHint)->EndingVa)) {
+
+ return (PMMVAD)CurrentProcess->VadHint;
+ }
+
+ FoundVad = (PMMVAD)MiLocateAddressInTree ( VirtualAddress,
+ (PMMADDRESS_NODE *)&(CurrentProcess->VadRoot));
+
+ if (FoundVad != NULL) {
+ CurrentProcess->VadHint = (PVOID)FoundVad;
+ }
+ return FoundVad;
+}
+
+PVOID
+MiFindEmptyAddressRange (
+ IN ULONG SizeOfRange,
+ IN ULONG Alignment,
+ IN ULONG QuickCheck
+ )
+
+/*++
+
+Routine Description:
+
+ The function examines the virtual address descriptors to locate
+ an unused range of the specified size and returns the starting
+ address of the range.
+
+Arguments:
+
+ SizeOfRange - Supplies the size in bytes of the range to locate.
+
+ Alignment - Supplies the alignment for the address. Must be
+        a power of 2 and greater than the page size.
+
+ QuickCheck - Supplies a zero if a quick check for free memory
+ after the VadFreeHint exists, non-zero if checking
+ should start at the lowest address.
+
+Return Value:
+
+ Returns the starting address of a suitable range.
+
+--*/
+
+{
+ PMMVAD NextVad;
+ PMMVAD FreeHint;
+ PEPROCESS CurrentProcess;
+
+ CurrentProcess = PsGetCurrentProcess();
+ //ASSERT (KeReadStateMutant (&CurrentProcess->WorkingSetLock) == 0);
+
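+    //
+    // Try the free hint first: if the gap between the hint VAD and
+    // its successor (or the top of the user address space) is large
+    // enough after rounding up to the requested alignment, use it
+    // and avoid walking the VAD tree.
+    //
+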
+ FreeHint = CurrentProcess->VadFreeHint;
+ if ((QuickCheck == 0) && (FreeHint != NULL)) {
+ NextVad = MiGetNextVad (FreeHint);
+ if (NextVad == NULL) {
+
+ if (SizeOfRange <
+ (((ULONG)MM_HIGHEST_USER_ADDRESS + 1) -
+ (ULONG)MI_ROUND_TO_SIZE(FreeHint->EndingVa, Alignment))) {
+ return (PMMADDRESS_NODE)MI_ROUND_TO_SIZE(FreeHint->EndingVa,
+ Alignment);
+ }
+ } else {
+
+ if (SizeOfRange <
+ ((ULONG)NextVad->StartingVa -
+ (ULONG)MI_ROUND_TO_SIZE(FreeHint->EndingVa, Alignment))) {
+
+ //
+ // Check to ensure that the ending address aligned upwards
+ // is not greater than the starting address.
+ //
+
+ if ((ULONG)NextVad->StartingVa >
+ (ULONG)MI_ROUND_TO_SIZE(FreeHint->EndingVa,Alignment)) {
+ return (PMMADDRESS_NODE)MI_ROUND_TO_SIZE(FreeHint->EndingVa,
+ Alignment);
+ }
+ }
+ }
+ }
+
+ return (PMMVAD)MiFindEmptyAddressRangeInTree (
+ SizeOfRange,
+ Alignment,
+ (PMMADDRESS_NODE)(CurrentProcess->VadRoot),
+ (PMMADDRESS_NODE *)&CurrentProcess->VadFreeHint);
+
+}
+
+#if DBG
+VOID
+VadTreeWalk (
+ PMMVAD Start
+ )
+
+{
+ Start;
+ NodeTreeWalk ( (PMMADDRESS_NODE)(PsGetCurrentProcess()->VadRoot));
+ return;
+}
+#endif //DBG
+
diff --git a/private/ntos/mm/wrtfault.c b/private/ntos/mm/wrtfault.c
new file mode 100644
index 000000000..d2227d4b8
--- /dev/null
+++ b/private/ntos/mm/wrtfault.c
@@ -0,0 +1,400 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ wrtfault.c
+
+Abstract:
+
+ This module contains the copy on write routine for memory management.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+NTSTATUS
+FASTCALL
+MiCopyOnWrite (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte
+ )
+
+/*++
+
+Routine Description:
+
+ This routine performs a copy on write operation for the specified
+ virtual address.
+
+Arguments:
+
+ FaultingAddress - Supplies the virtual address which caused the
+ fault.
+
+ PointerPte - Supplies the pointer to the PTE which caused the
+ page fault.
+
+
+Return Value:
+
+ Returns the status of the fault handling operation. Can be one of:
+ - Success.
+ - In-page Error.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working set mutex held.
+
+--*/
+
+{
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ ULONG NewPageIndex;
+ PULONG CopyTo;
+ PULONG CopyFrom;
+ KIRQL OldIrql;
+ PMMPFN Pfn1;
+// PMMPTE PointerPde;
+ PEPROCESS CurrentProcess;
+ PMMCLONE_BLOCK CloneBlock;
+ PMMCLONE_DESCRIPTOR CloneDescriptor;
+ PVOID VirtualAddress;
+ ULONG WorkingSetIndex;
+ BOOLEAN FakeCopyOnWrite = FALSE;
+
+ //
+ // This is called from MmAccessFault, the PointerPte is valid
+ // and the working set mutex ensures it cannot change state.
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_WRITEFAULT) {
+ DbgPrint("**copy on write Fault va %lx proc %lx thread %lx\n",
+ (ULONG)FaultingAddress,
+ (ULONG)PsGetCurrentProcess(), (ULONG)PsGetCurrentThread());
+ }
+
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ MiFormatPte(PointerPte);
+ }
+#endif //DBG
+
+ ASSERT (PsGetCurrentProcess()->ForkInProgress == NULL);
+
+ //
+ // Capture the PTE contents to TempPte.
+ //
+
+ TempPte = *PointerPte;
+
+ //
+ // Check to see if this is a prototype PTE with copy on write
+ // enabled.
+ //
+
+ if (TempPte.u.Hard.CopyOnWrite == 0) {
+
+ //
+ // This is a fork page which is being made private in order
+ // to change the protection of the page.
+ // Do not make the page writable.
+ //
+
+ FakeCopyOnWrite = TRUE;
+ }
+
+ PageFrameIndex = TempPte.u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ CurrentProcess = PsGetCurrentProcess();
+
+ //
+ // Acquire the PFN mutex.
+ //
+
+ VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+ WorkingSetIndex = MiLocateWsle (VirtualAddress, MmWorkingSetList,
+ Pfn1->u1.WsIndex);
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // The page must be copied into a new page.
+ //
+
+ //
+ // If a fork operation is in progress and the faulting thread
+    // is not the thread performing the fork operation, block until
+ // the fork is completed.
+ //
+
+ if ((CurrentProcess->ForkInProgress != NULL) &&
+ (CurrentProcess->ForkInProgress != PsGetCurrentThread())) {
+ MiWaitForForkToComplete (CurrentProcess);
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+ }
+
+ if (MiEnsureAvailablePageOrWait(CurrentProcess, NULL)) {
+
+ //
+ // A wait operation was performed to obtain an available
+ // page and the working set mutex and pfn mutexes have
+ // been released and various things may have changed for
+ // the worse. Rather than examine all the conditions again,
+        // return and if things are still proper, the fault will
+        // be taken again.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+ }
+
+ //
+ // Increment the number of private pages.
+ //
+
+ CurrentProcess->NumberOfPrivatePages += 1;
+
+ MmInfoCounters.CopyOnWriteCount += 1;
+
+ //
+ // A page is being copied and made private, the global state of
+ // the shared page needs to be updated at this point on certain
+ // hardware. This is done by ORing the dirty bit into the modify bit in
+ // the PFN element.
+ //
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
+
+ //
+ // This must be a prototype PTE. Perform the copy on write.
+ //
+
+#if DBG
+ if (Pfn1->u3.e1.PrototypePte == 0) {
+ DbgPrint("writefault - PTE indicates cow but not protopte\n");
+ MiFormatPte(PointerPte);
+ MiFormatPfn(Pfn1);
+ }
+#endif
+
+ CloneBlock = (PMMCLONE_BLOCK)Pfn1->PteAddress;
+
+ //
+ // If the share count for the physical page is one, the reference
+ // count is one, and the modified flag is clear the current page
+ // can be stolen to satisfy the copy on write.
+ //
+
+#if 0
+// COMMENTED OUT ****************************************************
+// COMMENTED OUT ****************************************************
+// COMMENTED OUT ****************************************************
+ if ((Pfn1->u2.ShareCount == 1) && (Pfn1->u3.e2.ReferenceCount == 1)
+ && (Pfn1->u3.e1.Modified == 0)) {
+
+ //
+ // Make this page a private page and return the prototype
+ // PTE into its original contents. The PFN database for
+ // this page now points to this PTE.
+ //
+
+ //
+ // Note that a page fault could occur referencing the prototype
+ // PTE, so we map it into hyperspace to prevent a fault.
+ //
+
+ MiRestorePrototypePte (Pfn1);
+
+ Pfn1->PteAddress = PointerPte;
+
+ //
+ // Get the protection for the page.
+ //
+
+ VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+ WorkingSetIndex = MiLocateWsle (VirtualAddress, MmWorkingSetList,
+ Pfn1->u1.WsIndex);
+
+ ASSERT (WorkingSetIndex != WSLE_NULL_INDEX) {
+
+ Pfn1->OriginalPte.u.Long = 0;
+ Pfn1->OriginalPte.u.Soft.Protection =
+ MI_MAKE_PROTECT_NOT_WRITE_COPY (
+ MmWsle[WorkingSetIndex].u1.e1.Protection);
+
+ PointerPde = MiGetPteAddress(PointerPte);
+ Pfn1->u3.e1.PrototypePte = 0;
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+
+ if (!FakeCopyOnWrite) {
+
+ //
+ // If the page was Copy On Write and stolen or if the page was not
+ // copy on write, update the PTE setting both the dirty bit and the
+ // accessed bit. Note, that as this PTE is in the TB, the TB must
+ // be flushed.
+ //
+
+ MI_SET_PTE_DIRTY (TempPte);
+ TempPte.u.Hard.Write = 1;
+ MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
+ TempPte.u.Hard.CopyOnWrite = 0;
+ *PointerPte = TempPte;
+
+ //
+ // This is a copy on write operation, set the modify bit
+ // in the PFN database and deallocate any page file space.
+ //
+
+ Pfn1->u3.e1.Modified = 1;
+
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+
+ //
+ // This page is in page file format, deallocate the page
+ // file space.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+
+ //
+ // Change original PTE to indicate no page file space is
+ // reserved, otherwise the space will be deallocated when
+ // the PTE is deleted.
+ //
+
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+ }
+
+ //
+ // The TB entry must be flushed as the valid PTE with the dirty
+ // bit clear has been fetched into the TB. If it isn't flushed,
+ // another fault is generated as the dirty bit is not set in
+ // the cached TB entry.
+ //
+
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPte, FaultingAddress, TRUE);
+
+ CloneDescriptor = MiLocateCloneAddress ((PVOID)CloneBlock);
+
+ if (CloneDescriptor != (PMMCLONE_DESCRIPTOR)NULL) {
+
+ //
+ // Decrement the reference count for the clone block,
+ // note that this could release and reacquire
+ // the mutexes.
+ //
+
+ MiDecrementCloneBlockReference ( CloneDescriptor,
+ CloneBlock,
+ CurrentProcess );
+ }
+
+    } else {
+
+// ABOVE COMMENTED OUT ****************************************************
+// ABOVE COMMENTED OUT ****************************************************
+#endif
+
+ //
+ // Get a new page with the same color as this page.
+ //
+
+ NewPageIndex = MiRemoveAnyPage (
+ MI_GET_SECONDARY_COLOR (PageFrameIndex,
+ Pfn1));
+ MiInitializeCopyOnWritePfn (NewPageIndex, PointerPte, WorkingSetIndex);
+
+ UNLOCK_PFN (OldIrql);
+
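+    //
+    // Map the new physical page into hyperspace and copy the shared
+    // page's contents into it; the faulting virtual address still
+    // maps the old page and serves as the copy source.
+    //
+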
+ CopyTo = (PULONG)MiMapPageInHyperSpace (NewPageIndex, &OldIrql);
+ CopyFrom = (PULONG)MiGetVirtualAddressMappedByPte (PointerPte);
+
+ RtlCopyMemory ( CopyTo, CopyFrom, PAGE_SIZE);
+
+ MiUnmapPageInHyperSpace (OldIrql);
+
+ if (!FakeCopyOnWrite) {
+
+ //
+ // If the page was really a copy on write page, make it
+ // accessed, dirty and writable. Also, clear the copy-on-write
+ // bit in the PTE.
+ //
+
+ MI_SET_PTE_DIRTY (TempPte);
+ TempPte.u.Hard.Write = 1;
+ MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
+ TempPte.u.Hard.CopyOnWrite = 0;
+ TempPte.u.Hard.PageFrameNumber = NewPageIndex;
+
+ } else {
+
+ //
+ // The page was not really a copy on write, just change
+ // the frame field of the PTE.
+ //
+
+ TempPte.u.Hard.PageFrameNumber = NewPageIndex;
+ }
+
+ //
+ // If the modify bit is set in the PFN database for the
+ // page, the data cache must be flushed. This is due to the
+ // fact that this process may have been cloned and the cache
+ // still contains stale data destined for the page we are
+ // going to remove.
+ //
+
+ ASSERT (TempPte.u.Hard.Valid == 1);
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Flush the TB entry for this page.
+ //
+
+ KeFlushSingleTb (FaultingAddress,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Flush);
+
+ //
+ // Decrement the share count for the page which was copied
+ // as this pte no longer refers to it.
+ //
+
+ MiDecrementShareCount (PageFrameIndex);
+
+ CloneDescriptor = MiLocateCloneAddress ((PVOID)CloneBlock);
+
+ if (CloneDescriptor != (PMMCLONE_DESCRIPTOR)NULL) {
+
+ //
+ // Decrement the reference count for the clone block,
+ // note that this could release and reacquire
+ // the mutexes.
+ //
+
+ MiDecrementCloneBlockReference ( CloneDescriptor,
+ CloneBlock,
+ CurrentProcess );
+ }
+
+ UNLOCK_PFN (OldIrql);
+ return STATUS_SUCCESS;
+}
diff --git a/private/ntos/mm/wslist.c b/private/ntos/mm/wslist.c
new file mode 100644
index 000000000..a3d16284d
--- /dev/null
+++ b/private/ntos/mm/wslist.c
@@ -0,0 +1,2973 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ wslist.c
+
+Abstract:
+
+ This module contains routines which operate on the working
+ set list structure.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#define MM_SYSTEM_CACHE_THRESHOLD ((1024*1024) / PAGE_SIZE)
+
+extern ULONG MmMaximumWorkingSetSize;
+ULONG MmFaultsTakenToGoAboveMaxWs = 100;
+ULONG MmFaultsTakenToGoAboveMinWs = 16;
+
+ULONG MmSystemCodePage;
+ULONG MmSystemCachePage;
+ULONG MmPagedPoolPage;
+ULONG MmSystemDriverPage;
+
+#define MM_RETRY_COUNT 2
+
+VOID
+MiCheckWsleHash (
+ IN PMMWSL WorkingSetList
+ );
+
+VOID
+MiEliminateWorkingSetEntry (
+ IN ULONG WorkingSetIndex,
+ IN PMMPTE PointerPte,
+ IN PMMPFN Pfn,
+ IN PMMWSLE Wsle
+ );
+
+ULONG
+MiAddWorkingSetPage (
+ IN PMMSUPPORT WsInfo
+ );
+
+VOID
+MiRemoveWorkingSetPages (
+ IN PMMWSL WorkingSetList,
+ IN PMMSUPPORT WsInfo
+ );
+
+VOID
+MiCheckNullIndex (
+ IN PMMWSL WorkingSetList
+ );
+
+VOID
+MiDumpWsleInCacheBlock (
+ IN PMMPTE CachePte
+ );
+
+ULONG
+MiDumpPteInCacheBlock (
+ IN PMMPTE PointerPte
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGELK, MmAdjustWorkingSetSize)
+#pragma alloc_text(PAGELK, MiEmptyWorkingSet)
+#endif // ALLOC_PRAGMA
+
+
+ULONG
+MiLocateAndReserveWsle (
+ PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This function examines the Working Set List for the current
+ process and locates an entry to contain a new page. If the
+ working set is not currently at its quota, the new page is
+    added without removing a page; if the working set is at its
+    quota, a page is removed from the working set and the new
+    page is added in its place.
+
+Arguments:
+
+    WsInfo - Supplies a pointer to the working set information for
+        the working set to reserve an entry in.
+
+Return Value:
+
+ Returns the working set index which is now reserved for the
+ next page to be added.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set lock. Pfn lock NOT held.
+
+--*/
+
+{
+ ULONG WorkingSetIndex;
+ ULONG NumberOfCandidates;
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+ PMMPTE PointerPte;
+ ULONG CurrentSize;
+ ULONG AvailablePageThreshold;
+ ULONG TheNextSlot;
+ ULONG QuotaIncrement;
+ LARGE_INTEGER CurrentTime;
+ KIRQL OldIrql;
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+ AvailablePageThreshold = 0;
+
+ if (WsInfo == &MmSystemCacheWs) {
+ MM_SYSTEM_WS_LOCK_ASSERT();
+ AvailablePageThreshold = MM_SYSTEM_CACHE_THRESHOLD;
+ }
+
+ //
+ // Update page fault counts.
+ //
+
+ WsInfo->PageFaultCount += 1;
+ MmInfoCounters.PageFaultCount += 1;
+
+ //
+ // Determine if a page should be removed from the working set.
+ //
+
+recheck:
+
+ CurrentSize = WsInfo->WorkingSetSize;
+ ASSERT (CurrentSize <= WorkingSetList->LastInitializedWsle);
+
+ if (CurrentSize < WsInfo->MinimumWorkingSetSize) {
+
+ //
+ // Working set is below minimum, allow it to grow unconditionally.
+ //
+
+ AvailablePageThreshold = 0;
+ QuotaIncrement = 1;
+
+ } else if (WsInfo->AllowWorkingSetAdjustment == MM_FORCE_TRIM) {
+
+ //
+ // The working set manager cannot attach to this process
+ // to trim it. Force a trim now and update the working
+ // set managers fields properly to indicate a trim occurred.
+ //
+
+ MiTrimWorkingSet (20, WsInfo, TRUE);
+ KeQuerySystemTime (&CurrentTime);
+ WsInfo->LastTrimTime = CurrentTime;
+ WsInfo->LastTrimFaultCount = WsInfo->PageFaultCount;
+ LOCK_EXPANSION_IF_ALPHA (OldIrql);
+ WsInfo->AllowWorkingSetAdjustment = TRUE;
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql);
+
+ //
+ // Set the quota to the current size.
+ //
+
+ WorkingSetList->Quota = WsInfo->WorkingSetSize;
+ if (WorkingSetList->Quota < WsInfo->MinimumWorkingSetSize) {
+ WorkingSetList->Quota = WsInfo->MinimumWorkingSetSize;
+ }
+ goto recheck;
+
+ } else if (CurrentSize < WorkingSetList->Quota) {
+
+ //
+        // Working set is below quota, allow it to grow even when
+        // only a few pages are available.
+ //
+
+ AvailablePageThreshold = 10;
+ QuotaIncrement = 1;
+ } else if (CurrentSize < WsInfo->MaximumWorkingSetSize) {
+
+ //
+ // Working set is between min and max. Allow it to grow if enough
+ // faults have been taken since last adjustment.
+ //
+
+ if ((WsInfo->PageFaultCount - WsInfo->LastTrimFaultCount) <
+ MmFaultsTakenToGoAboveMinWs) {
+ AvailablePageThreshold = MmMoreThanEnoughFreePages + 200;
+ if (WsInfo->MemoryPriority == MEMORY_PRIORITY_FOREGROUND) {
+ AvailablePageThreshold -= 250;
+ }
+ } else {
+ AvailablePageThreshold = MmWsAdjustThreshold;
+ }
+ QuotaIncrement = MmWorkingSetSizeIncrement;
+ } else {
+
+ //
+ // Working set is above max.
+ //
+
+ if ((WsInfo->PageFaultCount - WsInfo->LastTrimFaultCount) <
+ (CurrentSize >> 3)) {
+            AvailablePageThreshold = MmMoreThanEnoughFreePages + 200;
+ if (WsInfo->MemoryPriority == MEMORY_PRIORITY_FOREGROUND) {
+ AvailablePageThreshold -= 250;
+ }
+ } else {
+ AvailablePageThreshold += MmWsExpandThreshold;
+ }
+ QuotaIncrement = MmWorkingSetSizeExpansion;
+
+ if (CurrentSize > MM_MAXIMUM_WORKING_SET) {
+ AvailablePageThreshold = 0xffffffff;
+ QuotaIncrement = 1;
+ }
+ }
+
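+    //
+    // If a threshold was selected above, replace an existing page
+    // rather than growing the working set when available memory is
+    // at or below the threshold or this working set is not eligible
+    // for expansion.
+    //
+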
+ if ((!WsInfo->AddressSpaceBeingDeleted) && (AvailablePageThreshold != 0)) {
+ if ((MmAvailablePages <= AvailablePageThreshold) ||
+ (WsInfo->WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION)) {
+
+ //
+ // Toss a page out of the working set.
+ //
+
+ WorkingSetIndex = WorkingSetList->NextSlot;
+ TheNextSlot = WorkingSetIndex;
+ ASSERT (WorkingSetIndex <= WorkingSetList->LastEntry);
+ ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
+ NumberOfCandidates = 0;
+
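+            //
+            // Scan the dynamic portion of the working set list in a
+            // clock-like fashion: an entry whose PTE accessed bit is
+            // clear (or any entry, once enough candidates have been
+            // examined) is eligible for removal; otherwise the
+            // accessed bit is cleared to age the entry for the next
+            // pass.
+            //
+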
+ for (; ; ) {
+
+ //
+ // Find a valid entry within the set.
+ //
+
+ WorkingSetIndex += 1;
+ if (WorkingSetIndex >= WorkingSetList->LastEntry) {
+ WorkingSetIndex = WorkingSetList->FirstDynamic;
+ }
+
+ if (Wsle[WorkingSetIndex].u1.e1.Valid != 0) {
+ PointerPte = MiGetPteAddress (
+ Wsle[WorkingSetIndex].u1.VirtualAddress);
+ if ((MI_GET_ACCESSED_IN_PTE(PointerPte) == 0) ||
+ (NumberOfCandidates > MM_WORKING_SET_LIST_SEARCH)) {
+
+ //
+ // Don't throw this guy out if he is the same one
+ // we did last time.
+ //
+
+ if ((WorkingSetIndex != TheNextSlot) &&
+ MiFreeWsle (WorkingSetIndex,
+ WsInfo,
+ PointerPte)) {
+
+ //
+ // This entry was removed.
+ //
+
+ WorkingSetList->NextSlot = WorkingSetIndex;
+ break;
+ }
+ }
+ MI_SET_ACCESSED_IN_PTE (PointerPte, 0);
+ NumberOfCandidates += 1;
+ }
+
+ if (WorkingSetIndex == TheNextSlot) {
+
+ //
+ // Entire working set list has been searched, increase
+ // the working set size.
+ //
+
+ break;
+ }
+ }
+ }
+ }
+ ASSERT (WsInfo->WorkingSetSize <= WorkingSetList->Quota);
+ WsInfo->WorkingSetSize += 1;
+
+ if (WsInfo->WorkingSetSize > WorkingSetList->Quota) {
+
+        //
+        // Increase the quota by QuotaIncrement and check boundary
+        // conditions.
+        //
+
+ WorkingSetList->Quota += QuotaIncrement;
+
+ WsInfo->LastTrimFaultCount = WsInfo->PageFaultCount;
+
+ if (WorkingSetList->Quota > WorkingSetList->LastInitializedWsle) {
+
+ //
+ // Add more pages to the working set list structure.
+ //
+
+ MiAddWorkingSetPage (WsInfo);
+ }
+ }
+
+ //
+ // Get the working set entry from the free list.
+ //
+
+ ASSERT (WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle);
+
+ WorkingSetIndex = WorkingSetList->FirstFree;
+ WorkingSetList->FirstFree = Wsle[WorkingSetIndex].u1.Long >> MM_FREE_WSLE_SHIFT;
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
+ MmPagesAboveWsMinimum += 1;
+ }
+
+ if (WsInfo->WorkingSetSize >= WsInfo->PeakWorkingSetSize) {
+ WsInfo->PeakWorkingSetSize = WsInfo->WorkingSetSize;
+ }
+
+ if (WorkingSetIndex > WorkingSetList->LastEntry) {
+ WorkingSetList->LastEntry = WorkingSetIndex;
+ }
+
+ //
+ // Mark the entry as not valid.
+ //
+
+ ASSERT (Wsle[WorkingSetIndex].u1.e1.Valid == 0);
+
+ return WorkingSetIndex;
+}
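+
+//
+// The candidate loop above is a clock-style second-chance sweep: a
+// page's accessed bit buys it one reprieve, and once more than
+// MM_WORKING_SET_LIST_SEARCH candidates have been examined any valid
+// entry may be taken. The sketch below restates that policy in
+// isolation; it is illustrative only (never compiled), and
+// MiSketchClockSweep and the TryFree callback are hypothetical names
+// standing in for the inline logic and MiFreeWsle.
+//
+#if 0 // ILLUSTRATION ONLY
+ULONG
+MiSketchClockSweep (
+    IN PMMWSL Wsl,
+    IN PMMWSLE Wsle,
+    IN ULONG (*TryFree)(ULONG Index)
+    )
+{
+    ULONG Index = Wsl->NextSlot;
+    ULONG Start = Index;
+    ULONG Candidates = 0;
+
+    for (; ; ) {
+        Index += 1;
+        if (Index >= Wsl->LastEntry) {
+            Index = Wsl->FirstDynamic;              // wrap like a clock hand
+        }
+        if (Wsle[Index].u1.e1.Valid != 0) {
+            PMMPTE Pte = MiGetPteAddress (Wsle[Index].u1.VirtualAddress);
+            if ((MI_GET_ACCESSED_IN_PTE (Pte) == 0) ||
+                (Candidates > MM_WORKING_SET_LIST_SEARCH)) {
+                if ((Index != Start) && (TryFree (Index))) {
+                    Wsl->NextSlot = Index;          // remember the hand position
+                    return Index;
+                }
+            }
+            MI_SET_ACCESSED_IN_PTE (Pte, 0);        // give a second chance
+            Candidates += 1;
+        }
+        if (Index == Start) {
+            return WSLE_NULL_INDEX;                 // full revolution, nothing freed
+        }
+    }
+}
+#endif // ILLUSTRATION ONLY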
+
+ULONG
+MiRemovePageFromWorkingSet (
+ IN PMMPTE PointerPte,
+ IN PMMPFN Pfn1,
+ IN PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes the page mapped by the specified PTE from
+ the process's working set list.
+
+Arguments:
+
+ PointerPte - Supplies a pointer to the PTE mapping the page to
+ be removed from the working set list.
+
+    Pfn1 - Supplies a pointer to the PFN database element referred to
+           by the PointerPte.
+
+    WsInfo - Supplies a pointer to the working set info block for the
+             process (or system cache).
+
+Return Value:
+
+ Returns TRUE if the specified page was locked in the working set,
+ FALSE otherwise.
+
+Environment:
+
+    Kernel mode, APC's disabled, working set mutex held. The PFN
+    lock is acquired and released within this routine.
+
+--*/
+
+{
+ ULONG WorkingSetIndex;
+ PVOID VirtualAddress;
+ ULONG Entry;
+ PVOID SwapVa;
+ MMWSLENTRY Locked;
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+ KIRQL OldIrql;
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+ VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
+ WorkingSetIndex = MiLocateWsle (VirtualAddress,
+ WorkingSetList,
+ Pfn1->u1.WsIndex);
+
+ ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);
+ LOCK_PFN (OldIrql);
+ MiEliminateWorkingSetEntry (WorkingSetIndex,
+ PointerPte,
+ Pfn1,
+ Wsle);
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Check to see if this entry is locked in the working set
+ // or locked in memory.
+ //
+
+ Locked = Wsle[WorkingSetIndex].u1.e1;
+ MiRemoveWsle (WorkingSetIndex, WorkingSetList);
+
+ //
+ // Add this entry to the list of free working set entries
+ // and adjust the working set count.
+ //
+
+ MiReleaseWsle ((ULONG)WorkingSetIndex, WsInfo);
+
+ if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {
+
+ //
+ // This entry is locked.
+ //
+
+ WorkingSetList->FirstDynamic -= 1;
+
+ if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
+
+ SwapVa = Wsle[WorkingSetList->FirstDynamic].u1.VirtualAddress;
+ SwapVa = PAGE_ALIGN (SwapVa);
+
+ PointerPte = MiGetPteAddress (SwapVa);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+#if 0
+ Entry = MiLocateWsleAndParent (SwapVa,
+ &Parent,
+ WorkingSetList,
+ Pfn1->u1.WsIndex);
+
+ //
+ // Swap the removed entry with the last locked entry
+ // which is located at first dynamic.
+ //
+
+ MiSwapWslEntries (Entry, Parent, WorkingSetIndex, WorkingSetList);
+#endif //0
+
+ Entry = MiLocateWsle (SwapVa, WorkingSetList, Pfn1->u1.WsIndex);
+
+ MiSwapWslEntries (Entry, WorkingSetIndex, WsInfo);
+
+ }
+ return TRUE;
+ } else {
+ ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
+ }
+ return FALSE;
+}
+
+
+VOID
+MiReleaseWsle (
+ IN ULONG WorkingSetIndex,
+ IN PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases a previously reserved working set entry to
+ be reused. A release occurs when a page fault is retried due to
+ changes in PTEs and working sets during an I/O operation.
+
+Arguments:
+
+ WorkingSetIndex - Supplies the index of the working set entry to
+ release.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set lock held and PFN lock held.
+
+--*/
+
+{
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+#if DBG
+ if (WsInfo == &MmSystemCacheWs) {
+ MM_SYSTEM_WS_LOCK_ASSERT();
+ }
+#endif //DBG
+
+ ASSERT (WorkingSetIndex <= WorkingSetList->LastInitializedWsle);
+
+ //
+ // Put the entry on the free list and decrement the current
+ // size.
+ //
+
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+ Wsle[WorkingSetIndex].u1.Long = WorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
+ WorkingSetList->FirstFree = WorkingSetIndex;
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+ if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
+ MmPagesAboveWsMinimum -= 1;
+ }
+ WsInfo->WorkingSetSize -= 1;
+ return;
+
+}
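+
+//
+// Free WSLEs form a singly linked list threaded through u1.Long: each
+// free entry holds the index of the next free entry shifted left by
+// MM_FREE_WSLE_SHIFT (which leaves the Valid bit clear), and the list
+// is terminated by WSLE_NULL_INDEX. A sketch of the push (release) and
+// pop (reserve) operations used throughout this module; illustrative
+// only (never compiled), and the MiSketch* names are hypothetical.
+//
+#if 0 // ILLUSTRATION ONLY
+ULONG
+MiSketchPopFreeWsle (
+    IN PMMWSL Wsl,
+    IN PMMWSLE Wsle
+    )
+{
+    ULONG Index = Wsl->FirstFree;                   // head of the free list
+
+    Wsl->FirstFree = Wsle[Index].u1.Long >> MM_FREE_WSLE_SHIFT;
+    return Index;
+}
+
+VOID
+MiSketchPushFreeWsle (
+    IN PMMWSL Wsl,
+    IN PMMWSLE Wsle,
+    IN ULONG Index
+    )
+{
+    Wsle[Index].u1.Long = Wsl->FirstFree << MM_FREE_WSLE_SHIFT;
+    Wsl->FirstFree = Index;                         // new head of the free list
+}
+#endif // ILLUSTRATION ONLY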
+
+VOID
+MiUpdateWsle (
+ IN OUT PULONG DesiredIndex,
+ IN PVOID VirtualAddress,
+    IN PMMWSL WorkingSetList,
+ IN PMMPFN Pfn
+ )
+
+/*++
+
+Routine Description:
+
+ This routine updates a reserved working set entry to place it into
+ the valid state.
+
+Arguments:
+
+    DesiredIndex - Supplies a pointer to the index of the working set
+                   entry to update. On return, it receives the index the
+                   entry finally occupies, which may differ if the entry
+                   was swapped to another slot.
+
+ VirtualAddress - Supplies the virtual address which the working set
+ entry maps.
+
+    WorkingSetList - Supplies a pointer to the working set list for the
+                     process (or system cache).
+
+ Pfn - Supplies a pointer to the PFN element for the page.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set lock held and PFN lock held.
+
+--*/
+
+{
+ PMMWSLE Wsle;
+ ULONG Index;
+ ULONG WorkingSetIndex;
+
+ WorkingSetIndex = *DesiredIndex;
+ Wsle = WorkingSetList->Wsle;
+
+#if DBG
+ if (WorkingSetList == MmSystemCacheWorkingSetList) {
+ ASSERT ((VirtualAddress < (PVOID)PTE_BASE) ||
+ (VirtualAddress >= (PVOID)MM_SYSTEM_SPACE_START));
+ } else {
+ ASSERT (VirtualAddress < (PVOID)MM_SYSTEM_SPACE_START);
+ }
+ ASSERT (WorkingSetIndex >= WorkingSetList->FirstDynamic);
+#endif //DBG
+
+ if (WorkingSetList == MmSystemCacheWorkingSetList) {
+
+ MM_SYSTEM_WS_LOCK_ASSERT();
+
+ //
+        // Count system space inserts and removals.
+ //
+
+ if (VirtualAddress < (PVOID)MM_SYSTEM_CACHE_START) {
+ MmSystemCodePage += 1;
+ } else if (VirtualAddress < MM_PAGED_POOL_START) {
+ MmSystemCachePage += 1;
+ } else if (VirtualAddress < MmNonPagedSystemStart) {
+ MmPagedPoolPage += 1;
+ } else {
+ MmSystemDriverPage += 1;
+ }
+ }
+
+ //
+ // Make the wsle valid, referring to the corresponding virtual
+ // page number.
+ //
+
+    //
+    // The value 0 is invalid. Because the working set lock is a
+    // per-process lock, two threads in different processes could be
+    // adding the same physical page to their working sets at the same
+    // time. Each could see the WsIndex field in the PFN as 0 and set
+    // the direct bit. To solve this, the WsIndex field is set to the
+    // current thread pointer instead.
+    //
+
+ ASSERT (Pfn->u1.WsIndex != 0);
+
+#if DBG
+ if (Pfn->u1.WsIndex <= WorkingSetList->LastInitializedWsle) {
+ ASSERT ((PAGE_ALIGN(VirtualAddress) !=
+ PAGE_ALIGN(Wsle[Pfn->u1.WsIndex].u1.VirtualAddress)) ||
+ (Wsle[Pfn->u1.WsIndex].u1.e1.Valid == 0));
+ }
+#endif //DBG
+
+ Wsle[WorkingSetIndex].u1.VirtualAddress = VirtualAddress;
+ Wsle[WorkingSetIndex].u1.Long &= ~(PAGE_SIZE - 1);
+ Wsle[WorkingSetIndex].u1.e1.Valid = 1;
+
+ if (Pfn->u1.WsIndex == (ULONG)PsGetCurrentThread()) {
+
+ //
+ // Directly index into the WSL for this entry via the PFN database
+ // element.
+ //
+
+ Pfn->u1.WsIndex = WorkingSetIndex;
+ Wsle[WorkingSetIndex].u1.e1.Direct = 1;
+ return;
+
+ } else if (WorkingSetList->HashTable == NULL) {
+
+ //
+ // Try to insert at WsIndex.
+ //
+
+ Index = Pfn->u1.WsIndex;
+
+ if ((Index < WorkingSetList->LastInitializedWsle) &&
+ (Index > WorkingSetList->FirstDynamic) &&
+ (Index != WorkingSetIndex)) {
+
+ if (Wsle[Index].u1.e1.Valid) {
+
+ if (Wsle[Index].u1.e1.Direct) {
+
+ //
+ // Only move direct indexed entries.
+ //
+
+ PMMSUPPORT WsInfo;
+
+ if (Wsle == MmSystemCacheWsle) {
+ WsInfo = &MmSystemCacheWs;
+ } else {
+ WsInfo = &PsGetCurrentProcess()->Vm;
+ }
+
+ MiSwapWslEntries (Index, WorkingSetIndex, WsInfo);
+ WorkingSetIndex = Index;
+ }
+ } else {
+
+                //
+                // The entry at Index is on the free list; try to unlink
+                // it quickly without walking the whole free list.
+                //
+
+ ULONG FreeIndex;
+ MMWSLE Temp;
+
+ FreeIndex = 0;
+
+ if (WorkingSetList->FirstFree == Index) {
+ WorkingSetList->FirstFree = WorkingSetIndex;
+ Temp = Wsle[WorkingSetIndex];
+ Wsle[WorkingSetIndex] = Wsle[Index];
+ Wsle[Index] = Temp;
+ WorkingSetIndex = Index;
+ ASSERT (((Wsle[WorkingSetList->FirstFree].u1.Long >> MM_FREE_WSLE_SHIFT)
+ <= WorkingSetList->LastInitializedWsle) ||
+ ((Wsle[WorkingSetList->FirstFree].u1.Long >> MM_FREE_WSLE_SHIFT)
+ == WSLE_NULL_INDEX));
+ } else if (Wsle[Index - 1].u1.e1.Valid == 0) {
+ if ((Wsle[Index - 1].u1.Long >> MM_FREE_WSLE_SHIFT) == Index) {
+ FreeIndex = Index - 1;
+ }
+ } else if (Wsle[Index + 1].u1.e1.Valid == 0) {
+ if ((Wsle[Index + 1].u1.Long >> MM_FREE_WSLE_SHIFT) == Index) {
+ FreeIndex = Index + 1;
+ }
+ }
+ if (FreeIndex != 0) {
+
+ //
+ // Link the Wsle into the free list.
+ //
+
+ Temp = Wsle[WorkingSetIndex];
+ Wsle[FreeIndex].u1.Long = WorkingSetIndex << MM_FREE_WSLE_SHIFT;
+ Wsle[WorkingSetIndex] = Wsle[Index];
+ Wsle[Index] = Temp;
+ WorkingSetIndex = Index;
+
+ ASSERT (((Wsle[FreeIndex].u1.Long >> MM_FREE_WSLE_SHIFT)
+ <= WorkingSetList->LastInitializedWsle) ||
+ ((Wsle[FreeIndex].u1.Long >> MM_FREE_WSLE_SHIFT)
+ == WSLE_NULL_INDEX));
+ }
+
+ }
+ *DesiredIndex = WorkingSetIndex;
+
+ if (WorkingSetIndex > WorkingSetList->LastEntry) {
+ WorkingSetList->LastEntry = WorkingSetIndex;
+ }
+ }
+ }
+
+ //
+ // Insert the valid WSLE into the working set list tree.
+ //
+
+ MiInsertWsle (WorkingSetIndex, WorkingSetList);
+ return;
+}
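+
+//
+// The Direct optimization above works in two steps: when a physical
+// page is made valid, its PFN WsIndex field is stamped with the
+// current thread pointer (a value that can never collide with a small
+// WSLE index); if the same thread reaches MiUpdateWsle with the stamp
+// intact, no other thread raced in, so the PFN can point straight at
+// the WSLE and the entry never needs the hash table or tree. A
+// condensed sketch; illustrative only (never compiled), and
+// MiSketchDirectIndex is a hypothetical name.
+//
+#if 0 // ILLUSTRATION ONLY
+VOID
+MiSketchDirectIndex (
+    IN PMMPFN Pfn,
+    IN PMMWSL WorkingSetList,
+    IN PMMWSLE Wsle,
+    IN ULONG WorkingSetIndex
+    )
+{
+    if (Pfn->u1.WsIndex == (ULONG)PsGetCurrentThread ()) {
+
+        //
+        // No race occurred: store the PFN -> WSLE shortcut so lookups
+        // can skip the hash table and tree entirely.
+        //
+
+        Pfn->u1.WsIndex = WorkingSetIndex;
+        Wsle[WorkingSetIndex].u1.e1.Direct = 1;
+    } else {
+
+        //
+        // Another thread saw the page first; this entry must be
+        // findable by virtual address, so insert it into the tree.
+        //
+
+        MiInsertWsle (WorkingSetIndex, WorkingSetList);
+    }
+}
+#endif // ILLUSTRATION ONLY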
+
+
+#if 0 //COMMENTED OUT!!!
+ULONG
+MiGetFirstFreeWsle (
+ IN PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes the first entry from the WSLE free list and
+ updates the WSLIST structures.
+
+ NOTE: There must be an element on the free list!
+
+Arguments:
+
+    WsInfo - Supplies a pointer to the working set info block for the
+ process (or system cache).
+
+Return Value:
+
+ Free WSLE.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set lock held.
+
+--*/
+
+{
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+ ULONG WorkingSetIndex;
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+ //
+ // Get the working set entry from the free list.
+ //
+
+ ASSERT (WorkingSetList->FirstFree != WSLE_NULL_INDEX);
+
+ WorkingSetIndex = WorkingSetList->FirstFree;
+ WorkingSetList->FirstFree = Wsle[WorkingSetIndex].u1.Long >> MM_FREE_WSLE_SHIFT;
+
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ WsInfo->WorkingSetSize += 1;
+
+ if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
+ MmPagesAboveWsMinimum += 1;
+ }
+
+ if (WsInfo->WorkingSetSize >= WsInfo->PeakWorkingSetSize) {
+ WsInfo->PeakWorkingSetSize = WsInfo->WorkingSetSize;
+ }
+
+ if (WorkingSetIndex > WorkingSetList->LastEntry) {
+ WorkingSetList->LastEntry = WorkingSetIndex;
+ }
+
+ if (WsInfo->WorkingSetSize > WorkingSetList->Quota) {
+ WorkingSetList->Quota = WsInfo->WorkingSetSize;
+ }
+
+ //
+ // Mark the entry as not valid.
+ //
+
+ ASSERT (Wsle[WorkingSetIndex].u1.e1.Valid == 0);
+
+ return WorkingSetIndex;
+}
+#endif //0 COMMENTED OUT!!!
+
+VOID
+MiTakePageFromWorkingSet (
+ IN ULONG Entry,
+ IN PMMSUPPORT WsInfo,
+ IN PMMPTE PointerPte
+ )
+
+/*++
+
+Routine Description:
+
+    This routine is a wrapper for MiFreeWsle (which acquires the PFN
+    lock internally) so that pagable code can remove a working set entry.
+
+Arguments:
+
+    Same as MiFreeWsle.
+
+Return Value:
+
+    Same as MiFreeWsle.
+
+Environment:
+
+ Kernel mode, PFN lock NOT held, working set lock held.
+
+--*/
+
+{
+ KIRQL OldIrql;
+    // fixfix: is this still needed?
+ MiFreeWsle (Entry, WsInfo, PointerPte);
+ return;
+}
+
+ULONG
+MiFreeWsle (
+ IN ULONG WorkingSetIndex,
+ IN PMMSUPPORT WsInfo,
+ IN PMMPTE PointerPte
+ )
+
+/*++
+
+Routine Description:
+
+ This routine frees the specified WSLE and decrements the share
+ count for the corresponding page, putting the PTE into a transition
+ state if the share count goes to 0.
+
+Arguments:
+
+ WorkingSetIndex - Supplies the index of the working set entry to free.
+
+ WsInfo - Supplies a pointer to the working set structure (process or
+ system cache).
+
+ PointerPte - Supplies a pointer to the PTE for the working set entry.
+
+Return Value:
+
+ Returns TRUE if the WSLE was removed, FALSE if it was not removed.
+ Pages with valid PTEs are not removed (i.e. page table pages
+ that contain valid or transition PTEs).
+
+Environment:
+
+    Kernel mode, APC's disabled, working set lock held. PFN lock NOT held.
+
+--*/
+
+{
+ PMMPFN Pfn1;
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+ KIRQL OldIrql;
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+#if DBG
+ if (WsInfo == &MmSystemCacheWs) {
+ MM_SYSTEM_WS_LOCK_ASSERT();
+ }
+#endif //DBG
+
+ ASSERT (Wsle[WorkingSetIndex].u1.e1.Valid == 1);
+
+ //
+    // Check to see that the located entry is eligible for removal.
+ //
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+
+ //
+ // Check to see if this is a page table with valid PTEs.
+ //
+ // Note, don't clear the access bit for page table pages
+ // with valid PTEs as this could cause an access trap fault which
+ // would not be handled (it is only handled for PTEs not PDEs).
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+ LOCK_PFN (OldIrql);
+
+ //
+    //
+    // If the page is a page table page with a non-zero share count, or
+    // is within the system cache with a reference count greater than
+    // one, don't remove it.
+    //
+
+ if (WsInfo == &MmSystemCacheWs) {
+ if (Pfn1->u3.e2.ReferenceCount > 1) {
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+ }
+ } else {
+ if ((Pfn1->u2.ShareCount > 1) &&
+ (Pfn1->u3.e1.PrototypePte == 0)) {
+
+            ASSERT ((Wsle[WorkingSetIndex].u1.VirtualAddress >= (PVOID)PTE_BASE) &&
+                    (Wsle[WorkingSetIndex].u1.VirtualAddress <= (PVOID)PDE_TOP));
+
+ //
+ // Don't remove page table pages from the working set until
+ // all transition pages have exited.
+ //
+
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+ }
+ }
+
+ //
+ // Found a candidate, remove the page from the working set.
+ //
+
+ MiEliminateWorkingSetEntry (WorkingSetIndex,
+ PointerPte,
+ Pfn1,
+ Wsle);
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Remove the working set entry from the working set tree.
+ //
+
+ MiRemoveWsle (WorkingSetIndex, WorkingSetList);
+
+ //
+ // Put the entry on the free list and decrement the current
+ // size.
+ //
+
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+ Wsle[WorkingSetIndex].u1.Long = WorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
+ WorkingSetList->FirstFree = WorkingSetIndex;
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
+ MmPagesAboveWsMinimum -= 1;
+ }
+ WsInfo->WorkingSetSize -= 1;
+
+#if 0
+ if ((WsInfo == &MmSystemCacheWs) &&
+ (Pfn1->u3.e1.Modified == 1)) {
+ MiDumpWsleInCacheBlock (PointerPte);
+ }
+#endif //0
+ return TRUE;
+}
+
+VOID
+MiInitializeWorkingSetList (
+ IN PEPROCESS CurrentProcess
+ )
+
+/*++
+
+Routine Description:
+
+ This routine initializes a process's working set to the empty
+ state.
+
+Arguments:
+
+ CurrentProcess - Supplies a pointer to the process to initialize.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled.
+
+--*/
+
+{
+ ULONG i;
+ PMMWSLE WslEntry;
+ ULONG CurrentEntry;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ ULONG NumberOfEntriesMapped;
+ ULONG CurrentVa;
+ ULONG WorkingSetPage;
+ MMPTE TempPte;
+ KIRQL OldIrql;
+
+ WslEntry = MmWsle;
+
+ //
+ // Initialize the temporary double mapping portion of hyperspace, if
+ // it has not already been done.
+ //
+ // Initialize the working set list control cells.
+ //
+
+ MmWorkingSetList->LastEntry = CurrentProcess->Vm.MinimumWorkingSetSize;
+ MmWorkingSetList->Quota = MmWorkingSetList->LastEntry;
+ MmWorkingSetList->WaitingForImageMapping = (PKEVENT)NULL;
+ MmWorkingSetList->HashTable = NULL;
+ MmWorkingSetList->HashTableSize = 0;
+ MmWorkingSetList->Wsle = MmWsle;
+
+ //
+ // Fill in the reserved slots.
+ //
+
+ WslEntry->u1.Long = PDE_BASE;
+ WslEntry->u1.e1.Valid = 1;
+ WslEntry->u1.e1.LockedInWs = 1;
+ WslEntry->u1.e1.Direct = 1;
+
+ PointerPte = MiGetPteAddress (WslEntry->u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+ Pfn1->u1.WsIndex = (ULONG)CurrentProcess;
+
+    //
+    // As this WSLE index is 0 (and a WsIndex of 0 means invalid), leave
+    // the process pointer stored above in the WsIndex field rather than
+    // storing another zero.
+    //
+
+ // don't put it in the list. MiInsertWsle(0, MmWorkingSetList);
+
+ //
+ // Fill in page table page which maps hyper space.
+ //
+
+ WslEntry += 1;
+
+ WslEntry->u1.VirtualAddress = (PVOID)MiGetPteAddress (HYPER_SPACE);
+ WslEntry->u1.e1.Valid = 1;
+ WslEntry->u1.e1.LockedInWs = 1;
+ WslEntry->u1.e1.Direct = 1;
+
+ PointerPte = MiGetPteAddress (WslEntry->u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+ ASSERT (Pfn1->u1.WsIndex == 0);
+ Pfn1->u1.WsIndex = 1;
+
+ // MiInsertWsle(1, MmWorkingSetList);
+
+ //
+ // Fill in page which contains the working set list.
+ //
+
+ WslEntry += 1;
+
+ WslEntry->u1.VirtualAddress = (PVOID)MmWorkingSetList;
+ WslEntry->u1.e1.Valid = 1;
+ WslEntry->u1.e1.LockedInWs = 1;
+ WslEntry->u1.e1.Direct = 1;
+
+ PointerPte = MiGetPteAddress (WslEntry->u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+ ASSERT (Pfn1->u1.WsIndex == 0);
+ Pfn1->u1.WsIndex = 2;
+
+ // MiInsertWsle(2, MmWorkingSetList);
+
+ CurrentEntry = 3;
+
+ //
+ // Check to see if more pages are required in the working set list
+ // to map the current maximum working set size.
+ //
+
+ NumberOfEntriesMapped = ((PMMWSLE)((ULONG)WORKING_SET_LIST + PAGE_SIZE)) -
+ MmWsle;
+
+ if (CurrentProcess->Vm.MaximumWorkingSetSize >= NumberOfEntriesMapped) {
+
+ PointerPte = MiGetPteAddress (&MmWsle[0]);
+
+ CurrentVa = (ULONG)MmWorkingSetList + PAGE_SIZE;
+
+ //
+ // The working set requires more than a single page.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ do {
+
+ MiEnsureAvailablePageOrWait (NULL, NULL);
+
+ PointerPte += 1;
+ WorkingSetPage = MiRemoveZeroPage (
+ MI_PAGE_COLOR_PTE_PROCESS (PointerPte,
+ &CurrentProcess->NextPageColor));
+ PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+
+ MiInitializePfn (WorkingSetPage, PointerPte, 1);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ WorkingSetPage,
+ MM_READWRITE,
+ PointerPte );
+
+ MI_SET_PTE_DIRTY (TempPte);
+ *PointerPte = TempPte;
+
+ WslEntry += 1;
+
+ WslEntry->u1.Long = CurrentVa;
+ WslEntry->u1.e1.Valid = 1;
+ WslEntry->u1.e1.LockedInWs = 1;
+ WslEntry->u1.e1.Direct = 1;
+
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+
+ ASSERT (Pfn1->u1.WsIndex == 0);
+ Pfn1->u1.WsIndex = CurrentEntry;
+
+ // MiInsertWsle(CurrentEntry, MmWorkingSetList);
+
+ CurrentEntry += 1;
+ CurrentVa += PAGE_SIZE;
+
+ NumberOfEntriesMapped += PAGE_SIZE / sizeof(MMWSLE);
+
+ } while (CurrentProcess->Vm.MaximumWorkingSetSize >= NumberOfEntriesMapped);
+
+ UNLOCK_PFN (OldIrql);
+ }
+
+ CurrentProcess->Vm.WorkingSetSize = CurrentEntry;
+ MmWorkingSetList->FirstFree = CurrentEntry;
+ MmWorkingSetList->FirstDynamic = CurrentEntry;
+ MmWorkingSetList->NextSlot = CurrentEntry;
+
+ //
+ // Initialize the following slots as free.
+ //
+
+ i = CurrentEntry + 1;
+ do {
+
+ //
+ // Build the free list, note that the first working
+ // set entries (CurrentEntry) are not on the free list.
+ // These entries are reserved for the pages which
+ // map the working set and the page which contains the PDE.
+ //
+
+ WslEntry += 1;
+ WslEntry->u1.Long = i << MM_FREE_WSLE_SHIFT;
+ i++;
+ } while (i <= NumberOfEntriesMapped);
+
+ WslEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; // End of list.
+
+ MmWorkingSetList->LastInitializedWsle =
+ NumberOfEntriesMapped - 1;
+
+ if (CurrentProcess->Vm.MaximumWorkingSetSize > ((1536*1024) >> PAGE_SHIFT)) {
+
+ //
+ // The working set list consists of more than a single page.
+ //
+
+ MiGrowWsleHash (&CurrentProcess->Vm, FALSE);
+ }
+
+ return;
+}
+
+NTSTATUS
+MmAdjustWorkingSetSize (
+ IN ULONG WorkingSetMinimum,
+ IN ULONG WorkingSetMaximum,
+ IN ULONG SystemCache
+ )
+
+/*++
+
+Routine Description:
+
+    This routine adjusts the current size of a process's working set
+    list. If the new maximum is below the current working set size,
+    pages are removed from the working set list.
+
+ An exception is raised if the limit cannot be granted. This
+ could occur if too many pages were locked in the process's
+ working set.
+
+ Note: if the minimum and maximum are both 0xffffffff, the working set
+ is purged, but the default sizes are not changed.
+
+Arguments:
+
+    WorkingSetMinimum - Supplies the new minimum working set size in bytes.
+
+    WorkingSetMaximum - Supplies the new maximum working set size in bytes.
+
+    SystemCache - Supplies TRUE if the system cache working set is being
+                  adjusted, FALSE if it is the current process's working set.
+
+Return Value:
+
+    NTSTATUS - STATUS_SUCCESS if the limits were applied, otherwise a
+    status value indicating why the request could not be satisfied.
+
+Environment:
+
+ Kernel mode, IRQL 0 or APC_LEVEL.
+
+--*/
+
+{
+ PEPROCESS CurrentProcess;
+ ULONG Entry;
+ ULONG SwapEntry;
+ ULONG CurrentEntry;
+ ULONG LastFreed;
+ PMMWSLE WslEntry;
+ PMMWSLE Wsle;
+ KIRQL OldIrql;
+ KIRQL OldIrql2;
+ LONG i;
+ PMMPTE PointerPte;
+ PMMPTE Va;
+ ULONG NumberOfEntriesMapped;
+ NTSTATUS ReturnStatus;
+ PMMPFN Pfn1;
+ LONG PagesAbove;
+ LONG NewPagesAbove;
+ ULONG FreeTryCount = 0;
+ PMMSUPPORT WsInfo;
+    PMMWSL WorkingSetList;
+
+ //
+ // Get the working set lock and disable APCs.
+ //
+
+ if (SystemCache) {
+ WsInfo = &MmSystemCacheWs;
+ } else {
+ CurrentProcess = PsGetCurrentProcess ();
+ WsInfo = &CurrentProcess->Vm;
+ }
+
+ if (WorkingSetMinimum == 0) {
+ WorkingSetMinimum = WsInfo->MinimumWorkingSetSize;
+ }
+
+ if (WorkingSetMaximum == 0) {
+ WorkingSetMaximum = WsInfo->MaximumWorkingSetSize;
+ }
+
+ if ((WorkingSetMinimum == 0xFFFFFFFF) &&
+ (WorkingSetMaximum == 0xFFFFFFFF)) {
+ return MiEmptyWorkingSet (WsInfo);
+ }
+
+ WorkingSetMinimum = WorkingSetMinimum >> PAGE_SHIFT;
+ WorkingSetMaximum = WorkingSetMaximum >> PAGE_SHIFT;
+
+ if (WorkingSetMinimum > WorkingSetMaximum) {
+ return STATUS_BAD_WORKING_SET_LIMIT;
+ }
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
+ ReturnStatus = STATUS_SUCCESS;
+
+ if (SystemCache) {
+ LOCK_SYSTEM_WS (OldIrql2);
+ } else {
+ LOCK_WS (CurrentProcess);
+ }
+
+ if (WorkingSetMaximum > MmMaximumWorkingSetSize) {
+ WorkingSetMaximum = MmMaximumWorkingSetSize;
+ ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE;
+ }
+
+ if (WorkingSetMinimum > MmMaximumWorkingSetSize) {
+ WorkingSetMinimum = MmMaximumWorkingSetSize;
+ ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE;
+ }
+
+ if (WorkingSetMinimum < MmMinimumWorkingSetSize) {
+ WorkingSetMinimum = MmMinimumWorkingSetSize;
+ ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE;
+ }
+
+ //
+ // Make sure that the number of locked pages will not
+    // leave the working set without enough fluid pages.
+ //
+
+ if ((WsInfo->VmWorkingSetList->FirstDynamic + MM_FLUID_WORKING_SET) >=
+ WorkingSetMaximum) {
+ ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT;
+ goto Returns;
+ }
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+ //
+    // Check to make sure ample resident physical pages exist for
+ // this operation.
+ //
+
+ LOCK_PFN (OldIrql);
+
+ i = WorkingSetMinimum - WsInfo->MinimumWorkingSetSize;
+
+ if (i > 0) {
+
+ //
+ // New minimum working set is greater than the old one.
+ //
+
+ if ((MmResidentAvailablePages < i) ||
+ (MmAvailablePages < (20 + (i / (PAGE_SIZE / sizeof (MMWSLE)))))) {
+ UNLOCK_PFN (OldIrql);
+ ReturnStatus = STATUS_INSUFFICIENT_RESOURCES;
+ goto Returns;
+ }
+ }
+
+ //
+    // Adjust the number of resident pages up or down depending on
+    // the size of the new minimum working set size versus the previous
+    // minimum size.
+ //
+
+ MmResidentAvailablePages -= i;
+
+ UNLOCK_PFN (OldIrql);
+
+ if (WsInfo->AllowWorkingSetAdjustment == FALSE) {
+ MmAllowWorkingSetExpansion ();
+ }
+
+ if (WorkingSetMaximum > WorkingSetList->LastInitializedWsle) {
+
+ do {
+
+ //
+ // The maximum size of the working set is being increased, check
+ // to ensure the proper number of pages are mapped to cover
+ // the complete working set list.
+ //
+
+ if (!MiAddWorkingSetPage (WsInfo)) {
+ WorkingSetMaximum = WorkingSetList->LastInitializedWsle - 1;
+ break;
+ }
+ } while (WorkingSetMaximum > WorkingSetList->LastInitializedWsle);
+
+ } else {
+
+ //
+ // The new working set maximum is less than the current working set
+ // maximum.
+ //
+
+ if (WsInfo->WorkingSetSize > WorkingSetMaximum) {
+
+ //
+ // Remove some pages from the working set.
+ //
+
+ //
+ // Make sure that the number of locked pages will not
+            // leave the working set without enough fluid pages.
+ //
+
+ if ((WorkingSetList->FirstDynamic + MM_FLUID_WORKING_SET) >=
+ WorkingSetMaximum) {
+
+ ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT;
+ goto Returns;
+ }
+
+ //
+ // Attempt to remove the pages from the Maximum downward.
+ //
+
+ LastFreed = WorkingSetList->LastEntry;
+ if (WorkingSetList->LastEntry > WorkingSetMaximum) {
+
+ while (LastFreed >= WorkingSetMaximum) {
+
+ PointerPte = MiGetPteAddress(
+ Wsle[LastFreed].u1.VirtualAddress);
+
+ if ((Wsle[LastFreed].u1.e1.Valid != 0) &&
+ (!MiFreeWsle (LastFreed,
+ WsInfo,
+ PointerPte))) {
+
+ //
+                    // The entry at LastFreed could not be removed.
+ //
+
+ break;
+ }
+ LastFreed -= 1;
+ }
+ WorkingSetList->LastEntry = LastFreed;
+ if (WorkingSetList->NextSlot >= LastFreed) {
+ WorkingSetList->NextSlot = WorkingSetList->FirstDynamic;
+ }
+ }
+
+ //
+ // Remove pages.
+ //
+
+ Entry = WorkingSetList->FirstDynamic;
+
+ while (WsInfo->WorkingSetSize > WorkingSetMaximum) {
+ if (Wsle[Entry].u1.e1.Valid != 0) {
+ PointerPte = MiGetPteAddress (
+ Wsle[Entry].u1.VirtualAddress);
+ MiFreeWsle (Entry, WsInfo, PointerPte);
+ }
+ Entry += 1;
+ if (Entry > LastFreed) {
+ FreeTryCount += 1;
+ if (FreeTryCount > MM_RETRY_COUNT) {
+
+ //
+ // Page table pages are not becoming free, give up
+ // and return an error.
+ //
+
+ ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT;
+
+ break;
+ }
+ Entry = WorkingSetList->FirstDynamic;
+ }
+ }
+
+ if (FreeTryCount <= MM_RETRY_COUNT) {
+ WorkingSetList->Quota = WorkingSetMaximum;
+ }
+ }
+ }
+
+ //
+ // Adjust the number of pages above the working set minimum.
+ //
+
+ PagesAbove = (LONG)WsInfo->WorkingSetSize -
+ (LONG)WsInfo->MinimumWorkingSetSize;
+ NewPagesAbove = (LONG)WsInfo->WorkingSetSize -
+ (LONG)WorkingSetMinimum;
+
+ LOCK_PFN (OldIrql);
+ if (PagesAbove > 0) {
+ MmPagesAboveWsMinimum -= (ULONG)PagesAbove;
+ }
+ if (NewPagesAbove > 0) {
+ MmPagesAboveWsMinimum += (ULONG)NewPagesAbove;
+ }
+ UNLOCK_PFN (OldIrql);
+
+ if (FreeTryCount <= MM_RETRY_COUNT) {
+ WsInfo->MaximumWorkingSetSize = WorkingSetMaximum;
+ WsInfo->MinimumWorkingSetSize = WorkingSetMinimum;
+
+ if (WorkingSetMinimum >= WorkingSetList->Quota) {
+ WorkingSetList->Quota = WorkingSetMinimum;
+ }
+ }
+
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ if ((WorkingSetList->HashTable == NULL) &&
+ (WsInfo->MaximumWorkingSetSize > ((1536*1024) >> PAGE_SHIFT))) {
+
+ //
+ // The working set list consists of more than a single page.
+ //
+
+ MiGrowWsleHash (WsInfo, FALSE);
+ }
+
+Returns:
+
+ if (SystemCache) {
+ UNLOCK_SYSTEM_WS (OldIrql2);
+ } else {
+ UNLOCK_WS (CurrentProcess);
+ }
+
+ MmUnlockPagableImageSection(ExPageLockHandle);
+
+ return ReturnStatus;
+}
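+
+//
+// Callers pass the limits in bytes; a value of 0 keeps the current
+// limit, and the pair (0xFFFFFFFF, 0xFFFFFFFF) empties the working set
+// without changing the limits. A hypothetical caller; illustrative
+// only (never compiled), and the byte values are arbitrary.
+//
+#if 0 // ILLUSTRATION ONLY
+VOID
+MiSketchAdjustCallers (
+    VOID
+    )
+{
+    //
+    // Raise the current process to a 1MB minimum / 4MB maximum.
+    //
+
+    MmAdjustWorkingSetSize (1024 * 1024, 4 * 1024 * 1024, FALSE);
+
+    //
+    // Purge the system cache working set, leaving its limits alone.
+    //
+
+    MmAdjustWorkingSetSize (0xFFFFFFFF, 0xFFFFFFFF, TRUE);
+}
+#endif // ILLUSTRATION ONLY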
+
+ULONG
+MiAddWorkingSetPage (
+ IN PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This function grows the working set list above working set
+ maximum during working set adjustment. At most one page
+ can be added at a time.
+
+Arguments:
+
+    WsInfo - Supplies a pointer to the working set info block for the
+             process (or system cache) whose working set list is to grow.
+
+Return Value:
+
+ Returns FALSE if no working set page could be added.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set mutexes held.
+
+--*/
+
+{
+ ULONG SwapEntry;
+ ULONG CurrentEntry;
+ PMMWSLE WslEntry;
+ ULONG i;
+ PMMPTE PointerPte;
+ PMMPTE Va;
+ MMPTE TempPte;
+ ULONG NumberOfEntriesMapped;
+ ULONG WorkingSetPage;
+ ULONG WorkingSetIndex;
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+#if DBG
+ if (WsInfo == &MmSystemCacheWs) {
+ MM_SYSTEM_WS_LOCK_ASSERT();
+ }
+#endif //DBG
+
+ //
+ // The maximum size of the working set is being increased, check
+ // to ensure the proper number of pages are mapped to cover
+ // the complete working set list.
+ //
+
+ PointerPte = MiGetPteAddress (&Wsle[WorkingSetList->LastInitializedWsle]);
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PointerPte += 1;
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ Va = (PMMPTE)MiGetVirtualAddressMappedByPte (PointerPte);
+
+ NumberOfEntriesMapped = ((PMMWSLE)((ULONG)Va + PAGE_SIZE)) - Wsle;
+
+ //
+ // Map in a new working set page.
+ //
+
+ LOCK_PFN (OldIrql);
+ if (MmAvailablePages < 20) {
+
+ //
+ // No pages are available, set the quota to the last
+        // initialized WSLE and return.
+        //
+
+ WorkingSetList->Quota = WorkingSetList->LastInitializedWsle;
+ UNLOCK_PFN (OldIrql);
+ return FALSE;
+ }
+
+ WorkingSetPage = MiRemoveZeroPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+ PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+ MiInitializePfn (WorkingSetPage, PointerPte, 1);
+ UNLOCK_PFN (OldIrql);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ WorkingSetPage,
+ MM_READWRITE,
+ PointerPte );
+
+ MI_SET_PTE_DIRTY (TempPte);
+ *PointerPte = TempPte;
+
+ CurrentEntry = WorkingSetList->LastInitializedWsle + 1;
+
+ ASSERT (NumberOfEntriesMapped > CurrentEntry);
+
+ WslEntry = &Wsle[CurrentEntry - 1];
+
+ for (i = CurrentEntry; i < NumberOfEntriesMapped; i++) {
+
+        //
+        // Build the free list. Note that the entries below CurrentEntry
+        // are already in use and are not placed on the free list.
+        //
+
+ WslEntry += 1;
+ WslEntry->u1.Long = (i + 1) << MM_FREE_WSLE_SHIFT;
+ }
+
+ WslEntry->u1.Long = WorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
+
+ WorkingSetList->FirstFree = CurrentEntry;
+
+ WorkingSetList->LastInitializedWsle =
+ (NumberOfEntriesMapped - 1);
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ //
+ // As we are growing the working set, make sure the quota is
+ // above the working set size by adding 1 to the quota.
+ //
+
+ WorkingSetList->Quota += 1;
+
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+
+ //
+ // Get a working set entry.
+ //
+
+ WsInfo->WorkingSetSize += 1;
+ ASSERT (WorkingSetList->FirstFree != WSLE_NULL_INDEX);
+ WorkingSetIndex = WorkingSetList->FirstFree;
+ WorkingSetList->FirstFree = Wsle[WorkingSetIndex].u1.Long >> MM_FREE_WSLE_SHIFT;
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ if (WsInfo->WorkingSetSize > WsInfo->MinimumWorkingSetSize) {
+ MmPagesAboveWsMinimum += 1;
+ }
+ if (WorkingSetIndex > WorkingSetList->LastEntry) {
+ WorkingSetList->LastEntry = WorkingSetIndex;
+ }
+
+ MiUpdateWsle ( &WorkingSetIndex, Va, WorkingSetList, Pfn1);
+
+ //
+ // Lock any created page table pages into the working set.
+ //
+
+ if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {
+
+ SwapEntry = WorkingSetList->FirstDynamic;
+
+ if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
+
+ //
+ // Swap this entry with the one at first dynamic.
+ //
+
+ MiSwapWslEntries (WorkingSetIndex, SwapEntry, WsInfo);
+ }
+
+ WorkingSetList->FirstDynamic += 1;
+ WorkingSetList->NextSlot = WorkingSetList->FirstDynamic;
+
+ Wsle[SwapEntry].u1.e1.LockedInWs = 1;
+ ASSERT (Wsle[SwapEntry].u1.e1.Valid == 1);
+ }
+
+ ASSERT ((MiGetPteAddress(&Wsle[WorkingSetList->LastInitializedWsle]))->u.Hard.Valid == 1);
+
+ if ((WorkingSetList->HashTable == NULL) &&
+ (MmAvailablePages > 20)) {
+
+ //
+ // Add a hash table to support shared pages in the working set to
+ // eliminate costly lookups.
+ //
+
+ LOCK_EXPANSION_IF_ALPHA (OldIrql);
+ ASSERT (WsInfo->AllowWorkingSetAdjustment != FALSE);
+ WsInfo->AllowWorkingSetAdjustment = MM_GROW_WSLE_HASH;
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql);
+ }
+
+ return TRUE;
+}
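+
+//
+// Growth is always one page of WSLEs at a time: the PTE after the last
+// initialized entry is made valid with a zeroed page, the new entries
+// are chained onto the head of the free list, and the new page itself
+// consumes one (locked) WSLE. The free-list threading step in outline;
+// illustrative only (never compiled), MiSketchChainNewEntries is a
+// hypothetical name, and NewLast stands for the last index mapped by
+// the new page.
+//
+#if 0 // ILLUSTRATION ONLY
+VOID
+MiSketchChainNewEntries (
+    IN PMMWSL Wsl,
+    IN PMMWSLE Wsle,
+    IN ULONG NewLast
+    )
+{
+    ULONG i;
+    ULONG First = Wsl->LastInitializedWsle + 1;
+
+    for (i = First; i < NewLast; i++) {
+        Wsle[i].u1.Long = (i + 1) << MM_FREE_WSLE_SHIFT;    // chain forward
+    }
+    Wsle[NewLast].u1.Long = Wsl->FirstFree << MM_FREE_WSLE_SHIFT;
+    Wsl->FirstFree = First;                 // new entries head the free list
+    Wsl->LastInitializedWsle = NewLast;
+}
+#endif // ILLUSTRATION ONLY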
+
+VOID
+MiGrowWsleHash (
+ IN PMMSUPPORT WsInfo,
+ IN ULONG PfnLockHeld
+ )
+
+/*++
+
+Routine Description:
+
+ This function grows (or adds) a hash table to the working set list
+    to allow direct indexing for WSLEs that cannot be located via the
+ PFN database WSINDEX field.
+
+ The hash table is located AFTER the WSLE array and the pages are
+ locked into the working set just like standard WSLEs.
+
+    Note that a hash table expansion is requested by setting the hash
+    table field in the working set list to NULL while leaving the size
+    non-zero. This indicates that the hash table should be expanded and
+    the initial portion of the table zeroed.
+
+Arguments:
+
+    WsInfo - Supplies a pointer to the working set info block for the
+ process (or system cache).
+
+ PfnLockHeld - Supplies TRUE if the PFN lock is already held.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set lock held.
+
+--*/
+{
+ LONG Size;
+ PMMWSLE Wsle;
+ PMMPFN Pfn1;
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+ ULONG First;
+ PVOID Va;
+ ULONG SwapEntry;
+ ULONG WorkingSetPage;
+ ULONG Hash;
+ ULONG HashValue;
+ ULONG NewSize;
+ ULONG WorkingSetIndex;
+ PMMWSLE_HASH Table;
+ ULONG j;
+ PMMWSL WorkingSetList;
+ KIRQL OldIrql;
+ ULONG Count;
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+ Table = WorkingSetList->HashTable;
+ if (Table == NULL) {
+ NewSize = (ULONG)PAGE_ALIGN (((1 + WorkingSetList->NonDirectCount) *
+ 2 * sizeof(MMWSLE_HASH)) + PAGE_SIZE - 1);
+
+ Table = (PMMWSLE_HASH)
+ ((PCHAR)PAGE_ALIGN (&Wsle[MM_MAXIMUM_WORKING_SET]) + PAGE_SIZE);
+ First = WorkingSetList->HashTableSize;
+ ASSERT (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0);
+ WorkingSetList->HashTableSize = 0;
+
+ j = First * sizeof(MMWSLE_HASH);
+ if (j > NewSize) {
+ NewSize = j;
+ }
+
+ } else {
+
+ //
+ // Attempt to add 4 pages, make sure the working set list has
+ // 4 free entries.
+ //
+
+ ASSERT (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0);
+ if ((WorkingSetList->LastInitializedWsle + 5) > WsInfo->WorkingSetSize) {
+ NewSize = PAGE_SIZE * 4;
+ } else {
+ NewSize = PAGE_SIZE;
+ }
+ First = WorkingSetList->HashTableSize;
+ }
+
+ Size = NewSize;
+
+ PointerPte = MiGetPteAddress (&Table[WorkingSetList->HashTableSize]);
+
+ do {
+
+ if (PointerPte->u.Hard.Valid == 0) {
+
+ LOCK_PFN (OldIrql);
+ WorkingSetPage = MiRemoveZeroPage (
+ MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
+
+ PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE;
+ MiInitializePfn (WorkingSetPage, PointerPte, 1);
+
+ MI_MAKE_VALID_PTE (TempPte,
+ WorkingSetPage,
+ MM_READWRITE,
+ PointerPte );
+
+ MI_SET_PTE_DIRTY (TempPte);
+ *PointerPte = TempPte;
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // As we are growing the working set, we know that quota
+ // is above the current working set size. Just take the
+ // next free WSLE from the list and use it.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = (ULONG)PsGetCurrentThread();
+
+ Va = (PMMPTE)MiGetVirtualAddressMappedByPte (PointerPte);
+
+ WorkingSetIndex = MiLocateAndReserveWsle (WsInfo);
+ MiUpdateWsle (&WorkingSetIndex , Va, WorkingSetList, Pfn1);
+
+ //
+ // Lock any created page table pages into the working set.
+ //
+
+ if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {
+
+ SwapEntry = WorkingSetList->FirstDynamic;
+
+ if (WorkingSetIndex != WorkingSetList->FirstDynamic) {
+
+ //
+ // Swap this entry with the one at first dynamic.
+ //
+
+ MiSwapWslEntries (WorkingSetIndex, SwapEntry, WsInfo);
+ }
+
+ WorkingSetList->FirstDynamic += 1;
+ WorkingSetList->NextSlot = WorkingSetList->FirstDynamic;
+
+ Wsle[SwapEntry].u1.e1.LockedInWs = 1;
+ ASSERT (Wsle[SwapEntry].u1.e1.Valid == 1);
+ }
+ }
+ PointerPte += 1;
+ Size -= PAGE_SIZE;
+ } while (Size > 0);
+
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ WorkingSetList->HashTableSize += NewSize / sizeof (MMWSLE_HASH);
+ WorkingSetList->HashTable = Table;
+ ASSERT (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0);
+
+ if (First != 0) {
+ RtlZeroMemory (Table, First * sizeof(MMWSLE_HASH));
+ }
+
+ //
+ // Fill hash table
+ //
+
+ j = 0;
+ Count = WorkingSetList->NonDirectCount;
+
+ Size = WorkingSetList->HashTableSize;
+ HashValue = Size - 1;
+
+ do {
+ if ((Wsle[j].u1.e1.Valid == 1) &&
+ (Wsle[j].u1.e1.Direct == 0)) {
+
+ //
+ // Hash this.
+ //
+
+ Count -= 1;
+
+ Hash = (Wsle[j].u1.Long >> (PAGE_SHIFT - 2)) % HashValue;
+
+ while (Table[Hash].Key != 0) {
+ Hash += 1;
+ if (Hash >= (ULONG)Size) {
+ Hash = 0;
+ }
+ }
+
+ Table[Hash].Key = Wsle[j].u1.Long & ~(PAGE_SIZE - 1);
+ Table[Hash].Index = j;
+#if DBG
+ {
+ PMMPTE PointerPte;
+ PMMPFN Pfn;
+
+ PointerPte = MiGetPteAddress(Wsle[j].u1.VirtualAddress);
+ ASSERT (PointerPte->u.Hard.Valid);
+ Pfn = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ }
+#endif //DBG
+
+ }
+ ASSERT (j <= WorkingSetList->LastEntry);
+ j += 1;
+ } while (Count);
+
+#if DBG
+ MiCheckWsleHash (WorkingSetList);
+#endif //DBG
+ return;
+}
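+
+//
+// The table is open addressed with linear probing: the key is the
+// page-aligned virtual address, the initial bucket is derived from the
+// key modulo (HashTableSize - 1), and collisions walk forward with
+// wraparound, exactly as in the fill loop above. A matching lookup;
+// illustrative only (never compiled), MiSketchHashLookup is a
+// hypothetical name, and the real lookup lives in MiLocateWsle.
+//
+#if 0 // ILLUSTRATION ONLY
+ULONG
+MiSketchHashLookup (
+    IN PMMWSL Wsl,
+    IN PVOID VirtualAddress
+    )
+{
+    PMMWSLE_HASH Table = Wsl->HashTable;
+    ULONG Size = Wsl->HashTableSize;
+    ULONG Key = (ULONG)PAGE_ALIGN (VirtualAddress);
+    ULONG Hash = (Key >> (PAGE_SHIFT - 2)) % (Size - 1);
+
+    while (Table[Hash].Key != 0) {
+        if (Table[Hash].Key == Key) {
+            return Table[Hash].Index;           // hit
+        }
+        Hash += 1;
+        if (Hash >= Size) {
+            Hash = 0;                           // wrap, as the fill loop does
+        }
+    }
+    return WSLE_NULL_INDEX;                     // not in the table
+}
+#endif // ILLUSTRATION ONLY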
+
+
+ULONG
+MiTrimWorkingSet (
+ ULONG Reduction,
+ IN PMMSUPPORT WsInfo,
+ IN ULONG ForcedReduction
+ )
+
+/*++
+
+Routine Description:
+
+ This function reduces the working set by the specified amount.
+
+Arguments:
+
+ Reduction - Supplies the number of pages to remove from the working
+ set.
+
+ WsInfo - Supplies a pointer to the working set information for the
+ process (or system cache) to trim.
+
+    ForcedReduction - Set to TRUE if the reduction is being done to free
+                      up pages, in which case working set structure
+                      pages should be reduced as well. Set to FALSE when
+                      the reduction is trying to increase the fault
+                      rates, in which case the policy should be more
+                      like locate and reserve.
+
+Return Value:
+
+ Returns the actual number of pages removed.
+
+Environment:
+
+    Kernel mode, APC's disabled, working set lock held. PFN lock NOT held.
+
+--*/
+
+{
+ ULONG TryToFree;
+ ULONG LastEntry;
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+ PMMPTE PointerPte;
+ ULONG NumberLeftToRemove;
+ ULONG LoopCount;
+ ULONG EndCount;
+
+ NumberLeftToRemove = Reduction;
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+#if DBG
+ if (WsInfo == &MmSystemCacheWs) {
+ MM_SYSTEM_WS_LOCK_ASSERT();
+ }
+#endif //DBG
+
+ TryToFree = WorkingSetList->NextSlot;
+ LastEntry = WorkingSetList->LastEntry;
+ LoopCount = 0;
+
+ if (ForcedReduction) {
+ EndCount = 5;
+ } else {
+ EndCount = 2;
+ }
+
+ while ((NumberLeftToRemove != 0) && (LoopCount != EndCount)) {
+ while ((NumberLeftToRemove != 0) && (TryToFree <= LastEntry)) {
+
+ if (Wsle[TryToFree].u1.e1.Valid == 1) {
+ PointerPte = MiGetPteAddress (Wsle[TryToFree].u1.VirtualAddress);
+ if (MI_GET_ACCESSED_IN_PTE (PointerPte)) {
+
+ //
+ // If accessed bit is set, clear it. If accessed
+ // bit is clear, remove from working set.
+ //
+
+ MI_SET_ACCESSED_IN_PTE (PointerPte, 0);
+ } else {
+ if (MiFreeWsle (TryToFree, WsInfo, PointerPte)) {
+ NumberLeftToRemove -= 1;
+ }
+ }
+ }
+ TryToFree += 1;
+ }
+ TryToFree = WorkingSetList->FirstDynamic;
+ LoopCount += 1;
+ }
+ WorkingSetList->NextSlot = TryToFree;
+
+ //
+ // If this is not the system cache working set, see if the working
+ // set list can be contracted.
+ //
+
+ if (WsInfo != &MmSystemCacheWs) {
+
+ //
+ // Make sure we are at least a page above the working set maximum.
+ //
+
+ if (WorkingSetList->FirstDynamic == WsInfo->WorkingSetSize) {
+ MiRemoveWorkingSetPages (WorkingSetList, WsInfo);
+ } else {
+
+ if ((WorkingSetList->Quota + 15 + (PAGE_SIZE / sizeof(MMWSLE))) <
+ WorkingSetList->LastEntry) {
+ if ((WsInfo->MaximumWorkingSetSize + 15 + (PAGE_SIZE / sizeof(MMWSLE))) <
+ WorkingSetList->LastEntry ) {
+ MiRemoveWorkingSetPages (WorkingSetList, WsInfo);
+ }
+ }
+ }
+ }
+ return (Reduction - NumberLeftToRemove);
+}
+
+#if 0 //COMMENTED OUT.
+VOID
+MmPurgeWorkingSet (
+ IN PEPROCESS Process,
+ IN PVOID BaseAddress,
+ IN ULONG RegionSize
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes any valid pages with a reference count
+ of 1 within the specified address range of the specified process.
+
+ If the address range is within the system cache, the process
+    parameter is ignored.
+
+Arguments:
+
+ Process - Supplies a pointer to the process to operate upon.
+
+ BaseAddress - Supplies the base address of the range to operate upon.
+
+ RegionSize - Supplies the size of the region to operate upon.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC_LEVEL or below.
+
+--*/
+
+{
+ PMMSUPPORT WsInfo;
+ PMMPTE PointerPte;
+ PMMPTE PointerPde;
+ PMMPTE LastPte;
+ PMMPFN Pfn1;
+ MMPTE PteContents;
+ PEPROCESS CurrentProcess;
+ PVOID EndingAddress;
+ ULONG SystemCache;
+ KIRQL OldIrql;
+
+ //
+ // Determine if the specified base address is within the system
+ // cache and if so, don't attach, the working set lock is still
+ // required to "lock" paged pool pages (proto PTEs) into the
+ // working set.
+ //
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+ ASSERT (RegionSize != 0);
+
+ EndingAddress = (PVOID)((ULONG)BaseAddress + RegionSize - 1);
+
+ if ((BaseAddress <= MM_HIGHEST_USER_ADDRESS) ||
+ ((BaseAddress >= (PVOID)PTE_BASE) &&
+ (BaseAddress < (PVOID)MM_SYSTEM_SPACE_START)) ||
+ ((BaseAddress >= MM_PAGED_POOL_START) &&
+ (BaseAddress <= MmPagedPoolEnd))) {
+
+ SystemCache = FALSE;
+
+ //
+ // Attach to the specified process.
+ //
+
+ KeAttachProcess (&Process->Pcb);
+
+        WsInfo = &Process->Vm;
+
+ LOCK_WS (Process);
+ } else {
+
+ SystemCache = TRUE;
+ Process = CurrentProcess;
+ WsInfo = &MmSystemCacheWs;
+ }
+
+ PointerPde = MiGetPdeAddress (BaseAddress);
+ PointerPte = MiGetPteAddress (BaseAddress);
+ LastPte = MiGetPteAddress (EndingAddress);
+
+ while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE)) {
+
+ //
+ // No page table page exists for this address.
+ //
+
+ PointerPde += 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ if (PointerPte > LastPte) {
+ break;
+ }
+ }
+
+ LOCK_PFN (OldIrql);
+
+ while (PointerPte <= LastPte) {
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ //
+ // Remove this page from the working set.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ if (Pfn1->u3.e2.ReferenceCount == 1) {
+ MiRemovePageFromWorkingSet (PointerPte, Pfn1, WsInfo);
+ }
+ }
+
+ PointerPte += 1;
+
+ if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {
+
+ PointerPde = MiGetPteAddress (PointerPte);
+
+ while ((PointerPte <= LastPte) &&
+ (!MiDoesPdeExistAndMakeValid(PointerPde, Process, TRUE))) {
+
+ //
+ // No page table page exists for this address.
+ //
+
+ PointerPde += 1;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+ }
+ }
+ }
+
+ UNLOCK_PFN (OldIrql);
+
+ if (!SystemCache) {
+
+ UNLOCK_WS (Process);
+ KeDetachProcess();
+ }
+ return;
+}
+#endif //0
+
+VOID
+MiEliminateWorkingSetEntry (
+ IN ULONG WorkingSetIndex,
+ IN PMMPTE PointerPte,
+ IN PMMPFN Pfn,
+ IN PMMWSLE Wsle
+ )
+
+/*++
+
+Routine Description:
+
+    This routine removes the specified working set list entry
+    from the working set, flushes the TB for the page, decrements
+    the share count for the physical page, and, if necessary, turns
+    the PTE into a transition PTE.
+
+Arguments:
+
+ WorkingSetIndex - Supplies the working set index to remove.
+
+    PointerPte - Supplies a pointer to the PTE corresponding to the virtual
+ address in the working set.
+
+ Pfn - Supplies a pointer to the PFN element corresponding to the PTE.
+
+ Wsle - Supplies a pointer to the first working set list entry for this
+ working set.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, Working set lock and PFN lock held, APC's disabled.
+
+--*/
+
+{
+ PMMPTE ContainingPageTablePage;
+ MMPTE TempPte;
+ MMPTE PreviousPte;
+ ULONG PageFrameIndex;
+ KIRQL OldIrql;
+
+ //
+ // Remove the page from the working set.
+ //
+
+ MM_PFN_LOCK_ASSERT ();
+
+ TempPte = *PointerPte;
+ PageFrameIndex = TempPte.u.Hard.PageFrameNumber;
+
+#ifdef _X86_
+#if DBG
+#if !defined(NT_UP)
+ if (TempPte.u.Hard.Writable == 1) {
+ ASSERT (TempPte.u.Hard.Dirty == 1);
+ }
+ ASSERT (TempPte.u.Hard.Accessed == 1);
+#endif //NTUP
+#endif //DBG
+#endif //X86
+
+ MI_MAKING_VALID_PTE_INVALID (FALSE);
+
+ if (Pfn->u3.e1.PrototypePte) {
+
+ //
+ // This is a prototype PTE. The PFN database does not contain
+        // the contents of this PTE; it contains the contents of the
+ // prototype PTE. This PTE must be reconstructed to contain
+ // a pointer to the prototype PTE.
+ //
+ // The working set list entry contains information about
+ // how to reconstruct the PTE.
+ //
+
+ if (Wsle[WorkingSetIndex].u1.e1.SameProtectAsProto == 0) {
+
+ //
+ // The protection for the prototype PTE is in the
+ // WSLE.
+ //
+
+ ASSERT (Wsle[WorkingSetIndex].u1.e1.Protection != 0);
+ TempPte.u.Long = 0;
+ TempPte.u.Soft.Protection =
+ Wsle[WorkingSetIndex].u1.e1.Protection;
+ TempPte.u.Soft.PageFileHigh = 0xFFFFF;
+
+ } else {
+
+ //
+ // The protection is in the prototype PTE.
+ //
+
+ TempPte.u.Long = MiProtoAddressForPte (Pfn->PteAddress);
+ MI_SET_GLOBAL_BIT_IF_SYSTEM (TempPte, PointerPte);
+ }
+
+ TempPte.u.Proto.Prototype = 1;
+
+ //
+ // Decrement the share count of the containing page table
+ // page as the PTE for the removed page is no longer valid
+        // or in transition.
+ //
+
+ ContainingPageTablePage = MiGetPteAddress (PointerPte);
+ if (ContainingPageTablePage->u.Hard.Valid == 0) {
+ MiCheckPdeForPagedPool (PointerPte);
+ }
+ MiDecrementShareAndValidCount (ContainingPageTablePage->u.Hard.PageFrameNumber);
+
+ } else {
+
+ //
+ // This is a private page, make it transition.
+ //
+
+ //
+ // Assert that the share count is 1 for all user mode pages.
+ //
+
+ ASSERT ((Pfn->u2.ShareCount == 1) ||
+ (Wsle[WorkingSetIndex].u1.VirtualAddress >
+ (PVOID)MM_HIGHEST_USER_ADDRESS));
+
+ //
+ // Set the working set index to zero. This allows page table
+        // pages to be brought back in with the proper WSINDEX.
+ //
+
+ ASSERT (Pfn->u1.WsIndex != 0);
+ Pfn->u1.WsIndex = 0;
+ MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+ Pfn->OriginalPte.u.Soft.Protection);
+
+ }
+
+ PreviousPte.u.Flush = KeFlushSingleTb (Wsle[WorkingSetIndex].u1.VirtualAddress,
+ TRUE,
+ (BOOLEAN)(Wsle == MmSystemCacheWsle),
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Flush);
+
+ ASSERT (PreviousPte.u.Hard.Valid == 1);
+
+ //
+ // A page is being removed from the working set, on certain
+ // hardware the dirty bit should be ORed into the modify bit in
+ // the PFN element.
+ //
+
+ MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn);
+
+ //
+ // Flush the translation buffer and decrement the number of valid
+ // PTEs within the containing page table page. Note that for a
+ // private page, the page table page is still needed because the
+    // page is in transition.
+ //
+
+ MiDecrementShareCount (PageFrameIndex);
+
+ return;
+}
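+
+//
+// On removal the valid PTE is rewritten one of three ways: a prototype
+// page whose protection travels in the WSLE gets a demand PTE carrying
+// that protection (PageFileHigh of 0xFFFFF marks the location as
+// unknown); any other prototype page gets a pointer back to its
+// prototype PTE; a private page becomes a transition PTE so the frame
+// stays resident. The decision in isolation; illustrative only (never
+// compiled), MiSketchInvalidPteFor is a hypothetical name, and the
+// global-bit and share-count bookkeeping above are elided.
+//
+#if 0 // ILLUSTRATION ONLY
+MMPTE
+MiSketchInvalidPteFor (
+    IN MMPTE ValidPte,
+    IN PMMPFN Pfn,
+    IN PMMWSLE WsleEntry
+    )
+{
+    MMPTE TempPte;
+
+    TempPte = ValidPte;
+    if (Pfn->u3.e1.PrototypePte) {
+        if (WsleEntry->u1.e1.SameProtectAsProto == 0) {
+            TempPte.u.Long = 0;
+            TempPte.u.Soft.Protection = WsleEntry->u1.e1.Protection;
+            TempPte.u.Soft.PageFileHigh = 0xFFFFF;  // protection kept in PTE
+        } else {
+            TempPte.u.Long = MiProtoAddressForPte (Pfn->PteAddress);
+        }
+        TempPte.u.Proto.Prototype = 1;              // points at the proto PTE
+    } else {
+        MI_MAKE_VALID_PTE_TRANSITION (TempPte,
+                                      Pfn->OriginalPte.u.Soft.Protection);
+    }
+    return TempPte;
+}
+#endif // ILLUSTRATION ONLY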
+
+VOID
+MiRemoveWorkingSetPages (
+ IN PMMWSL WorkingSetList,
+ IN PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This routine compresses the WSLEs into the front of the working set
+ and frees the pages for unneeded working set entries.
+
+Arguments:
+
+    WorkingSetList - Supplies a pointer to the working set list to compress.
+
+    WsInfo - Supplies a pointer to the working set info block for the
+             process (or system cache).
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, Working set lock held, APC's disabled.
+
+--*/
+
+{
+ PMMWSLE FreeEntry;
+ PMMWSLE LastEntry;
+ PMMWSLE Wsle;
+ ULONG FreeIndex;
+ ULONG LastIndex;
+ ULONG LastInvalid;
+ PMMPTE PointerPte;
+ PMMPTE WsPte;
+ PMMPFN Pfn1;
+ PEPROCESS CurrentProcess;
+ MMPTE_FLUSH_LIST PteFlushList;
+ ULONG NewSize;
+ PMMWSLE_HASH Table;
+ KIRQL OldIrql;
+
+ PteFlushList.Count = 0;
+ CurrentProcess = PsGetCurrentProcess();
+
+#if DBG
+ MiCheckNullIndex (WorkingSetList);
+#endif //DBG
+
+ //
+ // Check to see if the wsle hash table should be contracted.
+ //
+
+ if (WorkingSetList->HashTable) {
+
+ Table = WorkingSetList->HashTable;
+ ASSERT (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0);
+
+ NewSize = (ULONG)PAGE_ALIGN ((WorkingSetList->NonDirectCount * 2 *
+ sizeof(MMWSLE_HASH)) + PAGE_SIZE - 1);
+
+ NewSize = NewSize / sizeof(MMWSLE_HASH);
+
+ if (WsInfo->WorkingSetSize < 200) {
+ NewSize = 0;
+ }
+
+ if (NewSize < WorkingSetList->HashTableSize) {
+
+ LOCK_EXPANSION_IF_ALPHA (OldIrql);
+ if (NewSize && WsInfo->AllowWorkingSetAdjustment) {
+ WsInfo->AllowWorkingSetAdjustment = MM_GROW_WSLE_HASH;
+ }
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql);
+
+ //
+ // Remove pages from hash table.
+ //
+
+ ASSERT (((ULONG)&WorkingSetList->HashTable[NewSize] &
+ (PAGE_SIZE - 1)) == 0);
+
+ PointerPte = MiGetPteAddress (&WorkingSetList->HashTable[NewSize]);
+
+ //
+ // Set the hash table to null indicating that no hashing
+ // is going on.
+ //
+
+ WorkingSetList->HashTable = NULL;
+ WorkingSetList->HashTableSize = NewSize;
+
+ LOCK_PFN (OldIrql);
+ while (PointerPte->u.Hard.Valid == 1) {
+
+ MiDeletePte (PointerPte,
+ MiGetVirtualAddressMappedByPte (PointerPte),
+ FALSE,
+ CurrentProcess,
+ NULL,
+ &PteFlushList);
+
+ PointerPte += 1;
+
+ //
+ // Add back in the private page MiDeletePte subtracted.
+ //
+
+ CurrentProcess->NumberOfPrivatePages += 1;
+ }
+ MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
+ UNLOCK_PFN (OldIrql);
+ }
+ ASSERT (MiGetPteAddress(&Table[WorkingSetList->HashTableSize])->u.Hard.Valid == 0);
+ }
+
+ //
+    // If the only pages in the working set are locked pages (that is,
+    // all pages are BEFORE first dynamic), just reorganize the
+    // free list.
+ //
+
+ Wsle = WorkingSetList->Wsle;
+ if (WorkingSetList->FirstDynamic == WsInfo->WorkingSetSize) {
+
+ LastIndex = WorkingSetList->FirstDynamic;
+ LastEntry = &Wsle[LastIndex];
+
+ } else {
+
+ //
+ // Start from the first dynamic and move towards the end looking
+ // for free entries. At the same time start from the end and
+ // move towards first dynamic looking for valid entries.
+ //
+
+ LastInvalid = 0;
+ FreeIndex = WorkingSetList->FirstDynamic;
+ FreeEntry = &Wsle[FreeIndex];
+ LastIndex = WorkingSetList->LastEntry;
+ LastEntry = &Wsle[LastIndex];
+
+ while (FreeEntry < LastEntry) {
+ if (FreeEntry->u1.e1.Valid == 1) {
+ FreeEntry += 1;
+ FreeIndex += 1;
+ } else if (LastEntry->u1.e1.Valid == 0) {
+ LastEntry -= 1;
+ LastIndex -= 1;
+ } else {
+
+ //
+ // Move the WSLE at LastEntry to the free slot at FreeEntry.
+ //
+
+ LastInvalid = 1;
+ *FreeEntry = *LastEntry;
+ if (LastEntry->u1.e1.Direct) {
+
+ PointerPte = MiGetPteAddress (LastEntry->u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = FreeIndex;
+
+ } else {
+
+ //
+ // This entry is in the working set tree. Remove it
+                    // and then insert it at the free slot.
+ //
+
+ MiRemoveWsle (LastIndex, WorkingSetList);
+ MiInsertWsle (FreeIndex, WorkingSetList);
+ }
+ LastEntry->u1.Long = 0;
+ LastEntry -= 1;
+ LastIndex -= 1;
+ FreeEntry += 1;
+ FreeIndex += 1;
+ }
+ }
+
+ //
+ // If no entries were freed, just return.
+ //
+
+ if (LastInvalid == 0) {
+#if DBG
+ MiCheckNullIndex (WorkingSetList);
+#endif //DBG
+ return;
+ }
+ }
+
+ //
+ // Reorganize the free list. Make last entry the first free.
+ //
+
+ ASSERT ((LastEntry - 1)->u1.e1.Valid == 1);
+
+ if (LastEntry->u1.e1.Valid == 1) {
+ LastEntry += 1;
+ LastIndex += 1;
+ }
+
+ WorkingSetList->LastEntry = LastIndex - 1;
+ WorkingSetList->FirstFree = LastIndex;
+
+ ASSERT ((LastEntry - 1)->u1.e1.Valid == 1);
+ ASSERT ((LastEntry)->u1.e1.Valid == 0);
+
+ //
+ // Point free entry to the first invalid page.
+ //
+
+ FreeEntry = LastEntry;
+
+ while (LastIndex < WorkingSetList->LastInitializedWsle) {
+
+ //
+        // Put the remainder of the WSLEs on the free list.
+ //
+
+ ASSERT (LastEntry->u1.e1.Valid == 0);
+ LastIndex += 1;
+ LastEntry->u1.Long = LastIndex << MM_FREE_WSLE_SHIFT;
+ LastEntry += 1;
+ }
+
+ //LastEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; // End of list.
+
+ //
+ // Delete the working set pages at the end.
+ //
+
+ PointerPte = MiGetPteAddress (&Wsle[WorkingSetList->LastInitializedWsle]);
+ if (&Wsle[WsInfo->MinimumWorkingSetSize] > FreeEntry) {
+ FreeEntry = &Wsle[WsInfo->MinimumWorkingSetSize];
+ }
+
+ WsPte = MiGetPteAddress (FreeEntry);
+
+ LOCK_PFN (OldIrql);
+ while (PointerPte > WsPte) {
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+
+ MiDeletePte (PointerPte,
+ MiGetVirtualAddressMappedByPte (PointerPte),
+ FALSE,
+ CurrentProcess,
+ NULL,
+ &PteFlushList);
+
+ PointerPte -= 1;
+
+ //
+ // Add back in the private page MiDeletePte subtracted.
+ //
+
+ CurrentProcess->NumberOfPrivatePages += 1;
+ }
+
+ MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
+
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Mark the last pte in the list as free.
+ //
+
+ LastEntry = (PMMWSLE)((ULONG)(PAGE_ALIGN(FreeEntry)) + PAGE_SIZE);
+ LastEntry -= 1;
+
+ ASSERT (LastEntry->u1.e1.Valid == 0);
+ LastEntry->u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; //End of List.
+ ASSERT (LastEntry > &Wsle[0]);
+ WorkingSetList->LastInitializedWsle = LastEntry - &Wsle[0];
+ WorkingSetList->NextSlot = WorkingSetList->FirstDynamic;
+
+ ASSERT (WorkingSetList->LastEntry <= WorkingSetList->LastInitializedWsle);
+
+ if (WorkingSetList->Quota < WorkingSetList->LastInitializedWsle) {
+ WorkingSetList->Quota = WorkingSetList->LastInitializedWsle;
+ }
+
+ ASSERT ((MiGetPteAddress(&Wsle[WorkingSetList->LastInitializedWsle]))->u.Hard.Valid == 1);
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+#if DBG
+ MiCheckNullIndex (WorkingSetList);
+#endif //DBG
+ return;
+}
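+
+//
+// The compaction above is a classic two-finger sweep: one cursor walks
+// forward from FirstDynamic looking for holes, the other walks backward
+// from LastEntry looking for live entries, and live entries are copied
+// down into the holes until the cursors meet. The skeleton; illustrative
+// only (never compiled), MiSketchCompact is a hypothetical name, and the
+// PFN WsIndex / tree fixups done above are elided.
+//
+#if 0 // ILLUSTRATION ONLY
+VOID
+MiSketchCompact (
+    IN PMMWSLE Wsle,
+    IN ULONG FirstDynamic,
+    IN OUT PULONG LastIndex
+    )
+{
+    ULONG Free = FirstDynamic;
+    ULONG Last = *LastIndex;
+
+    while (Free < Last) {
+        if (Wsle[Free].u1.e1.Valid == 1) {
+            Free += 1;                          // already packed
+        } else if (Wsle[Last].u1.e1.Valid == 0) {
+            Last -= 1;                          // nothing to move here
+        } else {
+            Wsle[Free] = Wsle[Last];            // move live entry down
+            Wsle[Last].u1.Long = 0;
+            Free += 1;
+            Last -= 1;
+        }
+    }
+    *LastIndex = Last;
+}
+#endif // ILLUSTRATION ONLY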
+
+
+NTSTATUS
+MiEmptyWorkingSet (
+ IN PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This routine frees all pages from the working set.
+
+Arguments:
+
+    WsInfo - Supplies a pointer to the working set info block for the
+             process (or system cache) to empty.
+
+Return Value:
+
+ Status of operation.
+
+Environment:
+
+ Kernel mode. No locks.
+
+--*/
+
+{
+ PEPROCESS Process;
+ KIRQL OldIrql;
+ KIRQL OldIrql2;
+ PMMPTE PointerPte;
+ ULONG Entry;
+ ULONG LastFreed;
+ PMMWSL WorkingSetList;
+ PMMWSLE Wsle;
+ ULONG Last = 0;
+ NTSTATUS Status;
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+
+ if (WsInfo == &MmSystemCacheWs) {
+ LOCK_SYSTEM_WS (OldIrql);
+ } else {
+ Process = PsGetCurrentProcess ();
+ LOCK_WS (Process);
+ if (Process->AddressSpaceDeleted != 0) {
+ Status = STATUS_PROCESS_IS_TERMINATING;
+ goto Deleted;
+ }
+ }
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+ //
+    // Attempt to remove the pages, walking upward from FirstDynamic.
+ //
+
+ Entry = WorkingSetList->FirstDynamic;
+ LastFreed = WorkingSetList->LastEntry;
+ while (Entry <= LastFreed) {
+ if (Wsle[Entry].u1.e1.Valid != 0) {
+ PointerPte = MiGetPteAddress (Wsle[Entry].u1.VirtualAddress);
+ MiFreeWsle (Entry, WsInfo, PointerPte);
+ }
+ Entry += 1;
+ }
+
+ if (WsInfo != &MmSystemCacheWs) {
+ MiRemoveWorkingSetPages (WorkingSetList,WsInfo);
+ }
+ WorkingSetList->Quota = WsInfo->WorkingSetSize;
+ WorkingSetList->NextSlot = WorkingSetList->FirstDynamic;
+
+    //
+    // Reorder the free list, walking from the front to the end.
+    //
+
+ Entry = WorkingSetList->FirstDynamic;
+ LastFreed = WorkingSetList->LastInitializedWsle;
+ while (Entry <= LastFreed) {
+ if (Wsle[Entry].u1.e1.Valid == 0) {
+ if (Last == 0) {
+ WorkingSetList->FirstFree = Entry;
+ } else {
+ Wsle[Last].u1.Long = Entry << MM_FREE_WSLE_SHIFT;
+ }
+ Last = Entry;
+ }
+ Entry += 1;
+ }
+ if (Last != 0) {
+ Wsle[Last].u1.Long = WSLE_NULL_INDEX << MM_FREE_WSLE_SHIFT; // End of list.
+ }
+
+ Status = STATUS_SUCCESS;
+
+Deleted:
+
+ if (WsInfo == &MmSystemCacheWs) {
+ UNLOCK_SYSTEM_WS (OldIrql);
+ } else {
+ UNLOCK_WS (Process);
+ }
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ return Status;
+}
+
+#if 0
+
+#define x256k_pte_mask (((256*1024) >> (PAGE_SHIFT - PTE_SHIFT)) - (sizeof(MMPTE)))
+
+VOID
+MiDumpWsleInCacheBlock (
+ IN PMMPTE CachePte
+ )
+
+/*++
+
+Routine Description:
+
+    The routine checks the prototype PTEs adjacent to the supplied
+    PTE and, if they are modified, in the system cache working set,
+    and have a reference count of 1, removes them from the system
+    cache working set.
+
+Arguments:
+
+ CachePte - Supplies a pointer to the cache pte.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, Working set lock and PFN lock held, APC's disabled.
+
+--*/
+
+{
+ PMMPTE LoopPte;
+ PMMPTE PointerPte;
+
+ LoopPte = (PMMPTE)((ULONG)CachePte & ~x256k_pte_mask);
+ PointerPte = CachePte - 1;
+
+ while (PointerPte >= LoopPte ) {
+
+ if (MiDumpPteInCacheBlock (PointerPte) == FALSE) {
+ break;
+ }
+ PointerPte -= 1;
+ }
+
+ PointerPte = CachePte + 1;
+ LoopPte = (PMMPTE)((ULONG)CachePte | x256k_pte_mask);
+
+ while (PointerPte <= LoopPte ) {
+
+ if (MiDumpPteInCacheBlock (PointerPte) == FALSE) {
+ break;
+ }
+ PointerPte += 1;
+ }
+ return;
+}
+
+ULONG
+MiDumpPteInCacheBlock (
+ IN PMMPTE PointerPte
+ )
+
+{
+    PMMPFN Pfn1;
+    MMPTE PteContents;
+    ULONG WorkingSetIndex;
+    KIRQL OldIrql;
+
+ PteContents = *PointerPte;
+
+ if (PteContents.u.Hard.Valid == 1) {
+
+ Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
+
+ //
+ // If the PTE is valid and dirty (or pfn indicates dirty)
+ // and the Wsle is direct index via the pfn wsindex element
+ // and the reference count is one, then remove this page from
+ // the cache manager's working set list.
+ //
+
+ if ((Pfn1->u3.e2.ReferenceCount == 1) &&
+ ((Pfn1->u3.e1.Modified == 1) ||
+ (MI_IS_PTE_DIRTY (PteContents))) &&
+ (MiGetPteAddress (
+ MmSystemCacheWsle[Pfn1->u1.WsIndex].u1.VirtualAddress) ==
+ PointerPte)) {
+
+ //
+ // Found a candidate, remove the page from the working set.
+ //
+
+ WorkingSetIndex = Pfn1->u1.WsIndex;
+ LOCK_PFN (OldIrql);
+ MiEliminateWorkingSetEntry (WorkingSetIndex,
+ PointerPte,
+ Pfn1,
+ MmSystemCacheWsle);
+ UNLOCK_PFN (OldIrql);
+
+ //
+ // Remove the working set entry from the working set tree.
+ //
+
+ MiRemoveWsle (WorkingSetIndex, MmSystemCacheWorkingSetList);
+
+ //
+ // Put the entry on the free list and decrement the current
+ // size.
+ //
+
+ MmSystemCacheWsle[WorkingSetIndex].u1.Long =
+ MmSystemCacheWorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
+ MmSystemCacheWorkingSetList->FirstFree = WorkingSetIndex;
+
+ if (MmSystemCacheWs.WorkingSetSize > MmSystemCacheWs.MinimumWorkingSetSize) {
+ MmPagesAboveWsMinimum -= 1;
+ }
+ MmSystemCacheWs.WorkingSetSize -= 1;
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+#endif //0
+
+#if DBG
+VOID
+MiCheckNullIndex (
+ IN PMMWSL WorkingSetList
+ )
+
+{
+ PMMWSLE Wsle;
+ ULONG j;
+ ULONG Nulls = 0;
+
+ Wsle = WorkingSetList->Wsle;
+ for (j = 0;j <= WorkingSetList->LastInitializedWsle; j++) {
+ if ((Wsle[j].u1.Long >> MM_FREE_WSLE_SHIFT) == WSLE_NULL_INDEX ) {
+ Nulls += 1;
+ }
+ }
+ ASSERT (Nulls == 1);
+ return;
+}
+
+#endif //DBG
+
+
diff --git a/private/ntos/mm/wsmanage.c b/private/ntos/mm/wsmanage.c
new file mode 100644
index 000000000..7d8400bdf
--- /dev/null
+++ b/private/ntos/mm/wsmanage.c
@@ -0,0 +1,1190 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ wsmanage.c
+
+Abstract:
+
+ This module contains routines which manage the set of active working
+ set lists.
+
+    Working set management is accomplished by a parallel group of actions:
+ 1. Writing modified pages
+ 2. Reducing (trimming) working sets which are above their maximum
+ towards their minimum.
+
+ The metrics are set such that writing modified pages is typically
+    accomplished before trimming working sets; however, in certain cases
+ where modified pages are being generated at a very high rate, working
+ set trimming will be initiated to free up more pages to modify.
+
+ When the first thread in a process is created, the memory management
+ system is notified that working set expansion is allowed. This
+ is noted by changing the FLINK field of the WorkingSetExpansionLink
+ entry in the process control block from MM_NO_WS_EXPANSION to
+ MM_ALLOW_WS_EXPANSION. As threads fault, the working set is eligible
+    for expansion if ample pages exist (MmAvailablePages is high enough).
+
+ Once a process has had its working set raised above the minimum
+ specified, the process is put on the Working Set Expanded list and
+    is now eligible for trimming. Note that at this time the FLINK field
+ in the WorkingSetExpansionLink has an address value.
+
+ When working set trimming is initiated, a process is removed from the
+ list (PFN mutex guards this list) and the FLINK field is set
+    to MM_NO_WS_EXPANSION; also, the BLINK field is set to
+ MM_WS_EXPANSION_IN_PROGRESS. The BLINK field value indicates to
+ the MmCleanUserAddressSpace function that working set trimming is
+ in progress for this process and it should wait until it completes.
+ This is accomplished by creating an event, putting the address of the
+ event in the BLINK field and then releasing the PFN mutex and
+ waiting on the event atomically. When working set trimming is
+    complete, the BLINK field is no longer MM_WS_EXPANSION_IN_PROGRESS,
+ indicating that the event should be set.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1990
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGELK, MiEmptyAllWorkingSets)
+#pragma alloc_text(INIT, MiAdjustWorkingSetManagerParameters)
+#endif
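+
+//
+// Illustrative summary of the link-field states described in the abstract
+// above:
+//
+//     Flink == MM_NO_WS_EXPANSION          - not on the expansion list
+//     Flink == MM_ALLOW_WS_EXPANSION       - expansion allowed, not yet listed
+//     Flink == <list entry address>        - on the expansion list
+//
+//     Blink == MM_WS_EXPANSION_IN_PROGRESS - the trimmer owns this entry
+//     Blink == <event address>             - a waiter to signal when done
+//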
+
+//
+// Minimum number of page faults to take to avoid being trimmed on
+// an "ideal pass".
+//
+
+ULONG MiIdealPassFaultCountDisable;
+
+extern ULONG PsMinimumWorkingSet;
+
+extern PEPROCESS ExpDefaultErrorPortProcess;
+
+//
+// Number of times to wake up and do nothing before trimming processes
+// with no faulting activity.
+//
+
+#define MM_TRIM_COUNTER_MAXIMUM_SMALL_MEM (4)
+#define MM_TRIM_COUNTER_MAXIMUM_LARGE_MEM (6)
+
+ULONG MiTrimCounterMaximum = MM_TRIM_COUNTER_MAXIMUM_LARGE_MEM;
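+
+//
+// Illustrative note (the wakeup cadence is an assumption about the
+// caller): the working set manager is awakened periodically by the
+// balance set manager, so a counter maximum of 6 means roughly six
+// consecutive wakeups with ample free pages before voluntary trimming
+// of idle working sets begins (4 on small-memory machines).
+//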
+
+#define MM_REDUCE_FAULT_COUNT (10000)
+
+#define MM_IGNORE_FAULT_COUNT (100)
+
+#define MM_PERIODIC_AGRESSIVE_TRIM_COUNTER_MAXIMUM (30)
+ULONG MmPeriodicAgressiveTrimMinFree = 1000;
+ULONG MmPeriodicAgressiveCacheWsMin = 1250;
+ULONG MmPeriodicAgressiveTrimMaxFree = 2000;
+ULONG MiPeriodicAgressiveTrimCheckCounter;
+BOOLEAN MiDoPeriodicAgressiveTrimming = FALSE;
+
+ULONG MiCheckCounter;
+
+ULONG MmMoreThanEnoughFreePages = 1000;
+
+ULONG MmAmpleFreePages = 200;
+
+ULONG MmWorkingSetReductionMin = 12;
+ULONG MmWorkingSetReductionMinCacheWs = 12;
+
+ULONG MmWorkingSetReductionMax = 60;
+ULONG MmWorkingSetReductionMaxCacheWs = 60;
+
+ULONG MmWorkingSetReductionHuge = (512*1024) >> PAGE_SHIFT;
+
+ULONG MmWorkingSetVolReductionMin = 12;
+
+ULONG MmWorkingSetVolReductionMax = 60;
+ULONG MmWorkingSetVolReductionMaxCacheWs = 60;
+
+ULONG MmWorkingSetVolReductionHuge = (2*1024*1024) >> PAGE_SHIFT;
+
+ULONG MmWorkingSetSwapReduction = 75;
+
+ULONG MmWorkingSetSwapReductionHuge = (4*1024*1024) >> PAGE_SHIFT;
+
+ULONG MmForegroundSwitchCount;
+
+ULONG MmNumberOfForegroundProcesses;
+
+ULONG MmLastFaultCount;
+
+extern PVOID MmPagableKernelStart;
+extern PVOID MmPagableKernelEnd;
+
+VOID
+MiAdjustWorkingSetManagerParameters(
+ BOOLEAN WorkStation
+ )
+/*++
+
+Routine Description:
+
+ This function is called from MmInitSystem to adjust the working set manager
+ trim algorithms based on system type and size.
+
+Arguments:
+
+ WorkStation - TRUE if this is a workstation
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode
+
+--*/
+{
+ if ( WorkStation && (MmNumberOfPhysicalPages <= ((31*1024*1024)/PAGE_SIZE)) ) {
+
+ //
+        // Periodic aggressive trimming of marked processes (and the system
+        // cache) is done on workstations with 31MB or less. The goal is to
+        // keep lots of free memory available and to build better internal
+        // working sets for the marked processes.
+ //
+
+ MiDoPeriodicAgressiveTrimming = TRUE;
+
+ //
+        // To get fault protection, a process now has to take 45 faults
+        // instead of the old threshold of 15.
+ //
+
+ MiIdealPassFaultCountDisable = 45;
+
+
+ //
+        // Take more away when a process is over its working set maximum
+        // in both forced and voluntary mode, but leave the cache WS trim
+        // amounts alone.
+ //
+
+ MmWorkingSetVolReductionMax = 100;
+ MmWorkingSetReductionMax = 100;
+
+ //
+        // In forced mode, even if a process is within its working set,
+        // take memory away more aggressively.
+ //
+
+ MmWorkingSetReductionMin = 40;
+
+ MmPeriodicAgressiveCacheWsMin = 1000;
+
+
+ if (MmNumberOfPhysicalPages >= ((15*1024*1024)/PAGE_SIZE) ) {
+ MmPeriodicAgressiveCacheWsMin = 1100;
+ }
+
+ //
+        // For larger machines (19MB - 31MB), keep the trim counter maximum
+        // set to 6 passes. Smaller machines (< 19MB) are set up for an
+        // iteration count of 4, which results in more frequent voluntary
+        // trimming.
+ //
+
+ if (MmNumberOfPhysicalPages >= ((19*1024*1024)/PAGE_SIZE) ) {
+ MmPeriodicAgressiveCacheWsMin = 1250;
+ }
+
+
+ if (MmNumberOfPhysicalPages >= ((23*1024*1024)/PAGE_SIZE) ) {
+ MmPeriodicAgressiveCacheWsMin = 1500;
+ }
+ } else {
+ MiIdealPassFaultCountDisable = 15;
+ }
+}
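+
+//
+// Illustrative restatement of the adjustments above: on a workstation
+// with 31MB or less, periodic aggressive trimming is enabled, the
+// ideal-pass fault protection threshold rises from 15 to 45 faults,
+// the forced and voluntary reduction maximums rise from 60 to 100
+// pages, the forced reduction minimum rises from 12 to 40 pages, and
+// the aggressive cache WS floor scales from 1000 to 1500 pages with
+// physical memory size.
+//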
+
+
+VOID
+MiObtainFreePages (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function examines the size of the modified list and the
+ total number of pages in use because of working set increments
+ and obtains pages by writing modified pages and/or reducing
+ working sets.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, working set and pfn mutexes held.
+
+--*/
+
+{
+
+ //
+    // Check to see if there are enough modified pages to institute a
+ // write.
+ //
+
+ if ((MmModifiedPageListHead.Total >= MmModifiedWriteClusterSize) ||
+ (MmModNoWriteInsert)) {
+
+ //
+ // Start the modified page writer.
+ //
+
+ KeSetEvent (&MmModifiedPageWriterEvent, 0, FALSE);
+ }
+
+ //
+ // See if there are enough working sets above the minimum
+ // threshold to make working set trimming worthwhile.
+ //
+
+ if ((MmPagesAboveWsMinimum > MmPagesAboveWsThreshold) ||
+ (MmAvailablePages < 5)) {
+
+ //
+ // Start the working set manager to reduce working sets.
+ //
+
+ KeSetEvent (&MmWorkingSetManagerEvent, 0, FALSE);
+ }
+}
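+
+//
+// Illustrative note: with the logic above, a modified-page write is
+// requested once at least MmModifiedWriteClusterSize pages are on the
+// modified list (or MmModNoWriteInsert is set), and the working set
+// manager is awakened when more than MmPagesAboveWsThreshold pages sit
+// above working set minimums or fewer than 5 pages are available.
+//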
+
+VOID
+MmWorkingSetManager (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ Implements the NT working set manager thread. When the number
+ of free pages becomes critical and ample pages can be obtained by
+ reducing working sets, the working set manager's event is set, and
+ this thread becomes active.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ PEPROCESS CurrentProcess;
+ PEPROCESS ProcessToTrim;
+ PLIST_ENTRY ListEntry;
+ BOOLEAN Attached = FALSE;
+ ULONG MaxTrim;
+ ULONG Trim;
+ ULONG TotalReduction;
+ KIRQL OldIrql;
+ PMMSUPPORT VmSupport;
+ PMMWSL WorkingSetList;
+ LARGE_INTEGER CurrentTime;
+ ULONG DesiredFreeGoal;
+ ULONG DesiredReductionGoal;
+ ULONG FaultCount;
+ ULONG i;
+ ULONG NumberOfForegroundProcesses;
+ BOOLEAN OneSwitchedAlready;
+ BOOLEAN Responsive;
+ ULONG NumPasses;
+ ULONG count;
+ ULONG Available;
+ ULONG PageFaultCount;
+ BOOLEAN OnlyDoAgressiveTrim = FALSE;
+
+#if DBG
+ ULONG LastTrimFaultCount;
+#endif // DBG
+ CurrentProcess = PsGetCurrentProcess ();
+
+ //
+ // Check the number of pages available to see if any trimming
+ // is really required.
+ //
+
+ LOCK_PFN (OldIrql);
+ Available = MmAvailablePages;
+ PageFaultCount = MmInfoCounters.PageFaultCount;
+ UNLOCK_PFN (OldIrql);
+
+ if ((Available > MmMoreThanEnoughFreePages) &&
+ ((PageFaultCount - MmLastFaultCount) <
+ MM_REDUCE_FAULT_COUNT)) {
+
+ //
+ // Don't trim and zero the check counter.
+ //
+
+ MiCheckCounter = 0;
+
+
+ if ( MiDoPeriodicAgressiveTrimming ) {
+
+ //
+ // Not that simple. We have "more than enough" memory, and have taken
+ // very few faults.
+ //
+        // Now see if we are in the grey area between 4MB and 8MB free and have
+ // been there for a bit. If so, then trim all marked processes down
+ // to their minimum. The effect here is that whenever it looks like
+ // we are going idle, we want to steal memory from the hard marked
+ // processes like the shell, csrss, ntvdm...
+ //
+
+ if ( (Available > MmPeriodicAgressiveTrimMinFree) &&
+ (Available <= MmPeriodicAgressiveTrimMaxFree) ) {
+
+ MiPeriodicAgressiveTrimCheckCounter++;
+ if ( MiPeriodicAgressiveTrimCheckCounter > MM_PERIODIC_AGRESSIVE_TRIM_COUNTER_MAXIMUM ) {
+ MiPeriodicAgressiveTrimCheckCounter = 0;
+ OnlyDoAgressiveTrim = TRUE;
+ goto StartTrimming;
+ }
+ }
+ }
+
+
+
+
+ } else if ((Available > MmAmpleFreePages) &&
+ ((PageFaultCount - MmLastFaultCount) <
+ MM_IGNORE_FAULT_COUNT)) {
+
+ //
+ // Don't do anything.
+ //
+
+ NOTHING;
+
+ } else if ((Available > MmFreeGoal) &&
+ (MiCheckCounter < MiTrimCounterMaximum)) {
+
+ //
+ // Don't trim, but increment the check counter.
+ //
+
+ MiCheckCounter += 1;
+
+ } else {
+
+StartTrimming:
+
+ TotalReduction = 0;
+
+ //
+ // Set the total reduction goals.
+ //
+
+ DesiredReductionGoal = MmPagesAboveWsMinimum >> 2;
+ if (MmPagesAboveWsMinimum > (MmFreeGoal << 1)) {
+ DesiredFreeGoal = MmFreeGoal;
+ } else {
+ DesiredFreeGoal = MmMinimumFreePages + 10;
+ }
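+
+        //
+        // Worked example (hypothetical numbers): if MmPagesAboveWsMinimum
+        // were 8000 pages and MmFreeGoal 1024, DesiredReductionGoal would
+        // be 8000 >> 2 == 2000 pages and, since 8000 > (1024 << 1),
+        // DesiredFreeGoal would be MmFreeGoal, i.e. 1024 pages.
+        //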
+
+ //
+ // Calculate the number of faults to be taken to not be trimmed.
+ //
+
+ if (Available > MmMoreThanEnoughFreePages) {
+ FaultCount = 1;
+ } else {
+ FaultCount = MiIdealPassFaultCountDisable;
+ }
+
+#if DBG
+ if (MmDebug & MM_DBG_WS_EXPANSION) {
+ if ( OnlyDoAgressiveTrim ) {
+ DbgPrint("\nMM-wsmanage: Only Doing Agressive Trim Available Mem %d\n",Available);
+ } else {
+ DbgPrint("\nMM-wsmanage: checkcounter = %ld, Desired = %ld, Free = %ld Avail %ld\n",
+ MiCheckCounter, DesiredReductionGoal, DesiredFreeGoal, Available);
+ }
+ }
+#endif //DBG
+
+ KeQuerySystemTime (&CurrentTime);
+ MmLastFaultCount = PageFaultCount;
+
+ NumPasses = 0;
+ OneSwitchedAlready = FALSE;
+ NumberOfForegroundProcesses = 0;
+
+ LOCK_EXPANSION (OldIrql);
+ while (!IsListEmpty (&MmWorkingSetExpansionHead.ListHead)) {
+
+ //
+ // Remove the entry at the head and trim it.
+ //
+
+ ListEntry = RemoveHeadList (&MmWorkingSetExpansionHead.ListHead);
+ if (ListEntry != &MmSystemCacheWs.WorkingSetExpansionLinks) {
+ ProcessToTrim = CONTAINING_RECORD(ListEntry,
+ EPROCESS,
+ Vm.WorkingSetExpansionLinks);
+
+ VmSupport = &ProcessToTrim->Vm;
+ ASSERT (ProcessToTrim->AddressSpaceDeleted == 0);
+ } else {
+ VmSupport = &MmSystemCacheWs;
+ }
+
+ //
+ // Check to see if we've been here before.
+ //
+
+ if ((*(PLARGE_INTEGER)&VmSupport->LastTrimTime).QuadPart ==
+ (*(PLARGE_INTEGER)&CurrentTime).QuadPart) {
+
+ InsertHeadList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+
+ //
+                // If we are only doing aggressive trimming, then
+                // skip out once we have visited everyone.
+ //
+
+ if ( OnlyDoAgressiveTrim ) {
+ break;
+ }
+
+
+ if (MmAvailablePages > MmMinimumFreePages) {
+
+
+ //
+ // Every process has been examined and ample pages
+ // now exist, place this process back on the list
+ // and break out of the loop.
+ //
+
+ MmNumberOfForegroundProcesses = NumberOfForegroundProcesses;
+
+ break;
+ } else {
+
+ //
+ // Wait 10 milliseconds for the modified page writer
+ // to catch up.
+ //
+
+ UNLOCK_EXPANSION (OldIrql);
+ KeDelayExecutionThread (KernelMode,
+ FALSE,
+ &MmShortTime);
+
+ if (MmAvailablePages < MmMinimumFreePages) {
+
+ //
+ // Change this to a forced trim, so we get pages
+ // available, and reset the current time.
+ //
+
+ MiPeriodicAgressiveTrimCheckCounter = 0;
+ MiCheckCounter = 0;
+ KeQuerySystemTime (&CurrentTime);
+
+ NumPasses += 1;
+ }
+ LOCK_EXPANSION (OldIrql);
+
+ //
+ // Get another process.
+ //
+
+ continue;
+ }
+ }
+
+ if (VmSupport != &MmSystemCacheWs) {
+
+ //
+                // If we are only doing aggressive trimming,
+                // then only consider hard marked processes.
+ //
+
+ if ( OnlyDoAgressiveTrim ) {
+ if ( (ProcessToTrim->MmAgressiveWsTrimMask & PS_WS_TRIM_FROM_EXE_HEADER) &&
+ VmSupport->WorkingSetSize > 5 ) {
+ goto ProcessSelected;
+ } else {
+
+ //
+ // Process is not marked, so skip it
+ //
+
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ continue;
+ }
+ }
+ //
+            // Check to see whether this is a forced trim or
+            // whether we are trimming because the check counter is
+            // at the maximum.
+ //
+
+ if ((ProcessToTrim->Vm.MemoryPriority == MEMORY_PRIORITY_FOREGROUND) && !NumPasses) {
+
+ NumberOfForegroundProcesses += 1;
+ }
+
+ if (MiCheckCounter >= MiTrimCounterMaximum) {
+
+ //
+ // Don't trim if less than 5 seconds has elapsed since
+ // it was last trimmed or the page fault count is
+ // too high.
+ //
+
+ if (((VmSupport->PageFaultCount -
+ VmSupport->LastTrimFaultCount) >
+ FaultCount)
+ ||
+ (VmSupport->WorkingSetSize <= 5)
+
+ ||
+ (((*(PLARGE_INTEGER)&CurrentTime).QuadPart -
+ (*(PLARGE_INTEGER)&VmSupport->LastTrimTime).QuadPart) <
+ (*(PLARGE_INTEGER)&MmWorkingSetProtectionTime).QuadPart)) {
+
+#if DBG
+ if (MmDebug & MM_DBG_WS_EXPANSION) {
+ if ( VmSupport->WorkingSetSize > 5 ) {
+ DbgPrint(" ***** Skipping %s Process %16s %5d Faults, WS %6d\n",
+ ProcessToTrim->MmAgressiveWsTrimMask ? "Marked" : "Normal",
+ ProcessToTrim->ImageFileName,
+ VmSupport->PageFaultCount - VmSupport->LastTrimFaultCount,
+ VmSupport->WorkingSetSize
+ );
+ }
+ }
+#endif //DBG
+
+
+ //
+ // Don't trim this one at this time. Set the trim
+ // time to the current time and set the page fault
+ // count to the process's current page fault count.
+ //
+
+ VmSupport->LastTrimTime = CurrentTime;
+ VmSupport->LastTrimFaultCount =
+ VmSupport->PageFaultCount;
+
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ continue;
+ }
+ } else {
+
+ //
+ // This is a forced trim. If this process is at
+                // or below its minimum, don't trim it unless stacks
+ // are swapped out or it's paging a bit.
+ //
+
+ if (VmSupport->WorkingSetSize <=
+ VmSupport->MinimumWorkingSetSize) {
+ if (((MmAvailablePages + 5) >= MmFreeGoal) &&
+ (((VmSupport->LastTrimFaultCount !=
+ VmSupport->PageFaultCount) ||
+ (!ProcessToTrim->ProcessOutswapEnabled)))) {
+
+ //
+ // This process has taken page faults since the
+ // last trim time. Change the time base and
+                        // the fault count, and don't trim it, as it is
+                        // at or below its minimum.
+ //
+
+ VmSupport->LastTrimTime = CurrentTime;
+ VmSupport->LastTrimFaultCount =
+ VmSupport->PageFaultCount;
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ continue;
+ }
+
+ //
+                    // If the working set is greater than 5 pages and
+                    // it was last trimmed longer ago than the protection
+                    // time, trim it; otherwise skip it.
+ //
+
+ if ((VmSupport->WorkingSetSize < 5)
+ ||
+ (((*(PLARGE_INTEGER)&CurrentTime).QuadPart -
+ (*(PLARGE_INTEGER)&VmSupport->LastTrimTime).QuadPart) <
+ (*(PLARGE_INTEGER)&MmWorkingSetProtectionTime).QuadPart)) {
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ continue;
+ }
+ }
+ }
+
+ //
+ // Fix to supply foreground responsiveness by not trimming
+ // foreground priority applications as aggressively.
+ //
+
+ Responsive = FALSE;
+
+            VmSupport->ForegroundSwitchCount = (UCHAR) MmForegroundSwitchCount;
+
+ if ((MmNumberOfForegroundProcesses <= 3) &&
+ (NumberOfForegroundProcesses <= 3) &&
+ (VmSupport->MemoryPriority)) {
+
+ if ((MmAvailablePages > (MmMoreThanEnoughFreePages >> 2)) ||
+ (VmSupport->MemoryPriority >= MEMORY_PRIORITY_FOREGROUND)) {
+
+ //
+ // Indicate that memory responsiveness to the foreground
+ // process is important (not so for large console trees).
+ //
+
+ Responsive = TRUE;
+ }
+ }
+
+ if (Responsive && !NumPasses) {
+
+ //
+                // Note that NumPasses yields a measurement of how
+                // desperate we are for memory; if NumPasses is not
+                // zero, we are in trouble.
+ //
+
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ continue;
+ }
+ProcessSelected:
+ VmSupport->LastTrimTime = CurrentTime;
+ VmSupport->WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION;
+ VmSupport->WorkingSetExpansionLinks.Blink =
+ MM_WS_EXPANSION_IN_PROGRESS;
+ UNLOCK_EXPANSION (OldIrql);
+ WorkingSetList = MmWorkingSetList;
+
+ //
+ // Attach to the process and trim away.
+ //
+
+ if (ProcessToTrim != CurrentProcess) {
+ if (KeTryToAttachProcess (&ProcessToTrim->Pcb) == FALSE) {
+
+ //
+ // The process is not in the proper state for
+ // attachment, go to the next one.
+ //
+
+ LOCK_EXPANSION (OldIrql);
+
+ //
+ // Indicate attach failed.
+ //
+
+ VmSupport->AllowWorkingSetAdjustment = MM_FORCE_TRIM;
+ goto WorkingSetLockFailed;
+ }
+
+ //
+ // Indicate that we are attached.
+ //
+
+ Attached = TRUE;
+ }
+
+ //
+ // Attempt to acquire the working set lock, if the
+ // lock cannot be acquired, skip over this process.
+ //
+
+ count = 0;
+ do {
+ if (ExTryToAcquireFastMutex(&ProcessToTrim->WorkingSetLock) != FALSE) {
+ break;
+ }
+ KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime);
+ count += 1;
+ if (count == 5) {
+
+ //
+ // Could not get the lock, skip this process.
+ //
+
+ if (Attached) {
+ KeDetachProcess ();
+ Attached = FALSE;
+ }
+
+ LOCK_EXPANSION (OldIrql);
+ VmSupport->AllowWorkingSetAdjustment = MM_FORCE_TRIM;
+ goto WorkingSetLockFailed;
+ }
+ } while (TRUE);
+
+#if DBG
+ LastTrimFaultCount = VmSupport->LastTrimFaultCount;
+#endif // DBG
+ VmSupport->LastTrimFaultCount = VmSupport->PageFaultCount;
+
+ } else {
+
+ //
+            // System cache: don't trim the system cache if this
+            // is a voluntary trim and the working set is within
+            // 100 pages of the minimum, or if the system cache
+            // is at its minimum.
+ //
+
+#if DBG
+ LastTrimFaultCount = VmSupport->LastTrimFaultCount;
+#endif // DBG
+ VmSupport->LastTrimTime = CurrentTime;
+ VmSupport->LastTrimFaultCount = VmSupport->PageFaultCount;
+
+ //
+            // Always skip the cache if all we are doing is aggressive trimming.
+ //
+
+ if ((MiCheckCounter >= MiTrimCounterMaximum) &&
+ (((LONG)VmSupport->WorkingSetSize -
+ (LONG)VmSupport->MinimumWorkingSetSize) < 100) ){
+
+ //
+ // Don't trim the system cache.
+ //
+
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ continue;
+ }
+
+ //
+ // Indicate that this process is being trimmed.
+ //
+
+ UNLOCK_EXPANSION (OldIrql);
+
+ ProcessToTrim = NULL;
+ WorkingSetList = MmSystemCacheWorkingSetList;
+ count = 0;
+
+ KeRaiseIrql (APC_LEVEL, &OldIrql);
+ if (!ExTryToAcquireResourceExclusiveLite (&MmSystemWsLock)) {
+
+ //
+ // System working set lock was not granted, don't trim
+ // the system cache.
+ //
+
+ KeLowerIrql (OldIrql);
+ LOCK_EXPANSION (OldIrql);
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ continue;
+ }
+
+ MmSystemLockOwner = PsGetCurrentThread();
+ VmSupport->WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION;
+ VmSupport->WorkingSetExpansionLinks.Blink =
+ MM_WS_EXPANSION_IN_PROGRESS;
+ }
+
+ if ((VmSupport->WorkingSetSize <= VmSupport->MinimumWorkingSetSize) &&
+ ((ProcessToTrim != NULL) &&
+ (ProcessToTrim->ProcessOutswapEnabled))) {
+
+ //
+ // Set the quota to the minimum and reduce the working
+ // set size.
+ //
+
+ WorkingSetList->Quota = VmSupport->MinimumWorkingSetSize;
+ Trim = VmSupport->WorkingSetSize - WorkingSetList->FirstDynamic;
+ if (Trim > MmWorkingSetSwapReduction) {
+ Trim = MmWorkingSetSwapReduction;
+ }
+
+ ASSERT ((LONG)Trim >= 0);
+
+ } else {
+
+ MaxTrim = VmSupport->WorkingSetSize -
+ VmSupport->MinimumWorkingSetSize;
+ if ((ProcessToTrim != NULL) &&
+ (ProcessToTrim->ProcessOutswapEnabled)) {
+
+ //
+ // All thread stacks have been swapped out.
+ //
+
+ Trim = MmWorkingSetSwapReduction;
+ i = VmSupport->WorkingSetSize - VmSupport->MaximumWorkingSetSize;
+ if ((LONG)i > 0) {
+ Trim = i;
+ if (Trim > MmWorkingSetSwapReductionHuge) {
+ Trim = MmWorkingSetSwapReductionHuge;
+ }
+ }
+
+ } else if ( OnlyDoAgressiveTrim ) {
+
+ //
+                // If we are in aggressive mode, only trim the cache if
+                // its WS exceeds MmPeriodicAgressiveCacheWsMin pages, and
+                // then just bring it down to that value.
+ //
+
+ if (VmSupport != &MmSystemCacheWs) {
+ Trim = MaxTrim;
+ } else {
+ if ( VmSupport->WorkingSetSize > MmPeriodicAgressiveCacheWsMin ) {
+ Trim = VmSupport->WorkingSetSize - MmPeriodicAgressiveCacheWsMin;
+ } else {
+ Trim = 0;
+ }
+ }
+
+ } else if (MiCheckCounter >= MiTrimCounterMaximum) {
+
+ //
+ // Haven't faulted much, reduce a bit.
+ //
+
+ if (VmSupport->WorkingSetSize >
+ (VmSupport->MaximumWorkingSetSize +
+ (6 * MmWorkingSetVolReductionHuge))) {
+ Trim = MmWorkingSetVolReductionHuge;
+
+ } else if ( (VmSupport != &MmSystemCacheWs) &&
+ VmSupport->WorkingSetSize >
+ ( VmSupport->MaximumWorkingSetSize + (2 * MmWorkingSetReductionHuge))) {
+ Trim = MmWorkingSetReductionHuge;
+ } else if (VmSupport->WorkingSetSize > VmSupport->MaximumWorkingSetSize) {
+ if (VmSupport != &MmSystemCacheWs) {
+ Trim = MmWorkingSetVolReductionMax;
+ } else {
+ Trim = MmWorkingSetVolReductionMaxCacheWs;
+ }
+ } else {
+ Trim = MmWorkingSetVolReductionMin;
+ }
+
+ if ( ProcessToTrim && ProcessToTrim->MmAgressiveWsTrimMask ) {
+ Trim = MaxTrim;
+ }
+
+
+ } else {
+
+ if (VmSupport->WorkingSetSize >
+ (VmSupport->MaximumWorkingSetSize +
+ (2 * MmWorkingSetReductionHuge))) {
+ Trim = MmWorkingSetReductionHuge;
+
+ } else if (VmSupport->WorkingSetSize > VmSupport->MaximumWorkingSetSize) {
+ if (VmSupport != &MmSystemCacheWs) {
+ Trim = MmWorkingSetReductionMax;
+ } else {
+ Trim = MmWorkingSetReductionMaxCacheWs;
+ }
+ } else {
+ if (VmSupport != &MmSystemCacheWs) {
+ Trim = MmWorkingSetReductionMin;
+ } else {
+ Trim = MmWorkingSetReductionMinCacheWs;
+ }
+ }
+
+ if ( ProcessToTrim && ProcessToTrim->MmAgressiveWsTrimMask && VmSupport->MemoryPriority < MEMORY_PRIORITY_FOREGROUND) {
+ Trim = MaxTrim;
+ }
+
+ }
+
+ if (MaxTrim < Trim) {
+ Trim = MaxTrim;
+ }
+ }
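+
+        //
+        // Worked example (hypothetical numbers): in a voluntary trim
+        // (MiCheckCounter at its maximum) of a normal process with
+        // WorkingSetSize 200, MinimumWorkingSetSize 50, and
+        // MaximumWorkingSetSize 300, MaxTrim is 150 and Trim starts at
+        // MmWorkingSetVolReductionMin (12 pages), so 12 pages are trimmed.
+        //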
+
+#if DBG
+ if ( MmDebug & MM_DBG_WS_EXPANSION) {
+ if ( Trim ) {
+ DbgPrint(" Trimming Process %16s %5d Faults, WS %6d, Trimming %5d ==> %5d\n",
+ ProcessToTrim ? ProcessToTrim->ImageFileName : "System Cache",
+ VmSupport->PageFaultCount - LastTrimFaultCount,
+ VmSupport->WorkingSetSize,
+ Trim,
+ VmSupport->WorkingSetSize-Trim
+ );
+ }
+ }
+#endif //DBG
+
+ if (Trim != 0) {
+ Trim = MiTrimWorkingSet (
+ Trim,
+ VmSupport,
+ OnlyDoAgressiveTrim ? OnlyDoAgressiveTrim : ((BOOLEAN)(MiCheckCounter < MiTrimCounterMaximum))
+ );
+ }
+
+ //
+ // Set the quota to the current size.
+ //
+
+ WorkingSetList->Quota = VmSupport->WorkingSetSize;
+ if (WorkingSetList->Quota < VmSupport->MinimumWorkingSetSize) {
+ WorkingSetList->Quota = VmSupport->MinimumWorkingSetSize;
+ }
+
+
+ if (VmSupport != &MmSystemCacheWs) {
+ UNLOCK_WS (ProcessToTrim);
+ if (Attached) {
+ KeDetachProcess ();
+ Attached = FALSE;
+ }
+
+ } else {
+ UNLOCK_SYSTEM_WS (OldIrql);
+ }
+
+ TotalReduction += Trim;
+
+ LOCK_EXPANSION (OldIrql);
+
+WorkingSetLockFailed:
+
+ ASSERT (VmSupport->WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION);
+ if (VmSupport->WorkingSetExpansionLinks.Blink ==
+ MM_WS_EXPANSION_IN_PROGRESS) {
+
+ //
+ // If the working set size is still above minimum
+ // add this back at the tail of the list.
+ //
+
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ } else {
+
+ //
+ // The value in the blink is the address of an event
+ // to set.
+ //
+
+ KeSetEvent ((PKEVENT)VmSupport->WorkingSetExpansionLinks.Blink,
+ 0,
+ FALSE);
+ }
+
+ if ( !OnlyDoAgressiveTrim ) {
+ if (MiCheckCounter < MiTrimCounterMaximum) {
+ if ((MmAvailablePages > DesiredFreeGoal) ||
+ (TotalReduction > DesiredReductionGoal)) {
+
+ //
+ // Ample pages now exist.
+ //
+
+ break;
+ }
+ }
+ }
+ }
+
+ MiPeriodicAgressiveTrimCheckCounter = 0;
+ MiCheckCounter = 0;
+ UNLOCK_EXPANSION (OldIrql);
+ }
+
+ //
+    // Signal the modified page writer if memory is critical or pages
+    // have accumulated on the modified list.
+ //
+
+ if ((MmAvailablePages < MmMinimumFreePages) ||
+ (MmModifiedPageListHead.Total >= MmModifiedPageMaximum)) {
+ KeSetEvent (&MmModifiedPageWriterEvent, 0, FALSE);
+ }
+
+ return;
+}
+
+VOID
+MiEmptyAllWorkingSets (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    This routine attempts to empty all the working sets on the
+ expansion list.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+    Kernel mode. No locks held. APC level or less.
+
+--*/
+
+{
+ PMMSUPPORT VmSupport;
+ PMMSUPPORT FirstSeen = NULL;
+ ULONG SystemCacheSeen = FALSE;
+ KIRQL OldIrql;
+ PLIST_ENTRY ListEntry;
+ PEPROCESS ProcessToTrim;
+
+ MmLockPagableSectionByHandle (ExPageLockHandle);
+ LOCK_EXPANSION (OldIrql);
+
+ while (!IsListEmpty (&MmWorkingSetExpansionHead.ListHead)) {
+
+ //
+ // Remove the entry at the head and trim it.
+ //
+
+ ListEntry = RemoveHeadList (&MmWorkingSetExpansionHead.ListHead);
+
+ if (ListEntry != &MmSystemCacheWs.WorkingSetExpansionLinks) {
+ ProcessToTrim = CONTAINING_RECORD(ListEntry,
+ EPROCESS,
+ Vm.WorkingSetExpansionLinks);
+
+ VmSupport = &ProcessToTrim->Vm;
+ ASSERT (ProcessToTrim->AddressSpaceDeleted == 0);
+ ASSERT (VmSupport->VmWorkingSetList == MmWorkingSetList);
+ } else {
+ VmSupport = &MmSystemCacheWs;
+ ProcessToTrim = NULL;
+ if (SystemCacheSeen != FALSE) {
+
+ //
+ // Seen this one already.
+ //
+
+ FirstSeen = VmSupport;
+ }
+ SystemCacheSeen = TRUE;
+ }
+
+ if (VmSupport == FirstSeen) {
+ InsertHeadList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ break;
+ }
+
+ VmSupport->WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION;
+ VmSupport->WorkingSetExpansionLinks.Blink =
+ MM_WS_EXPANSION_IN_PROGRESS;
+ UNLOCK_EXPANSION (OldIrql);
+
+ if (FirstSeen == NULL) {
+            FirstSeen = VmSupport;
+ }
+
+ //
+ // Empty the working set.
+ //
+
+ if (ProcessToTrim == NULL) {
+ MiEmptyWorkingSet (VmSupport);
+ } else {
+ if (ProcessToTrim->Vm.WorkingSetSize > 4) {
+ KeAttachProcess (&ProcessToTrim->Pcb);
+ MiEmptyWorkingSet (VmSupport);
+ KeDetachProcess ();
+ }
+ }
+
+ //
+ // Add back to the list.
+ //
+
+ LOCK_EXPANSION (OldIrql);
+ ASSERT (VmSupport->WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION);
+ if (VmSupport->WorkingSetExpansionLinks.Blink ==
+ MM_WS_EXPANSION_IN_PROGRESS) {
+
+ //
+ // If the working set size is still above minimum
+ // add this back at the tail of the list.
+ //
+
+ InsertTailList (&MmWorkingSetExpansionHead.ListHead,
+ &VmSupport->WorkingSetExpansionLinks);
+ } else {
+
+ //
+ // The value in the blink is the address of an event
+ // to set.
+ //
+
+ KeSetEvent ((PKEVENT)VmSupport->WorkingSetExpansionLinks.Blink,
+ 0,
+ FALSE);
+ }
+ }
+ UNLOCK_EXPANSION (OldIrql);
+ MmUnlockPagableImageSection (ExPageLockHandle);
+ return;
+}
diff --git a/private/ntos/mm/wstree.c b/private/ntos/mm/wstree.c
new file mode 100644
index 000000000..231936bee
--- /dev/null
+++ b/private/ntos/mm/wstree.c
@@ -0,0 +1,1418 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ wstree.c
+
+Abstract:
+
+ This module contains the routines which manipulate the working
+ set list tree.
+
+Author:
+
+ Lou Perazzoli (loup) 15-May-1989
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+#if (_MSC_VER >= 800)
+#pragma warning(disable:4010) /* Allow pretty pictures without the noise */
+#endif
+
+extern ULONG MmSystemCodePage;
+extern ULONG MmSystemCachePage;
+extern ULONG MmPagedPoolPage;
+extern ULONG MmSystemDriverPage;
+
+ULONG MmNumberOfInserts;
+
+ULONG
+MiLookupWsleHashIndex (
+ IN ULONG WsleEntry,
+ IN PMMWSL WorkingSetList
+ );
+
+VOID
+MiCheckWsleHash (
+ IN PMMWSL WorkingSetList
+ );
+
+
+VOID
+FASTCALL
+MiInsertWsle (
+ IN ULONG Entry,
+ IN PMMWSL WorkingSetList
+ )
+
+/*++
+
+Routine Description:
+
+ This routine inserts a Working Set List Entry (WSLE) into the
+ working set tree.
+
+Arguments:
+
+    Entry - The index number of the WSLE to insert.
+
+    WorkingSetList - Supplies a pointer to the working set list.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working Set Mutex held.
+
+--*/
+
+{
+ ULONG i;
+ ULONG Parent;
+ PVOID VirtualAddress;
+ PMMWSLE Wsle;
+ PMMSUPPORT WsInfo;
+ ULONG Hash;
+ PMMWSLE_HASH Table;
+ ULONG j;
+ PMMPTE PointerPte;
+ ULONG Index;
+ LARGE_INTEGER TickCount;
+ ULONG Size;
+ KIRQL OldIrql;
+
+ Wsle = WorkingSetList->Wsle;
+
+ VirtualAddress = PAGE_ALIGN(Wsle[Entry].u1.VirtualAddress);
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ DbgPrint("inserting element %lx %lx\n", Entry, Wsle[Entry].u1.Long);
+ }
+
+ ASSERT (Wsle[Entry].u1.e1.Valid == 1);
+ ASSERT (Wsle[Entry].u1.e1.Direct != 1);
+#endif //DBG
+
+ WorkingSetList->NonDirectCount += 1;
+
+ if ((Table = WorkingSetList->HashTable) == NULL) {
+ return;
+ }
+
+#if DBG
+ MmNumberOfInserts += 1;
+#endif //DBG
+
+ Hash = (Wsle[Entry].u1.Long >> (PAGE_SHIFT - 2)) % (WorkingSetList->HashTableSize - 1);
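+
+    //
+    // Illustrative note: the hash key is the page-aligned virtual address.
+    // With a hypothetical PAGE_SHIFT of 12, an address of 0x00401000 probes
+    // starting at slot (0x00401000 >> 10) % (HashTableSize - 1), advancing
+    // linearly on collision as in the insertion loop below.
+    //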
+
+ //
+ // Check hash table size and see if there is enough room to
+ // hash or if the table should be grown.
+ //
+
+ if ((WorkingSetList->NonDirectCount + 10 +
+ (WorkingSetList->HashTableSize >> 4)) >
+ WorkingSetList->HashTableSize) {
+
+ if (MmWorkingSetList == WorkingSetList) {
+ WsInfo = &PsGetCurrentProcess()->Vm;
+ } else {
+ WsInfo = &MmSystemCacheWs;
+ }
+
+ if ((WorkingSetList->HashTableSize <
+ (MM_WSLE_MAX_HASH_SIZE - ((2*PAGE_SIZE) / sizeof (MMWSLE_HASH))))
+ &&
+ (WsInfo->AllowWorkingSetAdjustment)) {
+ LOCK_EXPANSION_IF_ALPHA (OldIrql);
+ WsInfo->AllowWorkingSetAdjustment = MM_GROW_WSLE_HASH;
+ UNLOCK_EXPANSION_IF_ALPHA (OldIrql);
+ }
+
+ if ((WorkingSetList->NonDirectCount +
+ (WorkingSetList->HashTableSize >> 4)) >
+ WorkingSetList->HashTableSize) {
+
+ //
+            // No more room in the hash table, remove one entry and add
+            // this one in its place. Pick a victim within 16 slots of
+            // where this entry would hash to.
+ //
+
+ KeQueryTickCount(&TickCount);
+ j = Hash + (TickCount.LowPart & 0xF);
+
+ Size = WorkingSetList->HashTableSize;
+
+ if (j >= Size) {
+ j = TickCount.LowPart & 0xF;
+ }
+
+ do {
+ if (Table[j].Key != 0) {
+ Index = WorkingSetList->HashTable[j].Index;
+ ASSERT (Wsle[Index].u1.e1.Valid == 1);
+ PointerPte = MiGetPteAddress (Wsle[Index].u1.VirtualAddress);
+ if (MiFreeWsle (Index, WsInfo, PointerPte)) {
+ break;
+ }
+ }
+ j += 1;
+ if (j >= Size) {
+ j = 0;
+ }
+ } while (TRUE);
+ }
+ }
+
+ //
+ // Add to the hash table.
+ //
+
+ while (Table[Hash].Key != 0) {
+ Hash += 1;
+ if (Hash >= WorkingSetList->HashTableSize) {
+ Hash = 0;
+ }
+ }
+
+ Table[Hash].Key = Wsle[Entry].u1.Long & ~(PAGE_SIZE - 1);
+ Table[Hash].Index = Entry;
+
+#if DBG
+ if ((MmNumberOfInserts % 1000) == 0) {
+ MiCheckWsleHash (WorkingSetList);
+ }
+#endif //DBG
+ return;
+}
+
+#if DBG
+VOID
+MiCheckWsleHash (
+ IN PMMWSL WorkingSetList
+ )
+
+{
+ ULONG j;
+ ULONG found = 0;
+ PMMWSLE Wsle;
+
+ Wsle = WorkingSetList->Wsle;
+
+ for (j =0; j < WorkingSetList->HashTableSize ; j++ ) {
+ if (WorkingSetList->HashTable[j].Key != 0) {
+ found += 1;
+ ASSERT (WorkingSetList->HashTable[j].Key ==
+ (Wsle[WorkingSetList->HashTable[j].Index].u1.Long &
+ ~(PAGE_SIZE -1)));
+ }
+ }
+ if (found != WorkingSetList->NonDirectCount) {
+ DbgPrint("MMWSLE: Found %lx, nondirect %lx\n",
+ found, WorkingSetList->NonDirectCount);
+ DbgBreakPoint();
+ }
+}
+#endif //dbg
+
+
+ULONG
+FASTCALL
+MiLocateWsle (
+ IN PVOID VirtualAddress,
+ IN PMMWSL WorkingSetList,
+ IN ULONG WsPfnIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This function locates the specified virtual address within the
+ working set list.
+
+Arguments:
+
+    VirtualAddress - Supplies the virtual address to locate within the
+        working set list.
+
+    WorkingSetList - Supplies a pointer to the working set list.
+
+    WsPfnIndex - Supplies the index field from the PFN database element
+        for the physical page that maps the specified virtual address.
+
+Return Value:
+
+ Returns the index into the working set list which contains the entry.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working Set Mutex held.
+
+--*/
+
+{
+ ULONG i;
+ PMMWSLE Wsle;
+ ULONG LastWsle;
+ ULONG Hash;
+ PMMWSLE_HASH Table;
+ ULONG Tries;
+
+ Wsle = WorkingSetList->Wsle;
+
+ VirtualAddress = PAGE_ALIGN(VirtualAddress);
+
+ if (WsPfnIndex <= WorkingSetList->LastInitializedWsle) {
+ if ((VirtualAddress == PAGE_ALIGN(Wsle[WsPfnIndex].u1.VirtualAddress)) &&
+ (Wsle[WsPfnIndex].u1.e1.Valid == 1)) {
+ return WsPfnIndex;
+ }
+ }
+
+ if (WorkingSetList->HashTable) {
+ Tries = 0;
+ Table = WorkingSetList->HashTable;
+
+ Hash = ((ULONG)VirtualAddress >> (PAGE_SHIFT - 2)) % (WorkingSetList->HashTableSize - 1);
+
+ while (Table[Hash].Key != (ULONG)VirtualAddress) {
+ Hash += 1;
+ if (Hash >= WorkingSetList->HashTableSize) {
+ Hash = 0;
+ if (Tries != 0) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x41284,
+ (ULONG)VirtualAddress,
+ WsPfnIndex,
+ (ULONG)WorkingSetList);
+ }
+ Tries = 1;
+ }
+ }
+ ASSERT (WorkingSetList->Wsle[Table[Hash].Index].u1.e1.Direct == 0);
+ return Table[Hash].Index;
+ }
+
+ i = 0;
+
+ for (; ; ) {
+ if ((VirtualAddress == PAGE_ALIGN(Wsle[i].u1.VirtualAddress)) &&
+ (Wsle[i].u1.e1.Valid == 1)) {
+ ASSERT (WorkingSetList->Wsle[i].u1.e1.Direct == 0);
+ return i;
+ }
+ i += 1;
+ }
+}
+
+
+#if 0
+
+ULONG
+MiLocateWsleAndParent (
+ IN PVOID VirtualAddress,
+ OUT PULONG Parent,
+ IN PMMWSL WorkingSetList,
+ IN ULONG WsPfnIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine locates both the working set list entry (via index) and
+ it's parent.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address of the WSLE to locate.
+
+ Parent - Returns the index into the working set list for the parent.
+
+ WorkingSetList - Supplies a pointer to the working set list.
+
+ WsPfnIndex - Supplies the index field from the PFN database for
+ the physical page that maps the specified virtual address.
+
+Return Value:
+
+    Returns the index of the virtual address in the working set list.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working Set Mutex held.
+
+--*/
+
+{
+ ULONG Previous;
+ ULONG Entry;
+ PMMWSLE Wsle;
+
+ Wsle = WorkingSetList->Wsle;
+
+ //
+ // Check to see if the PfnIndex field refers to the WSLE in question.
+ // Make sure the index is within the specified working set list.
+ //
+
+ if (WsPfnIndex <= WorkingSetList->LastInitializedWsle) {
+ if (VirtualAddress == PAGE_ALIGN(Wsle[WsPfnIndex].u1.VirtualAddress)) {
+
+ //
+ // The index field points to the WSLE, however, this could
+ // have been just a coincidence, so check to ensure it
+ // really doesn't have a parent.
+ //
+
+ if (Wsle[WsPfnIndex].u2.BothPointers == 0) {
+
+ //
+ // Not in tree, therefore has no parent.
+ //
+
+ *Parent = WSLE_NULL_INDEX;
+ return WsPfnIndex;
+ }
+ }
+ }
+
+ //
+ // Search the tree for the entry remembering the parents.
+ //
+
+ Entry = WorkingSetList->Root;
+ Previous = Entry;
+
+ for (;;) {
+
+ ASSERT (Entry != WSLE_NULL_INDEX);
+
+ if (VirtualAddress == PAGE_ALIGN(Wsle[Entry].u1.VirtualAddress)) {
+ break;
+ }
+
+ if (VirtualAddress < PAGE_ALIGN(Wsle[Entry].u1.VirtualAddress)) {
+ Previous = Entry;
+ Entry = Wsle[Entry].u2.s.LeftChild;
+ } else {
+ Previous = Entry;
+ Entry = Wsle[Entry].u2.s.RightChild;
+ }
+ }
+
+ *Parent = Previous;
+ return Entry;
+}
+#endif //0
+
+
+VOID
+FASTCALL
+MiRemoveWsle (
+ ULONG Entry,
+ IN PMMWSL WorkingSetList
+ )
+
+/*++
+
+Routine Description:
+
+ This routine removes a Working Set List Entry (WSLE) from the
+ working set tree.
+
+Arguments:
+
+    Entry - The index number of the WSLE to remove.
+
+    WorkingSetList - Supplies a pointer to the working set list.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working Set Mutex held.
+
+--*/
+{
+ ULONG i;
+ ULONG Parent;
+ ULONG Pred;
+ ULONG PredParent;
+ PMMWSLE Wsle;
+ PVOID VirtualAddress;
+ PMMWSLE_HASH Table;
+ ULONG Hash;
+ ULONG Tries;
+
+ Wsle = WorkingSetList->Wsle;
+
+ //
+ // Locate the entry in the tree.
+ //
+
+#if DBG
+ if (MmDebug & MM_DBG_PTE_UPDATE) {
+ DbgPrint("removing wsle %lx %lx\n",
+ Entry, Wsle[Entry].u1.Long);
+ }
+ if (MmDebug & MM_DBG_DUMP_WSL) {
+ MiDumpWsl();
+ DbgPrint(" \n");
+ }
+
+#endif //DBG
+
+ ASSERT (Wsle[Entry].u1.e1.Valid == 1);
+
+ VirtualAddress = PAGE_ALIGN (Wsle[Entry].u1.VirtualAddress);
+
+ if (WorkingSetList == MmSystemCacheWorkingSetList) {
+
+ //
+    // Count system space inserts and removals.
+ //
+
+ if (VirtualAddress < (PVOID)MM_SYSTEM_CACHE_START) {
+ MmSystemCodePage -= 1;
+ } else if (VirtualAddress < MM_PAGED_POOL_START) {
+ MmSystemCachePage -= 1;
+ } else if (VirtualAddress < MmNonPagedSystemStart) {
+ MmPagedPoolPage -= 1;
+ } else {
+ MmSystemDriverPage -= 1;
+ }
+ }
+
+ Wsle[Entry].u1.e1.Valid = 0;
+
+ if (Wsle[Entry].u1.e1.Direct == 0) {
+
+ WorkingSetList->NonDirectCount -= 1;
+
+ if (WorkingSetList->HashTable) {
+ Hash = (Wsle[Entry].u1.Long >> (PAGE_SHIFT - 2)) % (WorkingSetList->HashTableSize - 1);
+ Table = WorkingSetList->HashTable;
+ Tries = 0;
+
+ while (Table[Hash].Key != (ULONG)VirtualAddress) {
+ Hash += 1;
+ if (Hash >= WorkingSetList->HashTableSize) {
+ Hash = 0;
+ if (Tries != 0) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x41784,
+ (ULONG)VirtualAddress,
+ Entry,
+ (ULONG)WorkingSetList);
+ }
+ Tries = 1;
+ }
+ }
+ Table[Hash].Key = 0;
+ }
+ }
+
+ return;
+}
+
+
+VOID
+MiSwapWslEntries (
+ IN ULONG SwapEntry,
+ IN ULONG Entry,
+ IN PMMSUPPORT WsInfo
+ )
+
+/*++
+
+Routine Description:
+
+ This routine swaps the working set list entries Entry and SwapEntry
+ in the specified working set list (process or system cache).
+
+Arguments:
+
+ SwapEntry - Supplies the first entry to swap. This entry must be
+ valid, i.e. in the working set at the current time.
+
+ Entry - Supplies the other entry to swap. This entry may be valid
+ or invalid.
+
+ WsInfo - Supplies the working set list.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, Working set lock and PFN lock held (if system cache),
+ APC's disabled.
+
+--*/
+
+{
+ MMWSLE WsleEntry;
+ MMWSLE WsleSwap;
+ PMMPTE PointerPte;
+ PMMPFN Pfn1;
+ PMMWSLE Wsle;
+ PMMWSL WorkingSetList;
+ PMMWSLE_HASH Table;
+#if DBG
+ ULONG CurrentSize = WsInfo->WorkingSetSize;
+#endif //DBG
+
+ WorkingSetList = WsInfo->VmWorkingSetList;
+ Wsle = WorkingSetList->Wsle;
+
+ WsleSwap = Wsle[SwapEntry];
+
+ ASSERT (WsleSwap.u1.e1.Valid != 0);
+
+ WsleEntry = Wsle[Entry];
+
+ Table = WorkingSetList->HashTable;
+
+ if (WsleEntry.u1.e1.Valid == 0) {
+
+ //
+ // Entry is not on any list. Remove it from the free list.
+ //
+
+ MiRemoveWsleFromFreeList (Entry, Wsle, WorkingSetList);
+
+ //
+ // Copy the Entry to this free one.
+ //
+
+ Wsle[Entry] = WsleSwap;
+
+ if (WsleSwap.u1.e1.Direct) {
+ PointerPte = MiGetPteAddress (WsleSwap.u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = Entry;
+ } else {
+
+ //
+ // Update hash table.
+ //
+
+ if (Table) {
+ Table [ MiLookupWsleHashIndex (WsleSwap.u1.Long,
+ WorkingSetList)].Index = Entry;
+ }
+ }
+
+ //
+ // Put entry on free list.
+ //
+
+ ASSERT (WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle);
+ Wsle[SwapEntry].u1.Long = WorkingSetList->FirstFree << MM_FREE_WSLE_SHIFT;
+ WorkingSetList->FirstFree = SwapEntry;
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ } else {
+
+ //
+ // Both entries are valid.
+ //
+
+ Wsle[SwapEntry] = WsleEntry;
+
+ if (WsleEntry.u1.e1.Direct) {
+
+ //
+ // Swap the PFN WsIndex element to point to the new slot.
+ //
+
+ PointerPte = MiGetPteAddress (WsleEntry.u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = SwapEntry;
+ } else {
+
+ //
+ // Update hash table.
+ //
+
+ if (Table) {
+ Table[ MiLookupWsleHashIndex (WsleEntry.u1.Long,
+ WorkingSetList)].Index = SwapEntry;
+ }
+ }
+
+ Wsle[Entry] = WsleSwap;
+
+ if (WsleSwap.u1.e1.Direct) {
+
+ PointerPte = MiGetPteAddress (WsleSwap.u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = Entry;
+ } else {
+ if (Table) {
+ Table[ MiLookupWsleHashIndex (WsleSwap.u1.Long,
+ WorkingSetList)].Index = Entry;
+ }
+ }
+ }
+ ASSERT (CurrentSize == WsInfo->WorkingSetSize);
+ return;
+}
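+
+//
+// Illustrative note: the swap above must keep both reverse mappings
+// consistent. A direct WSLE is located through Pfn->u1.WsIndex, so that
+// index is rewritten; a non-direct WSLE is located through the hash
+// table, so the matching hash entry's Index field is rewritten instead.
+//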
+
+ULONG
+MiLookupWsleHashIndex (
+ IN ULONG WsleEntry,
+ IN PMMWSL WorkingSetList
+ )
+
+{
+ ULONG Hash;
+ ULONG VirtualAddress;
+ PMMWSLE_HASH Table;
+ ULONG Tries = 0;
+
+ Table = WorkingSetList->HashTable;
+ VirtualAddress = WsleEntry & ~(PAGE_SIZE - 1);
+
+ Hash = ((ULONG)VirtualAddress >> (PAGE_SHIFT - 2)) % (WorkingSetList->HashTableSize - 1);
+
+ while (Table[Hash].Key != (ULONG)VirtualAddress) {
+ Hash += 1;
+ if (Hash >= WorkingSetList->HashTableSize) {
+ Hash = 0;
+ if (Tries != 0) {
+ KeBugCheckEx (MEMORY_MANAGEMENT,
+ 0x41884,
+ (ULONG)VirtualAddress,
+ WsleEntry,
+ (ULONG)WorkingSetList);
+ }
+ Tries = 1;
+ }
+ }
+ return Hash;
+}
+
+VOID
+MiRemoveWsleFromFreeList (
+ IN ULONG Entry,
+ IN PMMWSLE Wsle,
+ IN PMMWSL WorkingSetList
+ )
+
+/*++
+
+Routine Description:
+
+ This routine removes a working set list entry from the free list.
+    It is used when the required entry is not the first element
+ in the free list.
+
+Arguments:
+
+ Entry - Supplies the index of the entry to remove.
+
+ Wsle - Supplies a pointer to the array of WSLEs.
+
+ WorkingSetList - Supplies a pointer to the working set list.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, Working set lock and PFN lock held, APC's disabled.
+
+--*/
+
+{
+ ULONG Free;
+ ULONG ParentFree;
+
+ Free = WorkingSetList->FirstFree;
+
+ if (Entry == Free) {
+ ASSERT ((Wsle[Entry].u1.Long >> MM_FREE_WSLE_SHIFT) <= WorkingSetList->LastInitializedWsle);
+ WorkingSetList->FirstFree = Wsle[Entry].u1.Long >> MM_FREE_WSLE_SHIFT;
+
+ } else {
+ do {
+ ParentFree = Free;
+ ASSERT (Wsle[Free].u1.e1.Valid == 0);
+ Free = Wsle[Free].u1.Long >> MM_FREE_WSLE_SHIFT;
+ } while (Free != Entry);
+
+ Wsle[ParentFree].u1.Long = Wsle[Entry].u1.Long;
+ }
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+ return;
+}
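+
+//
+// Worked example (hypothetical indices): with a free list
+// FirstFree -> 4 -> 9 -> 2, removing entry 9 walks from 4 (ParentFree)
+// to 9, then copies Wsle[9].u1.Long into Wsle[4].u1.Long, leaving the
+// list FirstFree -> 4 -> 2.
+//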
+
+
+#if 0
+
+VOID
+MiSwapWslEntries (
+ IN ULONG Entry,
+ IN ULONG Parent,
+ IN ULONG SwapEntry,
+ IN PMMWSL WorkingSetList
+ )
+
+/*++
+
+Routine Description:
+
+ This function swaps the specified entry and updates its parent with
+ the specified swap entry.
+
+ The entry must be valid, i.e., the page is resident. The swap entry
+ can be valid or on the free list.
+
+Arguments:
+
+ Entry - The index of the WSLE to swap.
+
+ Parent - The index of the parent of the WSLE to swap.
+
+ SwapEntry - The index to swap the entry with.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, working set mutex held, APC's disabled.
+
+--*/
+
+{
+
+ ULONG SwapParent;
+ ULONG SavedRight;
+ ULONG SavedLeft;
+ ULONG Free;
+ ULONG ParentFree;
+ ULONG SavedLong;
+ PVOID VirtualAddress;
+ PMMWSLE Wsle;
+ PMMPFN Pfn1;
+ PMMPTE PointerPte;
+
+ Wsle = WorkingSetList->Wsle;
+
+ if (Wsle[SwapEntry].u1.e1.Valid == 0) {
+
+ //
+ // This entry is not in use and must be removed from
+ // the free list.
+ //
+
+ Free = WorkingSetList->FirstFree;
+
+ if (SwapEntry == Free) {
+ WorkingSetList->FirstFree = Entry;
+ ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) ||
+ (WorkingSetList->FirstFree == WSLE_NULL_INDEX));
+
+ } else {
+
+ while (Free != SwapEntry) {
+ ParentFree = Free;
+ Free = Wsle[Free].u2.s.LeftChild;
+ }
+
+ Wsle[ParentFree].u2.s.LeftChild = Entry;
+ }
+
+ //
+ // Swap the previous entry and the new unused entry.
+ //
+
+ SavedLeft = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = SavedLeft;
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[SwapEntry].u1.Long = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = 0;
+
+ //
+ // Make the parent point to the new entry.
+ //
+
+ if (Parent == WSLE_NULL_INDEX) {
+
+ //
+ // This entry is not in the tree.
+ //
+
+ PointerPte = MiGetPteAddress (Wsle[SwapEntry].u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = SwapEntry;
+ return;
+ }
+
+ if (Parent == Entry) {
+
+ //
+ // This element is the root, update the root pointer.
+ //
+
+ WorkingSetList->Root = SwapEntry;
+
+ } else {
+
+ if (Wsle[Parent].u2.s.LeftChild == Entry) {
+ Wsle[Parent].u2.s.LeftChild = SwapEntry;
+ } else {
+ ASSERT (Wsle[Parent].u2.s.RightChild == Entry);
+
+ Wsle[Parent].u2.s.RightChild = SwapEntry;
+ }
+ }
+
+ } else {
+
+ if ((Parent == WSLE_NULL_INDEX) &&
+ (Wsle[SwapEntry].u2.BothPointers == 0)) {
+
+ //
+ // Neither entry is in the tree, just swap their pointers.
+ //
+
+ SavedLong = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = SavedLong;
+
+ PointerPte = MiGetPteAddress (Wsle[Entry].u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = Entry;
+
+ PointerPte = MiGetPteAddress (Wsle[SwapEntry].u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = SwapEntry;
+
+ return;
+ }
+
+ //
+ // The entry at FirstDynamic is valid; swap it with this one and
+ // update both parents.
+ //
+
+ SwapParent = WorkingSetList->Root;
+
+ if (SwapParent == SwapEntry) {
+
+ //
+ // The entry we are swapping with is at the root.
+ //
+
+ if (Wsle[SwapEntry].u2.s.LeftChild == Entry) {
+
+ //
+ // The entry we are going to swap is the left child of this
+ // entry.
+ //
+ // R(SwapEntry)
+ // / \
+ // (entry)
+ //
+
+ WorkingSetList->Root = Entry;
+
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SwapEntry;
+ SavedRight = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SavedRight;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+
+ } else {
+
+ if (Wsle[SwapEntry].u2.s.RightChild == Entry) {
+
+ //
+ // The entry we are going to swap is the right child of this
+ // entry.
+ //
+ // R(SwapEntry)
+ // / \
+ // (entry)
+ //
+
+ WorkingSetList->Root = Entry;
+
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SwapEntry;
+ SavedLeft = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SavedLeft;
+
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+ }
+ }
+
+ //
+ // The swap entry is the root, but the other entry is not
+ // its child.
+ //
+ //
+ // R(SwapEntry)
+ // / \
+ // .....
+ // Parent(Entry)
+ // \
+ // Entry (left or right)
+ //
+ //
+
+ WorkingSetList->Root = Entry;
+
+ SavedRight = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SavedRight;
+ SavedLeft = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SavedLeft;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ if (Parent == WSLE_NULL_INDEX) {
+
+ //
+ // This entry is not in the tree.
+ //
+
+ PointerPte = MiGetPteAddress (Wsle[SwapEntry].u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ Pfn1->u1.WsIndex = SwapEntry;
+ return;
+ }
+
+ //
+ // Change the parent of the entry to point to the swap entry.
+ //
+
+ if (Wsle[Parent].u2.s.RightChild == Entry) {
+ Wsle[Parent].u2.s.RightChild = SwapEntry;
+ } else {
+ Wsle[Parent].u2.s.LeftChild = SwapEntry;
+ }
+
+ return;
+
+ }
+
+ //
+ // The SwapEntry is not the root, find its parent.
+ //
+
+ if (Wsle[SwapEntry].u2.BothPointers == 0) {
+
+ //
+ // Entry is not in tree, therefore no parent.
+
+ SwapParent = WSLE_NULL_INDEX;
+
+ } else {
+
+ VirtualAddress = PAGE_ALIGN(Wsle[SwapEntry].u1.VirtualAddress);
+
+ for (;;) {
+
+ ASSERT (SwapParent != WSLE_NULL_INDEX);
+
+ if (Wsle[SwapParent].u2.s.LeftChild == SwapEntry) {
+ break;
+ }
+ if (Wsle[SwapParent].u2.s.RightChild == SwapEntry) {
+ break;
+ }
+
+
+ if (VirtualAddress < PAGE_ALIGN(Wsle[SwapParent].u1.VirtualAddress)) {
+ SwapParent = Wsle[SwapParent].u2.s.LeftChild;
+ } else {
+ SwapParent = Wsle[SwapParent].u2.s.RightChild;
+ }
+ }
+ }
+
+ if (Parent == WorkingSetList->Root) {
+
+ //
+ // The entry is at the root.
+ //
+
+ if (Wsle[Entry].u2.s.LeftChild == SwapEntry) {
+
+ //
+ // The entry we are going to swap is the left child of this
+ // entry.
+ //
+ // R(Entry)
+ // / \
+ // (SwapEntry)
+ //
+
+ WorkingSetList->Root = SwapEntry;
+
+ Wsle[Entry].u2.s.LeftChild = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Entry;
+ SavedRight = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = SavedRight;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+
+ } else if (Wsle[SwapEntry].u2.s.RightChild == Entry) {
+
+ //
+ // The entry we are going to swap is the right child of this
+ // entry.
+ //
+ // R(SwapEntry)
+ // / \
+ // (entry)
+ //
+
+ WorkingSetList->Root = Entry;
+
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SwapEntry;
+ SavedLeft = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SavedLeft;
+
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+ }
+
+ //
+ // The swap entry is the root, but the other entry is not
+ // its child.
+ //
+ //
+ // R(SwapEntry)
+ // / \
+ // .....
+ // Parent(Entry)
+ // \
+ // Entry (left or right)
+ //
+ //
+
+ WorkingSetList->Root = Entry;
+
+ SavedRight = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SavedRight;
+ SavedLeft = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SavedLeft;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ if (SwapParent == WSLE_NULL_INDEX) {
+
+ //
+ // This entry is not in the tree.
+ //
+
+ PointerPte = MiGetPteAddress (Wsle[Entry].u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ ASSERT (Pfn1->u1.WsIndex == SwapEntry);
+ Pfn1->u1.WsIndex = Entry;
+ return;
+ }
+
+ //
+ // Change the parent of the entry to point to the swap entry.
+ //
+
+ if (Wsle[SwapParent].u2.s.RightChild == SwapEntry) {
+ Wsle[SwapParent].u2.s.RightChild = Entry;
+ } else {
+ Wsle[SwapParent].u2.s.LeftChild = Entry;
+ }
+
+ return;
+
+ }
+
+ //
+ // Neither entry is the root.
+ //
+
+ if (Parent == SwapEntry) {
+
+ //
+ // The parent of the entry is the swap entry.
+ //
+ //
+ // R
+ // .....
+ //
+ // (SwapParent)
+ // |
+ // (SwapEntry)
+ // |
+ // (Entry)
+ //
+
+ //
+ // Update the parent pointer for the swapentry.
+ //
+
+ if (Wsle[SwapParent].u2.s.LeftChild == SwapEntry) {
+ Wsle[SwapParent].u2.s.LeftChild = Entry;
+ } else {
+ Wsle[SwapParent].u2.s.RightChild = Entry;
+ }
+
+ //
+ // Determine if this goes left or right.
+ //
+
+ if (Wsle[SwapEntry].u2.s.LeftChild == Entry) {
+
+ //
+ // The entry we are going to swap is the left child of this
+ // entry.
+ //
+ // R
+ // .....
+ //
+ // (SwapParent)
+ //
+ // (SwapEntry) [Parent(entry)]
+ // / \
+ // (entry)
+ //
+
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SwapEntry;
+ SavedRight = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SavedRight;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+
+ } else {
+
+ ASSERT (Wsle[SwapEntry].u2.s.RightChild == Entry);
+
+ //
+ // The entry we are going to swap is the right child of this
+ // entry.
+ //
+ // R
+ // .....
+ //
+ // (SwapParent)
+ // \
+ // (SwapEntry)
+ // / \
+ // (entry)
+ //
+
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SwapEntry;
+ SavedLeft = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SavedLeft;
+
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+ }
+
+
+ }
+ if (SwapParent == Entry) {
+
+
+ //
+ // The parent of the swap entry is the entry.
+ //
+ // R
+ // .....
+ //
+ // (Parent)
+ // |
+ // (Entry)
+ // |
+ // (SwapEntry)
+ //
+
+ //
+ // Update the parent pointer for the entry.
+ //
+
+ if (Wsle[Parent].u2.s.LeftChild == Entry) {
+ Wsle[Parent].u2.s.LeftChild = SwapEntry;
+ } else {
+ Wsle[Parent].u2.s.RightChild = SwapEntry;
+ }
+
+ //
+ // Determine if this goes left or right.
+ //
+
+ if (Wsle[Entry].u2.s.LeftChild == SwapEntry) {
+
+ //
+ // The entry we are going to swap is the left child of this
+ // entry.
+ //
+ // R
+ // .....
+ //
+ // (Parent)
+ // |
+ // (Entry)
+ // /
+ // (SwapEntry)
+ //
+
+ Wsle[Entry].u2.s.LeftChild = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Entry;
+ SavedRight = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = SavedRight;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+
+ } else {
+
+ ASSERT (Wsle[Entry].u2.s.RightChild == SwapEntry);
+
+ //
+ // The swap entry is the right child of the entry.
+ //
+ //        (Parent)
+ //            |
+ //         (Entry)
+ //               \
+ //           (SwapEntry)
+ //
+
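+ //
+ // Mirror image again: SwapEntry adopts Entry directly as its
+ // right child.
+ //
+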
+ Wsle[Entry].u2.s.RightChild = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = Entry;
+ SavedLeft = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SavedLeft;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+ }
+
+ }
+
+ //
+ // Neither entry is the parent of the other. Just swap them
+ // and update the parent entries.
+ //
+
+ if (Parent == WSLE_NULL_INDEX) {
+
+ //
+ // The entry is not in the tree.  Update the working set index
+ // in the PFN entry for its page instead.
+ //
+
+ PointerPte = MiGetPteAddress (Wsle[Entry].u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ ASSERT (Pfn1->u1.WsIndex == Entry);
+ Pfn1->u1.WsIndex = SwapEntry;
+
+ } else {
+
+ if (Wsle[Parent].u2.s.LeftChild == Entry) {
+ Wsle[Parent].u2.s.LeftChild = SwapEntry;
+ } else {
+ Wsle[Parent].u2.s.RightChild = SwapEntry;
+ }
+ }
+
+ if (SwapParent == WSLE_NULL_INDEX) {
+
+ //
+ // The swap entry is not in the tree.  Update the working set
+ // index in the PFN entry for its page instead.
+ //
+
+ PointerPte = MiGetPteAddress (Wsle[SwapEntry].u1.VirtualAddress);
+ Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
+ ASSERT (Pfn1->u1.WsIndex == SwapEntry);
+ Pfn1->u1.WsIndex = Entry;
+ } else {
+
+ if (Wsle[SwapParent].u2.s.LeftChild == SwapEntry) {
+ Wsle[SwapParent].u2.s.LeftChild = Entry;
+ } else {
+ Wsle[SwapParent].u2.s.RightChild = Entry;
+ }
+ }
+
+ SavedRight = Wsle[SwapEntry].u2.s.RightChild;
+ Wsle[SwapEntry].u2.s.RightChild = Wsle[Entry].u2.s.RightChild;
+ Wsle[Entry].u2.s.RightChild = SavedRight;
+ SavedLeft = Wsle[SwapEntry].u2.s.LeftChild;
+ Wsle[SwapEntry].u2.s.LeftChild = Wsle[Entry].u2.s.LeftChild;
+ Wsle[Entry].u2.s.LeftChild = SavedLeft;
+
+ SavedLong = Wsle[Entry].u1.Long;
+ Wsle[Entry].u1.Long = Wsle[SwapEntry].u1.Long;
+ Wsle[SwapEntry].u1.Long = SavedLong;
+
+ return;
+ }
+}
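+
+//
+// A minimal editorial sketch (hypothetical names, not part of the
+// original module) of the technique the routine above applies to
+// working set list entries: in an index-linked binary tree, two
+// array slots trade contents wholesale (children and key together)
+// and each parent's child link is then repointed, so the tree shape
+// is unchanged and only the indexes backing the two nodes move.
+// This covers the non-adjacent case; the adjacent cases above need
+// the extra "adopt the other node as a child" step to avoid the
+// self references a blind exchange would create.
+//
+
+#define SKETCH_NIL ((ULONG)-1)
+
+typedef struct _SKETCH_NODE {
+ ULONG Left;
+ ULONG Right;
+ ULONG Key;
+} SKETCH_NODE;
+
+VOID
+SketchSwapSlots (
+ IN SKETCH_NODE *Node,
+ IN ULONG A,
+ IN ULONG ParentA, // parent slot of A, or SKETCH_NIL
+ IN ULONG B, // neither parent nor child of A
+ IN ULONG ParentB // parent slot of B, or SKETCH_NIL
+ )
+{
+ SKETCH_NODE Temp;
+ BOOLEAN AWasLeft;
+ BOOLEAN BWasLeft;
+
+ //
+ // Remember which side each parent link is on before anything is
+ // modified (the two parents may be the same slot).
+ //
+
+ AWasLeft = (BOOLEAN)((ParentA != SKETCH_NIL) &&
+ (Node[ParentA].Left == A));
+ BWasLeft = (BOOLEAN)((ParentB != SKETCH_NIL) &&
+ (Node[ParentB].Left == B));
+
+ //
+ // Exchange the slot contents wholesale.
+ //
+
+ Temp = Node[A];
+ Node[A] = Node[B];
+ Node[B] = Temp;
+
+ //
+ // Repoint the parents' child links at the new slots; the tree
+ // shape itself does not change.
+ //
+
+ if (ParentA != SKETCH_NIL) {
+ if (AWasLeft) {
+ Node[ParentA].Left = B;
+ } else {
+ Node[ParentA].Right = B;
+ }
+ }
+
+ if (ParentB != SKETCH_NIL) {
+ if (BWasLeft) {
+ Node[ParentB].Left = A;
+ } else {
+ Node[ParentB].Right = A;
+ }
+ }
+}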
+#endif //0
diff --git a/private/ntos/mm/zeropage.c b/private/ntos/mm/zeropage.c
new file mode 100644
index 000000000..b79ff2d5c
--- /dev/null
+++ b/private/ntos/mm/zeropage.c
@@ -0,0 +1,158 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ zeropage.c
+
+Abstract:
+
+ This module contains the zero page thread for memory management.
+
+Author:
+
+ Lou Perazzoli (loup) 6-Apr-1991
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+VOID
+MmZeroPageThread (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ Implements the NT zeroing page thread. This thread runs
+ at priority zero and removes a page from the free list,
+ zeroes it, and places it on the zeroed page list.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ PVOID EndVa;
+ KIRQL OldIrql;
+ ULONG PageFrame;
+ PMMPFN Pfn1;
+ PVOID StartVa;
+ PKTHREAD Thread;
+ PVOID ZeroBase;
+ ULONG i;
+
+ //
+ // Before this becomes the zero page thread, free the kernel
+ // initialization code.
+ //
+
+ MiFindInitializationCode (&StartVa, &EndVa);
+ if (StartVa != NULL) {
+ MiFreeInitializationCode (StartVa, EndVa);
+ }
+
+ //
+ // The following code sets the current thread's base priority to zero
+ // and then sets its current priority to zero. This ensures that the
+ // thread always runs at a priority of zero.
+ //
+
+ Thread = KeGetCurrentThread();
+ Thread->BasePriority = 0;
+ KeSetPriorityThread (Thread, 0);
+
+ //
+ // Loop forever zeroing pages.
+ //
+
+ do {
+
+ //
+ // Wait until there are at least MmZeroPageMinimum pages
+ // on the free list.
+ //
+
+ KeWaitForSingleObject (&MmZeroingPageEvent,
+ WrFreePage,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ LOCK_PFN_WITH_TRY (OldIrql);
+ do {
+ if (*(volatile ULONG *)&MmFreePageListHead.Total == 0) {
+
+ //
+ // No pages on the free list at this time, wait for
+ // some more.
+ //
+
+ MmZeroingPageThreadActive = FALSE;
+ UNLOCK_PFN (OldIrql);
+ break;
+
+ } else {
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
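+ //
+ // Scan the per-color free lists and take the first color that
+ // currently has a free page.
+ //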
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ PageFrame = MmFreePagesByPrimaryColor[FreePageList][i].Flink;
+ if (PageFrame != MM_EMPTY_LIST) {
+ break;
+ }
+ }
+#else //MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ PageFrame = MmFreePageListHead.Flink;
+#endif //MM_MAXIMUM_NUMBER_OF_COLORS > 1
+
+ ASSERT (PageFrame != MM_EMPTY_LIST);
+ Pfn1 = MI_PFN_ELEMENT(PageFrame);
+ MiRemoveAnyPage (MI_GET_SECONDARY_COLOR (PageFrame, Pfn1));
+
+ //
+ // Zero the page using the last color used to map the page.
+ //
+
+#if defined(_X86_)
+
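+ //
+ // On x86 the frame is given a temporary hyperspace mapping and
+ // zeroed through it.  The PFN lock can be dropped first because
+ // the page is on no list and cannot be taken from this thread.
+ //
+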
+ ZeroBase = MiMapPageToZeroInHyperSpace (PageFrame);
+ UNLOCK_PFN (OldIrql);
+ RtlZeroMemory (ZeroBase, PAGE_SIZE);
+
+#elif defined(_PPC_)
+
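+ //
+ // On PowerPC the kernel zeroes the physical page directly with
+ // KeZeroPage once the PFN lock is released.
+ //
+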
+ UNLOCK_PFN (OldIrql);
+ KeZeroPage(PageFrame);
+
+#else
+
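+ //
+ // On the remaining platforms the page's color selects a virtual
+ // alias of matching cache color and the HAL zeroes the frame
+ // through it, keeping virtually indexed caches coherent.
+ //
+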
+ ZeroBase = (PVOID)(Pfn1->u3.e1.PageColor << PAGE_SHIFT);
+ UNLOCK_PFN (OldIrql);
+ HalZeroPage(ZeroBase, ZeroBase, PageFrame);
+
+#endif // platform-specific page zeroing
+
+ LOCK_PFN_WITH_TRY (OldIrql);
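+
+ //
+ // Reacquire the PFN lock and place the now-zeroed page on the
+ // zeroed page list where demand zero faults can consume it.
+ //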
+ MiInsertPageInList (MmPageLocationList[ZeroedPageList],
+ PageFrame);
+ }
+ } while(TRUE);
+ } while (TRUE);
+}