summaryrefslogtreecommitdiffstats
path: root/private/ntos/mm/ppc
diff options
context:
space:
mode:
authorAdam <you@example.com>2020-05-17 05:51:50 +0200
committerAdam <you@example.com>2020-05-17 05:51:50 +0200
commite611b132f9b8abe35b362e5870b74bce94a1e58e (patch)
treea5781d2ec0e085eeca33cf350cf878f2efea6fe5 /private/ntos/mm/ppc
downloadNT4.0-e611b132f9b8abe35b362e5870b74bce94a1e58e.tar
NT4.0-e611b132f9b8abe35b362e5870b74bce94a1e58e.tar.gz
NT4.0-e611b132f9b8abe35b362e5870b74bce94a1e58e.tar.bz2
NT4.0-e611b132f9b8abe35b362e5870b74bce94a1e58e.tar.lz
NT4.0-e611b132f9b8abe35b362e5870b74bce94a1e58e.tar.xz
NT4.0-e611b132f9b8abe35b362e5870b74bce94a1e58e.tar.zst
NT4.0-e611b132f9b8abe35b362e5870b74bce94a1e58e.zip
Diffstat (limited to 'private/ntos/mm/ppc')
-rw-r--r--private/ntos/mm/ppc/datappc.c136
-rw-r--r--private/ntos/mm/ppc/debugsup.c199
-rw-r--r--private/ntos/mm/ppc/hypermap.c344
-rw-r--r--private/ntos/mm/ppc/initppc.c874
-rw-r--r--private/ntos/mm/ppc/mippc.h2034
-rw-r--r--private/ntos/mm/ppc/setdirty.c124
-rw-r--r--private/ntos/mm/ppc/sources5
7 files changed, 3716 insertions, 0 deletions
diff --git a/private/ntos/mm/ppc/datappc.c b/private/ntos/mm/ppc/datappc.c
new file mode 100644
index 000000000..bc7946fc5
--- /dev/null
+++ b/private/ntos/mm/ppc/datappc.c
@@ -0,0 +1,136 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ datappc.c
+
+Abstract:
+
+ This module contains the private hardware specific global storage for
+ the memory management subsystem.
+
+Author:
+
+ Lou Perazzoli (loup) 27-Mar-1990
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-93
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//
+// A zero Pte.
+//
+
+MMPTE ZeroPte = { 0 };
+
+
+//
+// A kernel zero PTE.
+//
+
+MMPTE ZeroKernelPte = { 0 };
+
+
+MMPTE ValidKernelPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidUserPte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidPtePte = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidPdePde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE ValidKernelPde = { MM_PTE_VALID_MASK |
+ MM_PTE_WRITE_MASK };
+
+
+MMPTE DemandZeroPde = { MM_READWRITE << 3 };
+
+
+MMPTE DemandZeroPte = { MM_READWRITE << 3 };
+
+
+MMPTE TransitionPde = { MM_PTE_TRANSITION_MASK | (MM_READWRITE << 3) };
+
+
+MMPTE PrototypePte = { 0xFFFFF000 | (MM_READWRITE << 3) | MM_PTE_PROTOTYPE_MASK };
+
+
+//
+// PTE which generates an access violation when referenced.
+//
+
+MMPTE NoAccessPte = {MM_NOACCESS << 3};
+
+
+//
+// Pool start and end.
+//
+
+PVOID MmNonPagedPoolStart;
+
+PVOID MmNonPagedPoolEnd = ((PVOID)MM_NONPAGED_POOL_END);
+
+PVOID MmPagedPoolStart = ((PVOID)MM_PAGED_POOL_START);
+
+PVOID MmPagedPoolEnd;
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+
+MMPFNLIST MmStandbyPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, StandbyPageList, MM_EMPTY_LIST, MM_EMPTY_LIST
+ };
+
+
+#endif
+
+PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+
+//
+// Color tables for modified pages destined for the paging file.
+//
+
+MMPFNLIST MmModifiedPageListByColor[MM_MAXIMUM_NUMBER_OF_COLORS] = {
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST,
+ 0, ModifiedPageList, MM_EMPTY_LIST, MM_EMPTY_LIST};
+
+ULONG MmSecondaryColorMask;
+
+//
+// Count of the number of modified pages destined for the paging file.
+//
+
+ULONG MmTotalPagesForPagingFile = 0;
+
+//
+// PTE reserved for mapping physical data for debugger.
+// Use 1 page from last 4MB of virtual address space
+// reserved for the HAL.
+//
+
+PMMPTE MmDebugPte = (MiGetPteAddress((PVOID)MM_HAL_RESERVED));
+
+
+//
+// 16 PTEs reserved for mapping MDLs (64k max).
+//
+
+PMMPTE MmCrashDumpPte = (MiGetPteAddress((PVOID)MM_HAL_RESERVED));
+
diff --git a/private/ntos/mm/ppc/debugsup.c b/private/ntos/mm/ppc/debugsup.c
new file mode 100644
index 000000000..bd71d496a
--- /dev/null
+++ b/private/ntos/mm/ppc/debugsup.c
@@ -0,0 +1,199 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ debugsup.c
+
+Abstract:
+
+ This module contains routines which provide support for the
+ kernel debugger.
+
+Author:
+
+ Lou Perazzoli (loup) 02-Aug-90
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-93
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+PVOID
+MmDbgReadCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ PowerPC implementation specific:
+
+ This routine returns the virtual address which is valid (mapped)
+ for read access.
+
+ The address may be within the PowerPC kernel BAT or may be
+ otherwise valid and readable.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ Returns NULL if the address is not valid or readable, otherwise
+ returns the virtual address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if ((VirtualAddress >= (PVOID)KIPCR) &&
+ (VirtualAddress < (PVOID)(KIPCR2 + PAGE_SIZE))) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
+
+PVOID
+MmDbgWriteCheck (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ PowerPC implementation specific:
+
+ This routine returns the virtual address which is valid (mapped)
+ for write access.
+
+ The address may be within the PowerPC kernel BAT or may be
+ otherwise valid and writable.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address to check.
+
+Return Value:
+
+ Returns NULL if the address is not valid or writable, otherwise
+ returns the virtual address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PMMPTE PointerPte;
+
+ if ((VirtualAddress >= (PVOID)KSEG0_BASE) &&
+ (VirtualAddress < (PVOID)KSEG2_BASE)) {
+ return VirtualAddress;
+ }
+
+ if ((VirtualAddress >= (PVOID)KIPCR) &&
+ (VirtualAddress < (PVOID)(KIPCR2 + PAGE_SIZE))) {
+ return VirtualAddress;
+ }
+
+ if (!MmIsAddressValid (VirtualAddress)) {
+ return NULL;
+ }
+
+ //
+ // This is being added back in permanently since the PowerPC
+ // hardware debug registers break in before the instruction
+ // is executed. This will generally allow the kernel debugger
+ // to step over the instruction that triggered the hardware
+ // debug register breakpoint.
+ //
+
+ if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) {
+
+ // This code is similar in spirit to that in the MIPS version.
+ // It returns a writable alias for breakpoints in user pages.
+ // However, it uses the virtual address reserved for the debugger,
+ // rather than the wired-in KSEG0 translation available in MIPS.
+ //
+ // N.B. Microsoft says kernel debugger can't do user code at all.
+
+ return MmDbgTranslatePhysicalAddress (
+ MmGetPhysicalAddress (VirtualAddress) );
+ }
+
+ PointerPte = MiGetPteAddress (VirtualAddress);
+ if (PointerPte->u.Hard.Write == 0) {
+ return NULL;
+ }
+
+ return VirtualAddress;
+}
+
+PVOID
+MmDbgTranslatePhysicalAddress (
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ PowerPC implementation specific:
+
+ This routine maps the specified physical address and returns
+ the virtual address which maps the physical address.
+
+ The next call to MmDbgTranslatePhysicalAddress removes the
+ previous physical address translation, hence only a single
+ physical address can be examined at a time (can't cross page
+ boundaries).
+
+Arguments:
+
+ PhysicalAddress - Supplies the physical address to map and translate.
+
+Return Value:
+
+ The virtual address which corresponds to the physical address.
+
+Environment:
+
+ Kernel mode IRQL at DISPATCH_LEVEL or greater.
+
+--*/
+
+{
+ PVOID BaseAddress;
+
+ BaseAddress = MiGetVirtualAddressMappedByPte (MmDebugPte);
+
+ KiFlushSingleTb (TRUE, BaseAddress);
+
+ *MmDebugPte = ValidKernelPte;
+ MmDebugPte->u.Hard.PageFrameNumber = PhysicalAddress.LowPart >> PAGE_SHIFT;
+
+ return (PVOID)((ULONG)BaseAddress + BYTE_OFFSET(PhysicalAddress.LowPart));
+}
diff --git a/private/ntos/mm/ppc/hypermap.c b/private/ntos/mm/ppc/hypermap.c
new file mode 100644
index 000000000..fd07b80af
--- /dev/null
+++ b/private/ntos/mm/ppc/hypermap.c
@@ -0,0 +1,344 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ hypermap.c
+
+Abstract:
+
+ This module contains the routines which map physical pages into
+ reserved PTEs within hyper space.
+
+ This module is machine dependent. This version is targeted
+ for PowerPC.
+
+Author:
+
+ Lou Perazzoli (loup) 5-Apr-1989
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 11-Oct-1993
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+
+PVOID
+MiMapPageInHyperSpace (
+ IN ULONG PageFrameIndex,
+ IN PKIRQL OldIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps the specified physical page into hyperspace
+ and returns the virtual address that maps the page.
+
+ ************************************
+ * *
+ * Returns with a spin lock held!!! *
+ * *
+ ************************************
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Virtual address in hyperspace that maps the page.
+
+ RETURNS WITH THE HYPERSPACE SPIN LOCK HELD!!!!
+
+ The routine MiUnmapPageInHyperSpace MUST be called to release the lock!!!!
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ ULONG i;
+ PMMPTE PointerPte;
+ MMPTE TempPte;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ //
+ // Find the proper location in hyper space and map the page there.
+ //
+
+ LOCK_HYPERSPACE(OldIrql);
+ PointerPte = MmFirstReservedMappingPte;
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ //
+ // All the pages reserved for mapping have been used,
+ // flush the TB and reinitialize the pages.
+ //
+
+ RtlZeroMemory ((PVOID)MmFirstReservedMappingPte,
+ (NUMBER_OF_MAPPING_PTES + 1) * sizeof(MMPTE));
+ PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES;
+ KeFlushEntireTb (TRUE, FALSE);
+
+ }
+
+ //
+ // Get the offset to the first free PTE.
+ //
+
+ i = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Change the offset for the next time through.
+ //
+
+ PointerPte->u.Hard.PageFrameNumber = i - 1;
+
+ //
+ // Point to the free entry and make it valid.
+ //
+
+ PointerPte += i;
+
+ ASSERT (PointerPte->u.Hard.Valid == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ *PointerPte = TempPte;
+
+ return MiGetVirtualAddressMappedByPte (PointerPte);
+}
+
+PVOID
+MiMapImageHeaderInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This routine maps the specified physical page into hyperspace
+ at the address reserved explicitly for image page header mapping
+ and returns the virtual address that maps the page. No other
+ hyperspace maps will affect this map. If another thread attempts
+ to map an image at the same time, it will be forced to wait until
+ this header is unmapped.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Virtual address in hyperspace that maps the page.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ LOCK_PFN (OldIrql);
+
+ while (PointerPte->u.Long != 0) {
+
+ //
+ // If there is no event specified, set one up.
+ //
+
+ if (MmWorkingSetList->WaitingForImageMapping == (PKEVENT)NULL) {
+
+ //
+ // Set the global event into the field and wait for it.
+ //
+
+ MmWorkingSetList->WaitingForImageMapping = &MmImageMappingPteEvent;
+ }
+
+ //
+ // Release the PFN lock and wait on the event in an
+ // atomic operation.
+ //
+
+ KeEnterCriticalRegion();
+ UNLOCK_PFN_AND_THEN_WAIT(OldIrql);
+
+ KeWaitForSingleObject(MmWorkingSetList->WaitingForImageMapping,
+ Executive,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+ KeLeaveCriticalRegion();
+
+ LOCK_PFN (OldIrql);
+ }
+
+ ASSERT (PointerPte->u.Long == 0);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ UNLOCK_PFN (OldIrql);
+
+ return (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
+}
+
+VOID
+MiUnmapImageHeaderInHyperSpace (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure unmaps the PTE reserved for mapping the image
+ header, flushes the TB, and, if the WaitingForImageMapping field
+ is not NULL, sets the specified event.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+ KIRQL OldIrql;
+ PKEVENT Event;
+
+ PointerPte = MiGetPteAddress (IMAGE_MAPPING_PTE);
+
+ TempPte.u.Long = 0;
+
+ LOCK_PFN (OldIrql);
+
+ //
+ // Capture the current state of the event field and clear it out.
+ //
+
+ Event = MmWorkingSetList->WaitingForImageMapping;
+
+ MmWorkingSetList->WaitingForImageMapping = (PKEVENT)NULL;
+
+ ASSERT (PointerPte->u.Long != 0);
+
+ KeFlushSingleTb (IMAGE_MAPPING_PTE,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Hard);
+
+ UNLOCK_PFN (OldIrql);
+
+ if (Event != (PKEVENT)NULL) {
+
+ //
+ // If there was an event specified, set the event.
+ //
+
+ KePulseEvent (Event, 0, FALSE);
+ }
+
+ return;
+}
+
+PVOID
+MiMapPageToZeroInHyperSpace (
+ IN ULONG PageFrameIndex
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure maps the specified physical page into hyper space
+ and returns the virtual address which maps the page.
+
+ NOTE: it maps it into the same location reserved for fork operations!!
+ This is only to be used by the zeroing page thread.
+
+Arguments:
+
+ PageFrameIndex - Supplies the physical page number to map.
+
+Return Value:
+
+ Returns the virtual address where the specified physical page was
+ mapped.
+
+Environment:
+
+ Must be holding the PFN lock.
+
+--*/
+
+{
+ MMPTE TempPte;
+ PMMPTE PointerPte;
+
+#if DBG
+ if (PageFrameIndex == 0) {
+ DbgPrint("attempt to map physical page 0 in hyper space\n");
+ KeBugCheck (MEMORY_MANAGEMENT);
+ }
+#endif //DBG
+
+ PointerPte = MiGetPteAddress (ZEROING_PAGE_PTE);
+
+ TempPte.u.Long = 0;
+
+ KeFlushSingleTb (ZEROING_PAGE_PTE,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPte,
+ TempPte.u.Hard);
+
+ TempPte = ValidPtePte;
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+
+ *PointerPte = TempPte;
+
+ return ZEROING_PAGE_PTE;
+}
diff --git a/private/ntos/mm/ppc/initppc.c b/private/ntos/mm/ppc/initppc.c
new file mode 100644
index 000000000..e2a9f972e
--- /dev/null
+++ b/private/ntos/mm/ppc/initppc.c
@@ -0,0 +1,874 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ initppc.c
+
+Abstract:
+
+ This module contains the machine dependent initialization for the
+ memory management component. It is specifically tailored to the
+ PowerPC environment.
+
+Author:
+
+ Lou Perazzoli (loup) 3-Apr-1990
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 8-Oct-1993
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+//
+// Local definitions
+//
+
+#define _16MB ((16*1024*1024)/PAGE_SIZE)
+
+
+VOID
+MiInitMachineDependent (
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This routine performs the necessary operations to enable virtual
+ memory. This includes building the page directory page, building
+ page table pages to map the code section, the data section, the
+ stack section and the trap handler.
+
+ It also initializes the PFN database and populates the free list.
+
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode.
+
+--*/
+
+{
+
+ ULONG i, j;
+ ULONG HighPage;
+ ULONG PdePageNumber;
+ ULONG PdePage;
+ ULONG PageFrameIndex;
+ ULONG NextPhysicalPage;
+ ULONG PfnAllocation;
+ ULONG NumberOfPages;
+ ULONG MaxPool;
+ KIRQL OldIrql;
+ PEPROCESS CurrentProcess;
+ ULONG DirBase;
+ ULONG MostFreePage = 0;
+ ULONG MostFreeLowMem = 0;
+ PLIST_ENTRY NextMd;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor = NULL;
+ PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem = NULL;
+ PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
+ MMPTE TempPte;
+ PMMPTE PointerPde;
+ PMMPTE PointerPte;
+ PMMPTE LastPte;
+ PMMPTE Pde;
+ PMMPTE StartPde;
+ PMMPTE EndPde;
+ PMMPFN Pfn1;
+ PMMPFN Pfn2;
+ ULONG va;
+
+ PointerPte = MiGetPdeAddress (PDE_BASE);
+
+// N.B. this will cause first HPT miss fault, DSI in real0.s should fix it!
+ PdePageNumber = PointerPte->u.Hard.PageFrameNumber;
+
+ DirBase = PdePageNumber << PAGE_SHIFT;
+
+ PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = DirBase;
+
+ KeSweepDcache (FALSE);
+
+ //
+ // Get the lower bound of the free physical memory and the
+ // number of physical pages by walking the memory descriptor lists.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount - 1;
+ MmNumberOfPhysicalPages += MemoryDescriptor->PageCount;
+
+ if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) {
+ MmLowestPhysicalPage = MemoryDescriptor->BasePage;
+ }
+
+ if (HighPage > MmHighestPhysicalPage) {
+ MmHighestPhysicalPage = HighPage;
+ }
+
+ //
+ // Locate the largest free block and the largest free block below 16MB.
+ //
+
+ if ((MemoryDescriptor->MemoryType == LoaderFree) ||
+ (MemoryDescriptor->MemoryType == LoaderLoadedProgram) ||
+ (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) ||
+ (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) {
+
+ if ((MemoryDescriptor->BasePage < _16MB) &&
+ (MostFreeLowMem < MemoryDescriptor->PageCount) &&
+ (MostFreeLowMem < ((ULONG)_16MB - MemoryDescriptor->BasePage))) {
+
+ MostFreeLowMem = (ULONG)_16MB - MemoryDescriptor->BasePage;
+ if (MemoryDescriptor->PageCount < MostFreeLowMem) {
+ MostFreeLowMem = MemoryDescriptor->PageCount;
+ }
+ FreeDescriptorLowMem = MemoryDescriptor;
+
+ } else if (MemoryDescriptor->PageCount > MostFreePage) {
+
+ MostFreePage = MemoryDescriptor->PageCount;
+ FreeDescriptor = MemoryDescriptor;
+ }
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+ // This printout must be updated when the HAL goes to unicode
+ //
+
+ if (MmNumberOfPhysicalPages < 1024) {
+ KeBugCheckEx (INSTALL_MORE_MEMORY,
+ MmNumberOfPhysicalPages,
+ MmLowestPhysicalPage,
+ MmHighestPhysicalPage,
+ 0);
+ }
+
+ //
+ // Build non-paged pool using the physical pages following the
+ // data page in which to build the pool from. Non-paged pool grows
+ // from the high range of the virtual address space and expands
+ // downward.
+ //
+ // At this time non-paged pool is constructed so virtual addresses
+ // are also physically contiguous.
+ //
+
+ if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) >
+ (7 * (MmNumberOfPhysicalPages << 3))) {
+
+ //
+ // More than 7/8 of memory allocated to nonpagedpool, reset to 0.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = 0;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) {
+
+ //
+ // Calculate the size of nonpaged pool.
+ // Use the minimum size, then for every MB above 4mb add extra
+ // pages.
+ //
+
+ MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize;
+
+ MmSizeOfNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMinAdditionNonPagedPoolPerMb;
+ }
+
+ if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) {
+ MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL;
+ }
+
+ //
+ // Align to page size boundary.
+ //
+
+ MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1);
+
+ //
+ // Calculate the maximum size of pool.
+ //
+
+ if (MmMaximumNonPagedPoolInBytes == 0) {
+
+ //
+ // Calculate the size of nonpaged pool. If 4mb or less use
+ // the minimum size, then for every MB about 4mb add extra
+ // pages.
+ //
+
+ MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool;
+
+ //
+ // Make sure enough expansion for pfn database exists.
+ //
+
+ MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ MmMaximumNonPagedPoolInBytes +=
+ ((MmNumberOfPhysicalPages - 1024)/256) *
+ MmMaxAdditionNonPagedPoolPerMb;
+ }
+
+ MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 +
+ (ULONG)PAGE_ALIGN (
+ MmHighestPhysicalPage * sizeof(MMPFN));
+
+ if (MmMaximumNonPagedPoolInBytes < MaxPool) {
+ MmMaximumNonPagedPoolInBytes = MaxPool;
+ }
+
+ if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) {
+ MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL;
+ }
+
+ MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd
+ - (MmMaximumNonPagedPoolInBytes - 1));
+
+ MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart);
+
+ //
+ // Calculate the starting PDE for the system PTE pool which is
+ // right below the nonpaged pool.
+ //
+
+ MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart -
+ ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) &
+ (~PAGE_DIRECTORY_MASK));
+
+ if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) {
+ MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START;
+ MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart -
+ (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1;
+ ASSERT (MmNumberOfSystemPtes > 1000);
+ }
+
+ StartPde = MiGetPdeAddress (MmNonPagedSystemStart);
+
+ EndPde = MiGetPdeAddress((PVOID)((PCHAR)MmNonPagedPoolEnd - 1));
+
+ ASSERT ((ULONG)(EndPde - StartPde) < FreeDescriptorLowMem->PageCount);
+
+ //
+ // Start building the nonpaged pool with the largest free chunk of memory
+ // below 16MB.
+ //
+
+ NextPhysicalPage = FreeDescriptorLowMem->BasePage;
+ NumberOfPages = FreeDescriptorLowMem->PageCount;
+ TempPte = ValidKernelPte;
+
+ while (StartPde <= EndPde) {
+ if (StartPde->u.Hard.Valid == 0) {
+
+ //
+ // Map in a page directory page.
+ //
+
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ *StartPde = TempPte;
+
+ }
+ StartPde += 1;
+ }
+
+ ASSERT(NumberOfPages > 0);
+
+ //
+ // Zero the PTEs before nonpaged pool.
+ //
+
+ StartPde = MiGetPteAddress(MmNonPagedSystemStart);
+ PointerPte = MiGetPteAddress(MmNonPagedPoolStart);
+
+ RtlZeroMemory (StartPde, ((ULONG)PointerPte - (ULONG)StartPde));
+
+ //
+ // Fill in the PTEs for non-paged pool.
+ //
+
+ LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes - 1);
+ while (PointerPte <= LastPte) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ PointerPte++;
+ }
+
+ //
+ // Zero the remaining PTEs (if any).
+ //
+
+ while (((ULONG)PointerPte & (PAGE_SIZE - 1)) != 0) {
+ *PointerPte = ZeroKernelPte;
+ PointerPte++;
+ }
+
+ MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart;
+
+ //
+ // Non-paged pages now exist, build the pool structures.
+ //
+
+ MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)MmNonPagedPoolStart +
+ MmSizeOfNonPagedPoolInBytes);
+ MiInitializeNonPagedPool (MmNonPagedPoolStart);
+
+ //
+ // Before Non-paged pool can be used, the PFN database must
+ // be built. This is due to the fact that the start and end of
+ // allocation bits for nonpaged pool are maintained in the
+ // PFN elements for the corresponding pages.
+ //
+
+ //
+ // Calculate the number of pages required from page zero to
+ // the highest page.
+ //
+ // Get the number of secondary colors and add the array for tracking
+ // secondary colors to the end of the PFN database.
+ //
+
+ //
+ // Get secondary color value from registry.
+ //
+
+ if (MmSecondaryColors == 0) {
+ MmSecondaryColors = PCR->SecondLevelDcacheSize;
+ }
+
+ MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT;
+
+ //
+ // Make sure value is power of two and within limits.
+ //
+
+ if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) ||
+ (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) ||
+ (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) {
+ MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT;
+ }
+
+ MmSecondaryColorMask = (MmSecondaryColors - 1) & ~MM_COLOR_MASK;
+
+ PfnAllocation = 1 + ((((MmHighestPhysicalPage + 1) * sizeof(MMPFN)) +
+ (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2))
+ >> PAGE_SHIFT);
+
+ //
+ // Calculate the start of the Pfn Database (it starts at physical
+ // page zero, even if the lowest physical page is not zero).
+ //
+
+ PointerPte = MiReserveSystemPtes (PfnAllocation,
+ NonPagedPoolExpansion,
+ 0,
+ 0,
+ TRUE);
+
+ MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
+
+ //
+ // Go through the memory descriptors and for each physical page
+ // make sure the PFN database has a valid PTE to map it. This allows
+ // machines with sparse physical memory to have a minimal PFN
+ // database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage));
+
+ LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
+ MemoryDescriptor->BasePage +
+ MemoryDescriptor->PageCount))) - 1);
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ NumberOfPages -= 1;
+ if (NumberOfPages == 0) {
+ ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
+ FreeDescriptor->PageCount));
+ NextPhysicalPage = FreeDescriptor->BasePage;
+ NumberOfPages = FreeDescriptor->PageCount;
+ }
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
+ &MmPfnDatabase[MmHighestPhysicalPage + 1];
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ //
+ // Make sure the PTEs are mapped.
+ //
+
+
+ if (!MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0])) {
+ PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
+
+ LastPte = MiGetPteAddress (
+ (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
+
+ while (PointerPte <= LastPte) {
+ if (PointerPte->u.Hard.Valid == 0) {
+ TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
+ NextPhysicalPage += 1;
+ *PointerPte = TempPte;
+ RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
+ PAGE_SIZE);
+ }
+ PointerPte++;
+ }
+ }
+
+ for (i = 0; i < MmSecondaryColors; i++) {
+ MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ }
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+ for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i++) {
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList;
+ MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST;
+ MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST;
+ }
+#endif
+
+ //
+ // Go through the page table entries and for any page which is
+ // valid, update the corresponding PFN database element.
+ //
+
+ Pde = MiGetPdeAddress (NULL);
+ PointerPde = MiGetPdeAddress (PTE_BASE);
+ va = 0;
+
+ for (i = 0; i < PDE_PER_PAGE; i++) {
+ if (Pde->u.Hard.Valid == 1) {
+
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT(PdePage);
+ Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (Pde));
+
+ PointerPte = MiGetPteAddress (va);
+
+ for (j = 0 ; j < PTE_PER_PAGE; j++) {
+ if (PointerPte->u.Hard.Valid == 1) {
+
+ Pfn1->u2.ShareCount += 1;
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ if (PageFrameIndex <= MmHighestPhysicalPage) {
+
+ Pfn2 = MI_PFN_ELEMENT(PageFrameIndex);
+
+ if (MmIsAddressValid(Pfn2) &&
+ MmIsAddressValid((PUCHAR)(Pfn2+1)-1)) {
+
+ Pfn2->PteFrame = PdePage;
+ Pfn2->PteAddress = PointerPte;
+ Pfn2->u2.ShareCount += 1;
+ Pfn2->u3.e2.ReferenceCount = 1;
+ Pfn2->u3.e1.PageLocation = ActiveAndValid;
+ Pfn2->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (
+ PointerPte));
+ }
+ }
+ }
+ va += PAGE_SIZE;
+ PointerPte++;
+ }
+ } else {
+ va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
+ }
+ Pde++;
+ }
+
+ //
+ // If page zero is still unused, mark it as in use. This is
+ // temporary as we want to find bugs where a physical page
+ // is specified as zero.
+ //
+
+ Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Make the reference count non-zero and point it into a
+ // page directory.
+ //
+
+ Pde = MiGetPdeAddress (0xb0000000);
+ PdePage = Pde->u.Hard.PageFrameNumber;
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = Pde;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (Pde));
+ }
+
+ // end of temporary set to physical page zero.
+
+ //
+ //
+ // Walk through the memory descriptors and add pages to the
+ // free list in the PFN database.
+ //
+
+ NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
+
+ while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
+
+ MemoryDescriptor = CONTAINING_RECORD(NextMd,
+ MEMORY_ALLOCATION_DESCRIPTOR,
+ ListEntry);
+
+ i = MemoryDescriptor->PageCount;
+ NextPhysicalPage = MemoryDescriptor->BasePage;
+
+ switch (MemoryDescriptor->MemoryType) {
+ case LoaderBad:
+ while (i != 0) {
+ MiInsertPageInList (MmPageLocationList[BadPageList],
+ NextPhysicalPage);
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ case LoaderFree:
+ case LoaderLoadedProgram:
+ case LoaderFirmwareTemporary:
+ case LoaderOsloaderStack:
+
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+
+ //
+ // Set the PTE address to the physical page for
+ // virtual address alignment checking.
+ //
+
+ Pfn1->PteAddress =
+ (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
+
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (
+ Pfn1->PteAddress));
+ MiInsertPageInList (MmPageLocationList[FreePageList],
+ NextPhysicalPage);
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ }
+ break;
+
+ default:
+
+ PointerPte = MiGetPteAddress (KSEG0_BASE +
+ (NextPhysicalPage << PAGE_SHIFT));
+ Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
+ while (i != 0) {
+
+ //
+ // Set page as in use.
+ //
+
+ if (Pfn1->u3.e2.ReferenceCount == 0) {
+ Pfn1->PteFrame = PdePageNumber;
+ Pfn1->PteAddress = PointerPte;
+ Pfn1->u2.ShareCount += 1;
+ Pfn1->u3.e2.ReferenceCount = 1;
+ Pfn1->u3.e1.PageLocation = ActiveAndValid;
+ Pfn1->u3.e1.PageColor = MI_GET_COLOR_FROM_SECONDARY(
+ MI_GET_PAGE_COLOR_FROM_PTE (
+ PointerPte));
+ }
+ Pfn1++;
+ i -= 1;
+ NextPhysicalPage += 1;
+ PointerPte += 1;
+ }
+
+ break;
+ }
+
+ NextMd = MemoryDescriptor->ListEntry.Flink;
+ }
+
+ //
+ // Indicate that the PFN database is allocated in NonPaged pool.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmLowestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmHighestPhysicalPage])->u.Hard.PageFrameNumber);
+ Pfn1->u3.e1.EndOfAllocation = 1;
+
+ //
+ // Indicate that nonpaged pool must succeed is allocated in
+ // nonpaged pool.
+ //
+
+ i = MmSizeOfNonPagedMustSucceed;
+ Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(MmNonPagedMustSucceed)->u.Hard.PageFrameNumber);
+
+ while ((LONG)i > 0) {
+ Pfn1->u3.e1.StartOfAllocation = 1;
+ Pfn1->u3.e1.EndOfAllocation = 1;
+ i -= PAGE_SIZE;
+ Pfn1 += 1;
+ }
+
+ KeInitializeSpinLock (&MmSystemSpaceLock);
+ KeInitializeSpinLock (&MmPfnLock);
+
+ //
+ // Initialize the nonpaged available PTEs for mapping I/O space
+ // and kernel stacks.
+ //
+
+ PointerPte = MiGetPteAddress (MmNonPagedSystemStart);
+
+ PointerPte = (PMMPTE)PAGE_ALIGN (PointerPte);
+
+ MmNumberOfSystemPtes = MiGetPteAddress(MmNonPagedPoolStart) - PointerPte - 1;
+
+ MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
+
+ //
+ // Initialize the nonpaged pool.
+ //
+
+ InitializePool(NonPagedPool,0);
+
+ //
+ // Initialize memory management structures for this process.
+ //
+
+ //
+ // Build working set list. System initialization has created
+ // a PTE for hyperspace.
+ //
+ // Note, we can't remove a zeroed page as hyper space does not
+ // exist and we map non-zeroed pages into hyper space to zero.
+ //
+
+ PointerPte = MiGetPdeAddress(HYPER_SPACE);
+
+ ASSERT (PointerPte->u.Hard.Valid == 1);
+ PointerPte->u.Hard.Write = 1;
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+
+ //
+ // Point to the page table page we just created and zero it.
+ //
+
+ PointerPte = MiGetPteAddress(HYPER_SPACE);
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ //
+ // Hyper space now exists, set the necessary variables.
+ //
+
+ MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
+ MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
+
+ MmWorkingSetList = WORKING_SET_LIST;
+ MmWsle = (PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL));
+
+ //
+ // Initialize this process's memory management structures including
+ // the working set list.
+ //
+
+ //
+ // The pfn element for the page directory has already been initialized,
+ // zero the reference count and the share count so they won't be
+ // wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ Pfn1->u3.e1.PageColor = 0;
+
+ //
+ // The pfn element for the PDE which maps hyperspace has already
+ // been initialized, zero the reference count and the share count
+ // so they won't be wrong.
+ //
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ Pfn1->u3.e1.PageColor = 1;
+
+
+ CurrentProcess = PsGetCurrentProcess ();
+
+ //
+ // Get a page for the working set list and map it into the Page
+ // directory at the page after hyperspace.
+ //
+
+ PointerPte = MiGetPteAddress (HYPER_SPACE);
+ PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE(PointerPte));
+ CurrentProcess->WorkingSetPage = PageFrameIndex;
+
+ TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ PointerPde = MiGetPdeAddress (HYPER_SPACE) + 1;
+
+ *PointerPde = TempPte;
+
+ PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
+
+ RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
+
+ TempPte = *PointerPde;
+ TempPte.u.Hard.Valid = 0;
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeFlushSingleTb (PointerPte,
+ TRUE,
+ FALSE,
+ (PHARDWARE_PTE)PointerPde,
+ TempPte.u.Hard);
+
+ KeLowerIrql(OldIrql);
+
+ CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
+ CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
+
+ MmInitializeProcessAddressSpace (CurrentProcess,
+ (PEPROCESS)NULL,
+ (PVOID)NULL);
+
+ *PointerPde = ZeroKernelPte;
+
+ //
+ // Check to see if moving the secondary page structures to the end
+ // of the PFN database is a waste of memory. And if so, copy it
+ // to paged pool.
+ //
+ // If the PFN database ends on a page aligned boundary and the
+ // size of the two arrays is less than a page, free the page
+ // and allocate nonpagedpool for this.
+ //
+
+ if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
+ ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
+
+ PMMCOLOR_TABLES c;
+
+ c = MmFreePagesByColor[0];
+
+ MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
+ ' mM');
+
+ MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
+
+ RtlMoveMemory (MmFreePagesByColor[0],
+ c,
+ MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
+
+ //
+ // Free the page.
+ //
+
+ if (!MI_IS_PHYSICAL_ADDRESS(c)) {
+ PointerPte = MiGetPteAddress(c);
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ *PointerPte = ZeroKernelPte;
+ } else {
+ PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c);
+ }
+
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+ ASSERT ((Pfn1->u3.e2.ReferenceCount <= 1) && (Pfn1->u2.ShareCount <= 1));
+ Pfn1->u2.ShareCount = 0;
+ Pfn1->u3.e2.ReferenceCount = 0;
+ MI_SET_PFN_DELETED (Pfn1);
+#if DBG
+ Pfn1->u3.e1.PageLocation = StandbyPageList;
+#endif //DBG
+ MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex);
+ }
+
+ return;
+}
+
diff --git a/private/ntos/mm/ppc/mippc.h b/private/ntos/mm/ppc/mippc.h
new file mode 100644
index 000000000..c8f2ea751
--- /dev/null
+++ b/private/ntos/mm/ppc/mippc.h
@@ -0,0 +1,2034 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ mippc.h
+
+Abstract:
+
+ This module contains the private data structures and procedure
+ prototypes for the hardware dependent portion of the
+ memory management system.
+
+ It is specifically tailored for PowerPC.
+
+Author:
+
+ Lou Perazzoli (loup) 9-Jan-1991
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-1993
+
+Revision History:
+
+--*/
+
+/*++
+
+ Virtual Memory Layout for PowerPC is:
+
+ +------------------------------------+
+ 00000000 | |
+ | |
+ | |
+ | User Mode Addresses |
+ | |
+ | All pages within this range |
+ | are potentially accessable while |
+ | the CPU is in USER mode. |
+ | |
+ | |
+ +------------------------------------+
+ 7fff0000 | 64k No Access Area |
+ +------------------------------------+
+ 80000000 | | KSEG0
+ | OsLoader loads critical parts |
+ | of boot code and data in |
+ | this region. Mapped by BAT0. |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ 8xxx0000 | | KSEG1 KSEG2
+ | OsLoader loads remaining boot |
+ | code and data here. Mapped |
+ | by segment register 8. |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ 8yyy0000 | |
+ | |
+ | Unused NO ACCESS |
+ | |
+ | |
+ +------------------------------------+
+ 90000000 | System Cache Working Set |
+ 90400000 | System Cache |
+ | |
+ | |
+ | |
+ AE000000 | Kernel mode access only. |
+ +------------------------------------+
+ C0000000 | Page Table Pages mapped through |
+ | this 4mb region |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ C0400000 | HyperSpace - working set lists |
+ | and per process memory mangement |
+ | structures mapped in this 4mb |
+ | region. |
+ | Kernel mode access only. |
+ +------------------------------------+
+ C0800000 | NO ACCESS AREA |
+ | |
+ +------------------------------------+
+ D0000000 | System mapped views |
+ | Kernel mode access only. |
+ | |
+ +------------------------------------+
+ D3000000 | Start of paged system area |
+ | Kernel mode access only. |
+ | |
+ | |
+ | |
+ +------------------------------------+
+ E0000000 | |
+ | Kernel mode access only. |
+ | |
+ | |
+ EFBFFFFF | NonPaged System area |
+ +------------------------------------+
+ EFC00000 | Last 4mb reserved for HAL usage |
+ +------------------------------------+
+ F0000000 | Unused, No access. |
+ | |
+ FFFFD000 | Per Processor PCR |
+ FFFFE000 | Shared PCR2 |
+ FFFFF000 | Debugger Page for physical memory |
+ +------------------------------------+
+
+ Segment Register usage
+
+ 0 - 7 User mode addresses, switched at Process Switch time
+ 8 Constant, shared amongst processors and processes.
+ No change on switch to user mode but always invalid for
+ user mode. Very low part of this range is KSEG0, mapped
+ by a BAT register.
+ 9 - A Constant, Shared amongst processors and processes,
+ invalidated while in user mode.
+ C Per process kernel data. invalidated while in user mode.
+ D Constant, Shared amongst processors and processes,
+ invalidated while in user mode.
+ E Constant, shared amongst processors and processes.
+ No change on switch to user mode but always invalid for
+ user mode.
+ F Per processor. Kernel mode access only.
+
+--*/
+
+//
+// PAGE_SIZE for PowerPC is 4k, virtual page is 20 bits with a PAGE_SHIFT
+// byte offset.
+//
+
+#define MM_VIRTUAL_PAGE_SHIFT 20
+
+//
+// Address space layout definitions.
+//
+
+//#define PDE_BASE ((ULONG)0xC0300000)
+
+//#define PTE_BASE ((ULONG)0xC0000000)
+
+#define MM_SYSTEM_RANGE_START (0x80000000)
+
+#define MM_SYSTEM_SPACE_START (0xD0000000)
+
+//
+// N.B. This should ONLY be used for copying PDEs.
+// Segment 15 is only used for PCR pages,
+// hardwired PDE for the debuggers, and
+// crash dump.
+//
+
+#define MM_SYSTEM_SPACE_END (0xFFFFFFFF)
+
+#define MM_HAL_RESERVED (0xFFC00000)
+
+#define PDE_TOP 0xC03FFFFF
+
+#define HYPER_SPACE ((PVOID)0xC0400000)
+
+#define HYPER_SPACE_END 0xC07fffff
+
+//
+// Define the start and maximum size for the system cache.
+// Maximum size 476MB.
+//
+
+#define MM_SYSTEM_CACHE_AND_POOL_DISJOINT 1
+
+#define MM_SYSTEM_CACHE_WORKING_SET (0x90000000)
+
+#define MM_SYSTEM_CACHE_START (0x90400000)
+
+#define MM_SYSTEM_CACHE_END (0xAE000000)
+
+#define MM_MAXIMUM_SYSTEM_CACHE_SIZE \
+ (((ULONG)MM_SYSTEM_CACHE_END - (ULONG)MM_SYSTEM_CACHE_START) >> PAGE_SHIFT)
+
+//
+// Tell MM that boot code and data is pageable.
+//
+
+#define MM_BOOT_CODE_PAGEABLE 1
+
+#define MM_BOOT_CODE_START (0x80000000)
+#define MM_BOOT_CODE_END (0x90000000)
+
+//
+// Define MM_SYSTEM_CACHE_AND_POOL_DISJOINT so that MmCreateProcessAddressSpace
+// knows that it has to do two RtlCopyMemorys to copy the PDEs for the cache
+// and the rest of system space.
+//
+
+#define MM_SYSTEM_CACHE_AND_POOL_DISJOINT 1
+
+
+//
+// Define area for mapping views into system space.
+//
+
+#define MM_SYSTEM_VIEW_START (0xD0000000)
+
+#define MM_SYSTEM_VIEW_SIZE (48*1024*1024)
+
+#define MM_PAGED_POOL_START ((PVOID)(0xD3000000))
+
+#define MM_LOWEST_NONPAGED_SYSTEM_START ((PVOID)(0xE0000000))
+
+#define MmProtopte_Base ((ULONG)0xD3000000)
+
+#define MM_NONPAGED_POOL_END ((PVOID)(0xEFC00000))
+
+#define NON_PAGED_SYSTEM_END ((ULONG)0xEFFFFFF0) //quadword aligned.
+
+//
+// Define absolute minimum and maximum count for system ptes.
+//
+
+#define MM_MINIMUM_SYSTEM_PTES 9000
+
+#define MM_MAXIMUM_SYSTEM_PTES 35000
+
+#define MM_DEFAULT_SYSTEM_PTES 15000
+
+//
+// Pool limits
+//
+
+//
+// The maximum amount of nonpaged pool that can be initially created.
+//
+
+#define MM_MAX_INITIAL_NONPAGED_POOL ((ULONG)(128*1024*1024))
+
+//
+// The total amount of nonpaged pool (initial pool + expansion + system PTEs).
+//
+
+#define MM_MAX_ADDITIONAL_NONPAGED_POOL ((ULONG)(192*1024*1024))
+
+//
+// The maximum amount of paged pool that can be created.
+//
+
+#define MM_MAX_PAGED_POOL ((ULONG)(176*1024*1024))
+
+#define MM_MAX_TOTAL_POOL (((ULONG)MM_NONPAGED_POOL_END) - ((ULONG)(MM_PAGED_POOL_START)))
+
+
+//
+// Structure layout definitions.
+//
+
+#define PAGE_DIRECTORY_MASK ((ULONG)0x003FFFFF)
+
+#define MM_VA_MAPPED_BY_PDE (0x400000)
+
+// N.B. this is probably a real address, for what purpose?
+#define LOWEST_IO_ADDRESS (0x80000000)
+
+#define PTE_SHIFT (2)
+
+//
+// The number of bits in a physical address.
+//
+
+#define PHYSICAL_ADDRESS_BITS (32)
+
+#define MM_PROTO_PTE_ALIGNMENT ((ULONG)MM_MAXIMUM_NUMBER_OF_COLORS * (ULONG)PAGE_SIZE)
+
+//
+// Maximum number of paging files.
+//
+
+#define MAX_PAGE_FILES 16
+
+//
+// Hyper space definitions.
+//
+
+#define FIRST_MAPPING_PTE ((ULONG)0xC0400000)
+
+#define NUMBER_OF_MAPPING_PTES 255
+
+#define LAST_MAPPING_PTE \
+ ((ULONG)((ULONG)FIRST_MAPPING_PTE + (NUMBER_OF_MAPPING_PTES * PAGE_SIZE)))
+
+#define IMAGE_MAPPING_PTE ((PMMPTE)((ULONG)LAST_MAPPING_PTE + PAGE_SIZE))
+
+#define ZEROING_PAGE_PTE ((PMMPTE)((ULONG)IMAGE_MAPPING_PTE + PAGE_SIZE))
+
+#define WORKING_SET_LIST ((PVOID)((ULONG)ZEROING_PAGE_PTE + PAGE_SIZE))
+
+#define MM_MAXIMUM_WORKING_SET \
+ ((ULONG)((ULONG)2*1024*1024*1024 - 64*1024*1024) >> PAGE_SHIFT) //2Gb-64Mb
+
+#define MM_WORKING_SET_END ((ULONG)0xC07FF000)
+
+//
+// Define masks for fields within the PTE.
+//
+
+#define MM_PTE_PROTOTYPE_MASK 0x1
+#define MM_PTE_VALID_MASK 0x4
+#define MM_PTE_CACHE_DISABLE_MASK 0x28 // CacheInhibit | Guard
+#define MM_PTE_TRANSITION_MASK 0x2
+#define MM_PTE_WRITE_MASK 0x200
+#define MM_PTE_COPY_ON_WRITE_MASK 0x400
+
+//
+// Bit fields to or into PTE to make a PTE valid based on the
+// protection field of the invalid PTE.
+//
+
+#define MM_PTE_NOACCESS 0x0 // not expressable on PowerPC
+#define MM_PTE_READONLY 0x3
+#define MM_PTE_READWRITE (0x3 | MM_PTE_WRITE_MASK)
+#define MM_PTE_WRITECOPY (0x3 | MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_EXECUTE 0x3 // read-only on PowerPC
+#define MM_PTE_EXECUTE_READ 0x3
+#define MM_PTE_EXECUTE_READWRITE (0x3 | MM_PTE_WRITE_MASK)
+#define MM_PTE_EXECUTE_WRITECOPY (0x3 | MM_PTE_WRITE_MASK | MM_PTE_COPY_ON_WRITE_MASK)
+#define MM_PTE_NOCACHE (MM_PTE_CACHE_DISABLE_MASK)
+#define MM_PTE_GUARD 0x0 // not expressable on PowerPC
+#define MM_PTE_CACHE 0x0
+
+#define MM_PROTECT_FIELD_SHIFT 3
+
+//
+// Zero PTE
+//
+
+#define MM_ZERO_PTE 0
+
+//
+// Zero Kernel PTE
+//
+
+#define MM_ZERO_KERNEL_PTE 0
+
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE.
+//
+
+#define MM_DEMAND_ZERO_WRITE_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+//
+// A demand zero PTE with a protection of PAGE_READWRITE for system space.
+//
+
+#define MM_KERNEL_DEMAND_ZERO_PTE (MM_READWRITE << MM_PROTECT_FIELD_SHIFT)
+
+//
+// A no access PTE for system space.
+//
+
+#define MM_KERNEL_NOACCESS_PTE (MM_NOACCESS << MM_PROTECT_FIELD_SHIFT)
+
+//
+// Dirty bit definitions for clean and dirty.
+//
+
+#define MM_PTE_CLEAN 3
+#define MM_PTE_DIRTY 0
+
+
+//
+// Kernel stack alignment requirements.
+//
+
+#define MM_STACK_ALIGNMENT 0x0
+#define MM_STACK_OFFSET 0x0
+
+//
+// System process definitions
+//
+
+#define PDE_PER_PAGE ((ULONG)1024)
+
+#define PTE_PER_PAGE ((ULONG)1024)
+
+//
+// Number of page table pages for user addresses.
+//
+
+#define MM_USER_PAGE_TABLE_PAGES (512)
+
+//
+// Indicate the number of page colors required.
+//
+
+#define MM_NUMBER_OF_COLORS 2
+#define MM_MAXIMUM_NUMBER_OF_COLORS 2
+
+//
+// Mask for obtaining color from a physical page number.
+//
+
+#define MM_COLOR_MASK 1
+
+//
+// Define secondary color stride.
+//
+
+#define MM_COLOR_STRIDE 3
+
+//
+// Boundary for aligned pages of like color upon.
+//
+
+#define MM_COLOR_ALIGNMENT 0x2000
+
+//
+// Mask for isolating color from virtual address.
+//
+
+#define MM_COLOR_MASK_VIRTUAL 0x1000
+
+//
+// Define 256K worth of secondary colors.
+//
+
+#define MM_SECONDARY_COLORS_DEFAULT ((256*1024) >> PAGE_SHIFT)
+
+#define MM_SECONDARY_COLORS_MIN (2)
+
+#define MM_SECONDARY_COLORS_MAX (2048)
+
+//
+// Mask for isolating secondary color from physical page number;
+//
+
+extern ULONG MmSecondaryColorMask;
+
+//
+// Define macro to initialize directory table base.
+//
+
+#define INITIALIZE_DIRECTORY_TABLE_BASE(dirbase,pfn) \
+ *((PULONG)(dirbase)) = ((pfn) << PAGE_SHIFT)
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE (
+// OUT OUTPTE,
+// IN FRAME,
+// IN PMASK,
+// IN OWNER
+// );
+//
+// Routine Description:
+//
+// This macro makes a valid PTE from a page frame number, protection mask,
+// and owner.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// FRAME - Supplies the page frame number for the PTE.
+//
+// PMASK - Supplies the protection to set in the transition PTE.
+//
+// PPTE - Supplies a pointer to the PTE which is being made valid.
+// For prototype PTEs NULL should be specified.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE(OUTPTE,FRAME,PMASK,PPTE) \
+ { \
+ (OUTPTE).u.Long = ((FRAME << 12) | \
+ (MmProtectToPteMask[PMASK]) | \
+ MM_PTE_VALID_MASK); \
+ if (((OUTPTE).u.Hard.Write == 1) && \
+ (((PMMPTE)PPTE) >= MiGetPteAddress(MM_LOWEST_NONPAGED_SYSTEM_START)))\
+ { \
+ (OUTPTE).u.Hard.Dirty = MM_PTE_DIRTY; \
+ } \
+ }
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_TRANSITION (
+// IN OUT OUTPTE
+// IN PROTECT
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid pte and turns it into a transition PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the current valid PTE. This PTE is then
+// modified to become a transition PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_TRANSITION(OUTPTE,PROTECT) \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Valid = 0; \
+ (OUTPTE).u.Trans.Prototype = 0; \
+ (OUTPTE).u.Trans.Protection = PROTECT;
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE (
+// OUT OUTPTE,
+// IN PAGE,
+// IN PROTECT,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro builds a transition PTE from a page frame number and protection.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the transition PTE.
+//
+// PAGE - Supplies the page frame number for the PTE.
+//
+// PROTECT - Supplies the protection to set in the transition PTE.
+//
+// PPTE - Supplies a pointer to the PTE, this is used to determine
+// the owner of the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE(OUTPTE,PAGE,PROTECT,PPTE) \
+ (OUTPTE).u.Long = 0; \
+ (OUTPTE).u.Trans.PageFrameNumber = PAGE; \
+ (OUTPTE).u.Trans.Transition = 1; \
+ (OUTPTE).u.Trans.Protection = PROTECT;
+
+
+//++
+//VOID
+//MI_MAKE_TRANSITION_PTE_VALID (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a transition pte and makes it a valid PTE.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the transition PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_TRANSITION_PTE_VALID(OUTPTE,PPTE) \
+ (OUTPTE).u.Long = (((PPTE)->u.Long & 0xFFFFF000) | \
+ (MmProtectToPteMask[(PPTE)->u.Trans.Protection]) | \
+ MM_PTE_VALID_MASK);
+
+//++
+//VOID
+//MI_SET_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set dirty.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_DIRTY(PTE) (PTE).u.Hard.Dirty = MM_PTE_DIRTY
+
+
+//++
+//VOID
+//MI_SET_PTE_CLEAN (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro clears the dirty bit(s) in the specified PTE.
+//
+// Argments
+//
+// PTE - Supplies the PTE to set clear.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PTE_CLEAN(PTE) (PTE).u.Hard.Dirty = MM_PTE_CLEAN
+
+
+
+//++
+//VOID
+//MI_IS_PTE_DIRTY (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks the dirty bit(s) in the specified PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to check.
+//
+// Return Value:
+//
+// TRUE if the page is dirty (modified), FALSE otherwise.
+//
+//--
+
+#define MI_IS_PTE_DIRTY(PTE) ((PTE).u.Hard.Dirty != MM_PTE_CLEAN)
+
+
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_BIT_IF_SYSTEM (
+// OUT OUTPTE,
+// IN PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global bit if the pointer PTE is within
+// system space.
+//
+// Arguments
+//
+// OUTPTE - Supplies the PTE in which to build the valid PTE.
+//
+// PPTE - Supplies a pointer to the PTE becoming valid.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_BIT_IF_SYSTEM(OUTPTE,PPTE)
+
+
+//++
+//VOID
+//MI_SET_GLOBAL_STATE (
+// IN MMPTE PTE,
+// IN ULONG STATE
+// );
+//
+// Routine Description:
+//
+// This macro sets the global bit in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to set global state into.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_GLOBAL_STATE(PTE,STATE)
+
+
+
+//++
+//VOID
+//MI_ENABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// enabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_ENABLE_CACHING(PTE) \
+ ((PTE).u.Hard.CacheDisable = (PTE).u.Hard.GuardedStorage = 0)
+
+
+//++
+//VOID
+//MI_DISABLE_CACHING (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and sets the caching state to be
+// disabled.
+//
+// Arguments
+//
+// PTE - Supplies a valid PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_DISABLE_CACHING(PTE) \
+ ((PTE).u.Hard.CacheDisable = (PTE).u.Hard.GuardedStorage = 1)
+
+
+//++
+//BOOLEAN
+//MI_IS_CACHING_DISABLED (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro takes a valid PTE and returns TRUE if caching is
+// disabled.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the valid PTE.
+//
+// Return Value:
+//
+// TRUE if caching is disabled, FALSE if it is enabled.
+//
+//--
+
+#define MI_IS_CACHING_DISABLED(PPTE) \
+ ((PPTE)->u.Hard.CacheDisable == 1)
+
+
+//++
+//VOID
+//MI_SET_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and indicates that
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_SET_PFN_DELETED(PPFN) ((PPFN)->PteAddress = (PMMPTE)0xFFFFFFFF)
+
+
+//++
+//BOOLEAN
+//MI_IS_PFN_DELETED (
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro takes a pointer to a PFN element and determines if
+// the PFN is no longer in use.
+//
+// Arguments
+//
+// PPTE - Supplies a pointer to the PFN element.
+//
+// Return Value:
+//
+// TRUE if PFN is no longer used, FALSE if it is still being used.
+//
+//--
+
+#define MI_IS_PFN_DELETED(PPFN) \
+ ((PPFN)->PteAddress == (PMMPTE)0xFFFFFFFF)
+
+
+//++
+//VOID
+//MI_CHECK_PAGE_ALIGNMENT (
+// IN ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro takes a PFN element number (Page) and checks to see
+// if the virtual alignment for the previous address of the page
+// is compatible with the new address of the page. If they are
+// not compatible, the D cache is flushed.
+//
+// Arguments
+//
+// PAGE - Supplies the PFN element.
+// PPTE - Supplies a pointer to the new PTE which will contain the page.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_CHECK_PAGE_ALIGNMENT(PAGE,COLOR) \
+{ \
+ PMMPFN PPFN; \
+ ULONG OldColor; \
+ PPFN = MI_PFN_ELEMENT(PAGE); \
+ OldColor = PPFN->u3.e1.PageColor; \
+ if ((COLOR) != OldColor) { \
+ PPFN->u3.e1.PageColor = COLOR; \
+ } \
+}
+
+
+//++
+//VOID
+//MI_INITIALIZE_HYPERSPACE_MAP (
+// HYPER_PAGE
+// );
+//
+// Routine Description:
+//
+// This macro initializes the PTEs reserved for double mapping within
+// hyperspace.
+//
+// Arguments
+//
+// HYPER_PAGE - Physical page number for the page to become hyperspace.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_INITIALIZE_HYPERSPACE_MAP(HYPER_PAGE) \
+ { \
+ PMMPTE Base; \
+ KIRQL OldIrql; \
+ Base = MiMapPageInHyperSpace (HYPER_PAGE, &OldIrql); \
+ Base->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES; \
+ MiUnmapPageInHyperSpace (OldIrql); \
+ }
+
+
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_PTE (
+// IN PMMPTE PTEADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the pages color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+// PTEADDRESS - Supplies the PTE address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The pages color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_PTE(PTEADDRESS) \
+ ((ULONG)((MmSystemPageColor += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(PTEADDRESS)) >> 2) & MM_COLOR_MASK))
+
+//++
+//ULONG
+//MI_GET_PAGE_COLOR_FROM_VA (
+// IN PVOID ADDRESS
+// );
+//
+// Routine Description:
+//
+// This macro determines the pages color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The pages color.
+//
+//--
+
+#define MI_GET_PAGE_COLOR_FROM_VA(ADDRESS) \
+ ((ULONG)((MmSystemPageColor += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(ADDRESS)) >> PAGE_SHIFT) & MM_COLOR_MASK))
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_PTE_PROCESS (
+// IN PCHAR COLOR,
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro determines the pages color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+//
+// Return Value:
+//
+// The pages color.
+//
+//--
+
+#define MI_PAGE_COLOR_PTE_PROCESS(PTE,COLOR) \
+ ((ULONG)(((*(COLOR)) += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(PTE)) >> 2) & MM_COLOR_MASK))
+
+
+//++
+//ULONG
+//MI_PAGE_COLOR_VA_PROCESS (
+// IN PVOID ADDRESS,
+// IN PEPROCESS COLOR
+// );
+//
+// Routine Description:
+//
+// This macro determines the pages color based on the PTE address
+// that maps the page.
+//
+// Arguments
+//
+// ADDRESS - Supplies the address the page is (or was) mapped at.
+//
+// Return Value:
+//
+// The pages color.
+//
+//--
+
+#define MI_PAGE_COLOR_VA_PROCESS(ADDRESS,COLOR) \
+ ((ULONG)(((*(COLOR)) += MM_COLOR_STRIDE) & \
+ MmSecondaryColorMask) | \
+ ((((ULONG)(ADDRESS)) >> PAGE_SHIFT) & MM_COLOR_MASK))
+
+
+//++
+//ULONG
+//MI_GET_NEXT_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the next color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the next of.
+//
+// Return Value:
+//
+// Next color in sequence.
+//
+//--
+
+#define MI_GET_NEXT_COLOR(COLOR) ((COLOR + 1) & MM_COLOR_MASK)
+
+
+//++
+//ULONG
+//MI_GET_PREVIOUS_COLOR (
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the previous color in the sequence.
+//
+// Arguments
+//
+// COLOR - Supplies the color to return the previous of.
+//
+// Return Value:
+//
+// Previous color in sequence.
+//
+//--
+
+#define MI_GET_PREVIOUS_COLOR(COLOR) ((COLOR - 1) & MM_COLOR_MASK)
+
+#define MI_GET_SECONDARY_COLOR(PAGE,PFN) \
+ ((((ULONG)(PAGE) & MmSecondaryColorMask)) | (PFN)->u3.e1.PageColor)
+
+#define MI_GET_COLOR_FROM_SECONDARY(COLOR) ((COLOR) & MM_COLOR_MASK)
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_BY_COLOR (
+// OUT ULONG PAGE,
+// IN ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. It does NOT remove the page
+// from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_BY_COLOR(PAGE,COLOR) \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink
+
+
+//++
+//VOID
+//MI_GET_MODIFIED_PAGE_ANY_COLOR (
+// OUT ULONG PAGE,
+// IN OUT ULONG COLOR
+// );
+//
+// Routine Description:
+//
+// This macro returns the first page destined for a paging
+// file with the desired color. If no page of the desired
+// color exists, all colored lists are searched for a page.
+// It does NOT remove the page from its list.
+//
+// Arguments
+//
+// PAGE - Returns the page located, the value MM_EMPTY_LIST is
+// returned if there is no page of the specified color.
+//
+// COLOR - Supplies the color of page to locate and returns the
+// color of the page located.
+//
+// Return Value:
+//
+// none.
+//
+//--
+
+#define MI_GET_MODIFIED_PAGE_ANY_COLOR(PAGE,COLOR) \
+ { \
+ if (MmTotalPagesForPagingFile == 0) { \
+ PAGE = MM_EMPTY_LIST; \
+ } else { \
+ while (MmModifiedPageListByColor[COLOR].Flink == \
+ MM_EMPTY_LIST) { \
+ COLOR = MI_GET_NEXT_COLOR(COLOR); \
+ } \
+ PAGE = MmModifiedPageListByColor[COLOR].Flink; \
+ } \
+ }
+
+
+//++
+//VOID
+//MI_MAKE_VALID_PTE_WRITE_COPY (
+// IN OUT PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if the PTE indicates that the
+// page is writable and if so it clears the write bit and
+// sets the copy-on-write bit.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKE_VALID_PTE_WRITE_COPY(PPTE) \
+ if ((PPTE)->u.Hard.Write == 1) { \
+ (PPTE)->u.Hard.CopyOnWrite = 1; \
+ (PPTE)->u.Hard.Dirty = MM_PTE_CLEAN; \
+ }
+
+
+//++
+//ULONG
+//MI_DETERMINE_OWNER (
+// IN MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro examines the virtual address of the PTE and determines
+// if the PTE resides in system space or user space.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// 1 if the owner is USER_MODE, 0 if the owner is KERNEL_MODE.
+//
+//--
+
+#define MI_DETERMINE_OWNER(PPTE) \
+ ((((PPTE) <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) || \
+ ((PPTE) >= MiGetPdeAddress(NULL) && \
+ ((PPTE) <= MiGetPdeAddress(MM_HIGHEST_USER_ADDRESS)))) ? 1 : 0)
+
+
+//++
+//VOID
+//MI_SET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro sets the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_ACCESSED_IN_PTE(PPTE,ACCESSED)
+
+
+//++
+//ULONG
+//MI_GET_ACCESSED_IN_PTE (
+// IN OUT MMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro returns the state of the ACCESSED field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the ACCESSED field.
+//
+//--
+
+#define MI_GET_ACCESSED_IN_PTE(PPTE) 0
+
+
+//++
+//VOID
+//MI_SET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// IN ULONG OWNER
+// );
+//
+// Routine Description:
+//
+// This macro sets the owner field in the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_OWNER_IN_PTE(PPTE,OWNER)
+
+
+//++
+//ULONG
+//MI_GET_OWNER_IN_PTE (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro gets the owner field from the PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The state of the OWNER field.
+//
+//--
+
+#define MI_GET_OWNER_IN_PTE(PPTE) KernelMode
+
+
+// bit mask to clear out fields in a PTE to or in paging file location.
+
+#define CLEAR_FOR_PAGE_FILE ((ULONG)(0x0F8))
+
+
+//++
+//VOID
+//MI_SET_PAGING_FILE_INFO (
+// IN OUT MMPTE PPTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// FILEINFO - Supplies the number of the paging file.
+//
+// OFFSET - Supplies the offset into the paging file.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define SET_PAGING_FILE_INFO(PTE,FILEINFO,OFFSET) \
+ ((((PTE).u.Long & CLEAR_FOR_PAGE_FILE) | \
+ (((FILEINFO) << 8) | \
+ (OFFSET << 12))))
+
+
+//++
+//PMMPTE
+//MiPteToProto (
+// IN OUT MMPTE PPTE,
+// IN ULONG FILEINFO,
+// IN ULONG OFFSET
+// );
+//
+// Routine Description:
+//
+// This macro returns the address of the corresponding prototype which
+// was encoded earlier into the supplied PTE.
+//
+// NOTE THAT AS PROTOPTE CAN RESIDE IN BOTH PAGED AND NONPAGED POOL
+// THIS MACRO LIMITS THE COMBINED SIZES OF TWO POOLS AND REQUIRES THEM
+// TO BE WITHIN THE MAX SIZE CONSTRAINTS
+//
+// MAX SIZE = 2^(2+8+20) = 2^30 = 1GB
+//
+// NOTE, that the valid bit must be zero!
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Pointer to the prototype PTE that backs this PTE.
+//
+//--
+
+#define MiPteToProto(lpte) ((PMMPTE)((((lpte)->u.Long >> 4) << 2) + \
+ MmProtopte_Base))
+
+
+//++
+//ULONG
+//MiProtoAddressForPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+// MiProtoAddressForPte returns the bit field to OR into the PTE to
+// reference a prototype PTE. And set the protoPTE bit,
+// MM_PTE_PROTOTYPE_MASK.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForPte(proto_va) \
+ ((ULONG)((((ULONG)proto_va - MmProtopte_Base) << 2) | MM_PTE_PROTOTYPE_MASK))
+
+
+//++
+//ULONG
+//MiProtoAddressForKernelPte (
+// IN PMMPTE proto_va
+// );
+//
+// Routine Description:
+//
+// This macro sets into the specified PTE the supplied information
+// to indicate where the backing store for the page is located.
+// MiProtoAddressForPte returns the bit field to OR into the PTE to
+// reference a prototype PTE. And set the protoPTE bit,
+// MM_PTE_PROTOTYPE_MASK.
+//
+// This macro also sets any other information (such as global bits)
+// required for kernel mode PTEs.
+//
+// Arguments
+//
+// proto_va - Supplies the address of the prototype PTE.
+//
+// Return Value:
+//
+// Mask to set into the PTE.
+//
+//--
+
+#define MiProtoAddressForKernelPte(proto_va) MiProtoAddressForPte(proto_va)
+
+
+//++
+//PSUBSECTION
+//MiGetSubsectionAddress (
+// IN PMMPTE lpte
+// );
+//
+// Routine Description:
+//
+// This macro takes a PTE and returns the address of the subsection that
+// the PTE refers to. Subsections are quadword structures allocated
+// from nonpaged pool.
+//
+// NOTE THIS MACRO LIMITS THE SIZE OF NONPAGED POOL!
+// MAXIMUM NONPAGED POOL = 2^(3+1+24) = 2^28 = 256mb.
+//
+//
+// Arguments
+//
+// lpte - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// A pointer to the subsection referred to by the supplied PTE.
+//
+//--
+
+#define MiGetSubsectionAddress(lpte) \
+ ((PSUBSECTION)((ULONG)MM_NONPAGED_POOL_END - \
+ (((((lpte)->u.Long) >> 8) << 4) | \
+ ((((lpte)->u.Long) << 2) & 0x8))))
+
+
+//++
+//ULONG
+//MiGetSubsectionAddressForPte (
+// IN PSUBSECTION VA
+// );
+//
+// Routine Description:
+//
+// This macro takes the address of a subsection and encodes it for use
+// in a PTE.
+//
+// NOTE - THE SUBSECTION ADDRESS MUST BE QUADWORD ALIGNED!
+//
+// Arguments
+//
+// VA - Supplies a pointer to the subsection to encode.
+//
+// Return Value:
+//
+// The mask to set into the PTE to make it reference the supplied
+// subsection.
+//
+//--
+
+#define MiGetSubsectionAddressForPte(VA) \
+ (((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA) << 4) & (ULONG)0xffffff00) | \
+ ((((ULONG)MM_NONPAGED_POOL_END - (ULONG)VA) >> 2) & (ULONG)0x2))
+
+
+//++
+//PMMPTE
+//MiGetPdeAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeAddress returns the address of the PDE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PDE for.
+//
+// Return Value:
+//
+// The address of the PDE.
+//
+//--
+
+#define MiGetPdeAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 22) << 2) + PDE_BASE))
+
+
+//++
+//PMMPTE
+//MiGetPteAddress (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteAddress returns the address of the PTE which maps the
+// given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the PTE for.
+//
+// Return Value:
+//
+// The address of the PTE.
+//
+//--
+
+#define MiGetPteAddress(va) ((PMMPTE)(((((ULONG)(va)) >> 12) << 2) + PTE_BASE))
+
+
+//++
+//ULONG
+//MiGetPdeOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPdeOffset returns the offset into a page directory
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page directory table the corresponding PDE is at.
+//
+//--
+
+#define MiGetPdeOffset(va) (((ULONG)(va)) >> 22)
+
+
+//++
+//ULONG
+//MiGetPteOffset (
+// IN PVOID va
+// );
+//
+// Routine Description:
+//
+// MiGetPteOffset returns the offset into a page table page
+// for a given virtual address.
+//
+// Arguments
+//
+// Va - Supplies the virtual address to locate the offset for.
+//
+// Return Value:
+//
+// The offset into the page table page the corresponding PTE is at.
+//
+//--
+
+#define MiGetPteOffset(va) ((((ULONG)(va)) << 10) >> 22)
+
+
+//++
+//PMMPTE
+//MiGetProtoPteAddress (
+// IN PMMPTE VAD,
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// MiGetProtoPteAddress returns a pointer to the prototype PTE which
+// is mapped by the given virtual address descriptor and address within
+// the virtual address descriptor.
+//
+// Arguments
+//
+// VAD - Supplies a pointer to the virtual address descriptor that contains
+// the VA.
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// A pointer to the proto PTE which corresponds to the VA.
+//
+//--
+
+#define MiGetProtoPteAddress(VAD,VA) \
+ (((((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte) <= (ULONG)(VAD)->LastContiguousPte) ? \
+ ((PMMPTE)(((((ULONG)(VA) - (ULONG)(VAD)->StartingVa) >> PAGE_SHIFT) << PTE_SHIFT) + \
+ (ULONG)(VAD)->FirstPrototypePte)) : \
+ MiGetProtoPteAddressExtended ((VAD),(VA)))
+
+
+//++
+//PVOID
+//MiGetVirtualAddressMappedByPte (
+// IN PMMPTE PTE
+// );
+//
+// Routine Description:
+//
+// MiGetVirtualAddressMappedByPte returns the virtual address
+// which is mapped by a given PTE address.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to get the virtual address for.
+//
+// Return Value:
+//
+// Virtual address mapped by the PTE.
+//
+//--
+
+#define MiGetVirtualAddressMappedByPte(va) ((PVOID)((ULONG)(va) << 10))
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_NUMBER (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the paging file number from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file number.
+//
+//--
+
+#define GET_PAGING_FILE_NUMBER(PTE) ((((PTE).u.Long) >> 8) & 0xF)
+
+
+//++
+//ULONG
+//GET_PAGING_FILE_OFFSET (
+// IN MMPTE PTE
+// );
+//
+// Routine Description:
+//
+// This macro extracts the offset into the paging file from a PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// The paging file offset.
+//
+//--
+
+#define GET_PAGING_FILE_OFFSET(PTE) ((((PTE).u.Long) >> 12) & 0x000FFFFF)
+
+
+//++
+//ULONG
+//IS_PTE_NOT_DEMAND_ZERO (
+// IN PMMPTE PPTE
+// );
+//
+// Routine Description:
+//
+// This macro checks to see if a given PTE is NOT a demand zero PTE.
+//
+// Arguments
+//
+// PTE - Supplies the PTE to operate upon.
+//
+// Return Value:
+//
+// Returns 0 if the PTE is demand zero, non-zero otherwise.
+//
+//--
+
+#define IS_PTE_NOT_DEMAND_ZERO(PTE) ((PTE).u.Long & (ULONG)0xFFFFF007)
+
+
+//++
+//VOID
+//MI_MAKING_VALID_PTE_INVALID(
+//    IN ULONG SYSTEM_WIDE
+// );
+//
+// Routine Description:
+//
+// Prepare to make a single valid PTE invalid.
+// No action is required on PowerPC.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKING_VALID_PTE_INVALID(SYSTEM_WIDE)
+
+
+//++
+//VOID
+//MI_MAKING_MULTIPLE_PTES_INVALID(
+//    IN ULONG SYSTEM_WIDE
+// );
+//
+// Routine Description:
+//
+// Prepare to make multiple valid PTEs invalid.
+// No action is required on PowerPC.
+//
+// Arguments
+//
+// SYSTEM_WIDE - Supplies TRUE if this will happen on all processors.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_MAKING_MULTIPLE_PTES_INVALID(SYSTEM_WIDE)
+
+
+//
+// Make a writable PTE, writeable-copy PTE. This takes advantage of
+// the fact that the protection field in the PTE (5 bit protection) is
+// set up such that write is a bit.
+//
+
+#define MI_MAKE_PROTECT_WRITE_COPY(PTE) \
+ if ((PTE).u.Long & 0x20) { \
+ ((PTE).u.Long |= 0x8); \
+ }
+
+
+//++
+//VOID
+//MI_SET_PAGE_DIRTY(
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro sets the dirty bit (and release page file space).
+//
+// Arguments
+//
+// TEMP - Supplies a temporary for usage.
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies a the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_SET_PAGE_DIRTY(PPTE,VA,PFNHELD) \
+ if ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ }
+
+
+//++
+//VOID
+//MI_NO_FAULT_FOUND(
+// IN TEMP,
+// IN PMMPTE PPTE,
+// IN PVOID VA,
+// IN PVOID PFNHELD
+// );
+//
+// Routine Description:
+//
+// This macro handles the case when a page fault is taken and no
+// PTE with the valid bit clear is found.
+//
+// Arguments
+//
+// TEMP - Supplies a temporary for usage.
+//
+// PPTE - Supplies a pointer to the PTE that corresponds to VA.
+//
+// VA - Supplies a the virtual address of the page fault.
+//
+// PFNHELD - Supplies TRUE if the PFN lock is held.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_NO_FAULT_FOUND(TEMP,PPTE,VA,PFNHELD) \
+ if (StoreInstruction && ((PPTE)->u.Hard.Dirty == MM_PTE_CLEAN)) { \
+ MiSetDirtyBit ((VA),(PPTE),(PFNHELD)); \
+ } else { \
+ KeFillEntryTb ((PHARDWARE_PTE)PPTE, VA, FALSE); \
+ }
+// KeFillEntryTb((PHARDWARE_PTE)(MiGetPdeAddress(VA)),(PVOID)PPTE,FALSE);
+ //
+ // If the PTE was already valid, assume that the PTE
+ // in the TB is stale and just reload the PTE.
+ //
+
+
+//++
+//ULONG
+//MI_CAPTURE_DIRTY_BIT_TO_PFN (
+// IN PMMPTE PPTE,
+// IN PMMPFN PPFN
+// );
+//
+// Routine Description:
+//
+// This macro captures the state of the dirty bit into the PFN
+// element and frees any associated page file space if the PTE has
+// been modified.
+//
+// NOTE - THE PFN LOCK MUST BE HELD!
+//
+// Arguments
+//
+// PPTE - Supplies the PTE to operate upon.
+//
+// PPFN - Supplies a pointer to the PFN database element that corresponds
+// to the page mapped by the PTE.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#define MI_CAPTURE_DIRTY_BIT_TO_PFN(PPTE,PPFN) \
+ if (((PPFN)->u3.e1.Modified == 0) && \
+ ((PPTE)->u.Hard.Dirty == MM_PTE_DIRTY)) { \
+ (PPFN)->u3.e1.Modified = 1; \
+ if (((PPFN)->OriginalPte.u.Soft.Prototype == 0) && \
+ ((PPFN)->u3.e1.WriteInProgress == 0)) { \
+ MiReleasePageFileSpace ((PPFN)->OriginalPte); \
+ (PPFN)->OriginalPte.u.Soft.PageFileHigh = 0; \
+ } \
+ }
+
+
+//++
+//BOOLEAN
+//MI_IS_PHYSICAL_ADDRESS (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro determines if a given virtual address is really a
+// physical address.
+//
+// Arguments
+//
+// VA - Supplies the virtual address.
+//
+// Return Value:
+//
+// FALSE if it is not a physical address, TRUE if it is.
+//
+//--
+
+#define MI_IS_PHYSICAL_ADDRESS(Va) \
+ (((ULONG)Va >= KSEG0_BASE) && ((ULONG)Va < KSEG2_BASE))
+
+
+//++
+//ULONG
+//MI_CONVERT_PHYSICAL_TO_PFN (
+// IN PVOID VA
+// );
+//
+// Routine Description:
+//
+// This macro converts a physical address (see MI_IS_PHYSICAL_ADDRESS)
+// to its corresponding physical frame number.
+//
+// Arguments
+//
+// VA - Supplies a pointer to the physical address.
+//
+// Return Value:
+//
+// Returns the PFN for the page.
+//
+//--
+
+#define MI_CONVERT_PHYSICAL_TO_PFN(Va) (((ULONG)Va << 2) >> 14)
+
+
+typedef struct _MMCOLOR_TABLES {
+ ULONG Flink;
+ PVOID Blink;
+} MMCOLOR_TABLES, *PMMCOLOR_TABLES;
+
+typedef struct _MMPRIMARY_COLOR_TABLES {
+ LIST_ENTRY ListHead;
+} MMPRIMARY_COLOR_TABLES, *PMMPRIMARY_COLOR_TABLES;
+
+
+#if MM_MAXIMUM_NUMBER_OF_COLORS > 1
+extern MMPFNLIST MmFreePagesByPrimaryColor[2][MM_MAXIMUM_NUMBER_OF_COLORS];
+#endif
+
+extern PMMCOLOR_TABLES MmFreePagesByColor[2];
+
+extern ULONG MmTotalPagesForPagingFile;
+
+
+
+//
+// A valid Page Table Entry has the following definition.
+//
+
+// N.B. defined as in comments below in ../public/sdk/inc/ntppc.h
+
+// typedef struct _HARDWARE_PTE {
+// ULONG Dirty : 2;
+// ULONG Valid : 1; // software
+// ULONG GuardedStorage : 1;
+// ULONG MemoryCoherence : 1;
+// ULONG CacheDisable : 1;
+// ULONG WriteThrough : 1;
+// ULONG Change : 1;
+// ULONG Reference : 1;
+// ULONG Write : 1; // software
+// ULONG CopyOnWrite : 1; // software
+// ULONG rsvd1 : 1;
+// ULONG PageFrameNumber : 20;
+// } HARDWARE_PTE, *PHARDWARE_PTE;
+
+
+//
+// Invalid Page Table Entries have the following definitions.
+//
+
+typedef struct _MMPTE_TRANSITION {
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG Valid : 1;
+ ULONG Protection : 5;
+ ULONG filler4 : 4;
+ ULONG PageFrameNumber : 20;
+} MMPTE_TRANSITION;
+
+typedef struct _MMPTE_SOFTWARE {
+ ULONG Prototype : 1;
+ ULONG Transition : 1;
+ ULONG Valid : 1;
+ ULONG Protection : 5;
+ ULONG PageFileLow : 4;
+ ULONG PageFileHigh : 20;
+} MMPTE_SOFTWARE;
+
+typedef struct _MMPTE_PROTOTYPE {
+ ULONG Prototype : 1;
+ ULONG filler1 : 1;
+ ULONG Valid : 1;
+ ULONG ReadOnly : 1;
+ ULONG ProtoAddressLow : 8;
+ ULONG ProtoAddressHigh : 20;
+} MMPTE_PROTOTYPE;
+
+typedef struct _MMPTE_SUBSECTION {
+ ULONG Prototype : 1;
+ ULONG SubsectionAddressLow : 1;
+ ULONG Valid : 1;
+ ULONG Protection : 5;
+ ULONG SubsectionAddressHigh : 24;
+} MMPTE_SUBSECTION;
+
+typedef struct _MMPTE_LIST {
+ ULONG filler2 : 2;
+ ULONG Valid : 1;
+ ULONG OneEntry : 1;
+ ULONG filler8 : 8;
+ ULONG NextEntry : 20;
+} MMPTE_LIST;
+
+
+//
+// A Page Table Entry has the following definition.
+//
+
+typedef struct _MMPTE {
+ union {
+ ULONG Long;
+ HARDWARE_PTE Hard;
+ HARDWARE_PTE Flush;
+ MMPTE_TRANSITION Trans;
+ MMPTE_SOFTWARE Soft;
+ MMPTE_PROTOTYPE Proto;
+ MMPTE_SUBSECTION Subsect;
+ MMPTE_LIST List;
+ } u;
+} MMPTE;
+
+typedef MMPTE *PMMPTE;
+
diff --git a/private/ntos/mm/ppc/setdirty.c b/private/ntos/mm/ppc/setdirty.c
new file mode 100644
index 000000000..9217bb39a
--- /dev/null
+++ b/private/ntos/mm/ppc/setdirty.c
@@ -0,0 +1,124 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1993 IBM Corporation
+
+Module Name:
+
+ setdirty.c
+
+Abstract:
+
+ This module contains the setting dirty bit routine for memory management.
+
+ PowerPC specific.
+
+Author:
+
+ Lou Perazzoli (loup) 10-Apr-1990.
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 6-Oct-1993
+
+Revision History:
+
+--*/
+
+#include "mi.h"
+
+VOID
+MiSetDirtyBit (
+ IN PVOID FaultingAddress,
+ IN PMMPTE PointerPte,
+ IN ULONG PfnHeld
+ )
+
+/*++
+
+Routine Description:
+
+ This routine sets dirty in the specified PTE and the modify bit in the
+ corresponding PFN element. If any page file space is allocated, it
+ is deallocated.
+
+Arguments:
+
+ FaultingAddress - Supplies the faulting address.
+
+ PointerPte - Supplies a pointer to the corresponding valid PTE.
+
+ PfnHeld - Supplies TRUE if the PFN mutex is already held.
+
+Return Value:
+
+ None.
+
+Environment:
+
+ Kernel mode, APC's disabled, Working set mutex held.
+
+--*/
+
+{
+ MMPTE TempPte;
+ ULONG PageFrameIndex;
+ PMMPFN Pfn1;
+ KIRQL OldIrql;
+
+ //
+ // The page is NOT copy on write, update the PTE setting both the
+ // dirty bit and the accessed bit. Note, that as this PTE is in
+ // the TB, the TB must be flushed.
+ //
+
+ PageFrameIndex = PointerPte->u.Hard.PageFrameNumber;
+ Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
+
+ TempPte = *PointerPte;
+ TempPte.u.Hard.Dirty = MM_PTE_DIRTY;
+ MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
+ *PointerPte = TempPte;
+
+ //
+ // Check state of PFN mutex and if not held, don't update PFN database.
+ //
+
+
+ if (PfnHeld) {
+
+ //
+ // Set the modified field in the PFN database; also, if the physical
+ // page is currently in a paging file, free up the page file space
+ // as the contents are now worthless.
+ //
+
+ if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
+ (Pfn1->u3.e1.WriteInProgress == 0)) {
+
+ //
+ // This page is in page file format, deallocate the page file space.
+ //
+
+ MiReleasePageFileSpace (Pfn1->OriginalPte);
+
+ //
+ // Change original PTE to indicate no page file space is reserved,
+ // otherwise the space will be deallocated when the PTE is
+ // deleted.
+ //
+
+ Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
+ }
+
+ Pfn1->u3.e1.Modified = 1;
+ }
+
+ //
+ // The TB entry must be flushed as the valid PTE with the dirty bit clear
+ // has been fetched into the TB. If it isn't flushed, another fault
+ // is generated as the dirty bit is not set in the cached TB entry.
+ //
+
+ KeFillEntryTb ((PHARDWARE_PTE)PointerPte, FaultingAddress, TRUE);
+
+ return;
+}
diff --git a/private/ntos/mm/ppc/sources b/private/ntos/mm/ppc/sources
new file mode 100644
index 000000000..5cfcd8eb6
--- /dev/null
+++ b/private/ntos/mm/ppc/sources
@@ -0,0 +1,5 @@
+PPC_SOURCES=..\ppc\initppc.c \
+ ..\ppc\datappc.c \
+ ..\ppc\debugsup.c \
+ ..\ppc\hypermap.c \
+ ..\ppc\setdirty.c