path: root/private/ntos/ex/ppc
author     Adam <you@example.com>  2020-05-17 05:51:50 +0200
committer  Adam <you@example.com>  2020-05-17 05:51:50 +0200
commit     e611b132f9b8abe35b362e5870b74bce94a1e58e (patch)
tree       a5781d2ec0e085eeca33cf350cf878f2efea6fe5 /private/ntos/ex/ppc
Diffstat (limited to '')
-rw-r--r--  private/ntos/ex/ppc/evpair.s     124
-rw-r--r--  private/ntos/ex/ppc/fmutex.s     529
-rw-r--r--  private/ntos/ex/ppc/gettick.s     70
-rw-r--r--  private/ntos/ex/ppc/hifreqlk.s   190
-rw-r--r--  private/ntos/ex/ppc/intrlock.s  1219
-rw-r--r--  private/ntos/ex/ppc/probe.c      120
-rw-r--r--  private/ntos/ex/ppc/raisests.c   268
-rw-r--r--  private/ntos/ex/ppc/sources        8
-rw-r--r--  private/ntos/ex/ppc/splocks.c     86
9 files changed, 2614 insertions, 0 deletions
diff --git a/private/ntos/ex/ppc/evpair.s b/private/ntos/ex/ppc/evpair.s
new file mode 100644
index 000000000..31b06ebe2
--- /dev/null
+++ b/private/ntos/ex/ppc/evpair.s
@@ -0,0 +1,124 @@
+// TITLE("Fast Event Pair Support")
+//++
+//
+// Copyright (c) 1993 IBM Corporation
+//
+// Module Name:
+//
+// evpair.s
+//
+// Abstract:
+//
+// This module contains the implementation for the fast event pair
+// system services that are used for client/server synchronization.
+//
+// Author:
+//
+// Chuck Bauman 3-Sep-1993
+//
+// Environment:
+//
+// Kernel mode.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+// SBTTL("Set Low Wait High Thread")
+//++
+//
+// NTSTATUS
+// NtSetLowWaitHighThread (
+// )
+//
+// Routine Description:
+//
+// This function uses the prereferenced client/server event pair pointer
+// and sets the low event of the event pair and waits on the high event
+// of the event pair object.
+//
+// N.B. This service assumes that it has been called from user mode.
+//
+// N.B. This routine is highly optimized since this is a very heavily
+// used service.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// STATUS_NO_EVENT_PAIR is returned if no event pair is associated with
+// the current thread. Otherwise, the status of the wait operation is
+// returned as the function value.
+//
+//
+//--
+.extern ..KiSetServerWaitClientEvent
+
+ LEAF_ENTRY(NtSetLowWaitHighThread)
+
+ lwz r.3,KiPcr+PcCurrentThread(r.0)
+ lwz r.3,EtEventPair(r.3) // get address of event pair object
+ cmpwi r.3,0
+ addi r.4,r.3,EpEventHigh // compute address of high event
+ beq noevent_1 // if eq, no event pair associated
+ addi r.3,r.3,EpEventLow // compute address of low event
+ li r.5, 1 // set user mode value
+ b ..KiSetServerWaitClientEvent // finish in wait code
+
+noevent_1:
+ LWI(r.3,STATUS_NO_EVENT_PAIR)
+ LEAF_EXIT(NtSetLowWaitHighThread)
+
+// .end NtSetLowWaitHighThread
+
+// SBTTL("Set High Wait Low Thread")
+//++
+//
+// NTSTATUS
+// NtSetHighWaitLowThread (
+// )
+//
+// Routine Description:
+//
+// This function uses the prereferenced client/server event pair pointer
+// and sets the High event of the event pair and waits on the low event
+// of the event pair object.
+//
+// N.B. This service assumes that it has been called from user mode.
+//
+// N.B. This routine is highly optimized since this is a very heavily
+// used service.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// STATUS_NO_EVENT_PAIR is returned if no event pair is associated with
+// the current thread. Otherwise, the status of the wait operation is
+// returned as the function value.
+//
+//
+//--
+
+ LEAF_ENTRY(NtSetHighWaitLowThread)
+
+ lwz r.3,KiPcr+PcCurrentThread(r.0)
+ lwz r.3,EtEventPair(r.3) // get address of event pair object
+ cmpwi r.3,0
+ addi r.4,r.3,EpEventLow // compute address of low event
+ beq noevent_2 // if eq, no event pair associated
+ addi r.3,r.3,EpEventHigh // compute address of high event
+ li r.5,1 // set user mode value
+ b ..KiSetServerWaitClientEvent // finish in wait code
+
+noevent_2:
+ LWI(r.3,STATUS_NO_EVENT_PAIR)
+ LEAF_EXIT(NtSetHighWaitLowThread)
+
+// .end NtSetHighWaitLowThread
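
The two services above are mirror images of each other: each fetches the event
pair attached to the current thread, fails with STATUS_NO_EVENT_PAIR if none is
attached, and otherwise tail-calls KiSetServerWaitClientEvent with the event to
set, the event to wait on, and the user-mode flag. A rough C rendering of that
control flow is sketched below; the structure fields and the helper prototype
are inferred from the register usage in the assembly (EtEventPair, EpEventLow,
EpEventHigh), not taken from a public header.

    // Sketch only: the ETHREAD/EVENT_PAIR layout and the prototype of
    // KiSetServerWaitClientEvent are assumptions inferred from the assembly.
    NTSTATUS
    NtSetLowWaitHighThread (
        VOID
        )
    {
        PETHREAD Thread = (PETHREAD)KeGetCurrentThread();    // PcCurrentThread
        PEVENT_PAIR EventPair = Thread->EventPair;           // EtEventPair

        if (EventPair == NULL) {
            return STATUS_NO_EVENT_PAIR;                     // noevent_1 path
        }

        //
        // Set the low event and wait on the high event; the third argument
        // is the previous mode (1 = user), as loaded into r.5 above.
        // NtSetHighWaitLowThread is identical with the two events swapped.
        //
        return KiSetServerWaitClientEvent(&EventPair->EventLow,     // r.3
                                          &EventPair->EventHigh,    // r.4
                                          1);                       // r.5
    }
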
diff --git a/private/ntos/ex/ppc/fmutex.s b/private/ntos/ex/ppc/fmutex.s
new file mode 100644
index 000000000..c5ac57ff3
--- /dev/null
+++ b/private/ntos/ex/ppc/fmutex.s
@@ -0,0 +1,529 @@
+// TITLE("Fast Mutex Support")
+//++
+//
+// Copyright (c) 1994 Microsoft Corporation
+// Copyright (c) 1994 IBM Corporation
+//
+// Module Name:
+//
+// fmutex.s
+//
+// Abstract:
+//
+// This module implements the code necessary to acquire and release fast
+// mutexes.
+//
+//
+// Author:
+//
+// David N. Cutler (davec) 13-Apr-1994
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+// Peter L Johnston (plj@vnet.ibm.com) 12-Jun-1994
+//
+// Ported to PowerPC architecture.
+//
+//--
+
+ .extern ..DbgBreakPoint
+ .extern ..KeWaitForSingleObject
+ .extern ..KeSetEventBoostPriority
+ .extern ..KiDispatchSoftwareInterrupt
+
+#include "ksppc.h"
+
+ SBTTL("Acquire Fast Mutex")
+//++
+//
+// VOID
+// ExAcquireFastMutex (
+// IN PFAST_MUTEX FastMutex
+// )
+//
+// Routine Description:
+//
+// This function acquires ownership of a fast mutex and raises IRQL to
+// APC level. This routine is coded as a leaf routine on the assumption
+// that it will acquire the mutex without waiting. If we have to wait
+// for the mutex, we convert to a nested routine.
+//
+// Arguments:
+//
+// FastMutex (r.3) - Supplies a pointer to a fast mutex.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+FmAddr: .space 4 // saved fast mutex address
+FmIrql: .space 4 // old IRQL value
+Fm31: .space 4 // room for LR save
+FmThr: .space 4 // save current thread
+ .align 3 // ensure 8 byte alignment
+FastMutexFrameLength: // frame length
+
+ LEAF_ENTRY(ExAcquireFastMutex)
+
+//
+// Raise IRQL to APC_LEVEL.
+//
+
+ li r.4, APC_LEVEL // set new IRQL level
+ lbz r.5, KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+//
+// Decrement ownership count.
+//
+
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get current thread address
+ addi r.7, r.3, FmCount // compute address of ownership count
+
+afmDec:
+ lwarx r.8, 0, r.7 // get ownership count
+ subi r.9, r.8, 1 // decrement ownership count
+ stwcx. r.9, 0, r.7 // conditionally store ownership count
+ bne- afmDec // if store conditional failed
+ cmpwi r.8, 0 // check if already owned
+ ble ..ExxAcquireFastMutex // jif already owned
+ stb r.5, FmOldIrql(r.3) // save old IRQL in fast mutex
+ stw r.6, FmOwner(r.3) // set owner thread address
+
+ LEAF_EXIT(ExAcquireFastMutex)
+
+
+//
+// Fast mutex is currently owned by another thread. Increment the contention
+// count and wait for ownership.
+//
+
+ SPECIAL_ENTRY(ExxAcquireFastMutex)
+
+ lwz r.8, FmContention(r.3) // get contention count
+ li r.4, Executive // set reason for wait
+ stw r.31, Fm31-FastMutexFrameLength(r.sp)
+ li r.7, 0 // set NULL timeout pointer
+ stwu r.sp, -FastMutexFrameLength(r.sp)
+ mflr r.31 // save link register
+
+ PROLOGUE_END(ExxAcquireFastMutex)
+
+ stb r.5, FmIrql(r.sp) // save old IRQL
+ stw r.3, FmAddr(r.sp) // save address of fast mutex
+ li r.5, KernelMode // set mode of wait
+ addi r.8, r.8, 1 // increment contention count
+ stw r.6, FmThr(r.sp) // save current thread address
+ stw r.8, FmContention(r.3) //
+
+#if DBG
+
+ lwz r.8, FmOwner(r.3) // get owner thread address
+ cmpw r.8, r.6 // check thread isn't owner
+ bne afmWait
+ bl ..DbgBreakPoint // break
+afmWait:
+
+#endif
+
+ li r.6, FALSE // set nonalertable wait
+ addi r.3, r.3, FmEvent // compute address of event
+ bl ..KeWaitForSingleObject // wait for ownership
+ lwz r.3, FmAddr(r.sp) // get address of fast mutex
+ lbz r.5, FmIrql(r.sp) // get old IRQL value
+ lwz r.6, FmThr(r.sp) // get current thread address
+ mtlr r.31 // set return address
+ lwz r.31, Fm31(r.sp) // restore r.31
+ stb r.5, FmOldIrql(r.3) // save old IRQL in fast mutex
+ stw r.6, FmOwner(r.3) // set owner thread address
+
+ addi r.sp, r.sp, FastMutexFrameLength
+
+ SPECIAL_EXIT(ExxAcquireFastMutex)
+
+ SBTTL("Release Fast Mutex")
+//++
+//
+// VOID
+// ExReleaseFastMutex (
+// IN PFAST_MUTEX FastMutex
+// )
+//
+// Routine Description:
+//
+// This function releases ownership to a fast mutex and lowers IRQL to
+// its previous level.
+//
+// Arguments:
+//
+// FastMutex (r.3) - Supplies a pointer to a fast mutex.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(ExReleaseFastMutex)
+
+//
+// Increment ownership count and release waiter if contention.
+//
+
+ lbz r.4, FmOldIrql(r.3) // get old IRQL value
+ li r.0, 0
+ stw r.0, FmOwner(r.3) // clear owner thread address
+ addi r.5, r.3, FmCount // compute address of ownership count
+afmInc:
+ lwarx r.8, 0, r.5 // get ownership count
+ addi r.9, r.8, 1 // increment ownership count
+ stwcx. r.9, 0, r.5 // conditionally store ownership count
+ bne- afmInc // if store conditional failed
+ cmpwi r.8, 0 // check if another waiter
+ bne ..ExxReleaseFastMutex // jif there is a waiter
+
+//
+// Lower IRQL to its previous level.
+//
+
+ DISABLE_INTERRUPTS(r.8,r.9)
+
+ lhz r.7, KiPcr+PcSoftwareInterrupt(r.0)
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+ ENABLE_INTERRUPTS(r.8)
+
+//
+// Check to see if a software interrupt could be run at this level.
+// We know we are at most at APC_LEVEL.
+//
+// IF IRQL <= APC LEVEL and IRQL < PcSoftwareInterrupt then a
+// software interrupt could run at this time.
+//
+
+ cmpw r.4,r.7
+ bgelr+ // jif no runnable interrupt
+ b ..KiDispatchSoftwareInterrupt
+
+ DUMMY_EXIT(ExReleaseFastMutex)
+
+
+//
+// There is contention for the fast mutex. Wake up a waiting thread and
+// boost its priority to the priority of the current thread.
+//
+
+ SPECIAL_ENTRY(ExxReleaseFastMutex)
+
+ stwu r.sp, -FastMutexFrameLength(r.sp)
+ stw r.31, Fm31(r.sp) // save r.31
+ mflr r.31 // save link register (in r.31)
+
+ PROLOGUE_END(ExxReleaseFastMutex)
+
+ stw r.3, FmAddr(r.sp) // save address of fast mutex
+ stb r.4, FmIrql(r.sp) // save old IRQL value
+ addi r.4, r.3, FmOwner // compute address to store owner
+ addi r.3, r.3, FmEvent // compute address of event
+ bl ..KeSetEventBoostPriority// set event and boost priority
+ lbz r.4, FmIrql(r.sp) // restore old IRQL value
+
+//
+// Lower IRQL to its previous value.
+//
+
+ DISABLE_INTERRUPTS(r.8, r.9)
+
+ cmpwi cr.1, r.4, APC_LEVEL
+ lhz r.7, KiPcr+PcSoftwareInterrupt(r.0)
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+ ENABLE_INTERRUPTS(r.8)
+
+ cmpw r.4, r.7
+ bge+ rfmexit // jif no runnable interrupt
+
+ bl ..KiDispatchSoftwareInterrupt
+
+rfmexit:
+ mtlr r.31 // set return address
+ lwz r.31, Fm31(r.sp) // restore r.31
+ addi r.sp, r.sp, FastMutexFrameLength
+
+ SPECIAL_EXIT(ExxReleaseFastMutex)
+
+ SBTTL("Try To Acquire Fast Mutex")
+//++
+//
+// BOOLEAN
+// ExTryToAcquireFastMutex (
+// IN PFAST_MUTEX FastMutex
+// )
+//
+// Routine Description:
+//
+// This function attempts to acquire ownership of a fast mutex, and if
+// successful, raises IRQL to APC level.
+//
+// Arguments:
+//
+// FastMutex (r.3) - Supplies a pointer to a fast mutex.
+//
+// Return Value:
+//
+// If the fast mutex was successfully acquired, then a value of TRUE
+// is returned as the function value. Otherwise, a value of FALSE is
+// returned.
+//
+//--
+
+ LEAF_ENTRY(ExTryToAcquireFastMutex)
+
+ DISABLE_INTERRUPTS(r.11,r.5)
+
+ li r.4, APC_LEVEL // set new IRQL level
+ lbz r.7, KiPcr+PcCurrentIrql(r.0) // get current IRQL
+
+ addi r.5, r.3, FmCount // compute address of ownership count
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get current thread address
+
+//
+// Decrement ownership count if and only if the fast mutex is not currently
+// owned.
+//
+
+afmTry:
+ lwarx r.8, 0, r.5 // get ownership count
+ subic. r.9, r.8, 1 // decrement ownership count
+ blt afmFailed // if result ltz, mutex already owned
+ stwcx. r.9, 0, r.5 // conditionally store ownership count
+ bne- afmTry // if store conditional failed
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+ ENABLE_INTERRUPTS(r.11) // re-enable interrupts
+
+ stb r.7, FmOldIrql(r.3) // store old IRQL
+ stw r.6, FmOwner(r.3) // set owner thread address
+ li r.3, TRUE // return success
+ blr
+
+//
+// Fast mutex is currently owned by another thread. Enable interrupts and
+// return FALSE.
+//
+
+afmFailed:
+
+ ENABLE_INTERRUPTS(r.11)
+
+ li r.3, FALSE // return failure
+
+ LEAF_EXIT(ExTryToAcquireFastMutex)
+
+ SBTTL("Acquire Fast Mutex Unsafe")
+//++
+//
+// VOID
+// ExAcquireFastMutexUnsafe (
+// IN PFAST_MUTEX FastMutex
+// )
+//
+// Routine Description:
+//
+// This function acquires ownership of a fast mutex, but does not raise
+// IRQL to APC level.
+//
+// Arguments:
+//
+// FastMutex (r.3) - Supplies a pointer to a fast mutex.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(ExAcquireFastMutexUnsafe)
+
+//
+// Decrement ownership count.
+//
+
+ addi r.7, r.3, FmCount // compute address of ownership count
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get current thread address
+
+afmDecUnsafe:
+ lwarx r.8, 0, r.7 // get ownership count
+ subi r.9, r.8, 1 // decrement ownership count
+ stwcx. r.9, 0, r.7 // conditionally store ownership count
+ bne- afmDecUnsafe // if store conditional failed
+ cmpwi r.8, 0 // check if already owned
+ ble ..ExxAcquireFastMutexUnsafe// jif mutex already owned
+ stw r.6, FmOwner(r.3) // set owner thread address
+
+ LEAF_EXIT(ExAcquireFastMutexUnsafe)
+
+//
+// Fast mutex is currently owned by another thread. Increment the contention
+// count and wait for ownership.
+//
+
+ SPECIAL_ENTRY(ExxAcquireFastMutexUnsafe)
+
+ lwz r.8, FmContention(r.3) // get contention count
+ li r.7, 0 // set NULL timeout pointer
+ li r.4, Executive // set reason for wait
+ stwu r.sp, -FastMutexFrameLength(r.sp)
+ li r.5, KernelMode // set mode of wait
+ stw r.31, Fm31(r.sp) // save r.31
+ mflr r.31 // save link register (in r.31)
+
+ PROLOGUE_END(ExxAcquireFastMutexUnsafe)
+
+ stw r.6, FmThr(r.sp) // save thread address
+ addi r.8, r.8, 1 // increment contention count
+ stw r.3, FmAddr(r.sp) // save address of fast mutex
+ stw r.8, FmContention(r.3) //
+
+#if DBG
+
+ lwz r.8, FmOwner(r.3) // get owner thread address
+ cmpw r.8, r.6 // check thread isn't owner
+ bne afmWaitUnsafe
+ bl ..DbgBreakPoint // break
+afmWaitUnsafe:
+
+#endif
+
+ li r.6, FALSE // set nonalertable wait
+ addi r.3, r.3, FmEvent // compute address of event
+ bl ..KeWaitForSingleObject// wait for ownership
+ lwz r.3, FmAddr(r.sp) // get address of fast mutex
+ lwz r.6, FmThr(r.sp) // get current thread address
+ mtlr r.31 // set return address
+ lwz r.31, Fm31(r.sp) // restore r.31
+ stw r.6, FmOwner(r.3) // set owner thread address
+
+ addi r.sp, r.sp, FastMutexFrameLength // deallocate stack frame
+
+ SPECIAL_EXIT(ExxAcquireFastMutexUnsafe)
+
+ SBTTL("Release Fast Mutex Unsafe")
+//++
+//
+// VOID
+// ExReleaseFastMutexUnsafe (
+// IN PFAST_MUTEX FastMutex
+// )
+//
+// Routine Description:
+//
+// This function releases ownership of a fast mutex, and does not
+// restore IRQL to its previous value.
+//
+// Arguments:
+//
+// FastMutex (r.3) - Supplies a pointer to a fast mutex.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(ExReleaseFastMutexUnsafe)
+
+//
+// Increment ownership count and release waiter if contention.
+//
+
+ li r.0, 0
+ stw r.0, FmOwner(r.3) // clear owner thread address
+ addi r.5, r.3, FmCount // compute address of ownership count
+afmIncUnsafe:
+ lwarx r.8, 0, r.5 // get ownership count
+ addi r.9, r.8, 1 // increment ownership count
+ stwcx. r.9, 0, r.5 // conditionally store ownership count
+ bne- afmIncUnsafe // if store conditional failed
+ cmpwi r.8, 0 // check if another waiter
+ beqlr+ // return if no waiter
+
+//
+// There is contention for the fast mutex. Wake up a waiting thread and
+// boost its priority to the priority of the current thread.
+//
+// N.B. KeSetEventBoostPriority returns directly to our caller.
+//
+
+ addi r.4, r.3, FmOwner // compute address to store owner
+ addi r.3, r.3, FmEvent // compute address of event
+ b ..KeSetEventBoostPriority// set event and boost priority
+
+ LEAF_EXIT(ExReleaseFastMutexUnsafe, FastMutexFrameLength, 0, 0)
+
+ SBTTL("Try To Acquire Fast Mutex Unsafe")
+//++
+//
+// BOOLEAN
+// ExTryToAcquireFastMutexUnsafe (
+// IN PFAST_MUTEX FastMutex
+// )
+//
+// Routine Description:
+//
+// This function attempts to acquire ownership of a fast mutex, and if
+// successful, does not raise IRQL to APC level.
+//
+// Arguments:
+//
+// FastMutex (r.3) - Supplies a pointer to a fast mutex.
+//
+// Return Value:
+//
+// If the fast mutex was successfully acquired, then a value of TRUE
+// is returned as the function value. Otherwise, a value of FALSE is
+// returned.
+//
+//--
+
+#if 0
+
+ LEAF_ENTRY(ExTryToAcquireFastMutexUnsafe)
+
+//
+// Decrement ownership count if and only if the fast mutex is not currently
+// owned.
+//
+
+ addi r.5, r.3, FmCount // compute address of ownership count
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get current thread address
+
+afmTryUnsafe:
+ lwarx r.8, 0, r.5 // get ownership count
+ subic. r.9, r.8, 1 // decrement ownership count
+ blt afmTryUnsafeFailed // if result ltz, mutex already owned
+ stwcx. r.9, 0, r.5 // conditionally store ownership count
+ bne- afmTryUnsafe // if store conditional failed
+ stw r.6, FmOwner(r.3) // set owner thread address
+ li r.3, TRUE // set return value
+ blr // return
+
+//
+// Fast mutex is currently owned by another thread.
+//
+
+afmTryUnsafeFailed:
+
+ li r.3, FALSE // set return value
+
+ LEAF_EXIT(ExTryToAcquireFastMutexUnsafe)
+
+#endif
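
The ownership protocol used throughout fmutex.s is compact: FmCount is 1 when
the mutex is free; acquisition atomically decrements it with a lwarx/stwcx.
loop and waits on the embedded event only if the pre-decrement value was not 1,
while release atomically increments it and wakes a waiter if the pre-increment
value was negative. The same logic expressed with the portable interlocked
primitives defined later in this change (intrlock.s) is sketched below; the
FAST_MUTEX field names follow the FmXxx offsets in the assembly, and this is
an illustration of the protocol rather than the shipped C implementation.

    //
    // Sketch: Count == 1 means free, 0 means owned with no waiters,
    // negative means owned with waiters.
    //
    VOID
    SketchAcquireFastMutex (
        IN PFAST_MUTEX FastMutex
        )
    {
        KIRQL OldIrql;

        KeRaiseIrql(APC_LEVEL, &OldIrql);
        if (InterlockedDecrement(&FastMutex->Count) < 0) {
            FastMutex->Contention += 1;
            KeWaitForSingleObject(&FastMutex->Event,
                                  Executive,
                                  KernelMode,
                                  FALSE,
                                  NULL);
        }

        FastMutex->Owner = KeGetCurrentThread();
        FastMutex->OldIrql = OldIrql;
    }

    VOID
    SketchReleaseFastMutex (
        IN PFAST_MUTEX FastMutex
        )
    {
        KIRQL OldIrql = FastMutex->OldIrql;  // read before a waiter can reuse it

        FastMutex->Owner = NULL;
        if (InterlockedIncrement(&FastMutex->Count) <= 0) {
            //
            // At least one thread is waiting: set the event and boost the
            // waiter; the second argument lets the wait code record the new
            // owner (the assembly passes the address of FmOwner).
            //
            KeSetEventBoostPriority(&FastMutex->Event,
                                    (PRKTHREAD *)&FastMutex->Owner);
        }

        KeLowerIrql(OldIrql);
    }
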
diff --git a/private/ntos/ex/ppc/gettick.s b/private/ntos/ex/ppc/gettick.s
new file mode 100644
index 000000000..20bbbace1
--- /dev/null
+++ b/private/ntos/ex/ppc/gettick.s
@@ -0,0 +1,70 @@
+// TITLE("Get Tick Count")
+//++
+//
+// Copyright (c) 1993 IBM Corporation
+//
+// Module Name:
+//
+// gettick.s
+//
+// Abstract:
+//
+// This module contains the implementation for the get tick count
+// system service that returns the number of milliseconds since the
+// system was booted.
+//
+// Author:
+//
+// Chuck Bauman 3-Sep-1993
+//
+// Environment:
+//
+// Kernel mode.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+// SBTTL("Get Tick Count")
+//++
+//
+// ULONG
+// NtGetTickCount (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function computes the number of milliseconds since the system
+// was booted. The computation is performed by multiplying the clock
+// interrupt count by a scaled fixed binary multiplier and then right
+// shifting the 64-bit result to extract the 32-bit millisecond count.
+//
+// N.B. The tick count value wraps about every 49.7 days (2^32 milliseconds).
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The number of milliseconds since the system was booted is returned
+// as the function value.
+//
+//--
+ .extern KeTickCount
+ .extern ExpTickCountMultiplier
+
+ LEAF_ENTRY(NtGetTickCount)
+
+ lwz r.8,KiPcr2 + Pc2TickCountLow(r.0)// get current tick count value
+ lwz r.9,KiPcr2 + Pc2TickCountMultiplier(r.0)// get tick count multiplier
+ mulhwu r.3,r.8,r.9 // compute 64-bit unsigned product
+ mullw r.4,r.8,r.9
+ rlwinm r.3,r.3,8,0xFFFFFF00 // extract 32-bit integer part
+ rlwimi r.3,r.4,8,0x000000FF // " " " "
+
+ LEAF_EXIT(NtGetTickCount) // return
+
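
NtGetTickCount reads the tick count and an 8.24 fixed-point multiplier from the
PCR, forms the 64-bit product with mulhwu/mullw, and the rlwinm/rlwimi pair
then extracts bits 24..55 of that product, i.e. the product shifted right by
24. A small standalone C illustration of the same arithmetic follows; treating
the multiplier as milliseconds-per-tick scaled by 2^24 is an assumption stated
here, matching the "scaled fixed binary multiplier" described in the comment
above.

    /* Standalone illustration of the fixed-point arithmetic in NtGetTickCount. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t SketchGetTickCount(uint32_t TickCount, uint32_t Multiplier)
    {
        /* 32x32 -> 64-bit unsigned multiply (mulhwu/mullw), then take bits
         * 24..55 of the product (the rlwinm/rlwimi pair). */
        uint64_t Product = (uint64_t)TickCount * Multiplier;
        return (uint32_t)(Product >> 24);
    }

    int main(void)
    {
        uint32_t Multiplier = 10u << 24;    /* a 10 ms clock tick, as 8.24 */
        printf("%u ticks -> %u ms\n",
               123456u, SketchGetTickCount(123456u, Multiplier));
        return 0;                           /* prints 1234560 ms */
    }
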
diff --git a/private/ntos/ex/ppc/hifreqlk.s b/private/ntos/ex/ppc/hifreqlk.s
new file mode 100644
index 000000000..c7ceda83a
--- /dev/null
+++ b/private/ntos/ex/ppc/hifreqlk.s
@@ -0,0 +1,190 @@
+// TITLE("High Frequency Spin Locks")
+//++
+//
+// Copyright (c) 1993 IBM Corporation
+//
+// Module Name:
+//
+// hifreqlk.s
+//
+// Abstract:
+//
+// This module contains storage for high frequency spin locks. Each
+// is allocated to a separate cache line.
+//
+// Author:
+//
+// Chuck Bauman 3-Sep-1993
+//
+// Environment:
+//
+// Kernel mode.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+#if defined(NT_UP)
+
+#define ALIGN
+
+#else
+
+#define ALIGN .align 6
+
+#endif
+
+ .data
+ .globl CcMasterSpinLock
+ ALIGN
+CcMasterSpinLock: // cache manager master lock
+ .word 0 //
+
+ .globl CcVacbSpinLock
+ ALIGN
+CcVacbSpinLock: // cache manager VACB lock
+ .word 0
+
+ .globl ExpResourceSpinLock
+ ALIGN
+ExpResourceSpinLock: // resource package lock
+ .word 0
+
+ .globl IopCancelSpinLock
+ ALIGN
+IopCancelSpinLock: // I/O cancel lock
+ .word 0
+
+ .globl IopCompletionLock
+ ALIGN
+IopCompletionLock: // I/O completion lock
+ .word 0 //
+
+ .globl IopDatabaseLock
+ ALIGN
+IopDatabaseLock: // I/O database lock
+ .word 0 //
+
+ .globl IopFastLockSpinLock
+ ALIGN
+IopFastLockSpinLock: // fast I/O spin lock
+ .word 0 //
+
+ .globl IopVpbSpinLock
+ ALIGN
+IopVpbSpinLock: // I/O VPB lock
+ .word 0 //
+
+ .globl IoStatisticsLock
+ ALIGN
+IoStatisticsLock: // I/O statistics lock
+ .word 0 //
+
+ .globl KiContextSwapLock
+ ALIGN
+KiContextSwapLock: // context swap lock
+ .word 0 //
+
+ .globl KiDispatcherLock
+ ALIGN
+KiDispatcherLock: // dispatcher database lock
+ .word 0 //
+
+ .globl MmChargeCommitmentLock
+ ALIGN
+MmChargeCommitmentLock: // charge commitment lock
+ .word 0
+
+ .globl MmPfnLock
+ ALIGN
+MmPfnLock: // page frame database lock
+ .word 0
+
+ .globl NonPagedPoolLock
+ ALIGN
+NonPagedPoolLock: // nonpaged pool allocation lock
+ .word 0
+
+//
+// KeTickCount - This is the number of clock ticks that have occurred since
+// the system was booted. This count is used to compute a millisecond
+// tick counter.
+//
+
+ .align 6
+ .globl KeTickCount
+KeTickCount: //
+ .word 0, 0, 0
+
+//
+// KeMaximumIncrement - This is the maximum time between clock interrupts
+// in 100ns units that is supported by the host HAL.
+//
+
+ .globl KeMaximumIncrement
+KeMaximumIncrement: //
+ .word 0
+
+//
+// KeTimeAdjustment - This is the actual number of 100ns units that are to
+// be added to the system time at each interval timer interrupt. This
+// value is copied from KeTimeIncrement at system startup and can be
+// later modified via the set system information service.
+//
+
+ .globl KeTimeAdjustment
+KeTimeAdjustment: //
+ .word 0
+
+//
+// KiTickOffset - This is the number of 100ns units remaining before a tick
+// is added to the tick count and the system time is updated.
+//
+
+ .globl KiTickOffset
+KiTickOffset: //
+ .word 0
+
+//
+// KiMaximumDpcQueueDepth - This is used to control how many DPCs can be
+// queued before a DPC of medium importance will trigger a dispatch
+// interrupt.
+//
+
+ .globl KiMaximumDpcQueueDepth
+KiMaximumDpcQueueDepth: //
+ .word 4
+
+//
+// KiMinimumDpcRate - This is the rate of DPC requests per clock tick that
+// must be exceeded before DPC batching of medium importance DPCs
+// will occur.
+//
+
+ .globl KiMinimumDpcRate
+KiMinimumDpcRate: //
+ .word 3
+
+//
+// KiAdjustDpcThreshold - This is the threshold used by the clock interrupt
+// routine to control the rate at which the processor's DPC queue depth
+// is dynamically adjusted.
+//
+
+ .globl KiAdjustDpcThreshold
+KiAdjustDpcThreshold: //
+ .word 20
+
+//
+// KiIdealDpcRate - This is used to control the aggressiveness of the DPC
+// rate adjusting algorithm when decrementing the queue depth. As long
+// as the DPC rate for the last tick is greater than this rate, the
+// DPC queue depth will not be decremented.
+//
+
+ .globl KiIdealDpcRate
+KiIdealDpcRate: //
+ .word 20
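
The .align 6 directives above put each heavily used lock on its own 64-byte
boundary for multiprocessor builds, so that contention on one lock never causes
false sharing with a neighbouring lock. The same idea expressed in C is
sketched below; the __declspec(align(64)) spelling is shown only as one
possible compiler-specific form, not as what the NT build actually used.

    /* Sketch: one hot spin lock per 64-byte cache line on MP builds. */
    #if defined(NT_UP)
    #define LOCK_ALIGN                        /* UP build: no padding needed */
    #else
    #define LOCK_ALIGN __declspec(align(64))  /* MP build: own cache line */
    #endif

    LOCK_ALIGN ULONG CcMasterSpinLock;        /* cache manager master lock */
    LOCK_ALIGN ULONG CcVacbSpinLock;          /* cache manager VACB lock */
    LOCK_ALIGN ULONG KiDispatcherLock;        /* dispatcher database lock */
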
diff --git a/private/ntos/ex/ppc/intrlock.s b/private/ntos/ex/ppc/intrlock.s
new file mode 100644
index 000000000..8d84cd1ba
--- /dev/null
+++ b/private/ntos/ex/ppc/intrlock.s
@@ -0,0 +1,1219 @@
+// TITLE("Interlocked Support")
+//++
+//
+// Copyright (c) 1993 IBM Corporation
+//
+// Module Name:
+//
+// intrlock.s
+//
+// Abstract:
+//
+// This module implements functions to support interlocked operations.
+// Interlocked operations can only operate on nonpaged data and the
+// specified spinlock cannot be used for any other purpose.
+//
+// Author:
+//
+// Chuck Bauman 3-Sep-1993
+//
+// Environment:
+//
+// Kernel mode.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+#if COLLECT_PAGING_DATA
+ .extern KeNumberProcessors
+ .extern KiProcessorBlock
+ .extern ..RtlCopyMemory
+#endif
+
+
+// SBTTL("Interlocked Add Large Integer")
+//++
+//
+// LARGE_INTEGER
+// ExInterlockedAddLargeInteger (
+// IN PLARGE_INTEGER Addend,
+// IN LARGE_INTEGER Increment,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked add of an increment value to an
+// addend variable of type large integer. The initial value of the addend
+// variable is returned as the function value.
+//
+// Arguments:
+//
+// Addend (r4) - Supplies a pointer to a variable whose value is to be
+// adjusted by the increment value.
+//
+// Increment (r5, r6) - Supplies the increment value to be added to the
+// addend variable.
+//
+// Lock (r7) - Supplies a pointer to a spin lock to be used to
+// synchronize access to the addend variable.
+//
+// Return Value:
+//
+// The initial value of the addend variable is stored at the address
+// supplied by r3.
+//
+// Implementation Note:
+//
+// The arithmetic for this function is performed as if this were an
+// unsigned large integer since this routine may not incur an overflow
+// exception.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedAddLargeInteger)
+
+//
+// N.B. ExInterlockedExchangeAddLargeInteger is the same as
+// ExInterlockedAddLargeInteger on 32-bit machines.
+// On 64-bit machines, an optimized version of the Exchange
+// version could be implemented using ldarx and stdcx.
+// The optimized version wouldn't need to take the spin lock.
+//
+
+ ALTERNATE_ENTRY(ExInterlockedExchangeAddLargeInteger)
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.7, r.7, r.9, addli, addliw)
+#endif
+
+ lwz r.10,0(r.4) // get low part of addend value
+ lwz r.11,4(r.4) // get high part of addend value
+ addc r.5,r.10,r.5 // add low parts of large integer (sets CA)
+ adde r.6,r.11,r.6 // add high parts of large integer + CA
+ stw r.5,0(r.4) // store low part of result
+ stw r.6,4(r.4) // store high part of result
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.7, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ stw r.10,0(r.3) // set low part of initial value
+ stw r.11,4(r.3) // set high part of initial value
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.7, r.9, addli, addliw, addlix, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedAddLargeInteger)
+
+// SBTTL("Interlocked Add Large Statistic")
+//++
+//
+// VOID
+// ExInterlockedAddLargeStatistic (
+// IN PLARGE_INTEGER Addend,
+// IN ULONG Increment
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked add of an increment value to an
+// addend variable of type large integer.
+//
+// Arguments:
+//
+// Addend (r.3) - Supplies a pointer to a variable whose value is to be
+// adjusted by the increment value.
+//
+// Increment (r.4) - Supplies the increment value to be added to the
+// addend variable.
+//
+// Return Value:
+//
+// None.
+//
+// Implementation Note:
+//
+// The arithmetic for this function is performed as if this were an
+// unsigned large integer since this routine may not incur an overflow
+// exception.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedAddLargeStatistic)
+
+storelsfailed:
+ lwarx r.8, 0, r.3 // get low part of large statistic
+ addc r.9, r.8, r.4 // add increment to low part
+ stwcx. r.9, 0, r.3 // store result of low part add
+ bne- storelsfailed // if ne, store conditional failed
+
+ subfe. r.0, r.0, r.0 // check carry bit (result is 0
+ // if CA is set)
+ bnelr+ // if ne, carry clear, so return
+
+ li r.10, 4 // high part offset
+
+storels2failed:
+ lwarx r.8, r.10, r.3 // get high part of large statistic
+ addi r.8, r.8, 1 // add carry to high part
+ stwcx. r.8, r.10, r.3 // store result of high part add
+ bne- storels2failed // if ne, store conditional failed
+
+ LEAF_EXIT(ExInterlockedAddLargeStatistic)
+
+// SBTTL("Interlocked Add Unsigned Long")
+//++
+//
+// ULONG
+// ExInterlockedAddUlong (
+// IN PULONG Addend,
+// IN ULONG Increment,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked add of an increment value to an
+// addend variable of type unsigned long. The initial value of the addend
+// variable is returned as the function value.
+//
+// Arguments:
+//
+// Addend (r.3) - Supplies a pointer to a variable whose value is to be
+// adjusted by the increment value.
+//
+// Increment (r.4) - Supplies the increment value to be added to the
+// addend variable.
+//
+// Lock (r.5) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the addend variable.
+//
+// Return Value:
+//
+// The initial value of the addend variable.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedAddUlong)
+
+ ori r.6,r.3,0 // move addend address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.5, r.5, r.9, addul, addulw)
+#endif
+
+ lwz r.3,0(r.6) // get initial addend value
+ add r.4,r.3,r.4 // compute adjusted value
+ stw r.4,0(r.6) // set updated addend value
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.5, r.9)
+#endif
+
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.5, r.9, addul, addulw, addulx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedAddUlong)
+
+// SBTTL("Interlocked Exchange Unsigned Long")
+//++
+//
+// ULONG
+// ExInterlockedExchangeUlong (
+// IN PULONG Source,
+// IN ULONG Value,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked exchange of a longword value with
+// a longword in memory and returns the memory value.
+//
+// N.B. There is an alternate entry point provided for this routine which
+// is PPC target specific and whose prototype does not include the
+// spinlock parameter. Since the routine never refers to the spinlock
+// parameter, no additional code is required.
+//
+// There is an additional alternate entry point, InterlockedExchange,
+// which is not platform-specific and which also does not take the
+// spinlock parameter.
+//
+// Arguments:
+//
+// Source (r.3) - Supplies a pointer to a variable whose value is to be
+// exchanged.
+//
+// Value (r.4) - Supplies the value to exchange with the source value.
+//
+// Lock (r.5) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the source variable.
+//
+// Return Value:
+//
+// The source value is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedExchangeUlong)
+
+ ALTERNATE_ENTRY(ExPpcInterlockedExchangeUlong)
+ ALTERNATE_ENTRY(InterlockedExchange)
+
+exchgfailed:
+ lwarx r.5,0,r.3 // get current source value
+ stwcx. r.4,0,r.3 // set new source value
+ bne- exchgfailed // if ne, store conditional failed
+
+ ori r.3,r.5,0 // return old value
+
+ LEAF_EXIT(ExInterlockedExchangeUlong) // return
+
+// SBTTL("Interlocked Decrement Long")
+//++
+//
+// INTERLOCKED_RESULT
+// ExInterlockedDecrementLong (
+// IN PLONG Addend,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked decrement on an addend variable
+// of type signed long. The sign and whether the result is zero is returned
+// as the function value.
+//
+// N.B. There is an alternate entry point provided for this routine which
+// is PPC target specific and whose prototype does not include the
+// spinlock parameter. Since the routine never refers to the spinlock
+// parameter, no additional code is required.
+//
+// Arguments:
+//
+// Addend (r.3) - Supplies a pointer to a variable whose value is to be
+// decremented.
+//
+// Lock (r.4) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the addend variable.
+//
+// Return Value:
+//
+// RESULT_NEGATIVE is returned if the resultant addend value is negative.
+// RESULT_ZERO is returned if the resultant addend value is zero.
+// RESULT_POSITIVE is returned if the resultant addend value is positive.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedDecrementLong)
+
+ ALTERNATE_ENTRY(ExPpcInterlockedDecrementLong)
+
+decfailed:
+ lwarx r.5,0,r.3 // get current addend value
+ subi r.4,r.5,1 // decrement addend value
+ stwcx. r.4,0,r.3 // set new addend value
+ bne- decfailed // if ne, store conditional failed
+
+ addic. r.5,r.5,-1 // Reset CR0
+ mfcr r.3 // Rotate bit 0 & 1 and right justify
+ rlwinm r.3,r.3,2,30,31 // 0 = 0, 1 = positive, 2 = negative
+ neg r.3,r.3 // 0 = 0, -1 = positive, -2 = negative
+
+ LEAF_EXIT(ExInterlockedDecrementLong) // return
+
+// SBTTL("Interlocked Decrement")
+//++
+//
+// LONG
+// InterlockedDecrement (
+// IN PLONG Addend
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked decrement on an addend variable
+// of type signed long. The result is returned as the function value.
+//
+// Arguments:
+//
+// Addend (r.3) - Supplies a pointer to a variable whose value is to be
+// decremented.
+//
+// Return Value:
+//
+// (r.3) The result of the decrement is returned.
+//
+//--
+
+ LEAF_ENTRY(InterlockedDecrement)
+
+dec2failed:
+ lwarx r.5,0,r.3 // get current addend value
+ subi r.4,r.5,1 // decrement addend value
+ stwcx. r.4,0,r.3 // set new addend value
+ bne- dec2failed // if ne, store conditional failed
+
+ ori r.3,r.4,0 // return result
+
+ LEAF_EXIT(InterlockedDecrement) // return
+
+// SBTTL("Interlocked Increment Long")
+//++
+//
+// INTERLOCKED_RESULT
+// ExInterlockedIncrementLong (
+// IN PLONG Addend,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked increment on an addend variable
+// of type signed long. The sign and whether the result is zero is returned
+// as the function value.
+//
+// N.B. There is an alternate entry point provided for this routine which
+// is PPC target specific and whose prototype does not include the
+// spinlock parameter. Since the routine never refers to the spinlock
+// parameter, no additional code is required.
+//
+// Arguments:
+//
+// Addend (r.3) - Supplies a pointer to a variable whose value is to be
+// incremented.
+//
+// Lock (r.4) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the addend variable.
+//
+// Return Value:
+//
+// RESULT_NEGATIVE is returned if the resultant addend value is negative.
+// RESULT_ZERO is returned if the resultant addend value is zero.
+// RESULT_POSITIVE is returned if the resultant addend value is positive.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedIncrementLong)
+
+ ALTERNATE_ENTRY(ExPpcInterlockedIncrementLong)
+
+incfailed:
+ lwarx r.5,0,r.3 // get current addend value
+ addi r.4,r.5,1 // increment addend value
+ stwcx. r.4,0,r.3 // set new addend value
+ bne- incfailed // if ne, store conditional failed
+
+ addic. r.5,r.5,1 // Reset CR0
+ mfcr r.3 // Rotate bit 0 & 1 and right justify
+ rlwinm r.3,r.3,2,30,31 // 0 = 0, 1 = positive, 2 = negative
+ neg r.3,r.3 // 0 = 0, -1 = positive, -2 = negative
+
+ LEAF_EXIT(ExInterlockedIncrementLong) // return
+
+// SBTTL("Interlocked Increment")
+//++
+//
+// LONG
+// InterlockedIncrement (
+// IN PLONG Addend
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked increment on an addend variable
+// of type signed long. The result is returned as the function value.
+//
+// Arguments:
+//
+// Addend (r.3) - Supplies a pointer to a variable whose value is to be
+// incremented.
+//
+// Return Value:
+//
+// (r.3) The result of the increment is returned.
+//
+//--
+
+ LEAF_ENTRY(InterlockedIncrement)
+
+inc2failed:
+ lwarx r.5,0,r.3 // get current addend value
+ addi r.4,r.5,1 // increment addend value
+ stwcx. r.4,0,r.3 // set new addend value
+ bne- inc2failed // if ne, store conditional failed
+
+ ori r.3,r.4,0 // return result
+
+ LEAF_EXIT(InterlockedIncrement) // return
+
+// SBTTL("Interlocked Compare Exchange")
+//++
+//
+// PVOID
+// InterlockedCompareExchange (
+// IN OUT PVOID *Destination,
+// IN PVOID Exchange,
+// IN PVOID Comperand
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked compare of the destination
+// value with the comperand value. If the destination value is equal
+// to the comperand value, then the exchange value is stored in the
+// destination. Otherwise, no operation is performed.
+//
+// Arguments:
+//
+// Destination (r.3) - Supplies a pointer to the destination value.
+//
+// Exchange (r.4) - Supplies the exchange.
+//
+// Comperand (r.5) - Supplies the comperand value.
+//
+// Return Value:
+//
+// The initial destination value is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(InterlockedCompareExchange)
+
+Icmp10: lwarx r.6,0,r.3 // get current operand value
+ cmpw r.6,r.5 // compare with comperand
+ bne- Icmp20 // if ne, compare failed
+ stwcx. r.4,0,r.3 // set new operand value
+ bne- Icmp10 // if ne, store conditional failed
+
+Icmp20: ori r.3,r.6,0 // return result
+
+ LEAF_EXIT(InterlockedCompareExchange) // return
+
+// SBTTL("Interlocked Exchange Add")
+//++
+//
+// LONG
+// InterlockedExchangeAdd (
+// IN PLONG Addend,
+// IN LONG Increment
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked add of an increment value to an
+// addend variable of type unsigned long. The initial value of the addend
+// variable is returned as the function value.
+//
+// Arguments:
+//
+// Addend (r.3) - Supplies a pointer to a variable whose value is to be
+// adjusted by the increment value.
+//
+// Increment (r.4) - Supplies the increment value to be added to the
+// addend variable.
+//
+// Return Value:
+//
+// The initial value of the addend variable.
+//
+//--
+
+ LEAF_ENTRY(InterlockedExchangeAdd)
+
+add1failed:
+ lwarx r.5,0,r.3 // get current addend value
+ add r.6,r.5,r.4 // increment addend value
+ stwcx. r.6,0,r.3 // set new addend value
+ bne- add1failed // if ne, store conditional failed
+
+ ori r.3,r.5,0 // return result
+
+ LEAF_EXIT(InterlockedExchangeAdd) // return
+
+// SBTTL("Interlocked Insert Head List")
+//++
+//
+// PLIST_ENTRY
+// ExInterlockedInsertHeadList (
+// IN PLIST_ENTRY ListHead,
+// IN PLIST_ENTRY ListEntry,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function inserts an entry at the head of a doubly linked list
+// so that access to the list is synchronized in a multiprocessor system.
+//
+// Arguments:
+//
+// ListHead (r.3) - Supplies a pointer to the head of the doubly linked
+// list into which an entry is to be inserted.
+//
+// ListEntry (r.4) - Supplies a pointer to the entry to be inserted at the
+// head of the list.
+//
+// Lock (r.5) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the list.
+//
+// Return Value:
+//
+// Pointer to entry that was at the head of the list or NULL if the list
+// was empty.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedInsertHeadList)
+
+ ori r.6,r.3,0 // move listhead address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.5, r.5, r.9, inshl, inshlw)
+#endif
+
+ lwz r.7,LsFlink(r.6) // get address of next entry
+ stw r.6,LsBlink(r.4) // store previous link in entry
+ stw r.7,LsFlink(r.4) // store next link in entry
+ xor. r.3,r.7,r.6 // check if list was empty
+ stw r.4,LsBlink(r.7) // store previous link in next
+ stw r.4,LsFlink(r.6) // store next link in head
+ beq nullhlist // if eq, list was null
+ ori r.3,r.7,0 // return previous entry at head
+nullhlist:
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.5, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.5, r.9, inshl, inshlw, inshlx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedInsertHeadList)
+
+
+// SBTTL("Interlocked Insert Tail List")
+//++
+//
+// PLIST_ENTRY
+// ExInterlockedInsertTailList (
+// IN PLIST_ENTRY ListHead,
+// IN PLIST_ENTRY ListEntry,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function inserts an entry at the tail of a doubly linked list
+// so that access to the list is synchronized in a multiprocessor system.
+//
+// Arguments:
+//
+// ListHead (r.3) - Supplies a pointer to the head of the doubly linked
+// list into which an entry is to be inserted.
+//
+// ListEntry (r.4) - Supplies a pointer to the entry to be inserted at the
+// tail of the list.
+//
+// Lock (r.5) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the list.
+//
+// Return Value:
+//
+// Pointer to entry that was at the tail of the list or NULL if the list
+// was empty.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedInsertTailList)
+
+ ori r.6,r.3,0 // move listhead address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.5, r.5, r.9, instl, instlw)
+#endif
+
+ lwz r.7,LsBlink(r.6) // get address of previous entry
+ stw r.6,LsFlink(r.4) // store next link in entry
+ stw r.7,LsBlink(r.4) // store previous link in entry
+ xor. r.3,r.7,r.6 // check if list was empty
+ stw r.4,LsBlink(r.6) // store previous link in next
+ stw r.4,LsFlink(r.7) // store next link in previous entry
+ beq nulltlist // if eq, list was empty
+ ori r.3,r.7,0 // return previous entry at tail
+nulltlist:
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.5, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.5, r.9, instl, instlw, instlx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedInsertTailList)
+
+// SBTTL("Interlocked Remove Head List")
+//++
+//
+// PLIST_ENTRY
+// ExInterlockedRemoveHeadList (
+// IN PLIST_ENTRY ListHead,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function removes an entry from the head of a doubly linked list
+// so that access to the list is synchronized in a multiprocessor system.
+// If there are no entries in the list, then a value of NULL is returned.
+// Otherwise, the address of the entry that is removed is returned as the
+// function value.
+//
+// Arguments:
+//
+// ListHead (r.3) - Supplies a pointer to the head of the doubly linked
+// list from which an entry is to be removed.
+//
+// Lock (r.4) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the list.
+//
+// Return Value:
+//
+// The address of the entry removed from the list, or NULL if the list is
+// empty.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedRemoveHeadList)
+
+ ori r.6,r.3,0 // move listhead address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.4, r.4, r.9, remhl, remhlw)
+#endif
+
+ lwz r.7,LsFlink(r.6) // get address of next entry
+ xor. r.3,r.7,r.6 // check if list is empty
+ beq nullrlist // if eq, list is empty
+ lwz r.10,LsFlink(r.7) // get address of next entry
+ ori r.3,r.7,0 // set address of entry removed
+ stw r.10,LsFlink(r.6) // store address of next in head
+ stw r.6,LsBlink(r.10) // store address of previous in next
+nullrlist:
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.4, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.4, r.9, remhl, remhlw, remhlx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedRemoveHeadList)
+
+// SBTTL("Interlocked Pop Entry List")
+//++
+//
+// PSINGLE_LIST_ENTRY
+// ExInterlockedPopEntryList (
+// IN PSINGLE_LIST_ENTRY ListHead,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function removes an entry from the front of a singly linked list
+// so that access to the list is synchronized in a multiprocessor system.
+// If there are no entries in the list, then a value of NULL is returned.
+// Otherwise, the address of the entry that is removed is returned as the
+// function value.
+//
+// Arguments:
+//
+// ListHead (r.3) - Supplies a pointer to the head of the singly linked
+// list from which an entry is to be removed.
+//
+// Lock (r.4) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the list.
+//
+// Return Value:
+//
+// The address of the entry removed from the list, or NULL if the list is
+// empty.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedPopEntryList)
+
+ ori r.6,r.3,0 // move listhead address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.4, r.4, r.9, popel, popelw)
+#endif
+
+ lwz r.3,0(r.6) // get address of next entry
+ cmpwi r.3,0 // check if list is empty
+ beq nullplist // if eq, list is empty
+ lwz r.5,0(r.3) // get address of next entry
+ stw r.5,0(r.6) // store address of next in head
+nullplist:
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.4, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.4, r.9, popel, popelw, popelx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedPopEntryList)
+
+// SBTTL("Interlocked Push Entry List")
+//++
+//
+// PSINGLE_LIST_ENTRY
+// ExInterlockedPushEntryList (
+// IN PSINGLE_LIST_ENTRY ListHead,
+// IN PSINGLE_LIST_ENTRY ListEntry,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function inserts an entry at the head of a singly linked list
+// so that access to the list is synchronized in a multiprocessor system.
+//
+// Arguments:
+//
+// ListHead (r.3) - Supplies a pointer to the head of the singly linked
+// list into which an entry is to be inserted.
+//
+// ListEntry (r.4) - Supplies a pointer to the entry to be inserted at the
+// head of the list.
+//
+// Lock (r.5) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the list.
+//
+// Return Value:
+//
+// Previous contents of ListHead. NULL implies list went from empty
+// to not empty.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedPushEntryList)
+
+ ori r.6,r.3,0 // move listhead address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.5, r.5, r.9, pushel, pushelw)
+#endif
+
+ lwz r.3,0(r.6) // get address of first entry (return value also)
+ stw r.4,0(r.6) // set address of first entry
+ stw r.3,0(r.4) // set address of next in new entry
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.5, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.5, r.9, pushel, pushelw, pushelx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedPushEntryList)
+
+// SBTTL("Interlocked Pop Entry Sequenced List")
+//++
+//
+// PSINGLE_LIST_ENTRY
+// ExInterlockedPopEntrySList (
+// IN PSLIST_HEADER ListHead,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function removes an entry from the front of a sequenced singly
+// linked list so that access to the list is synchronized in an MP system.
+// If there are no entries in the list, then a value of NULL is returned.
+// Otherwise, the address of the entry that is removed is returned as the
+// function value.
+//
+// Arguments:
+//
+// ListHead (r.3) - Supplies a pointer to the sequenced listhead from which
+// an entry is to be removed.
+//
+// Lock (r.4) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the list.
+//
+// Return Value:
+//
+// The address of the entry removed from the list, or NULL if the list is
+// empty.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedPopEntrySList)
+
+ ori r.6,r.3,0 // move listhead address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+ li r.0,0 // zero r.0
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.4, r.4, r.9, popesl, popeslw)
+#endif
+
+ lwz r.3,0(r.6) // get address of next entry
+ ori r.0,r.0,0xffff // set r.0 to 0x0000ffff
+ cmpwi r.3,0 // check if list is empty
+ lwz r.7,4(r.6) // get depth and sequence number
+ beq nullpslist // if eq, list is empty
+
+ lwz r.5,0(r.3) // get address of next entry
+ add r.7,r.7,r.0 // decrement depth and increment
+ // sequence number
+ stw r.5,0(r.6) // store address of next in head
+ stw r.7,4(r.6) // store new depth and sequence number
+
+nullpslist:
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.4, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.4, r.9, popesl, popeslw, popeslx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedPopEntrySList)
+
+// SBTTL("Interlocked Push Entry Sequenced List")
+//++
+//
+// PSINGLE_LIST_ENTRY
+// ExInterlockedPushEntrySList (
+// IN PSLIST_HEADER ListHead,
+// IN PSINGLE_LIST_ENTRY ListEntry,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function inserts an entry at the head of a sequenced singly linked
+// list so that access to the list is synchronized in an MP system.
+//
+// Arguments:
+//
+// ListHead (r.3) - Supplies a pointer to the sequenced listhead into which
+// an entry is to be inserted.
+//
+// ListEntry (r.4) - Supplies a pointer to the entry to be inserted at the
+// head of the list.
+//
+// Lock (r.5) - Supplies a pointer to a spin lock to be used to synchronize
+// access to the list.
+//
+// Return Value:
+//
+// Previous contents of ListHead. NULL implies list went from empty
+// to not empty.
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedPushEntrySList)
+
+ ori r.6,r.3,0 // move listhead address
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.5, r.5, r.9, pushesl, pusheslw)
+#endif
+
+ lwz r.7,4(r.6) // get depth and sequence number
+ lwz r.3,0(r.6) // get address of first entry (return value also)
+ addi r.7,r.7,1 // increment depth
+ stw r.4,0(r.6) // set address of first entry
+ addis r.7,r.7,1 // increment sequence number
+ stw r.3,0(r.4) // set address of next in new entry
+ stw r.7,4(r.6) // store new depth and sequence number
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.5, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.5, r.9, pushesl, pusheslw, pusheslx, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedPushEntrySList)
+
+// SBTTL("Interlocked Compare Exchange 64-bits")
+//++
+//
+// ULONGLONG
+// ExInterlockedCompareExchange64 (
+// IN PULONGLONG Destination,
+// IN PULONGLONG Exchange,
+// IN PULONGLONG Comparand,
+// IN PKSPIN_LOCK Lock
+// )
+//
+// Routine Description:
+//
+// This function performs an interlocked compare and exchange of 64-bits.
+//
+// Arguments:
+//
+// Destination (r3) - Supplies a pointer to the destination variable.
+//
+// Exchange (r4) - Supplies a pointer to the exchange value.
+//
+// Comparand (r5) - Supplies a pointer to the comparand value.
+//
+// Lock (r6) - Supplies a pointer to a spin lock to use to synchronize
+// access to Destination.
+//
+// Return Value:
+//
+// The current destination value is returned as the function value
+// (in r3 and r4).
+//
+//--
+
+ LEAF_ENTRY(ExInterlockedCompareExchange64)
+
+ lwz r.0,0(r.5) // get comparand (low)
+ lwz r.5,4(r.5) // get comparand (high)
+
+ DISABLE_INTERRUPTS(r.8,r.12) // disable interrupts
+ // r.8 <- previous msr value
+ // r.12 <- new (disabled) msr
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.6, r.6, r.9, cmpex64, cmpex64w)
+#endif
+
+ lwz r.10,0(r.3) // get current value (low)
+ lwz r.11,4(r.3) // get current value (high)
+ cmpw cr.0,r.0,r.10 // compare current with comparand (low)
+ cmpw cr.1,r.5,r.11 // compare current with comparand (high)
+ bne cr.0,cmpex64_no // if ne, current and comparand mismatch (low)
+ bne cr.1,cmpex64_no // if ne, current and comparand mismatch (high)
+
+ lwz r.0,0(r.4) // get exchange value (low)
+ lwz r.5,4(r.4) // get exchange value (high)
+ stw r.0,0(r.3) // store exchange value (low)
+ stw r.5,4(r.3) // store exchange value (high)
+
+cmpex64_no:
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.6, r.9)
+#endif
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+ ori r.3,r.10,0 // return current value (low)
+ ori r.4,r.11,0 // return current value (high)
+
+ blr // return
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.6, r.9, cmpex64, cmpex64w, cmpex64x, r.8, r.12)
+#endif
+
+ DUMMY_EXIT(ExInterlockedCompareExchange64)
+
+#if COLLECT_PAGING_DATA
+
+ .struct 0
+ .space StackFrameHeaderLength
+gpiLr: .space 4
+gpiR31: .space 4
+gpiR30: .space 4
+gpiR29: .space 4
+gpiR28: .space 4
+gpiR27: .space 4
+gpiR26: .space 4
+gpiR25: .space 4
+gpiR24: .space 4
+ .align 3
+gpiFrameLength:
+
+ SPECIAL_ENTRY(ExpGetPagingInformation)
+
+ mflr r0
+ stwu sp, -gpiFrameLength(sp)
+ stw r31, gpiR31(sp)
+ stw r30, gpiR30(sp)
+ stw r29, gpiR29(sp)
+ stw r28, gpiR28(sp)
+ stw r27, gpiR27(sp)
+ stw r26, gpiR26(sp)
+ stw r25, gpiR25(sp)
+ stw r24, gpiR24(sp)
+ stw r0, gpiLr(sp)
+
+ PROLOGUE_END(ExpGetPagingInformation)
+
+ ori r30, r3, 0
+ ori r31, r4, 0
+
+ lwz r29, [toc]KeNumberProcessors(r2)
+ lwz r29, 0(r29)
+ stw r29, 0(r30)
+
+ mfsdr1 r28
+ addi r27, r28, 1
+ rlwinm r27, r27, 16, 0x03ff0000
+ stw r27, 4(r30)
+ rlwinm r24, r27, 31, 0x7fffffff
+
+ subi r31, r31, 8
+ addi r30, r30, 8
+
+ lwz r26, [toc]KiProcessorBlock(r2)
+
+gpi_procloop:
+
+ lwz r25, 0(r26)
+ lwz r25, PbPcrPage(r25)
+ slwi r25, r25, PAGE_SHIFT
+ oris r25, r25, 0x8000
+
+ addi r26, r26, 4
+ subi r29, r29, 1
+
+ subic. r31, r31, CTR_SIZE
+ blt skip_processor_data
+ ori r3, r30, 0
+ la r4, PcPagingData(r25)
+ li r5, CTR_SIZE
+ bl ..RtlCopyMemory
+
+skip_processor_data:
+
+ addi r30, r30, CTR_SIZE
+
+ cmpwi r29, 0
+ bne gpi_procloop
+
+ sub. r31, r31, r27
+ blt skip_hpt
+ ori r3, r30, 0
+ rlwinm r28, r28, 0, 0xffff0000
+ oris r4, r28, 0x8000
+ ori r5, r27, 0
+ bl ..RtlCopyMemory
+
+skip_hpt:
+
+ ori r3, r31, 0
+
+ lwz r0, gpiLr(sp)
+ lwz r31, gpiR31(sp)
+ lwz r30, gpiR30(sp)
+ lwz r29, gpiR29(sp)
+ lwz r28, gpiR28(sp)
+ lwz r27, gpiR27(sp)
+ lwz r26, gpiR26(sp)
+ lwz r25, gpiR25(sp)
+ lwz r24, gpiR24(sp)
+ mtlr r0
+ addi sp, sp, gpiFrameLength
+
+ SPECIAL_EXIT(ExpGetPagingInformation)
+
+#endif // COLLECT_PAGING_DATA
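
Every lock-free routine in intrlock.s is built on the same lwarx/stwcx. retry
pattern: load-reserve the word, compute the updated value in registers, attempt
the store-conditional, and loop if another processor broke the reservation in
between. The fragment below shows the equivalent retry loop for the
InterlockedExchangeAdd contract in portable C11; it illustrates the semantics
only and is not how the kernel itself is built.

    /* Portable C11 rendering of the load-reserve/store-conditional retry
     * pattern used above, for the InterlockedExchangeAdd contract. */
    #include <stdatomic.h>

    long SketchInterlockedExchangeAdd(_Atomic long *Addend, long Increment)
    {
        long Old = atomic_load(Addend);

        /* Keep retrying until no other thread modified *Addend between the
         * read and the write -- the role a failed stwcx. plays above. */
        while (!atomic_compare_exchange_weak(Addend, &Old, Old + Increment)) {
            /* On failure, Old is refreshed with the current value; retry. */
        }

        return Old;    /* the initial value, as the assembly returns r.5 */
    }
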
diff --git a/private/ntos/ex/ppc/probe.c b/private/ntos/ex/ppc/probe.c
new file mode 100644
index 000000000..d473ca54d
--- /dev/null
+++ b/private/ntos/ex/ppc/probe.c
@@ -0,0 +1,120 @@
+/*++
+
+Copyright (c) 1990-1993 Microsoft Corporation
+
+Module Name:
+
+ probe.c
+
+Abstract:
+
+ This module implements the probe for write function.
+
+Author:
+
+ David N. Cutler (davec) 19-Jan-1990
+
+Environment:
+
+ Any mode.
+
+Revision History:
+
+--*/
+
+#include "exp.h"
+
+#if defined(ALLOC_PRAGMA)
+#pragma alloc_text(PAGE, ProbeForWrite)
+#endif
+
+
+VOID
+ProbeForWrite (
+ IN PVOID Address,
+ IN ULONG Length,
+ IN ULONG Alignment
+ )
+
+/*++
+
+Routine Description:
+
+ This function probes a structure for write accessibility and ensures
+ correct alignment of the structure. If the structure is not accessible
+ or has incorrect alignment, then an exception is raised.
+
+Arguments:
+
+ Address - Supplies a pointer to the structure to be probed.
+
+ Length - Supplies the length of the structure.
+
+ Alignment - Supplies the required alignment of the structure expressed
+ as the number of bytes in the primitive datatype (e.g., 1 for char,
+ 2 for short, 4 for long, and 8 for quad).
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG EndAddress;
+ ULONG StartAddress;
+
+ //
+ // If the structure has zero length, then do not probe the structure for
+ // write accessibility or alignment.
+ //
+
+ if (Length != 0) {
+
+ //
+ // If the structure is not properly aligned, then raise a data
+ // misalignment exception.
+ //
+
+ ASSERT((Alignment == 1) || (Alignment == 2) ||
+ (Alignment == 4) || (Alignment == 8));
+
+ StartAddress = (ULONG)Address;
+ if ((StartAddress & (Alignment - 1)) == 0) {
+
+ //
+ // Compute the ending address of the structure and probe for
+ // write accessibility.
+ //
+
+ EndAddress = StartAddress + Length - 1;
+ if ((StartAddress <= EndAddress) &
+ (EndAddress < MM_USER_PROBE_ADDRESS)) {
+
+ //
+ // N.B. Only the contents of the buffer may be probed.
+ // Therefore the starting byte is probed for the
+ // first page, and then the first byte in the page
+ // for each succeeding page.
+ //
+
+ EndAddress = (EndAddress & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
+ do {
+ *(volatile CHAR *)StartAddress = *(volatile CHAR *)StartAddress;
+ StartAddress = (StartAddress & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
+ } while (StartAddress != EndAddress);
+
+ return;
+
+ } else {
+ ExRaiseAccessViolation();
+ }
+
+ } else {
+ ExRaiseDatatypeMisalignment();
+ }
+ }
+
+ return;
+}
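
ProbeForWrite reports failure by raising an exception rather than by returning a status, so callers invoke it under a structured exception handler. A minimal usage sketch follows, assuming a caller-supplied output buffer; ExpSampleService is a hypothetical routine, not part of this tree.

//
// Usage sketch only -- ExpSampleService is a hypothetical caller.
//

NTSTATUS
ExpSampleService (
    OUT PVOID UserBuffer,
    IN ULONG UserBufferLength
    )
{
    NTSTATUS Status = STATUS_SUCCESS;

    try {

        //
        // ProbeForWrite raises STATUS_DATATYPE_MISALIGNMENT or
        // STATUS_ACCESS_VIOLATION on failure, so the probe and the
        // subsequent write are both guarded.
        //

        ProbeForWrite(UserBuffer, UserBufferLength, sizeof(ULONG));
        RtlZeroMemory(UserBuffer, UserBufferLength);

    } except (EXCEPTION_EXECUTE_HANDLER) {
        Status = GetExceptionCode();
    }

    return Status;
}
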
diff --git a/private/ntos/ex/ppc/raisests.c b/private/ntos/ex/ppc/raisests.c
new file mode 100644
index 000000000..5988065fb
--- /dev/null
+++ b/private/ntos/ex/ppc/raisests.c
@@ -0,0 +1,268 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ raisests.c
+
+Abstract:
+
+ This module implements routines to raise a general exception from kernel
+ mode or a noncontinuable exception from kernel mode.
+
+Author:
+
+ David N. Cutler (davec) 18-Oct-1990
+
+Environment:
+
+ Any mode.
+
+Revision History:
+
+ Tom Wood 23-Aug-1994
+ Add stack limit parameters to RtlVirtualUnwind.
+
+--*/
+
+#include "exp.h"
+
+//
+// Define private function prototypes.
+//
+
+VOID
+ExpRaiseException (
+ IN PEXCEPTION_RECORD ExceptionRecord
+ );
+
+VOID
+ExpRaiseStatus (
+ IN NTSTATUS ExceptionCode
+ );
+
+VOID
+ExRaiseException (
+ IN PEXCEPTION_RECORD ExceptionRecord
+ )
+
+/*++
+
+Routine Description:
+
+ This function raises a software exception by building a context record
+ and calling the exception dispatcher directly.
+
+ N.B. This routine is a shell routine that simply calls another routine
+ to do the real work. The reason this is done is to avoid a problem
+ in try/finally scopes where the last statement in the scope is a
+ call to raise an exception.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ExpRaiseException(ExceptionRecord);
+ return;
+}
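
The N.B. above refers to callers shaped like the sketch below, where the call that raises is the last statement of a guarded scope. SampleRaiseAtEndOfScope and SampleCleanup are names invented for the illustration.

//
// Hypothetical illustration of the try/finally shape the N.B. describes.
//

extern VOID SampleCleanup (VOID);       // hypothetical cleanup routine

VOID
SampleRaiseAtEndOfScope (
    IN PEXCEPTION_RECORD ExceptionRecord
    )
{
    try {

        //
        // The raise is the last statement in the guarded body.  Because
        // ExRaiseException forwards to ExpRaiseException, the dispatcher
        // ends up associating this frame with the call site inside the
        // scope rather than with the return address just past it, so the
        // finally block below still runs when the exception unwinds.
        //

        ExRaiseException(ExceptionRecord);

    } finally {

        SampleCleanup();
    }
}
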
+
+VOID
+ExpRaiseException (
+ IN PEXCEPTION_RECORD ExceptionRecord
+ )
+
+/*++
+
+Routine Description:
+
+ This function raises a software exception by building a context record
+ and calling the exception dispatcher directly.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG ControlPc;
+ CONTEXT ContextRecord;
+ ULONG EstablisherFrame;
+ PRUNTIME_FUNCTION FunctionEntry;
+ BOOLEAN InFunction;
+ ULONG NextPc;
+ NTSTATUS Status;
+
+ //
+ // Capture the current context, virtually unwind to the caller of this
+ // routine, set the fault instruction address to that of the caller, and
+ // call the exception dispatcher.
+ //
+
+ RtlCaptureContext(&ContextRecord);
+ ControlPc = ContextRecord.Lr - 4;
+ FunctionEntry = RtlLookupFunctionEntry(ControlPc);
+ NextPc = RtlVirtualUnwind(ControlPc,
+ FunctionEntry,
+ &ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL,
+ 0,
+ 0xffffffff);
+
+ ContextRecord.Iar = NextPc + 4;
+ ExceptionRecord->ExceptionAddress = (PVOID)ContextRecord.Iar;
+
+ //
+ // If the exception is successfully dispatched, then continue execution.
+ // Otherwise, give the kernel debugger a chance to handle the exception.
+ //
+
+ if (RtlDispatchException(ExceptionRecord, &ContextRecord)) {
+ Status = ZwContinue(&ContextRecord, FALSE);
+
+ } else {
+ Status = ZwRaiseException(ExceptionRecord, &ContextRecord, FALSE);
+ }
+
+ //
+ // Either the attempt to continue execution or the attempt to give
+ // the kernel debugger a chance to handle the exception failed. Raise
+ // a noncontinuable exception.
+ //
+
+ ExRaiseStatus(Status);
+}
+
+VOID
+ExRaiseStatus (
+ IN NTSTATUS ExceptionCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function raises an exception with the specified status value by
+ building an exception record, building a context record, and calling the
+ exception dispatcher directly. The exception is marked as noncontinuable
+ with no parameters. There is no return from this function.
+
+ N.B. This routine is a shell routine that simply calls another routine
+ to do the real work. The reason this is done is to avoid a problem
+ in try/finally scopes where the last statement in the scope is a
+ call to raise an exception.
+
+Arguments:
+
+ ExceptionCode - Supplies the status value to be used as the exception
+ code for the exception that is to be raised.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ExpRaiseStatus(ExceptionCode);
+ return;
+}
+
+VOID
+ExpRaiseStatus (
+ IN NTSTATUS ExceptionCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function raises an exception with the specified status value by
+ building an exception record, building a context record, and calling the
+ exception dispatcher directly. The exception is marked as noncontinuable
+ with no parameters. There is no return from this function.
+
+Arguments:
+
+ ExceptionCode - Supplies the status value to be used as the exception
+ code for the exception that is to be raised.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG ControlPc;
+ CONTEXT ContextRecord;
+ ULONG EstablisherFrame;
+ EXCEPTION_RECORD ExceptionRecord;
+ PRUNTIME_FUNCTION FunctionEntry;
+ BOOLEAN InFunction;
+ ULONG NextPc;
+ NTSTATUS Status;
+
+ //
+ // Construct an exception record.
+ //
+
+ ExceptionRecord.ExceptionCode = ExceptionCode;
+ ExceptionRecord.ExceptionRecord = (PEXCEPTION_RECORD)NULL;
+ ExceptionRecord.NumberParameters = 0;
+ ExceptionRecord.ExceptionFlags = EXCEPTION_NONCONTINUABLE;
+
+ //
+ // Capture the current context, virtually unwind to the caller of this
+ // routine, set the fault instruction address to that of the caller, and
+ // call the exception dispatcher.
+ //
+
+ RtlCaptureContext(&ContextRecord);
+ ControlPc = ContextRecord.Lr - 4;
+ FunctionEntry = RtlLookupFunctionEntry(ControlPc);
+ NextPc = RtlVirtualUnwind(ControlPc,
+ FunctionEntry,
+ &ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL,
+ 0,
+ 0xffffffff);
+
+ ContextRecord.Iar = NextPc + 4;
+ ExceptionRecord.ExceptionAddress = (PVOID)ContextRecord.Iar;
+ RtlDispatchException(&ExceptionRecord, &ContextRecord);
+
+ //
+ // An unwind was not initiated during the dispatching of a noncontinuable
+ // exception. Give the kernel debugger a chance to handle the exception.
+ //
+
+ Status = ZwRaiseException(&ExceptionRecord, &ContextRecord, FALSE);
+
+ //
+ // The attempt to give the kernel debugger a chance to handle the exception
+ // failed. Raise another noncontinuable exception.
+ //
+
+ ExRaiseStatus(Status);
+}
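
ExRaiseStatus and ExRaiseException are normally reached from code that wants a status to surface as a structured exception rather than a return value. A brief usage sketch follows; ExpSampleValidate and ExpSampleService are hypothetical routines used only for illustration.

//
// Usage sketch only -- neither routine below exists in this tree.
//

VOID
ExpSampleValidate (
    IN PVOID Buffer
    )
{
    if (Buffer == NULL) {

        //
        // There is no return from ExRaiseStatus; the exception is marked
        // noncontinuable and control transfers to an enclosing handler.
        //

        ExRaiseStatus(STATUS_INVALID_PARAMETER);
    }
}

NTSTATUS
ExpSampleService (
    IN PVOID Buffer
    )
{
    NTSTATUS Status = STATUS_SUCCESS;

    try {
        ExpSampleValidate(Buffer);

    } except (EXCEPTION_EXECUTE_HANDLER) {
        Status = GetExceptionCode();
    }

    return Status;
}
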
diff --git a/private/ntos/ex/ppc/sources b/private/ntos/ex/ppc/sources
new file mode 100644
index 000000000..496be6c20
--- /dev/null
+++ b/private/ntos/ex/ppc/sources
@@ -0,0 +1,8 @@
+PPC_SOURCES=..\ppc\evpair.s \
+ ..\ppc\fmutex.s \
+ ..\ppc\gettick.s \
+ ..\ppc\hifreqlk.s \
+ ..\ppc\intrlock.s \
+ ..\ppc\probe.c \
+ ..\ppc\raisests.c \
+ ..\ppc\splocks.c
diff --git a/private/ntos/ex/ppc/splocks.c b/private/ntos/ex/ppc/splocks.c
new file mode 100644
index 000000000..8793b3c37
--- /dev/null
+++ b/private/ntos/ex/ppc/splocks.c
@@ -0,0 +1,86 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ splocks.c
+
+Abstract:
+
+ All global spinlocks in the kernel image are declared in this
+ module. This is done so that each spinlock can be spaced out
+ sufficiently to guarantee that the L2 cache does not thrash
+ by having a spinlock and another high-use variable in the same
+ cache line.
+
+
+Author:
+
+ Ken Reneris (kenr) 13-Jan-1992
+
+Revision History:
+
+--*/
+
+#include "exp.h"
+
+//
+// Static SpinLocks from ntos\cc\cachedat.c
+//
+
+KSPIN_LOCK CcWorkQueueSpinlock = 0;
+KSPIN_LOCK CcDeferredWriteSpinLock = 0;
+KSPIN_LOCK CcDebugTraceLock = 0;
+KSPIN_LOCK CcBcbSpinLock = 0;
+
+//
+// Static SpinLocks from ntos\ex
+//
+
+KSPIN_LOCK ExpLuidLock = 0;
+KSPIN_LOCK PoolTraceLock = 0;
+
+//
+// Static SpinLocks from ntos\io\iodata.c
+//
+
+KSPIN_LOCK IopErrorLogLock = 0;
+KSPIN_LOCK IopErrorLogAllocationLock = 0;
+KSPIN_LOCK IopTimerLock = 0;
+
+//
+// Static SpinLocks from ntos\kd\kdlock.c
+//
+
+KSPIN_LOCK KdpDebuggerLock = 0;
+
+//
+// Static SpinLocks from ntos\ke\kernldat.c
+//
+
+KSPIN_LOCK KiFreezeExecutionLock = 0;
+KSPIN_LOCK KiFreezeLockBackup = 0;
+ULONG KiHardwareTrigger = 0;
+KSPIN_LOCK KiPowerNotifyLock = 0;
+KSPIN_LOCK KiProfileLock = 0;
+
+//
+// Static SpinLocks from ntos\mm\miglobal.c
+//
+
+KSPIN_LOCK MmSystemSpaceLock = 0;
+KSPIN_LOCK MmAllowWSExpansionLock = 0;
+
+//
+// Static SpinLocks from ntos\ps\psinit.c
+//
+
+KSPIN_LOCK PspEventPairLock = 0;
+KSPIN_LOCK PsLoadedModuleSpinLock = 0;
+
+//
+// Static SpinLocks from ntos\fsrtl\fsrtlp.c
+//
+
+KSPIN_LOCK FsRtlStrucSupSpinLock = 0;
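
One way the spacing described in the abstract can be made explicit is to pad each lock out to a cache-line boundary, as in the sketch below. The 32-byte line size, the PADDED_SPIN_LOCK type, and the sample declaration are assumptions for illustration; this module itself relies on the plain declarations above being laid out by the compiler and linker.

//
// Sketch only: explicit cache-line padding for a global spinlock.
//

#define SKETCH_CACHE_LINE_SIZE 32       // assumed L2 line size

typedef struct _PADDED_SPIN_LOCK {
    KSPIN_LOCK Lock;
    UCHAR Padding[SKETCH_CACHE_LINE_SIZE - sizeof(KSPIN_LOCK)];
} PADDED_SPIN_LOCK;

PADDED_SPIN_LOCK SketchSampleLock = { 0 };
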