path: root/private/ntos/ke/mips
Diffstat (limited to 'private/ntos/ke/mips')
-rw-r--r--  private/ntos/ke/mips/alignem.c | 375
-rw-r--r--  private/ntos/ke/mips/alignx.s | 312
-rw-r--r--  private/ntos/ke/mips/allproc.c | 392
-rw-r--r--  private/ntos/ke/mips/apcuser.c | 140
-rw-r--r--  private/ntos/ke/mips/branchem.c | 311
-rw-r--r--  private/ntos/ke/mips/buserror.c | 309
-rw-r--r--  private/ntos/ke/mips/callback.c | 237
-rw-r--r--  private/ntos/ke/mips/callout.s | 411
-rw-r--r--  private/ntos/ke/mips/dmpstate.c | 713
-rw-r--r--  private/ntos/ke/mips/exceptn.c | 896
-rw-r--r--  private/ntos/ke/mips/floatem.c | 4599
-rw-r--r--  private/ntos/ke/mips/flush.c | 820
-rw-r--r--  private/ntos/ke/mips/genmips.c | 1015
-rw-r--r--  private/ntos/ke/mips/getsetrg.c | 1179
-rw-r--r--  private/ntos/ke/mips/initkr.c | 463
-rw-r--r--  private/ntos/ke/mips/intobj.c | 434
-rw-r--r--  private/ntos/ke/mips/services.stb | 64
-rw-r--r--  private/ntos/ke/mips/sources | 41
-rw-r--r--  private/ntos/ke/mips/table.stb | 61
-rw-r--r--  private/ntos/ke/mips/threadbg.s | 128
-rw-r--r--  private/ntos/ke/mips/thredini.c | 285
-rw-r--r--  private/ntos/ke/mips/timindex.s | 111
-rw-r--r--  private/ntos/ke/mips/vdm.c | 52
-rw-r--r--  private/ntos/ke/mips/x4ctxsw.s | 1497
-rw-r--r--  private/ntos/ke/mips/x4mpipi.s | 451
-rw-r--r--  private/ntos/ke/mips/x4sqrt.s | 113
-rw-r--r--  private/ntos/ke/mips/x4start.s | 968
-rw-r--r--  private/ntos/ke/mips/x4trap.s | 4622
-rw-r--r--  private/ntos/ke/mips/xxapcint.s | 123
-rw-r--r--  private/ntos/ke/mips/xxclock.s | 592
-rw-r--r--  private/ntos/ke/mips/xxflshtb.c | 593
-rw-r--r--  private/ntos/ke/mips/xxintsup.s | 713
-rw-r--r--  private/ntos/ke/mips/xxirql.s | 218
-rw-r--r--  private/ntos/ke/mips/xxmiscs.s | 289
-rw-r--r--  private/ntos/ke/mips/xxmpipi.c | 209
-rw-r--r--  private/ntos/ke/mips/xxregsv.s | 151
-rw-r--r--  private/ntos/ke/mips/xxspinlk.s | 540
37 files changed, 24427 insertions, 0 deletions
diff --git a/private/ntos/ke/mips/alignem.c b/private/ntos/ke/mips/alignem.c
new file mode 100644
index 000000000..19af1d6e2
--- /dev/null
+++ b/private/ntos/ke/mips/alignem.c
@@ -0,0 +1,375 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ alignem.c
+
+Abstract:
+
+ This module implements the code necessary to emulate unaligned data
+ references.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+BOOLEAN
+KiEmulateReference (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate an unaligned data reference to an
+ address in the user part of the address space.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ A value of TRUE is returned if the data reference is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG BranchAddress;
+ PUCHAR DataAddress;
+
+ union {
+ ULONGLONG Longlong;
+ ULONG Long;
+ USHORT Short;
+ } DataReference;
+
+ PUCHAR DataValue;
+ PVOID ExceptionAddress;
+ MIPS_INSTRUCTION FaultInstruction;
+ ULONG Rt;
+ KIRQL OldIrql;
+
+ //
+ // If alignment profiling is active, then call the proper profile
+ // routine.
+ //
+
+ if (KiProfileAlignmentFixup) {
+ KiProfileAlignmentFixupCount += 1;
+ if (KiProfileAlignmentFixupCount >= KiProfileAlignmentFixupInterval) {
+ KeRaiseIrql(PROFILE_LEVEL, &OldIrql);
+ KiProfileAlignmentFixupCount = 0;
+ KeProfileInterruptWithSource(TrapFrame, ProfileAlignmentFixup);
+ KeLowerIrql(OldIrql);
+ }
+ }
+
+ //
+ // Save the original exception address in case another exception
+ // occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation of the
+ // unaligned reference causes the emulation to be aborted. The new
+ // exception code and information is copied to the original exception
+ // record and a value of FALSE is returned.
+ //
+
+ try {
+
+ //
+ // If the exception PC is equal to the fault instruction address
+ // plus four, then the misalignment exception occurred in the delay
+ // slot of a branch instruction and the continuation address must
+ // be computed by emulating the branch instruction. Note that it
+ // is possible for an exception to occur when the branch instruction
+ // is read from user memory.
+ //
+
+ if ((TrapFrame->Fir + 4) == (ULONG)ExceptionRecord->ExceptionAddress) {
+ BranchAddress = KiEmulateBranch(ExceptionFrame, TrapFrame);
+
+ } else {
+ BranchAddress = TrapFrame->Fir + 4;
+ }
+
+ //
+ // Compute the effective address of the reference and check to make
+ // sure it is within the user part of the address space. Alignment
+ // exceptions take precedence over memory management exceptions and
+ // the address could be a system address.
+ //
+
+ FaultInstruction.Long = *((PULONG)ExceptionRecord->ExceptionAddress);
+ DataAddress = (PUCHAR)KiGetRegisterValue64(FaultInstruction.i_format.Rs,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress = (PUCHAR)((LONG)DataAddress +
+ (LONG)FaultInstruction.i_format.Simmediate);
+
+ //
+ // The emulated data reference must be in user space and must be less
+ // than 16 bytes from the end of user space.
+ //
+
+ if ((ULONG)DataAddress < 0x7ffffff0) {
+
+ //
+ // Dispatch on the opcode value.
+ //
+
+ DataValue = (PUCHAR)&DataReference;
+ Rt = FaultInstruction.i_format.Rt;
+ switch (FaultInstruction.i_format.Opcode) {
+
+ //
+ // Load halfword integer.
+ //
+
+ case LH_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ KiSetRegisterValue64(Rt,
+ (SHORT)DataReference.Short,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load halfword unsigned integer.
+ //
+
+ case LHU_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ KiSetRegisterValue64(Rt,
+ DataReference.Short,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load word floating.
+ //
+
+ case LWC1_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetRegisterValue(Rt + 32,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load word integer.
+ //
+
+ case LW_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetRegisterValue64(Rt,
+ (LONG)DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load double integer.
+ //
+
+ case LD_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ DataValue[4] = DataAddress[4];
+ DataValue[5] = DataAddress[5];
+ DataValue[6] = DataAddress[6];
+ DataValue[7] = DataAddress[7];
+ KiSetRegisterValue64(Rt,
+ DataReference.Longlong,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load double floating.
+ //
+
+ case LDC1_OP:
+ Rt = (Rt & 0x1e) + 32;
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetRegisterValue(Rt,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataValue[0] = DataAddress[4];
+ DataValue[1] = DataAddress[5];
+ DataValue[2] = DataAddress[6];
+ DataValue[3] = DataAddress[7];
+ KiSetRegisterValue(Rt + 1,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Store halfword integer.
+ //
+
+ case SH_OP:
+ DataReference.Longlong = KiGetRegisterValue64(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ break;
+
+ //
+ // Store word floating.
+ //
+
+ case SWC1_OP:
+ DataReference.Long = KiGetRegisterValue(Rt + 32,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ break;
+
+ //
+ // Store word integer.
+ //
+
+ case SW_OP:
+ DataReference.Longlong = KiGetRegisterValue64(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ break;
+
+ //
+ // Store double integer.
+ //
+
+ case SD_OP:
+ DataReference.Longlong = KiGetRegisterValue64(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ DataAddress[4] = DataValue[4];
+ DataAddress[5] = DataValue[5];
+ DataAddress[6] = DataValue[6];
+ DataAddress[7] = DataValue[7];
+ break;
+
+ //
+ // Store double floating.
+ //
+
+ case SDC1_OP:
+ Rt = (Rt & 0x1e) + 32;
+ DataReference.Long = KiGetRegisterValue(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ DataReference.Long = KiGetRegisterValue(Rt + 1,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[4] = DataValue[0];
+ DataAddress[5] = DataValue[1];
+ DataAddress[6] = DataValue[2];
+ DataAddress[7] = DataValue[3];
+ break;
+
+ //
+ // All other instructions are not emulated.
+ //
+
+ default:
+ return FALSE;
+ }
+
+ TrapFrame->Fir = BranchAddress;
+ return TRUE;
+ }
+
+ //
+ // If an exception occurs, then copy the new exception information to the
+ // original exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+ }
+
+ //
+ // Return a value of FALSE.
+ //
+
+ return FALSE;
+}
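
The emulation above never issues an unaligned access itself: it assembles or scatters the datum one byte at a time through the DataReference union and then moves the result to or from the register context. A minimal standalone sketch of the same byte-wise technique (illustrative only, not part of the file above):

    #include <stdint.h>

    /* Read a 32-bit value from a possibly unaligned address one byte at a
       time, mirroring the DataValue[n] = DataAddress[n] copies above. */
    static uint32_t ReadUnalignedLong(const uint8_t *DataAddress)
    {
        union {
            uint32_t Long;
            uint8_t Bytes[4];
        } DataReference;

        DataReference.Bytes[0] = DataAddress[0];
        DataReference.Bytes[1] = DataAddress[1];
        DataReference.Bytes[2] = DataAddress[2];
        DataReference.Bytes[3] = DataAddress[3];
        return DataReference.Long;
    }

The byte-by-byte copy preserves memory order, so the result matches what an aligned lw would have produced on the same bytes.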
diff --git a/private/ntos/ke/mips/alignx.s b/private/ntos/ke/mips/alignx.s
new file mode 100644
index 000000000..8821d764f
--- /dev/null
+++ b/private/ntos/ke/mips/alignx.s
@@ -0,0 +1,312 @@
+// TITLE("Unaligned Branch Tests")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// alignx.s
+//
+// Abstract:
+//
+// This module implements the unaligned branch tests.
+//
+// Author:
+//
+// David N. Cutler (davec) 27-Feb-1991
+//
+// Environment:
+//
+// User mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Unaligned BEQ/BNE/BC1F/BC1T Branch Tests")
+//++
+//
+// Routine Description:
+//
+// The following routines implement beq/bne/bc1f/bc1t tests with an unaligned
+// load word instruction in the delay slot.
+//
+// Arguments:
+//
+// a0 - Supplies first operand for branch test.
+// a1 - Supplies second operand for branch test.
+// a2 - Supplies a pointer to an unaligned word.
+// a3 - Supplies a pointer to an aligned word that receives the result
+// of the unaligned load.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the branch was taken. Otherwise,
+// FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(Beq)
+
+ .set noreorder
+ li v0,1 // set branched true
+ beq a0,a1,10f // if eq, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Beq
+
+ LEAF_ENTRY(Bne)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bne a0,a1,10f // if ne, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Bne
+
+ LEAF_ENTRY(Bc1f)
+
+ .set noreorder
+ mtc1 a0,f0 // set comparand 1
+ mtc1 a1,f2 // set comparand 2
+ li v0,1 // set branched true
+ c.eq.s f0,f2 // compare for equality
+ bc1f 10f // if f, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Bc1f
+
+ LEAF_ENTRY(Bc1t)
+
+ .set noreorder
+ mtc1 a0,f0 // set comparand 1
+ mtc1 a1,f2 // set comparand 2
+ li v0,1 // set branched true
+ c.eq.s f0,f2 // compare for equality
+ bc1t 10f // if t, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Bc1t
+
+ SBTTL("Unaligned BLEZ/BLTZ/BGEZ/BGTZ/BGEZAL/BLTZAL Branch Tests")
+//++
+//
+// Routine Description:
+//
+// The following routines implement blez/bltz/bgez/bgtz/bgezal/bltzal
+// tests with an unaligned load word instruction in the delay slot.
+//
+// Arguments:
+//
+// a0 - Supplies the operand for branch test.
+// a1 - Supplies a pointer to an unaligned word.
+// a2 - Supplies a pointer to an aligned word that receives the result
+// of the unaligned load.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the branch was taken. Otherwise,
+// FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(Blez)
+
+ .set noreorder
+ li v0,1 // set branched true
+ blez a0,10f // if lez, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Blez
+
+ LEAF_ENTRY(Bltz)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bltz a0,10f // if ltz, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Bltz
+
+ LEAF_ENTRY(Bgez)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bgez a0,10f // if gez, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Bgez
+
+ LEAF_ENTRY(Bgtz)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bgtz a0,10f // if gtz, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Bgtz
+
+ LEAF_ENTRY(Bgezal)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ bgezal a0,10f // if gez, branch and link
+ lw v1,0(a1) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a2) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Bgezal
+
+ LEAF_ENTRY(Bltzal)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ bltzal a0,10f // if ltz, branch and link
+ lw v1,0(a1) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a2) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Bltzal
+
+ SBTTL("Unaligned JAL/J Tests")
+//++
+//
+// Routine Description:
+//
+// The following routines implement jal/j tests with an unaligned
+// load word instruction in the delay slot.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to an unaligned word.
+// a1 - Supplies a pointer to an aligned word that receives the result
+// of the unaligned load.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the branch was taken. Otherwise,
+// FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(Jal)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ jal 10f // jump and link
+ lw v1,0(a0) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Jal
+
+ LEAF_ENTRY(Jalr)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ la t0,10f // get destination address
+ jal t0 // jump
+ lw v1,0(a0) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // jump back
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Jalr
+
+ LEAF_ENTRY(J)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ j 10f // jump
+ lw v1,0(a0) // load unaligned data
+20: lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j 20b // jump back
+ li v0,1 // set branched true
+ .set reorder
+
+ .end J
+
+ LEAF_ENTRY(Jr)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ la t0,10f // get destination address
+ j t0 // jump
+ lw v1,0(a0) // load unaligned data
+20: lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j 20b // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Jr
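
The routines above are leaf assembly functions meant to be driven from a C test harness, with operands arriving in a0-a3 per the standard MIPS calling convention. A hypothetical caller for the Beq case (the prototype below is assumed; it is not declared anywhere in this diff):

    /* Hypothetical prototype for the Beq test above: returns nonzero if the
       branch was taken and stores the word loaded in the delay slot. */
    extern int Beq(long Op1, long Op2, void *UnalignedWord, unsigned int *Result);

    void TestBeq(void)
    {
        unsigned char buffer[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        unsigned int result;
        int taken;

        /* buffer + 1 is deliberately misaligned; the lw in the branch delay
           slot should be fixed up by the emulation in alignem.c. */
        taken = Beq(5, 5, buffer + 1, &result);
        (void)taken;
    }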
diff --git a/private/ntos/ke/mips/allproc.c b/private/ntos/ke/mips/allproc.c
new file mode 100644
index 000000000..19a319690
--- /dev/null
+++ b/private/ntos/ke/mips/allproc.c
@@ -0,0 +1,392 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ allproc.c
+
+Abstract:
+
+ This module allocates and initializes kernel resources required
+ to start a new processor, and passes a complete processor state
+ structure to the HAL to obtain a new processor.
+
+Author:
+
+ David N. Cutler 29-Apr-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+
+#pragma alloc_text(INIT, KeStartAllProcessors)
+
+#endif
+
+//
+// Define macro to round up to 64-byte boundary and define block sizes.
+//
+
+#define ROUND_UP(x) ((sizeof(x) + 63) & (~63))
+#define BLOCK1_SIZE ((3 * KERNEL_STACK_SIZE) + PAGE_SIZE)
+#define BLOCK2_SIZE (ROUND_UP(KPRCB) + ROUND_UP(ETHREAD) + 64)
+
+//
+// Define barrier wait static data.
+//
+
+#if !defined(NT_UP)
+
+ULONG KiBarrierWait = 0;
+
+#endif
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ );
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiInitializeSystem (
+ IN PLOADER_PARAMETER_BLOCK Loaderblock
+ );
+
+VOID
+KeStartAllProcessors(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called during phase 1 initialization on the master boot
+ processor to start all of the other registered processors.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG MemoryBlock1;
+ ULONG MemoryBlock2;
+ ULONG Number;
+ ULONG PcrAddress;
+ ULONG PcrPage;
+ PKPRCB Prcb;
+ KPROCESSOR_STATE ProcessorState;
+ PRESTART_BLOCK RestartBlock;
+ BOOLEAN Started;
+
+#if !defined(NT_UP)
+
+ //
+ // If the registered number of processors is greater than the maximum
+ // number of processors supported, then only allow the maximum number
+ // of supported processors.
+ //
+
+ if (KeRegisteredProcessors > MAXIMUM_PROCESSORS) {
+ KeRegisteredProcessors = MAXIMUM_PROCESSORS;
+ }
+
+ //
+ // Set barrier that will prevent any other processor from entering the
+ // idle loop until all processors have been started.
+ //
+
+ KiBarrierWait = 1;
+
+ //
+ // Initialize the processor state that will be used to start each of
+ // processors. Each processor starts in the system initialization code
+ // with address of the loader parameter block as an argument.
+ //
+
+ Number = 1;
+ RtlZeroMemory(&ProcessorState, sizeof(KPROCESSOR_STATE));
+ ProcessorState.ContextFrame.IntA0 = (ULONG)KeLoaderBlock;
+ ProcessorState.ContextFrame.Fir = (ULONG)KiInitializeSystem;
+ while (Number < KeRegisteredProcessors) {
+
+ //
+ // Allocate a DPC stack, an idle thread kernel stack, a panic
+ // stack, a PCR page, a processor block, and an executive thread
+ // object. If the allocation fails or the allocation cannot be
+ // made from unmapped nonpaged pool, then stop starting processors.
+ //
+
+ MemoryBlock1 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK1_SIZE);
+ if (((PVOID)MemoryBlock1 == NULL) ||
+ ((MemoryBlock1 & 0xc0000000) != KSEG0_BASE)) {
+ if ((PVOID)MemoryBlock1 != NULL) {
+ ExFreePool((PVOID)MemoryBlock1);
+ }
+
+ break;
+ }
+
+ MemoryBlock2 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK2_SIZE);
+ if (((PVOID)MemoryBlock2 == NULL) ||
+ ((MemoryBlock2 & 0xc0000000) != KSEG0_BASE)) {
+ ExFreePool((PVOID)MemoryBlock1);
+ if ((PVOID)MemoryBlock2 != NULL) {
+ ExFreePool((PVOID)MemoryBlock2);
+ }
+
+ break;
+ }
+
+ //
+ // Zero both blocks of allocated memory.
+ //
+
+ RtlZeroMemory((PVOID)MemoryBlock1, BLOCK1_SIZE);
+ RtlZeroMemory((PVOID)MemoryBlock2, BLOCK2_SIZE);
+
+ //
+ // Set address of interrupt stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Mips.InterruptStack = MemoryBlock1 + (1 * KERNEL_STACK_SIZE);
+
+ //
+ // Set address of idle thread kernel stack in loader parameter block.
+ //
+
+ KeLoaderBlock->KernelStack = MemoryBlock1 + (2 * KERNEL_STACK_SIZE);
+
+ //
+ // Set address of panic stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Mips.PanicStack = MemoryBlock1 + (3 * KERNEL_STACK_SIZE);
+
+ //
+ // Change the color of the PCR page to match the new mapping and
+ // set the page frame of the PCR page in the loader parameter block.
+ //
+
+ PcrAddress = MemoryBlock1 + (3 * KERNEL_STACK_SIZE);
+ PcrPage = (PcrAddress ^ KSEG0_BASE) >> PAGE_SHIFT;
+ HalChangeColorPage((PVOID)KIPCR, (PVOID)PcrAddress, PcrPage);
+ KeLoaderBlock->u.Mips.PcrPage = PcrPage;
+
+ //
+ // Set the address of the processor block and executive thread in the
+ // loader parameter block.
+ //
+
+ KeLoaderBlock->Prcb = (MemoryBlock2 + 63) & ~63;
+ KeLoaderBlock->Thread = KeLoaderBlock->Prcb + ROUND_UP(KPRCB);
+
+ //
+ // Attempt to start the next processor. If attempt is successful,
+ // then wait for the processor to get initialized. Otherwise,
+ // deallocate the processor resources and terminate the loop.
+ //
+
+ Started = HalStartNextProcessor(KeLoaderBlock, &ProcessorState);
+ if (Started == FALSE) {
+ HalChangeColorPage((PVOID)PcrAddress, (PVOID)KIPCR, PcrPage);
+ ExFreePool((PVOID)MemoryBlock1);
+ ExFreePool((PVOID)MemoryBlock2);
+ break;
+
+ } else {
+
+ //
+ // Wait until boot is finished on the target processor before
+ // starting the next processor. Booting is considered to be
+ // finished when a processor completes its initialization and
+ // drops into the idle loop.
+ //
+
+ Prcb = (PKPRCB)(KeLoaderBlock->Prcb);
+ RestartBlock = Prcb->RestartBlock;
+ while (RestartBlock->BootStatus.BootFinished == 0) {
+ }
+ }
+
+ Number += 1;
+ }
+
+ //
+ // Allow all processors that were started to enter the idle loop and
+ // begin execution.
+ //
+
+ KiBarrierWait = 0;
+
+#endif
+
+ //
+ // Reset and synchronize the performance counters of all processors.
+ //
+
+ KiCalibratePerformanceCounter();
+ return;
+}
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function resets and synchronizes the performance counter on all
+ processors in the configuration.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Count = 1;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to synchronization level to avoid a possible context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Initialize the reset performance counter packet, compute the target
+ // set of processors, and send the packet to the target processors, if
+ // any, for execution.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ Count = (LONG)KeNumberProcessors;
+ KiIpiSendPacket(TargetProcessors,
+ KiCalibratePerformanceCounterTarget,
+ (PVOID)&Count,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Reset the performance counter on current processor.
+ //
+
+ HalCalibratePerformanceCounter((volatile PLONG)&Count);
+
+ //
+ // Wait until all target processors have reset and synchronized their
+ // performance counters.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to previous level.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for resetting the performance counter.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Count - Supplies a pointer to the number of processors in the host
+ configuration.
+
+ Parameter2 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Reset and synchronize the performance counter on the current processor
+ // and clear the reset performance counter address to signal the source to
+ // continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalCalibratePerformanceCounter((volatile PLONG)Count);
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
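
KeStartAllProcessors rounds the PRCB and executive thread object up to 64-byte boundaries with the ROUND_UP macro so each structure starts on a cache-line-sized boundary inside MemoryBlock2. A standalone sketch of that rounding arithmetic (the structure size used is an arbitrary stand-in):

    #include <stdio.h>

    /* Round a size up to the next 64-byte boundary, as ROUND_UP(x) does
       above with (sizeof(x) + 63) & ~63. */
    static unsigned long RoundUp64(unsigned long Size)
    {
        return (Size + 63) & ~63UL;
    }

    int main(void)
    {
        /* 680 is an arbitrary stand-in for sizeof(KPRCB). */
        printf("%lu -> %lu\n", 680UL, RoundUp64(680UL));   /* prints 680 -> 704 */
        return 0;
    }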
diff --git a/private/ntos/ke/mips/apcuser.c b/private/ntos/ke/mips/apcuser.c
new file mode 100644
index 000000000..200fa9a04
--- /dev/null
+++ b/private/ntos/ke/mips/apcuser.c
@@ -0,0 +1,140 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ apcuser.c
+
+Abstract:
+
+ This module implements the machine dependent code necessary to initialize
+ a user mode APC.
+
+Author:
+
+ David N. Cutler (davec) 23-Apr-1990
+
+Environment:
+
+ Kernel mode only, IRQL APC_LEVEL.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiInitializeUserApc (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKNORMAL_ROUTINE NormalRoutine,
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize the context for a user mode APC.
+
+Arguments:
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ NormalRoutine - Supplies a pointer to the user mode APC routine.
+
+ NormalContext - Supplies a pointer to the user context for the APC
+ routine.
+
+ SystemArgument1 - Supplies the first system supplied value.
+
+ SystemArgument2 - Supplies the second system supplied value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextRecord;
+ EXCEPTION_RECORD ExceptionRecord;
+ LONG Length;
+ ULONG UserStack;
+
+ //
+ // Move the user mode state from the trap and exception frames to the
+ // context frame.
+ //
+
+ ContextRecord.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextRecord);
+
+ //
+ // Transfer the context information to the user stack, initialize the
+ // APC routine parameters, and modify the trap frame so execution will
+ // continue in user mode at the user mode APC dispatch routine.
+ //
+
+ try {
+
+ //
+ // Compute length of context record and new aligned user stack pointer.
+ //
+
+ Length = sizeof(CONTEXT);
+ UserStack = (ULONG)(ContextRecord.XIntSp & (~7)) - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack, Length, sizeof(QUAD));
+ RtlMoveMemory((PULONG)UserStack, &ContextRecord, sizeof(CONTEXT));
+
+ //
+ // Set the address of the user APC routine, the APC parameters, the
+ // new frame pointer, and the new stack pointer in the current trap
+ // frame. Set the continuation address so control will be transferred
+ // to the user APC dispatcher.
+ //
+
+ TrapFrame->XIntSp = (LONG)UserStack;
+ TrapFrame->XIntS8 = (LONG)UserStack;
+ TrapFrame->XIntA0 = (LONG)NormalContext;
+ TrapFrame->XIntA1 = (LONG)SystemArgument1;
+ TrapFrame->XIntA2 = (LONG)SystemArgument2;
+ TrapFrame->XIntA3 = (LONG)NormalRoutine;
+ TrapFrame->Fir = KeUserApcDispatcher;
+
+ //
+ // If an exception occurs, then copy the exception information to an
+ // exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Set the address of the exception to the current program address
+ // and raise the exception by calling the exception dispatcher.
+ //
+
+ ExceptionRecord.ExceptionAddress = (PVOID)(TrapFrame->Fir);
+ KiDispatchException(&ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ UserMode,
+ TRUE);
+ }
+
+ return;
+}
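
KiInitializeUserApc places the CONTEXT record on the user stack by aligning the saved user stack pointer down to an 8-byte boundary and subtracting the record length; that address then becomes the new sp and s8 for the user APC dispatcher. A small sketch of just the pointer arithmetic (names are illustrative, and ContextLength stands in for sizeof(CONTEXT)):

    #include <stdint.h>

    /* Compute the aligned user stack address that receives the CONTEXT
       record, mirroring (XIntSp & ~7) - sizeof(CONTEXT) above. */
    static uint32_t ApcContextAddress(uint32_t UserSp, uint32_t ContextLength)
    {
        return (UserSp & ~7u) - ContextLength;
    }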
diff --git a/private/ntos/ke/mips/branchem.c b/private/ntos/ke/mips/branchem.c
new file mode 100644
index 000000000..c9479deba
--- /dev/null
+++ b/private/ntos/ke/mips/branchem.c
@@ -0,0 +1,311 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ branchem.c
+
+Abstract:
+
+ This module implements the code necessary to emulate branches when an
+ alignment or floating exception occurs in the delay slot of a branch
+ instruction.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+ULONG
+KiEmulateBranch (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate the branch instruction specified by
+ the fault instruction address in the specified trap frame. The resultant
+ branch destination address is computed and returned as the function value.
+
+Arguments:
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The resultant target branch destination is returned as the function value.
+
+--*/
+
+{
+
+ MIPS_INSTRUCTION BranchInstruction;
+ ULONG BranchTaken;
+ ULONG BranchNotTaken;
+ ULONG RsValue;
+ ULONG RtValue;
+
+ //
+ // Get the branch instruction at the fault address.
+ //
+
+ BranchInstruction.Long = *((PULONG)TrapFrame->Fir);
+
+ //
+ // Assume the branch instruction is a conditional branch and get the
+ // Rs and Rt register values. Also compute the branch taken as well
+ // as the branch not taken target addresses.
+ //
+
+ RsValue = KiGetRegisterValue(BranchInstruction.r_format.Rs,
+ ExceptionFrame,
+ TrapFrame);
+
+ RtValue = KiGetRegisterValue(BranchInstruction.r_format.Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ BranchTaken = (TrapFrame->Fir + 4) +
+ (LONG)(BranchInstruction.i_format.Simmediate << 2);
+ BranchNotTaken = TrapFrame->Fir + 8;
+
+ //
+ // Dispatch on the opcode value.
+ //
+ // N.B. All branch likely instructions are guaranteed to branch since an
+ // exception would not have been generated in the delay slot if the
+ // branch was not going to actually branch.
+ //
+
+ switch (BranchInstruction.r_format.Opcode) {
+
+ //
+ // Special opcode - dispatch on the function subopcode.
+ //
+
+ case SPEC_OP:
+ switch (BranchInstruction.r_format.Function) {
+
+ //
+ // Jalr - jump and link register.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case JALR_OP:
+
+ //
+ // Jr - jump register.
+ //
+
+ case JR_OP:
+ return RsValue;
+
+ //
+ // All other instructions are illegal and should never happen.
+ //
+
+ default:
+ return TrapFrame->Fir;
+ }
+
+ //
+ // Jal - jump and link.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case JAL_OP:
+
+ //
+ // J - jump.
+ //
+
+ case J_OP:
+ return ((TrapFrame->Fir + 4) & 0xf0000000) |
+ (BranchInstruction.j_format.Target << 2);
+
+ //
+ // Beq - branch equal.
+ // Beql - branch equal likely.
+ //
+
+ case BEQ_OP:
+ case BEQL_OP:
+ if ((LONG)RsValue == (LONG)RtValue) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Bne - branch not equal.
+ // Bnel - branch not equal likely.
+ //
+
+ case BNE_OP:
+ case BNEL_OP:
+ if ((LONG)RsValue != (LONG)RtValue) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Blez - branch less than or equal zero.
+ // Blezl - branch less than or equal zero likely.
+ //
+
+ case BLEZ_OP:
+ case BLEZL_OP:
+ if ((LONG)RsValue <= 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Bgtz - branch greater than zero.
+ // Bgtzl - branch greater than zero likely.
+ //
+
+ case BGTZ_OP:
+ case BGTZL_OP:
+ if ((LONG)RsValue > 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Branch conditional opcode - dispatch on the rt field.
+ //
+
+ case BCOND_OP:
+ switch (BranchInstruction.r_format.Rt) {
+
+ //
+ // Bltzal - branch on less than zero and link.
+ // Bltzall - branch on less than zero and link likely.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case BLTZAL_OP:
+ case BLTZALL_OP:
+
+ //
+ // Bltz - branch less than zero.
+ // Bltzl - branch less than zero likely.
+ //
+
+ case BLTZ_OP:
+ case BLTZL_OP:
+ if ((LONG)RsValue < 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Bgezal - branch on greater than or equal zero and link.
+ // Bgezall - branch on greater than or equal zero and link likely.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case BGEZAL_OP:
+ case BGEZALL_OP:
+
+ //
+ // Bgez - branch greater than or equal zero.
+ // Bgezl - branch greater than or equal zero likely.
+ //
+
+ case BGEZ_OP:
+ case BGEZL_OP:
+ if ((LONG)RsValue >= 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // All other instructions are illegal and should not happen.
+ //
+
+ default:
+ return TrapFrame->Fir;
+ }
+
+ //
+ // Cop1 - coprocessor 1 branch operation.
+ //
+ // Bczf - Branch coprocessor z false.
+ // Bczfl - Branch coprocessor z false likely.
+ // Bczt - Branch coprocessor z true.
+ // Bcztl - Branch coprocessor z true likely.
+ //
+
+ case COP1_OP:
+ if ((BranchInstruction.Long & COPz_BC_MASK) == COPz_BF) {
+
+ //
+ // Branch on coprocessor 1 condition code false.
+ //
+
+ if (((PFSR)(&TrapFrame->Fsr))->CC == 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ } else if ((BranchInstruction.Long & COPz_BC_MASK) == COPz_BT) {
+
+ //
+ // Branch on coprocessor 1 condition code true.
+ //
+
+ if (((PFSR)(&TrapFrame->Fsr))->CC != 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ }
+
+ //
+ // All other instructions are illegal and should not happen.
+ //
+
+ default:
+ return TrapFrame->Fir;
+ }
+}
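
For the conditional branches, KiEmulateBranch computes the taken target as the branch PC plus four plus the sign-extended 16-bit immediate shifted left two bits, and the not-taken target as the branch PC plus eight (stepping over the delay slot). A standalone sketch of that address arithmetic:

    #include <stdint.h>

    /* Compute both continuation addresses for a MIPS conditional branch
       located at BranchPc with signed 16-bit displacement Simmediate. */
    static void BranchTargets(uint32_t BranchPc, int16_t Simmediate,
                              uint32_t *Taken, uint32_t *NotTaken)
    {
        /* Multiply by 4 here; the kernel source uses the equivalent << 2. */
        *Taken    = (BranchPc + 4) + (uint32_t)((int32_t)Simmediate * 4);
        *NotTaken = BranchPc + 8;
    }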
diff --git a/private/ntos/ke/mips/buserror.c b/private/ntos/ke/mips/buserror.c
new file mode 100644
index 000000000..4c4b7ef72
--- /dev/null
+++ b/private/ntos/ke/mips/buserror.c
@@ -0,0 +1,309 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ buserror.c
+
+Abstract:
+
+ This module implements the code necessary to process data and instruction
+ bus errors and to set the address of the cache error routine.
+
+Author:
+
+ David N. Cutler (davec) 31-Oct-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+BOOLEAN
+KeBusError (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PVOID VirtualAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function provides the default bus error handling routine for NT.
+
+ N.B. There is no return from this routine.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ VirtualAddress - Supplies the virtual address of the bus error.
+
+ PhysicalAddress - Supplies the physical address of the bus error.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Bug check specifying the exception code, the virtual address, the
+ // low part of the physical address, the current processor state, and
+ // the exception PC.
+ //
+
+ KeBugCheckEx(ExceptionRecord->ExceptionCode & 0xffff,
+ (ULONG)VirtualAddress,
+ PhysicalAddress.LowPart,
+ TrapFrame->Psr,
+ TrapFrame->Fir);
+
+ return FALSE;
+}
+
+PHYSICAL_ADDRESS
+KiGetPhysicalAddress (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function computes the physical address for a given virtual address.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address whose physical address is
+ to be computed.
+
+Return Value:
+
+ The physical address that corresponds to the specified virtual address.
+
+--*/
+
+{
+ PHYSICAL_ADDRESS PhysicalAddress;
+
+ //
+ // If the address is a KSEG0 or KSEG1 address, then mask off the high
+ // three address bits and return the result as the physical address.
+ // Otherwise, call memory management to convert the virtual address to
+ // a physical address.
+ //
+
+ if (((ULONG)VirtualAddress >= KSEG0_BASE) &&
+ ((ULONG)VirtualAddress < (KSEG1_BASE + 0x20000000))) {
+ PhysicalAddress.LowPart = (ULONG)VirtualAddress & 0x1fffffff;
+ PhysicalAddress.HighPart = 0;
+ return PhysicalAddress;
+
+ } else {
+ return MmGetPhysicalAddress(VirtualAddress);
+ }
+}
+
+VOID
+KiDataBusError (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to process a data bus error. The virtual and
+ physical address of the error are computed and the data bus error
+ processing routine is called indirectly through the PCR. NT provides
+ a standard routine to process the error and shutdown the system. A
+ vendor, however, can replace the standard NT routine and do additional
+ processing if necessary via the HAL.
+
+ N.B. There is no return from this routine.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PVOID VirtualAddress;
+ PHYSICAL_ADDRESS PhysicalAddress;
+ MIPS_INSTRUCTION FaultInstruction;
+
+ //
+ // Any exception that occurs during the attempted calculation of the
+ // virtual address causes the virtual address calculation to be
+ // aborted and the virtual address of the instruction itself is used
+ // instead.
+ //
+
+ try {
+
+ //
+ // Compute the effective address of the reference.
+ //
+
+ FaultInstruction.Long = *((PULONG)ExceptionRecord->ExceptionAddress);
+ VirtualAddress = (PVOID)(KiGetRegisterValue(FaultInstruction.i_format.Rs,
+ ExceptionFrame,
+ TrapFrame) +
+ FaultInstruction.i_format.Simmediate);
+
+ //
+ // If an exception occurs, then abort the calculation of the virtual
+ // address and set the virtual address equal to the instruction address.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ VirtualAddress = ExceptionRecord->ExceptionAddress;
+ }
+
+ //
+ // Compute the physical address that corresponds to the data address.
+ //
+
+ PhysicalAddress = KiGetPhysicalAddress(VirtualAddress);
+
+ //
+ // If a value of FALSE is returned by the data bus error handling routine,
+ // then bug check. Otherwise, assume that the error has been handled and
+ // return.
+ //
+
+ if ((PCR->DataBusError)(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ VirtualAddress,
+ PhysicalAddress) == FALSE) {
+
+ KeBugCheck(DATA_BUS_ERROR);
+ }
+
+ return;
+}
+
+VOID
+KiInstructionBusError (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to process an instruction bus error. The virtual
+ and physical address of the error are computed and the instruction bus
+ error processing routine is called indirectly through the PCR. NT provides
+ a standard routine to process the error and shutdown the system. A vendor,
+ however, can replace the standard NT routine and do additional processing
+ if necessary via the HAL.
+
+ N.B. There is no return from this routine.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PVOID VirtualAddress;
+ PHYSICAL_ADDRESS PhysicalAddress;
+
+ //
+ // Compute the physical address that corresponds to the data address.
+ //
+
+ VirtualAddress = ExceptionRecord->ExceptionAddress;
+ PhysicalAddress = KiGetPhysicalAddress(VirtualAddress);
+
+ //
+ // If a value of FALSE is returned by the instruction bus error handling
+ // routine, then bug check. Otherwise, assume that the error has been
+ // handled and return.
+ //
+
+ if ((PCR->InstructionBusError)(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ VirtualAddress,
+ PhysicalAddress) == FALSE) {
+
+ KeBugCheck(INSTRUCTION_BUS_ERROR);
+ }
+
+ return;
+}
+
+VOID
+KeSetCacheErrorRoutine (
+ IN PKCACHE_ERROR_ROUTINE Routine
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the address of the cache error routine.
+ The cache error routine is called whenever a cache error occurs.
+
+Arguments:
+
+ Routine - Supplies a pointer to the cache error routine.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set the address of the cache error routine.
+ //
+
+ *((PULONG)CACHE_ERROR_VECTOR) = (ULONG)Routine | KSEG1_BASE;
+ return;
+}
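
KiGetPhysicalAddress relies on the fixed MIPS mapping of KSEG0 and KSEG1: both segments map the low 512MB of physical memory directly, so for addresses in that range the physical address is simply the virtual address with the top three bits cleared. A sketch of that fast path (segment limits written out for clarity; addresses outside the range would need MmGetPhysicalAddress in the kernel):

    #include <stdint.h>

    #define KSEG0_BASE 0x80000000u   /* cached, directly mapped segment */
    #define KSEG2_BASE 0xc0000000u   /* first address past KSEG1        */

    /* Return the physical address for a KSEG0/KSEG1 virtual address, or
       0xffffffff if the address is outside the directly mapped range. */
    static uint32_t DirectMappedPhysical(uint32_t VirtualAddress)
    {
        if (VirtualAddress >= KSEG0_BASE && VirtualAddress < KSEG2_BASE) {
            return VirtualAddress & 0x1fffffff;
        }

        return 0xffffffffu;
    }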
diff --git a/private/ntos/ke/mips/callback.c b/private/ntos/ke/mips/callback.c
new file mode 100644
index 000000000..75b29c55e
--- /dev/null
+++ b/private/ntos/ke/mips/callback.c
@@ -0,0 +1,237 @@
+/*++
+
+Copyright (c) 1994 Microsoft Corporation
+
+Module Name:
+
+ callback.c
+
+Abstract:
+
+ This module implements user mode call back services.
+
+Author:
+
+ David N. Cutler (davec) 29-Oct-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+KeUserModeCallback (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ IN PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls out from kernel mode to a user mode function.
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied
+ to the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ OutputBuffer - Supplies a pointer to a variable that receives
+ the address of the output buffer.
+
+ OutputLength - Supplies a pointer to a variable that receives
+ the length of the output buffer.
+
+Return Value:
+
+ If the callout cannot be executed, then an error status is
+ returned. Otherwise, the status returned by the callback function
+ is returned.
+
+--*/
+
+{
+
+ PUCALLOUT_FRAME CalloutFrame;
+ ULONG Length;
+ ULONG OldStack;
+ NTSTATUS Status;
+ PKTRAP_FRAME TrapFrame;
+ PULONG UserStack;
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // Get the user mode stack pointer and attempt to copy input buffer
+ // to the user stack.
+ //
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+ OldStack = (ULONG)TrapFrame->XIntSp;
+ try {
+
+ //
+ // Compute new user mode stack address, probe for writability,
+ // and copy the input buffer to the user stack.
+ //
+
+ Length = (InputLength +
+ sizeof(QUAD) - 1 + sizeof(UCALLOUT_FRAME)) & ~(sizeof(QUAD) - 1);
+
+ CalloutFrame = (PUCALLOUT_FRAME)(OldStack - Length);
+ ProbeForWrite(CalloutFrame, Length, sizeof(QUAD));
+ RtlMoveMemory(CalloutFrame + 1, InputBuffer, InputLength);
+
+ //
+ // Allocate stack frame and fill in callout arguments.
+ //
+
+ CalloutFrame->Buffer = (PVOID)(CalloutFrame + 1);
+ CalloutFrame->Length = InputLength;
+ CalloutFrame->ApiNumber = ApiNumber;
+ CalloutFrame->Pad = 0;
+ CalloutFrame->Sp = TrapFrame->XIntSp;
+ CalloutFrame->Ra = TrapFrame->XIntRa;
+
+ //
+ // If an exception occurs during the probe of the user stack, then
+ // always handle the exception and return the exception code as the
+ // status value.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call user mode.
+ //
+
+ TrapFrame->XIntSp = (LONG)CalloutFrame;
+ Status = KiCallUserMode(OutputBuffer, OutputLength);
+ TrapFrame->XIntSp = (LONG)OldStack;
+
+ //
+ // If the GDI TEB batch contains any entries, it must be flushed.
+ //
+
+ if (((PTEB)KeGetCurrentThread()->Teb)->GdiBatchCount > 0) {
+ KeGdiFlushUserBatch();
+ }
+
+ return Status;
+}
+
+NTSTATUS
+NtW32Call (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ OUT PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls a W32 function.
+
+ N.B. ************** This is a temporary service *****************
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied to
+ the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ OutputBuffer - Supplies a pointer to a variable that receives the
+ output buffer address.
+
+ OutputLength - Supplies a pointer to a variable that receives the
+ output buffer length.
+
+Return Value:
+
+ TBS.
+
+--*/
+
+{
+
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+ NTSTATUS Status;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // If the current thread is not a GUI thread, then fail the service
+ // since the thread does not have a large stack.
+ //
+
+ if (KeGetCurrentThread()->Win32Thread == (PVOID)&KeServiceDescriptorTable[0]) {
+ return STATUS_NOT_IMPLEMENTED;
+ }
+
+ //
+ // Probe the output buffer address and length for writeability.
+ //
+
+ try {
+ ProbeForWriteUlong((PULONG)OutputBuffer);
+ ProbeForWriteUlong(OutputLength);
+
+ //
+ // If an exception occurs during the probe of the output buffer or
+ // length, then always handle the exception and return the exception
+ // code as the status value.
+ //
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call out to user mode specifying the input buffer and API number.
+ //
+
+ Status = KeUserModeCallback(ApiNumber,
+ InputBuffer,
+ InputLength,
+ &ValueBuffer,
+ &ValueLength);
+
+ //
+ // If the callout is successful, then store the output buffer address and
+ // length.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *OutputBuffer = ValueBuffer;
+ *OutputLength = ValueLength;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ }
+ }
+
+ return Status;
+}
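
KeUserModeCallback carves a UCALLOUT_FRAME plus a copy of the input buffer out of the user stack and rounds the total to an 8-byte (sizeof(QUAD)) multiple before probing it for write access. A sketch of just that size computation (the frame size used here is a stand-in; the real value comes from the kernel headers):

    #include <stdint.h>

    #define QUAD_SIZE            8u
    #define UCALLOUT_FRAME_SIZE  24u   /* assumed size, for illustration only */

    /* Round the callout frame plus input buffer up to an 8-byte multiple,
       mirroring the Length computation in KeUserModeCallback above. */
    static uint32_t CalloutLength(uint32_t InputLength)
    {
        return (InputLength + QUAD_SIZE - 1 + UCALLOUT_FRAME_SIZE) & ~(QUAD_SIZE - 1);
    }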
diff --git a/private/ntos/ke/mips/callout.s b/private/ntos/ke/mips/callout.s
new file mode 100644
index 000000000..d40dd5b48
--- /dev/null
+++ b/private/ntos/ke/mips/callout.s
@@ -0,0 +1,411 @@
+// TITLE("Call Out to User Mode")
+//++
+//
+// Copyright (c) 1994 Microsoft Corporation
+//
+// Module Name:
+//
+// callout.s
+//
+// Abstract:
+//
+// This module implements the code necessary to call out from kernel
+// mode to user mode.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Oct-1994
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeUserCallbackDispatcher 4
+
+ SBTTL("Call User Mode Function")
+//++
+//
+// NTSTATUS
+// KiCallUserMode (
+// IN PVOID *OutputBuffer,
+// IN PULONG OutputLength
+// )
+//
+// Routine Description:
+//
+// This function calls a user mode function.
+//
+// N.B. This function calls out to user mode and the NtCallbackReturn
+// function returns back to the caller of this function. Therefore,
+// the stack layout must be consistent between the two routines.
+//
+// Arguments:
+//
+// OutputBuffer (a0) - Supplies a pointer to the variable that receives
+// the address of the output buffer.
+//
+// OutputLength (a1) - Supplies a pointer to a variable that receives
+// the length of the output buffer.
+//
+// Return Value:
+//
+// The final status of the call out function is returned as the status
+// of the function.
+//
+// N.B. This function does not return to its caller. A return to the
+// caller is executed when a NtCallbackReturn system service is
+// executed.
+//
+// N.B. This function does return to its caller if a kernel stack
+// expansion is required and the attempted expansion fails.
+//
+//--
+
+ NESTED_ENTRY(KiCallUserMode, CuFrameLength, zero)
+
+ subu sp,sp,CuFrameLength // allocate stack frame
+ sw ra,CuRa(sp) // save return address
+
+//
+// Save nonvolatile integer registers.
+//
+
+ sw s0,CuS0(sp) // save integer registers s0-s8
+ sw s1,CuS1(sp) //
+ sw s2,CuS2(sp) //
+ sw s3,CuS3(sp) //
+ sw s4,CuS4(sp) //
+ sw s5,CuS5(sp) //
+ sw s6,CuS6(sp) //
+ sw s7,CuS7(sp) //
+ sw s8,CuS8(sp) //
+
+//
+// Save nonvolatile floating registers.
+//
+
+ sdc1 f20,CuF20(sp) // save floating registers f20-f31
+ sdc1 f22,CuF22(sp) //
+ sdc1 f24,CuF24(sp) //
+ sdc1 f26,CuF26(sp) //
+ sdc1 f28,CuF28(sp) //
+ sdc1 f30,CuF30(sp) //
+
+ PROLOGUE_END
+
+//
+// Save argument registers.
+//
+
+ sw a0,CuA0(sp) // save output buffer address
+ sw a1,CuA1(sp) // save output length address
+
+//
+// Check if sufficient room is available on the kernel stack for another
+// system call.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,KiPcr + PcInitialStack(zero) // get initial stack address
+ lw t2,ThStackLimit(t0) // get current stack limit
+ subu t3,sp,KERNEL_LARGE_STACK_COMMIT // compute bottom address
+ sltu t4,t3,t2 // check if limit exceeded
+ beq zero,t4,10f // if eq, limit not exceeded
+ move a0,sp // set current kernel stack address
+ jal MmGrowKernelStack // attempt to grow the kernel stack
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,KiPcr + PcInitialStack(zero) // get initial stack address
+ lw t2,ThStackLimit(t0) // get expanded stack limit
+ bne zero,v0,20f // if ne, attempt to grow failed
+ sw t2,KiPcr + PcStackLimit(zero) // set expanded stack limit
+
+//
+// Get the address of the current thread and save the previous trap frame
+// and callback stack addresses in the current frame. Also save the new
+// callback stack address in the thread object.
+//
+
+10: lw s8,ThTrapFrame(t0) // get trap frame address
+ lw t2,ThCallbackStack(t0) // get callback stack address
+ sw t1,CuInStk(sp) // save initial stack address
+ sw s8,CuTrFr(sp) // save trap frame address
+ sw t2,CuCbStk(sp) // save callback stack address
+ sw sp,ThCallbackStack(t0) // set callback stack address
+
+//
+// Restore state and callback to user mode.
+//
+
+ lw t2,TrFsr(s8) // get previous floating status
+ li t3,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ cfc1 t4,fsr // get current floating status
+ mtc0 t3,psr // disable interrupts - 3 cycle hazard
+ ctc1 t2,fsr // restore previous floating status
+ lw t3,TrPsr(s8) // get previous processor status
+ sw sp,ThInitialStack(t0) // reset initial stack address
+ sw sp,KiPcr + PcInitialStack(zero) //
+ sw t4,CuFsr(sp) // save current floating status
+ lw t4,KeUserCallbackDispatcher // set continuation address
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+
+ lbu t1,ThApcState + AsUserApcPending(t0) // get user APC pending
+ sb zero,ThAlerted(t0) // clear kernel mode alerted
+ mfc0 t2,cause // get exception cause register
+ sll t1,t1,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t2,t2,t1 // merge possible APC interrupt request
+ mtc0 t2,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB
+// miss is not possible, then restore the volatile register state.
+//
+
+ sw t3,KiPcr + PcSavedT7(zero) // save processor status
+ j KiServiceExit // join common code
+ sw t4,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+//
+// An attempt to grow the kernel stack failed.
+//
+
+20: lw ra,CuRa(sp) // restore return address
+ addu sp,sp,CuFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiCallUserMode
+
+ SBTTL("Switch Kernel Stack")
+//++
+//
+// PVOID
+// KeSwitchKernelStack (
+// IN PVOID StackBase,
+// IN PVOID StackLimit
+// )
+//
+// Routine Description:
+//
+// This function switches to the specified large kernel stack.
+//
+// N.B. This function can ONLY be called when there are no variables
+// in the stack that refer to other variables in the stack, i.e.,
+// there are no pointers into the stack.
+//
+// Arguments:
+//
+// StackBase (a0) - Supplies a pointer to the base of the new kernel
+// stack.
+//
+// StackLimit (a1) - supplies a pointer to the limit of the new kernel
+// stack.
+//
+// Return Value:
+//
+// The old kernel stack is returned as the function value.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument register save area
+SsRa: .space 4 // saved return address
+SsSp: .space 4 // saved new stack pointer
+ .space 2 * 4 // fill
+SsFrameLength: // length of stack frame
+SsA0: .space 4 // saved argument registers a0-a1
+SsA1: .space 4 //
+
+ NESTED_ENTRY(KeSwitchKernelStack, SsFrameLength, zero)
+
+ subu sp,sp,SsFrameLength // allocate stack frame
+ sw ra,SsRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the address of the new stack and copy the old stack to the new
+// stack.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ sw a0,SsA0(sp) // save new kernel stack base address
+ sw a1,SsA1(sp) // save new kernel stack limit address
+ lw a2,ThStackBase(t0) // get current stack base address
+ lw a3,ThTrapFrame(t0) // get current trap frame address
+ addu a3,a3,a0 // relocate current trap frame address
+ subu a3,a3,a2 //
+ sw a3,ThTrapFrame(t0) //
+ move a1,sp // set source address of copy
+ subu a2,a2,sp // compute length of copy
+ subu a0,a0,a2 // set destination address of copy
+ sw a0,SsSp(sp) // save new stack pointer address
+ jal RtlMoveMemory // copy old stack to new stack
+
+//
+// Switch to new kernel stack and return the address of the old kernel
+// stack.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+
+ DISABLE_INTERRUPTS(t1) // disable interrupts
+
+ lw v0,ThStackBase(t0) // get old kernel stack base address
+ lw a0,SsA0(sp) // get new kernel stack base address
+ lw a1,SsA1(sp) // get new kernel stack limit address
+ sw a0,ThInitialStack(t0) // set new initial stack address
+ sw a0,ThStackBase(t0) // set new stack base address
+ sw a1,ThStackLimit(t0) // set new stack limit address
+ li v1,TRUE // set large kernel stack TRUE
+ sb v1,ThLargeStack(t0) //
+ sw a0,KiPcr + PcInitialStack(zero) // set initial stack address
+ sw a1,KiPcr + PcStackLimit(zero) // set stack limit
+ lw sp,SsSp(sp) // switch to new kernel stack
+
+ ENABLE_INTERRUPTS(t1) // enable interrupts
+
+ lw ra,SsRa(sp) // restore return address
+ addu sp,sp,SsFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KeSwitchKernelStack
+
+ SBTTL("Return from User Mode Callback")
+//++
+//
+// NTSTATUS
+// NtCallbackReturn (
+// IN PVOID OutputBuffer OPTIONAL,
+// IN ULONG OutputLength,
+// IN NTSTATUS Status
+// )
+//
+// Routine Description:
+//
+// This function returns from a user mode callout to the kernel
+// mode caller of the user mode callback function.
+//
+// N.B. This function returns to the function that called out to user
+// mode and the KiCallUserMode function calls out to user mode.
+// Therefore, the stack layout must be consistent between the
+// two routines.
+//
+// Arguments:
+//
+// OutputBuffer - Supplies an optional pointer to an output buffer.
+//
+// OutputLength - Supplies the length of the output buffer.
+//
+// Status - Supplies the status value returned to the caller of the
+// callback function.
+//
+// Return Value:
+//
+// If the callback return cannot be executed, then an error status is
+// returned. Otherwise, the specified callback status is returned to
+// the caller of the callback function.
+//
+// N.B. This function returns to the function that called out to user
+// mode if a callout is currently active.
+//
+//--
+
+ LEAF_ENTRY(NtCallbackReturn)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,ThCallbackStack(t0) // get callback stack address
+ beq zero,t1,10f // if eq, no callback stack present
+
+//
+// Restore nonvolatile integer registers.
+//
+
+ lw s0,CuS0(t1) // restore integer registers s0-s8
+ lw s1,CuS1(t1) //
+ lw s2,CuS2(t1) //
+ lw s3,CuS3(t1) //
+ lw s4,CuS4(t1) //
+ lw s5,CuS5(t1) //
+ lw s6,CuS6(t1) //
+ lw s7,CuS7(t1) //
+ lw s8,CuS8(t1) //
+
+//
+// Restore nonvolatile floating registers.
+//
+
+ ldc1 f20,CuF20(t1) // restore floating registers f20-f31
+ ldc1 f22,CuF22(t1) //
+ ldc1 f24,CuF24(t1) //
+ ldc1 f26,CuF26(t1) //
+ ldc1 f28,CuF28(t1) //
+ ldc1 f30,CuF30(t1) //
+
+//
+// Restore the trap frame and callback stacks addresses, store the output
+// buffer address and length, restore the floating status, and set the
+// service status.
+//
+
+ lw t2,CuTrFr(t1) // get previous trap frame address
+ lw t3,CuCbStk(t1) // get previous callback stack address
+ lw t4,CuA0(t1) // get address to store output address
+ lw t5,CuA1(t1) // get address to store output length
+ lw t6,CuFsr(t1) // get previous floating status
+ sw t2,ThTrapFrame(t0) // restore trap frame address
+ sw t3,ThCallbackStack(t0) // restore callback stack address
+ sw a0,0(t4) // store output buffer address
+ sw a1,0(t5) // store output buffer length
+
+ .set noreorder
+ .set noat
+ ctc1 t6,fsr // restore previous floating status
+ .set at
+ .set reorder
+
+ move v0,a2 // set callback service status
+
+//
+// Restore initial stack pointer, trim the stack back to the callback frame,
+// deallocate callback stack frame, and return to callback caller.
+//
+
+ lw t2,CuInStk(t1) // get previous initial stack
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ sw t2,ThInitialStack(t0) // restore initial stack address
+ sw t2,KiPcr + PcInitialStack(zero) //
+ move sp,t1 // trim stack back callback frame
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ lw ra,CuRa(sp) // restore return address
+ addu sp,sp,CuFrameLength // deallocate stack frame
+ j ra // return
+
+//
+// No callback is currently active.
+//
+
+10: li v0,STATUS_NO_CALLBACK_ACTIVE // set service status
+ j ra // return
+
+ .end NtCallbackReturn
diff --git a/private/ntos/ke/mips/dmpstate.c b/private/ntos/ke/mips/dmpstate.c
new file mode 100644
index 000000000..2215df217
--- /dev/null
+++ b/private/ntos/ke/mips/dmpstate.c
@@ -0,0 +1,713 @@
+/*++
+
+Copyright (c) 1992 Microsoft Corporation
+
+Module Name:
+
+ dmpstate.c
+
+Abstract:
+
+ This module implements the architecture specific routine that dumps
+ the machine state when a bug check occurs and no debugger is hooked
+ to the system. It is assumed that it is called from bug check.
+
+Author:
+
+ David N. Cutler (davec) 17-Jan-1992
+
+Environment:
+
+ Kernel mode.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiDisplayString (
+ IN ULONG Column,
+ IN ULONG Row,
+ IN PCHAR Buffer
+ );
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ );
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ );
+
+//
+// Define external data.
+//
+
+extern LIST_ENTRY PsLoadedModuleList;
+
+VOID
+KeDumpMachineState (
+ IN PKPROCESSOR_STATE ProcessorState,
+ IN PCHAR Buffer,
+ IN PULONG BugCheckParameters,
+ IN ULONG NumberOfParameters,
+ IN PKE_BUGCHECK_UNICODE_TO_ANSI UnicodeToAnsiRoutine
+ )
+
+/*++
+
+Routine Description:
+
+    This function formats and displays the machine state at the time of the
+    call to bug check.
+
+Arguments:
+
+ ProcessorState - Supplies a pointer to a processor state record.
+
+ Buffer - Supplies a pointer to a buffer to be used to output machine
+ state information.
+
+ BugCheckParameters - Supplies a pointer to an array of additional
+ bug check information.
+
+    NumberOfParameters - Supplies the size of the bug check parameters
+ array.
+
+ UnicodeToAnsiRoutine - Supplies a pointer to a routine to convert Unicode strings
+ to Ansi strings without touching paged translation tables.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PCONTEXT ContextRecord;
+ ULONG ControlPc;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ ULONG DisplayColumn;
+ ULONG DisplayHeight;
+ ULONG DisplayRow;
+ ULONG DisplayWidth;
+ UNICODE_STRING DllName;
+ ULONG EstablisherFrame;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PVOID ImageBase;
+ ULONG Index;
+ BOOLEAN InFunction;
+ ULONG LastStack;
+ PLIST_ENTRY ModuleListHead;
+ PLIST_ENTRY NextEntry;
+ ULONG NextPc;
+ ULONG StackLimit;
+ UCHAR AnsiBuffer[ 32 ];
+ ULONG DateStamp;
+
+ //
+ // Query display parameters.
+ //
+
+ HalQueryDisplayParameters(&DisplayWidth,
+ &DisplayHeight,
+ &DisplayColumn,
+ &DisplayRow);
+
+ //
+ // Display any addresses that fall within the range of any module in
+ // the loaded module list.
+ //
+
+ for (Index = 0; Index < NumberOfParameters; Index += 1) {
+ ImageBase = KiPcToFileHeader((PVOID)*BugCheckParameters,
+ &ImageBase,
+ &DataTableEntry);
+
+ if (ImageBase != NULL) {
+ sprintf(Buffer,
+ "*** %08lX has base at %08lX - %s\n",
+ *BugCheckParameters,
+ ImageBase,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+
+ HalDisplayString(Buffer);
+ }
+
+ BugCheckParameters += 1;
+ }
+
+ //
+ // Virtually unwind to the caller of bug check.
+ //
+
+ ContextRecord = &ProcessorState->ContextFrame;
+ LastStack = (ULONG)ContextRecord->XIntSp;
+ ControlPc = (ULONG)(ContextRecord->XIntRa - 4);
+ NextPc = ControlPc;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if (FunctionEntry != NULL) {
+ NextPc = RtlVirtualUnwind(ControlPc | 1,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL);
+ }
+
+ //
+ // At this point the context record contains the machine state at the
+ // call to bug check.
+ //
+ // Put out the machine state at the time of the bugcheck.
+ //
+
+ sprintf(Buffer,
+ "\nMachine State at Call to Bug Check PC : %08lX PSR : %08lX\n\n",
+ (ULONG)ContextRecord->XIntRa,
+ ContextRecord->Psr);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Format and output the integer registers.
+ //
+
+ sprintf(Buffer,
+ "AT :%8lX V0 :%8lX V1 :%8lX A0 :%8lX\n",
+ (ULONG)ContextRecord->XIntAt,
+ (ULONG)ContextRecord->XIntV0,
+ (ULONG)ContextRecord->XIntV1,
+ (ULONG)ContextRecord->XIntA0);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "A1 :%8lX A2 :%8lX A3 :%8lX T0 :%8lX\n",
+ (ULONG)ContextRecord->XIntA1,
+ (ULONG)ContextRecord->XIntA2,
+ (ULONG)ContextRecord->XIntA3,
+ (ULONG)ContextRecord->XIntT0);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T1 :%8lX T2 :%8lX T3 :%8lX T4 :%8lX\n",
+ (ULONG)ContextRecord->XIntT1,
+ (ULONG)ContextRecord->XIntT2,
+ (ULONG)ContextRecord->XIntT3,
+ (ULONG)ContextRecord->XIntT4);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T5 :%8lX T6 :%8lX T7 :%8lX T8 :%8lX\n",
+ (ULONG)ContextRecord->XIntT5,
+ (ULONG)ContextRecord->XIntT6,
+ (ULONG)ContextRecord->XIntT7,
+ (ULONG)ContextRecord->XIntT8);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T9 :%8lX S0 :%8lX S1 :%8lX S2 :%8lX\n",
+ (ULONG)ContextRecord->XIntT9,
+ (ULONG)ContextRecord->XIntS0,
+ (ULONG)ContextRecord->XIntS1,
+ (ULONG)ContextRecord->XIntS2);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "S3 :%8lX S4 :%8lX S5 :%8lX S6 :%8lX\n",
+ (ULONG)ContextRecord->XIntS3,
+ (ULONG)ContextRecord->XIntS4,
+ (ULONG)ContextRecord->XIntS5,
+ (ULONG)ContextRecord->XIntS6);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "S7 :%8lX S8 :%8lX GP :%8lX SP :%8lX\n",
+ (ULONG)ContextRecord->XIntS7,
+ (ULONG)ContextRecord->XIntS8,
+ (ULONG)ContextRecord->XIntGp,
+ (ULONG)ContextRecord->XIntSp);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "RA :%8lX LO :%8lX HI :%8lX FSR:%8lX\n",
+ (ULONG)ContextRecord->XIntRa,
+ (ULONG)ContextRecord->XIntLo,
+ (ULONG)ContextRecord->XIntHi,
+ (ULONG)ContextRecord->Fsr);
+
+ HalDisplayString(Buffer);
+
+ //
+    // Format and output the first four floating registers.
+ //
+
+ sprintf(Buffer,
+ "F0 :%8lX F1 :%8lX F2 :%8lX F3 :%8lX\n",
+ ContextRecord->FltF0,
+ ContextRecord->FltF1,
+ ContextRecord->FltF2,
+ ContextRecord->FltF3);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F4 :%8lX F5 :%8lX F6 :%8lX F7 :%8lX\n",
+ ContextRecord->FltF4,
+ ContextRecord->FltF5,
+ ContextRecord->FltF6,
+ ContextRecord->FltF7);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F8 :%8lX F9 :%8lX F10:%8lX F11:%8lX\n",
+ ContextRecord->FltF8,
+ ContextRecord->FltF9,
+ ContextRecord->FltF10,
+ ContextRecord->FltF11);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F12:%8lX F13:%8lX F14:%8lX F15:%8lX\n",
+ ContextRecord->FltF12,
+ ContextRecord->FltF13,
+ ContextRecord->FltF14,
+ ContextRecord->FltF15);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F16:%8lX F17:%8lX F18:%8lX F19:%8lX\n",
+ ContextRecord->FltF16,
+ ContextRecord->FltF17,
+ ContextRecord->FltF18,
+ ContextRecord->FltF19);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F20:%8lX F21:%8lX F22:%8lX F23:%8lX\n",
+ ContextRecord->FltF20,
+ ContextRecord->FltF21,
+ ContextRecord->FltF22,
+ ContextRecord->FltF23);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F24:%8lX F25:%8lX F26:%8lX F27:%8lX\n",
+ ContextRecord->FltF24,
+ ContextRecord->FltF25,
+ ContextRecord->FltF26,
+ ContextRecord->FltF27);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F28:%8lX F29:%8lX F30:%8lX F31:%8lX\n\n",
+ ContextRecord->FltF28,
+ ContextRecord->FltF29,
+ ContextRecord->FltF30,
+ ContextRecord->FltF31);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Output short stack back trace with base address.
+ //
+
+ DllName.Length = 0;
+ DllName.Buffer = L"";
+ if (FunctionEntry != NULL) {
+ StackLimit = (ULONG)KeGetCurrentThread()->KernelStack;
+ HalDisplayString("Callee-Sp Return-Ra Dll Base - Name\n");
+ for (Index = 0; Index < 8; Index += 1) {
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ sprintf(Buffer,
+ " %08lX %08lX : %08lX - %s\n",
+ (ULONG)ContextRecord->XIntSp,
+ NextPc + 4,
+ ImageBase,
+ (*UnicodeToAnsiRoutine)( (ImageBase != NULL) ? &DataTableEntry->BaseDllName : &DllName,
+ AnsiBuffer, sizeof( AnsiBuffer )));
+
+ HalDisplayString(Buffer);
+ if ((NextPc != ControlPc) || ((ULONG)ContextRecord->XIntSp != LastStack)) {
+ ControlPc = NextPc;
+ LastStack = (ULONG)ContextRecord->XIntSp;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if ((FunctionEntry != NULL) && (LastStack < StackLimit)) {
+ NextPc = RtlVirtualUnwind(ControlPc | 1,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL);
+ } else {
+ NextPc = (ULONG)ContextRecord->XIntRa;
+ }
+
+ } else {
+ break;
+ }
+ }
+ }
+
+ //
+ // Output the build number and other useful information.
+ //
+
+ sprintf(Buffer,
+ "\nIRQL : %d, DPC Active : %s, SYSVER 0x%08x\n",
+ KeGetCurrentIrql(),
+ KeIsExecutingDpc() ? "TRUE" : "FALSE",
+ NtBuildNumber);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Output the processor id and the primary cache sizes.
+ //
+
+ sprintf(Buffer,
+ "Processor Id %d.%d, Icache : %d, Dcache : %d\n",
+ (PCR->ProcessorId >> 8) & 0xff,
+ PCR->ProcessorId & 0xff,
+ PCR->FirstLevelIcacheSize,
+ PCR->FirstLevelDcacheSize);
+
+ HalDisplayString(Buffer);
+
+ //
+ // If the display width is greater than 80 + 24 (the size of a DLL
+ // name and base address), then display all the modules loaded in
+ // the system.
+ //
+
+ HalQueryDisplayParameters(&DisplayWidth,
+ &DisplayHeight,
+ &DisplayColumn,
+ &DisplayRow);
+
+ if (DisplayWidth > (80 + 24)) {
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ //
+ // Output display headers.
+ //
+
+ Index = 1;
+ KiDisplayString(80, Index, "Dll Base DateStmp - Name");
+ NextEntry = ModuleListHead->Flink;
+ if (NextEntry != NULL) {
+
+ //
+ // Scan the list of loaded modules and display their base
+ // address and name.
+ //
+
+ while (NextEntry != ModuleListHead) {
+ Index += 1;
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ if (MmDbgReadCheck(DataTableEntry->DllBase) != NULL) {
+ PIMAGE_NT_HEADERS NtHeaders;
+
+ NtHeaders = RtlImageNtHeader(DataTableEntry->DllBase);
+ DateStamp = NtHeaders->FileHeader.TimeDateStamp;
+
+ } else {
+ DateStamp = 0;
+ }
+ sprintf(Buffer,
+ "%08lX %08lx - %s",
+ DataTableEntry->DllBase,
+ DateStamp,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+
+ KiDisplayString(80, Index, Buffer);
+ NextEntry = NextEntry->Flink;
+ if (Index > DisplayHeight) {
+ break;
+ }
+ }
+ }
+ }
+
+ //
+ // Reset the current display position.
+ //
+
+ HalSetDisplayParameters(DisplayColumn, DisplayRow);
+ return;
+}
+
+VOID
+KiDisplayString (
+ IN ULONG Column,
+ IN ULONG Row,
+ IN PCHAR Buffer
+ )
+
+/*++
+
+Routine Description:
+
+    This function displays a string starting at the specified column and row
+ position on the screen.
+
+Arguments:
+
+ Column - Supplies the starting column of where the string is displayed.
+
+ Row - Supplies the starting row of where the string is displayed.
+
+    Buffer - Supplies a pointer to the string that is displayed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Position the cursor and display the string.
+ //
+
+ HalSetDisplayParameters(Column, Row);
+ HalDisplayString(Buffer);
+ return;
+}
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ )
+
+/*++
+
+Routine Description:
+
+ This function searches the currently active function tables for an entry
+ that corresponds to the specified PC value.
+
+Arguments:
+
+ ControlPc - Supplies the address of an instruction within the specified
+ function.
+
+Return Value:
+
+ If there is no entry in the function table for the specified PC, then
+ NULL is returned. Otherwise, the address of the function table entry
+ that corresponds to the specified PC is returned.
+
+--*/
+
+{
+
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PRUNTIME_FUNCTION FunctionTable;
+ ULONG SizeOfExceptionTable;
+ LONG High;
+ PVOID ImageBase;
+ LONG Low;
+ LONG Middle;
+
+ //
+ // Search for the image that includes the specified PC value.
+ //
+
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ //
+ // If an image is found that includes the specified PC, then locate the
+ // function table for the image.
+ //
+
+ if (ImageBase != NULL) {
+ FunctionTable = (PRUNTIME_FUNCTION)RtlImageDirectoryEntryToData(
+ ImageBase, TRUE, IMAGE_DIRECTORY_ENTRY_EXCEPTION,
+ &SizeOfExceptionTable);
+
+ //
+ // If a function table is located, then search the function table
+ // for a function table entry for the specified PC.
+ //
+
+ if (FunctionTable != NULL) {
+
+ //
+            // Initialize search indices.
+ //
+
+ Low = 0;
+ High = (SizeOfExceptionTable / sizeof(RUNTIME_FUNCTION)) - 1;
+
+ //
+ // Perform binary search on the function table for a function table
+ // entry that subsumes the specified PC.
+ //
+
+ while (High >= Low) {
+
+ //
+ // Compute next probe index and test entry. If the specified PC
+                // is greater than or equal to the beginning address and less
+ // than the ending address of the function table entry, then
+ // return the address of the function table entry. Otherwise,
+ // continue the search.
+ //
+
+ Middle = (Low + High) >> 1;
+ FunctionEntry = &FunctionTable[Middle];
+ if (ControlPc < FunctionEntry->BeginAddress) {
+ High = Middle - 1;
+
+ } else if (ControlPc >= FunctionEntry->EndAddress) {
+ Low = Middle + 1;
+
+ } else {
+
+ //
+ // The capability exists for more than one function entry
+ // to map to the same function. This permits a function to
+ // have discontiguous code segments described by separate
+ // function table entries. If the ending prologue address
+                    // is not within the limits of the beginning and ending
+                    // address of the function table entry, then the prologue
+ // ending address is the address of a function table entry
+ // that accurately describes the ending prologue address.
+ //
+
+ if ((FunctionEntry->PrologEndAddress < FunctionEntry->BeginAddress) ||
+ (FunctionEntry->PrologEndAddress >= FunctionEntry->EndAddress)) {
+ FunctionEntry = (PRUNTIME_FUNCTION)FunctionEntry->PrologEndAddress;
+ }
+
+ return FunctionEntry;
+ }
+ }
+ }
+ }
+
+ //
+ // A function table entry for the specified PC was not found.
+ //
+
+ return NULL;
+}
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the base of an image that contains the
+    specified PcValue. An image contains the PcValue if the PcValue
+    lies between the ImageBase and the ImageBase plus the size of the
+    virtual image.
+
+Arguments:
+
+ PcValue - Supplies a PcValue.
+
+ BaseOfImage - Returns the base address for the image containing the
+ PcValue. This value must be added to any relative addresses in
+ the headers to locate portions of the image.
+
+    DataTableEntry - Supplies a pointer to a variable that receives the
+ address of the data table entry that describes the image.
+
+Return Value:
+
+ NULL - No image was found that contains the PcValue.
+
+    NON-NULL - Returns the base address of the image that contains the
+ PcValue.
+
+--*/
+
+{
+
+ PLIST_ENTRY ModuleListHead;
+ PLDR_DATA_TABLE_ENTRY Entry;
+ PLIST_ENTRY Next;
+ ULONG Bounds;
+ PVOID ReturnBase, Base;
+
+ //
+ // If the module list has been initialized, then scan the list to
+ // locate the appropriate entry.
+ //
+
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ ReturnBase = NULL;
+ Next = ModuleListHead->Flink;
+ if (Next != NULL) {
+ while (Next != ModuleListHead) {
+ Entry = CONTAINING_RECORD(Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ Next = Next->Flink;
+ Base = Entry->DllBase;
+ Bounds = (ULONG)Base + Entry->SizeOfImage;
+ if ((ULONG)PcValue >= (ULONG)Base && (ULONG)PcValue < Bounds) {
+ *DataTableEntry = Entry;
+ ReturnBase = Base;
+ break;
+ }
+ }
+ }
+
+ *BaseOfImage = ReturnBase;
+ return ReturnBase;
+}
diff --git a/private/ntos/ke/mips/exceptn.c b/private/ntos/ke/mips/exceptn.c
new file mode 100644
index 000000000..315d2bda7
--- /dev/null
+++ b/private/ntos/ke/mips/exceptn.c
@@ -0,0 +1,896 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ exceptn.c
+
+Abstract:
+
+    This module implements the code necessary to dispatch exceptions to the
+ proper mode and invoke the exception dispatcher.
+
+Author:
+
+ David N. Cutler (davec) 3-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#define HEADER_FILE
+#include "kxmips.h"
+
+//
+// Define multiply overflow and divide by zero breakpoint instruction values.
+//
+
+#define KDDEBUG_BREAKPOINT ((SPEC_OP << 26) | (BREAKIN_BREAKPOINT << 16) | BREAK_OP)
+#define DIVIDE_BREAKPOINT ((SPEC_OP << 26) | (DIVIDE_BY_ZERO_BREAKPOINT << 16) | BREAK_OP)
+#define MULTIPLY_BREAKPOINT ((SPEC_OP << 26) | (MULTIPLY_OVERFLOW_BREAKPOINT << 16) | BREAK_OP)
+#define OVERFLOW_BREAKPOINT ((SPEC_OP << 26) | (DIVIDE_OVERFLOW_BREAKPOINT << 16) | BREAK_OP)
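+
+//
+// N.B. Each of the above values is the image of a MIPS break instruction:
+//      SPEC_OP in the major opcode field (bits 26-31), the software
+//      breakpoint code shifted to bit 16 within the break code field, and
+//      BREAK_OP in the minor opcode field.
+//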
+
+//
+// Define external kernel breakpoint instruction value.
+//
+
+#define KERNEL_BREAKPOINT_INSTRUCTION 0x16000d
+
+VOID
+KeContextFromKframes (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PCONTEXT ContextFrame
+ )
+
+/*++
+
+Routine Description:
+
+    This routine moves the selected contents of the specified trap and exception
+    frames into the specified context frame according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame from which context
+ should be copied into the context record.
+
+ ContextFrame - Supplies a pointer to the context frame that receives the
+ context copied from the trap and exception frames.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG ContextFlags;
+
+ //
+ // Set control information if specified.
+ //
+
+ ContextFlags = ContextFrame->ContextFlags;
+ if ((ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set integer register gp, ra, sp, FIR, and PSR.
+ //
+
+ ContextFrame->XIntGp = TrapFrame->XIntGp;
+ ContextFrame->XIntSp = TrapFrame->XIntSp;
+ ContextFrame->Fir = TrapFrame->Fir;
+ ContextFrame->Psr = TrapFrame->Psr;
+ ContextFrame->XIntRa = TrapFrame->XIntRa;
+ }
+
+ //
+ // Set integer register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+        // Set integer registers zero, at - t9, k0, k1, lo, and hi.
+ //
+
+ ContextFrame->XIntZero = 0;
+ ContextFrame->XIntAt = TrapFrame->XIntAt;
+ ContextFrame->XIntV0 = TrapFrame->XIntV0;
+ ContextFrame->XIntV1 = TrapFrame->XIntV1;
+ ContextFrame->XIntA0 = TrapFrame->XIntA0;
+ ContextFrame->XIntA1 = TrapFrame->XIntA1;
+ ContextFrame->XIntA2 = TrapFrame->XIntA2;
+ ContextFrame->XIntA3 = TrapFrame->XIntA3;
+ ContextFrame->XIntT0 = TrapFrame->XIntT0;
+ ContextFrame->XIntT1 = TrapFrame->XIntT1;
+ ContextFrame->XIntT2 = TrapFrame->XIntT2;
+ ContextFrame->XIntT3 = TrapFrame->XIntT3;
+ ContextFrame->XIntT4 = TrapFrame->XIntT4;
+ ContextFrame->XIntT5 = TrapFrame->XIntT5;
+ ContextFrame->XIntT6 = TrapFrame->XIntT6;
+ ContextFrame->XIntT7 = TrapFrame->XIntT7;
+ ContextFrame->XIntT8 = TrapFrame->XIntT8;
+ ContextFrame->XIntT9 = TrapFrame->XIntT9;
+ ContextFrame->XIntK0 = 0;
+ ContextFrame->XIntK1 = 0;
+ ContextFrame->XIntLo = TrapFrame->XIntLo;
+ ContextFrame->XIntHi = TrapFrame->XIntHi;
+
+ //
+ // Set integer registers s0 - s7, and s8.
+ //
+
+ ContextFrame->XIntS0 = TrapFrame->XIntS0;
+ ContextFrame->XIntS1 = TrapFrame->XIntS1;
+ ContextFrame->XIntS2 = TrapFrame->XIntS2;
+ ContextFrame->XIntS3 = TrapFrame->XIntS3;
+ ContextFrame->XIntS4 = TrapFrame->XIntS4;
+ ContextFrame->XIntS5 = TrapFrame->XIntS5;
+ ContextFrame->XIntS6 = TrapFrame->XIntS6;
+ ContextFrame->XIntS7 = TrapFrame->XIntS7;
+ ContextFrame->XIntS8 = TrapFrame->XIntS8;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Set floating registers f0 - f19.
+ //
+
+ RtlMoveMemory(&ContextFrame->FltF0, &TrapFrame->FltF0,
+ sizeof(ULONG) * (20));
+
+ //
+ // Set floating registers f20 - f31.
+ //
+
+ RtlMoveMemory(&ContextFrame->FltF20, &ExceptionFrame->FltF20,
+ sizeof(ULONG) * (12));
+
+ //
+ // Set floating status register.
+ //
+
+ ContextFrame->Fsr = TrapFrame->Fsr;
+ }
+
+ return;
+}
+
+VOID
+KeContextToKframes (
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN PCONTEXT ContextFrame,
+ IN ULONG ContextFlags,
+ IN KPROCESSOR_MODE PreviousMode
+ )
+
+/*++
+
+Routine Description:
+
+ This routine moves the selected contents of the specified context frame into
+ the specified trap and exception frames according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame that receives the volatile
+ context from the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame that receives
+ the nonvolatile context from the context record.
+
+ ContextFrame - Supplies a pointer to a context frame that contains the
+ context that is to be copied into the trap and exception frames.
+
+ ContextFlags - Supplies the set of flags that specify which parts of the
+ context frame are to be copied into the trap and exception frames.
+
+ PreviousMode - Supplies the processor mode for which the trap and exception
+ frames are being built.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set integer register gp, sp, ra, FIR, and PSR.
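+        //
+        // N.B. The PSR is passed through SANITIZE_PSR so that a context
+        //      record supplied from user mode cannot set privileged
+        //      processor status bits.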
+ //
+
+ TrapFrame->XIntGp = ContextFrame->XIntGp;
+ TrapFrame->XIntSp = ContextFrame->XIntSp;
+ TrapFrame->Fir = ContextFrame->Fir;
+ TrapFrame->Psr = SANITIZE_PSR(ContextFrame->Psr, PreviousMode);
+ TrapFrame->XIntRa = ContextFrame->XIntRa;
+ }
+
+ //
+ // Set integer registers contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Set integer registers at - t9, lo, and hi.
+ //
+
+ TrapFrame->XIntAt = ContextFrame->XIntAt;
+ TrapFrame->XIntV0 = ContextFrame->XIntV0;
+ TrapFrame->XIntV1 = ContextFrame->XIntV1;
+ TrapFrame->XIntA0 = ContextFrame->XIntA0;
+ TrapFrame->XIntA1 = ContextFrame->XIntA1;
+ TrapFrame->XIntA2 = ContextFrame->XIntA2;
+ TrapFrame->XIntA3 = ContextFrame->XIntA3;
+ TrapFrame->XIntT0 = ContextFrame->XIntT0;
+ TrapFrame->XIntT1 = ContextFrame->XIntT1;
+ TrapFrame->XIntT2 = ContextFrame->XIntT2;
+ TrapFrame->XIntT3 = ContextFrame->XIntT3;
+ TrapFrame->XIntT4 = ContextFrame->XIntT4;
+ TrapFrame->XIntT5 = ContextFrame->XIntT5;
+ TrapFrame->XIntT6 = ContextFrame->XIntT6;
+ TrapFrame->XIntT7 = ContextFrame->XIntT7;
+ TrapFrame->XIntT8 = ContextFrame->XIntT8;
+ TrapFrame->XIntT9 = ContextFrame->XIntT9;
+ TrapFrame->XIntLo = ContextFrame->XIntLo;
+ TrapFrame->XIntHi = ContextFrame->XIntHi;
+
+ //
+ // Set integer registers s0 - s7, and s8.
+ //
+
+ TrapFrame->XIntS0 = ContextFrame->XIntS0;
+ TrapFrame->XIntS1 = ContextFrame->XIntS1;
+ TrapFrame->XIntS2 = ContextFrame->XIntS2;
+ TrapFrame->XIntS3 = ContextFrame->XIntS3;
+ TrapFrame->XIntS4 = ContextFrame->XIntS4;
+ TrapFrame->XIntS5 = ContextFrame->XIntS5;
+ TrapFrame->XIntS6 = ContextFrame->XIntS6;
+ TrapFrame->XIntS7 = ContextFrame->XIntS7;
+ TrapFrame->XIntS8 = ContextFrame->XIntS8;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Set floating registers f0 - f19.
+ //
+
+ RtlMoveMemory(&TrapFrame->FltF0, &ContextFrame->FltF0,
+ sizeof(ULONG) * (20));
+
+ //
+ // Set floating registers f20 - f31.
+ //
+
+ RtlMoveMemory(&ExceptionFrame->FltF20, &ContextFrame->FltF20,
+ sizeof(ULONG) * (12));
+
+ //
+ // Set floating status register.
+ //
+
+ TrapFrame->Fsr = SANITIZE_FSR(ContextFrame->Fsr, PreviousMode);
+ }
+
+ return;
+}
+
+VOID
+KiDispatchException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN BOOLEAN FirstChance
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to dispatch an exception to the proper mode and
+ to cause the exception dispatcher to be called.
+
+ If the exception is a data misalignment, the previous mode is user, this
+ is the first chance for handling the exception, and the current thread
+ has enabled automatic alignment fixup, then an attempt is made to emulate
+ the unaligned reference. Data misalignment exceptions are never emulated
+ for kernel mode.
+
+ If the exception is a floating exception (N.B. the pseudo status
+ STATUS_FLOAT_STACK_CHECK is used to signify this and is converted to the
+ proper code by the floating emulation routine), then an attempt is made
+ to emulate the floating operation if it is not implemented.
+
+ If the exception is neither a data misalignment nor a floating point
+    exception and the previous mode is kernel, then the exception
+    dispatcher is called directly to process the exception. Otherwise the
+    exception record, exception frame, and trap frame contents are copied
+    to the user mode stack. The contents of the exception frame and trap
+    frame are then modified such that when control is returned, execution will
+    commence in user mode in a routine which will call the exception
+ dispatcher.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ PreviousMode - Supplies the previous processor mode.
+
+ FirstChance - Supplies a boolean variable that specifies whether this
+ is the first (TRUE) or second (FALSE) time that this exception has
+ been processed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextFrame;
+ PULONG Destination;
+ EXCEPTION_RECORD ExceptionRecord1;
+ ULONG Index;
+ LONG Length;
+ PULONGLONG Source;
+ BOOLEAN UserApcPending;
+ ULONG UserStack1;
+ ULONG UserStack2;
+
+ //
+ // If the exception is an access violation, and the previous mode is
+ // user mode, then attempt to emulate a load or store operation if
+ // the exception address is at the end of a page.
+ //
+ // N.B. The following is a workaround for a r4000 chip bug where an
+    //      address privilege violation is reported as an access violation
+ // on a load or store instruction that is the last instruction
+ // in a page.
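+    //
+    //      The test against 0xffc below checks that the faulting instruction
+    //      is the last 32-bit word of a 4KB page.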
+ //
+
+ if ((ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
+ (((ULONG)ExceptionRecord->ExceptionAddress & 0xffc) == 0xffc) &&
+ (PreviousMode != KernelMode) &&
+ (KiEmulateReference(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE)) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+
+ //
+ // If the exception is a data bus error, then process the error.
+ //
+ // N.B. A special exception code is used to signal a data bus error.
+ // This code is equivalent to the bug check code merged with a
+ // reserved facility code and the reserved bit set.
+ //
+ // N.B. If control returns, then it is assumed that the error has been
+ // corrected.
+ //
+
+ if (ExceptionRecord->ExceptionCode == (DATA_BUS_ERROR | 0xdfff0000)) {
+
+ //
+ // N.B. The following is a workaround for a r4000 chip bug where an
+ // address privilege violation is reported as a data bus error
+ // on a load or store instruction that is the last instruction
+ // in a page.
+ //
+
+ if ((ExceptionRecord->ExceptionInformation[1] < 0x80000000) &&
+ (((ULONG)ExceptionRecord->ExceptionAddress & 0xffc) == 0xffc) &&
+ (PreviousMode != KernelMode)) {
+ if (KiEmulateReference(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+ }
+
+ KiDataBusError(ExceptionRecord, ExceptionFrame, TrapFrame);
+ goto Handled2;
+ }
+
+ //
+ // If the exception is an instruction bus error, then process the error.
+ //
+ // N.B. A special exception code is used to signal an instruction bus
+ // error. This code is equivalent to the bug check code merged
+ // with a reserved facility code and the reserved bit set.
+ //
+    // N.B. If control returns, then it is assumed that the error has been
+ // corrected.
+ //
+
+ if (ExceptionRecord->ExceptionCode == (INSTRUCTION_BUS_ERROR | 0xdfff0000)) {
+ KiInstructionBusError(ExceptionRecord, ExceptionFrame, TrapFrame);
+ goto Handled2;
+ }
+
+ //
+    // If the exception is a data misalignment, this is the first chance for
+ // handling the exception, and the current thread has enabled automatic
+ // alignment fixup, then attempt to emulate the unaligned reference.
+ //
+
+ if ((ExceptionRecord->ExceptionCode == STATUS_DATATYPE_MISALIGNMENT) &&
+ (FirstChance != FALSE) &&
+ ((KeGetCurrentThread()->AutoAlignment != FALSE) ||
+ (KeGetCurrentThread()->ApcState.Process->AutoAlignment != FALSE) ||
+ (((ExceptionRecord->ExceptionInformation[1] & 0x7fff0000) == 0x7fff0000) &&
+ (PreviousMode != KernelMode))) &&
+ (KiEmulateReference(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE)) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+
+ //
+ // If the exception is a floating exception, then attempt to emulate the
+ // operation.
+ //
+ // N.B. The pseudo status STATUS_FLOAT_STACK_CHECK is used to signify
+    //      that the exception is a floating exception and that this is the
+ // first chance for handling the exception. The floating emulation
+ // routine converts the status code to the proper floating status
+ // value.
+ //
+
+ if ((ExceptionRecord->ExceptionCode == STATUS_FLOAT_STACK_CHECK) &&
+ (KiEmulateFloating(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE)) {
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, PreviousMode);
+ goto Handled2;
+ }
+
+ //
+ // If the exception is a breakpoint, then translate it to an appropriate
+ // exception code if it is a division by zero or an integer overflow
+ // caused by multiplication.
+ //
+
+ if (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) {
+ if (ExceptionRecord->ExceptionInformation[0] == DIVIDE_BREAKPOINT) {
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_DIVIDE_BY_ZERO;
+
+ } else if ((ExceptionRecord->ExceptionInformation[0] == MULTIPLY_BREAKPOINT) ||
+ (ExceptionRecord->ExceptionInformation[0] == OVERFLOW_BREAKPOINT)) {
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_OVERFLOW;
+
+ } else if (ExceptionRecord->ExceptionInformation[0] == KDDEBUG_BREAKPOINT) {
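+
+            //
+            // Step over the kernel debugger break-in breakpoint instruction.
+            //
+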
+ TrapFrame->Fir += 4;
+ }
+ }
+
+ //
+ // Move machine state from trap and exception frames to a context frame,
+ // and increment the number of exceptions dispatched.
+ //
+
+ ContextFrame.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextFrame);
+ KeGetCurrentPrcb()->KeExceptionDispatchCount += 1;
+
+ //
+ // Select the method of handling the exception based on the previous mode.
+ //
+
+ if (PreviousMode == KernelMode) {
+
+ //
+ // Previous mode was kernel.
+ //
+ // If this is the first chance, the kernel debugger is active, and
+ // the exception is a kernel breakpoint, then give the kernel debugger
+ // a chance to handle the exception.
+ //
+ // If this is the first chance and the kernel debugger is not active
+ // or does not handle the exception, then attempt to find a frame
+ // handler to handle the exception.
+ //
+ // If this is the second chance or the exception is not handled, then
+ // if the kernel debugger is active, then give the kernel debugger a
+ // second chance to handle the exception. If the kernel debugger does
+ // not handle the exception, then bug check.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // If the kernel debugger is active, the exception is a breakpoint,
+ // and the breakpoint is handled by the kernel debugger, then give
+ // the kernel debugger a chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ KernelMode) != FALSE)) {
+
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ KernelMode,
+ FALSE)) != FALSE) {
+
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (RtlDispatchException(ExceptionRecord, &ContextFrame) != FALSE) {
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ if (KiDebugRoutine != NULL) {
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ TRUE)) != FALSE) {
+ goto Handled1;
+ }
+ }
+
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+
+ } else {
+
+ //
+ // Previous mode was user.
+ //
+ // If this is the first chance, the kernel debugger is active, the
+ // exception is a kernel breakpoint, and the current process is not
+ // being debugged, or the current process is being debugged, but the
+        // breakpoint is not a kernel breakpoint instruction, then give
+ // the kernel debugger a chance to handle the exception.
+ //
+ // If this is the first chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // transfer the exception information to the user stack, transition to
+ // user mode, and attempt to dispatch the exception to a frame based
+ // handler. If a frame based handler handles the exception, then continue
+ // execution. Otherwise, execute the raise exception system service
+ // which will call this routine a second time to process the exception.
+ //
+ // If this is the second chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // if the current process has a subsystem port, then send a message to
+ // the subsystem port and wait for a reply. If the subsystem handles the
+ // exception, then continue execution. Else terminate the thread.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // If the kernel debugger is active, the exception is a kernel
+ // breakpoint, and the current process is not being debugged,
+ // or the current process is being debugged, but the breakpoint
+ // is not a kernel breakpoint instruction, then give the kernel
+ // debugger a chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ UserMode) != FALSE) &&
+ ((PsGetCurrentProcess()->DebugPort == NULL) ||
+ ((PsGetCurrentProcess()->DebugPort != NULL) &&
+ (ExceptionRecord->ExceptionInformation[0] !=
+ KERNEL_BREAKPOINT_INSTRUCTION)))) {
+
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ UserMode,
+ FALSE)) != FALSE) {
+
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (DbgkForwardException(ExceptionRecord, TRUE, FALSE)) {
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, UserMode);
+ goto Handled2;
+ }
+
+ //
+ // Transfer exception information to the user stack, transition
+ // to user mode, and attempt to dispatch the exception to a frame
+ // based handler.
+ //
+
+ repeat:
+ try {
+
+ //
+ // Coerce the 64-bit integer register context to 32-bits
+ // and store in the 32-bit context area of the context
+ // record.
+ //
+                // N.B. This only works because the 32- and 64-bit integer
+ // register context does not overlap in the context
+ // record.
+ //
+
+ Destination = &ContextFrame.IntZero;
+ Source = &ContextFrame.XIntZero;
+ for (Index = 0; Index < 32; Index += 1) {
+ *Destination++ = (ULONG)*Source++;
+ }
+
+ //
+ // Compute length of exception record and new aligned stack
+ // address.
+ //
+
+ Length = (sizeof(EXCEPTION_RECORD) + 7) & (~7);
+ UserStack1 = (ULONG)(ContextFrame.XIntSp & (~7)) - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // exception record to the user stack area.
+ //
+
+ ProbeForWrite((PCHAR)UserStack1, Length, sizeof(QUAD));
+ RtlMoveMemory((PVOID)UserStack1, ExceptionRecord, Length);
+
+ //
+ // Compute length of context record and new aligned user stack
+ // pointer.
+ //
+
+ Length = sizeof(CONTEXT);
+ UserStack2 = UserStack1 - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack2, Length, sizeof(QUAD));
+ RtlMoveMemory((PVOID)UserStack2, &ContextFrame, sizeof(CONTEXT));
+
+ //
+                // Set the address of the exception record, the context record,
+                // and the new stack pointer in the current trap frame.
+ //
+
+ TrapFrame->XIntSp = (LONG)UserStack2;
+ TrapFrame->XIntS8 = (LONG)UserStack2;
+ TrapFrame->XIntS0 = (LONG)UserStack1;
+ TrapFrame->XIntS1 = (LONG)UserStack2;
+
+ //
+ // Sanitize the floating status register so a recursive
+ // exception will not occur.
+ //
+
+ TrapFrame->Fsr = SANITIZE_FSR(ContextFrame.Fsr, UserMode);
+
+ //
+ // Set the address of the exception routine that will call the
+ // exception dispatcher and then return to the trap handler.
+ // The trap handler will restore the exception and trap frame
+ // context and continue execution in the routine that will
+ // call the exception dispatcher.
+ //
+
+ TrapFrame->Fir = KeUserExceptionDispatcher;
+ return;
+
+ //
+ // If an exception occurs, then copy the new exception information
+ // to an exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord1,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // If the exception is a stack overflow, then attempt
+ // to raise the stack overflow exception. Otherwise,
+ // the user's stack is not accessible, or is misaligned,
+ // and second chance processing is performed.
+ //
+
+ if (ExceptionRecord1.ExceptionCode == STATUS_STACK_OVERFLOW) {
+ ExceptionRecord1.ExceptionAddress = ExceptionRecord->ExceptionAddress;
+ RtlMoveMemory((PVOID)ExceptionRecord,
+ &ExceptionRecord1, sizeof(EXCEPTION_RECORD));
+ goto repeat;
+ }
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ UserApcPending = KeGetCurrentThread()->ApcState.UserApcPending;
+ if (DbgkForwardException(ExceptionRecord, TRUE, TRUE)) {
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, UserMode);
+ goto Handled2;
+
+ } else if (DbgkForwardException(ExceptionRecord, FALSE, TRUE)) {
+
+ //
+ // If a user APC was not previously pending and one is now
+ // pending, then the thread has been terminated and the PC
+ // must be forced to a legal address so an infinite loop does
+ // not occur for the case where a jump to an unmapped address
+            // occurred.
+ //
+
+ if ((UserApcPending == FALSE) &&
+ (KeGetCurrentThread()->ApcState.UserApcPending != FALSE)) {
+ TrapFrame->Fir = (ULONG)USPCR;
+ }
+
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, UserMode);
+ goto Handled2;
+
+ } else {
+ ZwTerminateProcess(NtCurrentProcess(), ExceptionRecord->ExceptionCode);
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+ }
+ }
+
+ //
+ // Move machine state from context frame to trap and exception frames and
+ // then return to continue execution with the restored state.
+ //
+
+Handled1:
+ KeContextToKframes(TrapFrame, ExceptionFrame, &ContextFrame,
+ ContextFrame.ContextFlags, PreviousMode);
+
+ //
+ // Exception was handled by the debugger or the associated subsystem
+ // and state was modified, if necessary, using the get state and set
+ // state capabilities. Therefore the context frame does not need to
+    // be transferred to the trap and exception frames.
+ //
+
+Handled2:
+ return;
+}
+
+ULONG
+KiCopyInformation (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord1,
+ IN PEXCEPTION_RECORD ExceptionRecord2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called from an exception filter to copy the exception
+ information from one exception record to another when an exception occurs.
+
+Arguments:
+
+ ExceptionRecord1 - Supplies a pointer to the destination exception record.
+
+ ExceptionRecord2 - Supplies a pointer to the source exception record.
+
+Return Value:
+
+ A value of EXCEPTION_EXECUTE_HANDLER is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Copy one exception record to another and return value that causes
+ // an exception handler to be executed.
+ //
+
+ RtlMoveMemory((PVOID)ExceptionRecord1,
+ (PVOID)ExceptionRecord2,
+ sizeof(EXCEPTION_RECORD));
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+
+NTSTATUS
+KeRaiseUserException(
+ IN NTSTATUS ExceptionCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function causes an exception to be raised in the calling thread's
+ usermode context. This is accomplished by editing the trap frame the
+ kernel was entered with to point to trampoline code that raises the
+ requested exception.
+
+Arguments:
+
+ ExceptionCode - Supplies the status value to be used as the exception
+ code for the exception that is to be raised.
+
+Return Value:
+
+ The status value that should be returned by the caller.
+
+--*/
+
+{
+
+ PKTRAP_FRAME TrapFrame;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+ TrapFrame->Fir = KeRaiseUserExceptionDispatcher;
+ return ExceptionCode;
+}
diff --git a/private/ntos/ke/mips/floatem.c b/private/ntos/ke/mips/floatem.c
new file mode 100644
index 000000000..139a89c4b
--- /dev/null
+++ b/private/ntos/ke/mips/floatem.c
@@ -0,0 +1,4599 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ floatem.c
+
+Abstract:
+
+ This module implements a software emulation of the IEEE single and
+ double floating operations. It is required on MIPS processors since
+ the hardware does not fully support all of the operations required
+    by the IEEE standard. In particular, infinities and NaNs are not
+ handled by the hardware, but rather cause an exception. On receipt
+ of the exception, a software emulation of the floating operation
+ is performed to determine the real result of the operation and if
+ an exception will actually be raised.
+
+ Since floating exceptions are rather rare events, this routine is
+ written in C. Should a higher performance implementation be required,
+    then the algorithms contained herein can be used to guide a higher
+ performance assembly language implementation.
+
+ N.B. This routine does not emulate floating loads, floating stores,
+ control to/from floating, or move to/from floating instructions.
+ These instructions either do not fault or are emulated elsewhere.
+
+ Floating point operations are carried out by unpacking the operands,
+ normalizing denormalized numbers, checking for NaNs, interpreting
+ infinities, and computing results.
+
+ Floating operands are converted to a format that has a value with the
+ appropriate number of leading zeros, an overflow bit, the mantissa, a
+ guard bit, a round bit, and a set of sticky bits.
+
+ The overflow bit is needed for addition and is also used for multiply.
+ The mantissa is 24-bits for single operations and 53-bits for double
+ operations. The guard bit and round bit are used to hold precise values
+ for normalization and rounding.
+
+ If the result of an operation is normalized, then the guard bit becomes
+ the round bit and the round bit is accumulated with the sticky bits. If
+ the result of an operation needs to be shifted left one bit for purposes
+    of normalization, then the guard bit becomes part of the mantissa and the
+ round bit is used for rounding.
+
+ The round bit plus the sticky bits are used to determine how rounding is
+ performed.
+
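+    For reference, in the default round to nearest/even mode the standard
+    IEEE rule is to increment the result when the round bit is set and either
+    a sticky bit or the low order bit of the mantissa is also set.
+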
+Author:
+
+ David N. Cutler (davec) 16-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define signaling NaN mask values.
+//
+
+#define DOUBLE_SIGNAL_NAN_MASK (1 << (53 - 32))
+#define SINGLE_SIGNAL_NAN_MASK (1 << 24)
+
+//
+// Define quiet NaN mask values.
+//
+
+#define DOUBLE_QUIET_NAN_MASK (1 << (51 - 32))
+#define SINGLE_QUIET_NAN_MASK (1 << 22)
+
+//
+// Define quiet NaN prefix values.
+//
+
+#define DOUBLE_QUIET_NAN_PREFIX 0x7ff00000
+#define SINGLE_QUIET_NAN_PREFIX 0x7f800000
+
+//
+// Define compare function masks.
+//
+
+#define COMPARE_UNORDERED_MASK (1 << 0)
+#define COMPARE_EQUAL_MASK (1 << 1)
+#define COMPARE_LESS_MASK (1 << 2)
+#define COMPARE_ORDERED_MASK (1 << 3)
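+
+//
+// N.B. These masks select the predicate bits in the low four bits of the
+//      floating compare function code. As used below, bit 0 gives the
+//      condition result when the operands are unordered, bits 1 and 2
+//      select the equal and less predicates, and bit 3 distinguishes the
+//      signaling compares, which raise an invalid operation exception for
+//      any NaN operand, from the quiet compares, which do so only for a
+//      signaling NaN.
+//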
+
+//
+// Define context block structure.
+//
+
+typedef struct _FP_CONTEXT_BLOCK {
+ ULONG Fd;
+ ULONG BranchAddress;
+ PEXCEPTION_RECORD ExceptionRecord;
+ PKEXCEPTION_FRAME ExceptionFrame;
+ PKTRAP_FRAME TrapFrame;
+ ULONG Round;
+} FP_CONTEXT_BLOCK, *PFP_CONTEXT_BLOCK;
+
+//
+// Define single and double operand value structures.
+//
+
+typedef struct _FP_DOUBLE_OPERAND {
+ union {
+ struct {
+ ULONG MantissaLow;
+ LONG MantissaHigh;
+ };
+
+ LONGLONG Mantissa;
+ };
+
+ LONG Exponent;
+ LONG Sign;
+ BOOLEAN Infinity;
+ BOOLEAN Nan;
+} FP_DOUBLE_OPERAND, *PFP_DOUBLE_OPERAND;
+
+typedef struct _FP_SINGLE_OPERAND {
+ LONG Mantissa;
+ LONG Exponent;
+ LONG Sign;
+ BOOLEAN Infinity;
+ BOOLEAN Nan;
+} FP_SINGLE_OPERAND, *PFP_SINGLE_OPERAND;
+
+//
+// Define forward referenced function prototypes.
+//
+
+BOOLEAN
+KiDivideByZeroDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiDivideByZeroSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+BOOLEAN
+KiInvalidCompareDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiInvalidCompareSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+BOOLEAN
+KiInvalidOperationDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiInvalidOperationLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ );
+
+BOOLEAN
+KiInvalidOperationQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ );
+
+BOOLEAN
+KiInvalidOperationSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+BOOLEAN
+KiNormalizeDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ );
+
+BOOLEAN
+KiNormalizeLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ );
+
+BOOLEAN
+KiNormalizeQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ );
+
+BOOLEAN
+KiNormalizeSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ );
+
+ULONG
+KiSquareRootDouble (
+ IN PULARGE_INTEGER DoubleValue
+ );
+
+ULONG
+KiSquareRootSingle (
+ IN PULONG SingleValue
+ );
+
+VOID
+KiUnpackDouble (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_DOUBLE_OPERAND DoubleOperand
+ );
+
+VOID
+KiUnpackSingle (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_SINGLE_OPERAND SingleOperand
+ );
+
+BOOLEAN
+KiEmulateFloating (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate a floating operation and convert the
+ exception status to the proper value. If the exception is an unimplemented
+ operation, then the operation is emulated. Otherwise, the status code is
+ just converted to its proper value.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ A value of TRUE is returned if the floating exception is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULARGE_INTEGER AhighBhigh;
+ ULARGE_INTEGER AhighBlow;
+ ULARGE_INTEGER AlowBhigh;
+ ULARGE_INTEGER AlowBlow;
+ ULONG Carry1;
+ ULONG Carry2;
+ BOOLEAN CompareEqual;
+ ULONG CompareFunction;
+ BOOLEAN CompareLess;
+ FP_CONTEXT_BLOCK ContextBlock;
+ LARGE_INTEGER DoubleDividend;
+ LARGE_INTEGER DoubleDivisor;
+ ULARGE_INTEGER DoubleValue;
+ ULONG DoubleMantissaLow;
+ LONG DoubleMantissaHigh;
+ FP_DOUBLE_OPERAND DoubleOperand1;
+ FP_DOUBLE_OPERAND DoubleOperand2;
+ FP_DOUBLE_OPERAND DoubleOperand3;
+ LARGE_INTEGER DoubleQuotient;
+ PVOID ExceptionAddress;
+ ULONG ExponentDifference;
+ ULONG ExponentSum;
+ ULONG Format;
+ ULONG Fs;
+ ULONG Ft;
+ ULONG Function;
+ ULONG Index;
+ MIPS_INSTRUCTION Instruction;
+ ULARGE_INTEGER LargeResult;
+ LONG Longword;
+ LONG Negation;
+ union {
+ LONGLONG Quadword;
+ LARGE_INTEGER LargeValue;
+ }u;
+
+ LONG SingleMantissa;
+ FP_SINGLE_OPERAND SingleOperand1;
+ FP_SINGLE_OPERAND SingleOperand2;
+ FP_SINGLE_OPERAND SingleOperand3;
+ ULONG SingleValue;
+ ULONG StickyBits;
+
+ //
+ // Save the original exception address in case another exception
+ // occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation of the
+ // floating operation causes the emulation to be aborted. The new
+ // exception code and information is copied to the original exception
+ // record and a value of FALSE is returned.
+ //
+
+ try {
+
+ //
+ // If the exception PC is equal to the fault instruction address
+ // plus four, then the floating exception occurred in the delay
+ // slot of a branch instruction and the continuation address must
+ // be computed by emulating the branch instruction. Note that it
+ // is possible for an exception to occur when the branch instruction
+ // is read from user memory.
+ //
+
+ if ((TrapFrame->Fir + 4) == (ULONG)ExceptionRecord->ExceptionAddress) {
+ ContextBlock.BranchAddress = KiEmulateBranch(ExceptionFrame,
+ TrapFrame);
+
+ } else {
+ ContextBlock.BranchAddress = TrapFrame->Fir + 4;
+ }
+
+ //
+ // Increment the floating emulation count.
+ //
+
+ KeGetCurrentPrcb()->KeFloatingEmulationCount += 1;
+
+ //
+ // Initialize the address of the exception record, exception frame,
+ // and trap frame in the context block used during the emulation of
+ // the floating point operation.
+ //
+
+ ContextBlock.ExceptionRecord = ExceptionRecord;
+ ContextBlock.ExceptionFrame = ExceptionFrame;
+ ContextBlock.TrapFrame = TrapFrame;
+ ContextBlock.Round = ((PFSR)&TrapFrame->Fsr)->RM;
+
+ //
+ // Initialize the number of exception information parameters, set
+ // the branch address, and clear the IEEE exception value.
+ //
+
+ ExceptionRecord->NumberParameters = 6;
+ ExceptionRecord->ExceptionInformation[0] = 0;
+ ExceptionRecord->ExceptionInformation[1] = ContextBlock.BranchAddress;
+ ExceptionRecord->ExceptionInformation[2] = 0;
+ ExceptionRecord->ExceptionInformation[3] = 0;
+ ExceptionRecord->ExceptionInformation[4] = 0;
+ ExceptionRecord->ExceptionInformation[5] = 0;
+
+ //
+        // Clear all exception flags and emulate the floating point operation.
+ // The return value is dependent on the results of the emulation.
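+        //
+        // N.B. The mask (0x3f << 12) clears the six exception cause bits
+        //      (bits 12 through 17) of the floating status register.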
+ //
+
+ TrapFrame->Fsr &= ~(0x3f << 12);
+ Instruction = *((PMIPS_INSTRUCTION)ExceptionRecord->ExceptionAddress);
+ Function = Instruction.c_format.Function;
+ ContextBlock.Fd = Instruction.c_format.Fd;
+ Fs = Instruction.c_format.Fs;
+ Ft = Instruction.c_format.Ft;
+ Format = Instruction.c_format.Format;
+ Negation = 0;
+
+ //
+ // Check for illegal register specification or format code.
+ //
+
+ if (((ContextBlock.Fd & 0x1) != 0) || ((Fs & 0x1) != 0) || ((Ft & 0x1) != 0) ||
+ ((Format != FORMAT_LONGWORD) && (Format != FORMAT_QUADWORD) && (Format > FORMAT_DOUBLE))) {
+ Function = FLOAT_ILLEGAL;
+ }
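+
+        //
+        // N.B. Odd floating register specifiers are rejected because operand
+        //      values are accessed as even/odd register pairs addressed by
+        //      the even register number (see, for example, the quadword
+        //      operand fetch below, which reads registers Fs and Fs + 1).
+        //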
+
+ //
+ // Decode operand values and dispose with NaNs.
+        // Decode operand values and dispose of NaNs.
+
+ if ((Function <= FLOAT_DIVIDE) || (Function >= FLOAT_COMPARE)) {
+
+ //
+ // The function has two operand values.
+ //
+
+ if (Format == FORMAT_SINGLE) {
+ KiUnpackSingle(Fs, &ContextBlock, &SingleOperand1);
+ KiUnpackSingle(Ft, &ContextBlock, &SingleOperand2);
+
+ //
+ // If either operand is a NaN, then check to determine if a
+ // compare instruction or other dyadic operation is being
+ // performed.
+ //
+
+ if ((SingleOperand1.Nan != FALSE) || (SingleOperand2.Nan != FALSE)) {
+ if (Function < FLOAT_COMPARE) {
+
+ //
+ // Dyadic operation.
+ //
+                        // Store a quiet NaN if the invalid operation trap
+                        // is disabled, or raise an exception if the invalid
+                        // operation trap is enabled and either of the NaNs
+                        // is a signaling NaN.
+ //
+
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ } else {
+
+ //
+ // Compare operation.
+ //
+ // Set the condition based on the predicate of
+ // the floating comparison.
+ //
+ // If the compare is a signaling compare, then
+ // raise an exception if the invalid operation
+ // trap is enabled. Otherwise, raise an exception
+ // if one of the operands is a signaling NaN.
+ //
+
+ if ((Function & COMPARE_UNORDERED_MASK) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ if ((Function & COMPARE_ORDERED_MASK) != 0) {
+ return KiInvalidCompareSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ } else {
+ return KiInvalidCompareSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ }
+ }
+
+ } else if (Function >= FLOAT_COMPARE) {
+ CompareFunction = Function;
+ Function = FLOAT_COMPARE_SINGLE;
+ }
+
+ } else if (Format == FORMAT_DOUBLE) {
+ KiUnpackDouble(Fs, &ContextBlock, &DoubleOperand1);
+ KiUnpackDouble(Ft, &ContextBlock, &DoubleOperand2);
+
+ //
+ // If either operand is a NaN, then check to determine if a
+ // compare instruction or other dyadic operation is being
+ // performed.
+ //
+
+ if ((DoubleOperand1.Nan != FALSE) || (DoubleOperand2.Nan != FALSE)) {
+ if (Function < FLOAT_COMPARE) {
+
+ //
+ // Dyadic operation.
+ //
+                        // Store a quiet NaN if the invalid operation trap
+                        // is disabled, or raise an exception if the invalid
+                        // operation trap is enabled and either of the NaNs
+                        // is a signaling NaN.
+ //
+
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else {
+
+ //
+ // Compare operation.
+ //
+ // Set the condition based on the predicate of
+ // the floating comparison.
+ //
+ // If the compare is a signaling compare, then
+ // raise an exception if the invalid operation
+                        // trap is enabled. Otherwise, raise an exception
+ // if one of the operands is a signaling NaN.
+ //
+
+ if ((Function & COMPARE_UNORDERED_MASK) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ if ((Function & COMPARE_ORDERED_MASK) != 0) {
+ return KiInvalidCompareDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else {
+ return KiInvalidCompareDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ }
+ }
+
+ } else if (Function >= FLOAT_COMPARE) {
+ CompareFunction = Function;
+ Function = FLOAT_COMPARE_DOUBLE;
+ }
+
+ } else {
+ Function = FLOAT_ILLEGAL;
+ }
+
+ } else {
+
+ //
+ // The function has one operand value.
+ //
+
+ if (Format == FORMAT_SINGLE) {
+ KiUnpackSingle(Fs, &ContextBlock, &SingleOperand1);
+
+ //
+ // If the operand is a NaN and the function is not a convert
+ // operation, then store a quiet NaN if the invalid operation
+ // trap is disabled, or raise an exception if the invalid
+ // operation trap is enabled and the operand is a signaling
+ // NaN.
+ //
+
+ if ((SingleOperand1.Nan != FALSE) &&
+ (Function < FLOAT_ROUND_QUADWORD) ||
+ (Function > FLOAT_CONVERT_QUADWORD) ||
+ ((Function > FLOAT_FLOOR_LONGWORD) &&
+ (Function < FLOAT_CONVERT_SINGLE))) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand1);
+
+ }
+
+ } else if (Format == FORMAT_DOUBLE) {
+ KiUnpackDouble(Fs, &ContextBlock, &DoubleOperand1);
+
+ //
+ // If the operand is a NaN and the function is not a convert
+ // operation, then store a quiet NaN if the invalid operation
+ // trap is disabled, or raise an exception if the invalid
+ // operation trap is enabled and the operand is a signaling
+ // NaN.
+ //
+
+ if ((DoubleOperand1.Nan != FALSE) &&
+ (Function < FLOAT_ROUND_QUADWORD) ||
+ (Function > FLOAT_CONVERT_QUADWORD) ||
+ ((Function > FLOAT_FLOOR_LONGWORD) &&
+ (Function < FLOAT_CONVERT_SINGLE))) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand1);
+ }
+
+ } else if ((Format == FORMAT_LONGWORD) &&
+ (Function >= FLOAT_CONVERT_SINGLE)) {
+ Longword = KiGetRegisterValue(Fs + 32,
+ ContextBlock.ExceptionFrame,
+ ContextBlock.TrapFrame);
+
+ } else if ((Format == FORMAT_QUADWORD) &&
+ (Function >= FLOAT_CONVERT_SINGLE)) {
+ u.LargeValue.LowPart = KiGetRegisterValue(Fs + 32,
+ ContextBlock.ExceptionFrame,
+ ContextBlock.TrapFrame);
+
+ u.LargeValue.HighPart = KiGetRegisterValue(Fs + 33,
+ ContextBlock.ExceptionFrame,
+ ContextBlock.TrapFrame);
+
+ } else {
+ Function = FLOAT_ILLEGAL;
+ }
+ }
+
+ //
+ // Dispatch to the proper function routine to emulate the operation.
+ //
+
+ switch (Function) {
+
+ //
+ // Floating subtract operation.
+ //
+ // Floating subtract is accomplished by complementing the sign
+ // of the second operand and then performing an add operation.
+ //
+
+ case FLOAT_SUBTRACT:
+ Negation = 0x1;
+
+ //
+ // Floating add operation.
+ //
+ // Floating add is accomplished using signed magnitude addition.
+ //
+ // The exponent difference is calculated and the smaller number
+ // is right shifted by the specified amount, but no more than
+ // the width of the operand values (i.e., 26 for single and 55
+ // for double). The shifted out value is saved for rounding.
+ //
+ // If the signs of the two operands are the same, then they
+ // are added together after having performed the alignment
+ // shift.
+ //
+ // If the signs of the two operands are different, then the
+ // sign of the result is the sign of the larger operand and
+ // the smaller operand is subtracted from the larger operand.
+ // In order to avoid making a double level test (i.e., one on
+ // the exponents, and one on the mantissas if the exponents
+ // are equal), it is possible that the result of the subtract
+ // could be negative (if the exponents are equal). If this
+ // occurs, then the result sign and mantissa are complemented
+ // to obtain the correct result.
+ //
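+ // For illustration (assuming, as the shift counts below suggest,
+ // that the unpacked single mantissa carries its leading bit in
+ // bit 25): adding 1.5 (mantissa 0x3000000, exponent e) to 0.375
+ // (mantissa 0x3000000, exponent e - 2) shifts the second mantissa
+ // right by the exponent difference of 2 to obtain 0xC00000 with
+ // no sticky bits, and the sum 0x3C00000 with exponent e
+ // represents 1.875 as expected.
+ //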
+
+ case FLOAT_ADD:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Complement the sign of the second operand if the operation
+ // is subtraction.
+ //
+
+ SingleOperand2.Sign ^= Negation;
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ SingleOperand3 = SingleOperand2;
+ SingleOperand2 = SingleOperand1;
+ SingleOperand1 = SingleOperand3;
+ }
+
+ //
+ // Compute the exponent difference and shift the smaller
+ // mantissa right by the difference value or 26, whichever
+ // is smaller. The bits shifted out are termed the sticky
+ // bits and are used later in the rounding operation.
+ //
+
+ ExponentDifference =
+ SingleOperand1.Exponent - SingleOperand2.Exponent;
+
+ if (ExponentDifference > 26) {
+ ExponentDifference = 26;
+ }
+
+ StickyBits =
+ SingleOperand2.Mantissa & ((1 << ExponentDifference) - 1);
+ SingleMantissa = SingleOperand2.Mantissa >> ExponentDifference;
+
+ //
+ // If the operands both have the same sign, then perform the
+ // operation by adding the values together. Otherwise, perform
+ // the operation by subtracting the second operand from the
+ // first operand.
+ //
+
+ if ((SingleOperand1.Sign ^ SingleOperand2.Sign) == 0) {
+ SingleOperand1.Mantissa += SingleMantissa;
+
+ } else {
+ if ((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity != FALSE)) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ } else if (SingleOperand1.Infinity == FALSE) {
+ if (StickyBits != 0) {
+ SingleOperand1.Mantissa -= 1;
+ }
+
+ SingleOperand1.Mantissa -= SingleMantissa;
+ if (SingleOperand1.Mantissa < 0) {
+ SingleOperand1.Mantissa = -SingleOperand1.Mantissa;
+ SingleOperand1.Sign ^= 0x1;
+ }
+ }
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Complement the sign of the second operand if the operation
+ // is subtraction.
+ //
+
+ DoubleOperand2.Sign ^= Negation;
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ DoubleOperand3 = DoubleOperand2;
+ DoubleOperand2 = DoubleOperand1;
+ DoubleOperand1 = DoubleOperand3;
+ }
+
+ //
+ // Compute the exponent difference and shift the smaller
+ // mantissa right by the difference value or 55, whichever
+ // is smaller. The bits shifted out are termed the sticky
+ // bits and are used later in the rounding operation.
+ //
+
+ ExponentDifference =
+ DoubleOperand1.Exponent - DoubleOperand2.Exponent;
+
+ if (ExponentDifference > 55) {
+ ExponentDifference = 55;
+ }
+
+ if (ExponentDifference >= 32) {
+ ExponentDifference -= 32;
+ StickyBits = (DoubleOperand2.MantissaLow) |
+ (DoubleOperand2.MantissaHigh & ((1 << ExponentDifference) - 1));
+
+ DoubleMantissaLow =
+ DoubleOperand2.MantissaHigh >> ExponentDifference;
+
+ DoubleMantissaHigh = 0;
+
+ } else if (ExponentDifference > 0) {
+ StickyBits =
+ DoubleOperand2.MantissaLow & ((1 << ExponentDifference) - 1);
+
+ DoubleMantissaLow =
+ (DoubleOperand2.MantissaLow >> ExponentDifference) |
+ (DoubleOperand2.MantissaHigh << (32 - ExponentDifference));
+
+ DoubleMantissaHigh =
+ DoubleOperand2.MantissaHigh >> ExponentDifference;
+
+ } else {
+ StickyBits = 0;
+ DoubleMantissaLow = DoubleOperand2.MantissaLow;
+ DoubleMantissaHigh = DoubleOperand2.MantissaHigh;
+ }
+
+ //
+ // If the operands both have the same sign, then perform the
+ // operation by adding the values together. Otherwise, perform
+ // the operation by subtracting the second operand from the
+ // first operand.
+ //
+
+ if ((DoubleOperand1.Sign ^ DoubleOperand2.Sign) == 0) {
+ DoubleOperand1.MantissaLow += DoubleMantissaLow;
+ DoubleOperand1.MantissaHigh += DoubleMantissaHigh;
+ if (DoubleOperand1.MantissaLow < DoubleMantissaLow) {
+ DoubleOperand1.MantissaHigh += 1;
+ }
+
+ } else {
+ if ((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity != FALSE)) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else if (DoubleOperand1.Infinity == FALSE) {
+ if (StickyBits != 0) {
+ if (DoubleOperand1.MantissaLow < 1) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.MantissaLow -= 1;
+ }
+
+ if (DoubleOperand1.MantissaLow < DoubleMantissaLow) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.MantissaLow -= DoubleMantissaLow;
+ DoubleOperand1.MantissaHigh -= DoubleMantissaHigh;
+ if (DoubleOperand1.MantissaHigh < 0) {
+ DoubleOperand1.MantissaLow = ~DoubleOperand1.MantissaLow + 1;
+ DoubleOperand1.MantissaHigh = -DoubleOperand1.MantissaHigh;
+ if (DoubleOperand1.MantissaLow != 0) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.Sign ^= 0x1;
+ }
+ }
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating multiply operation.
+ //
+ // Floating multiply is accomplished using unsigned multiplies
+ // of the mantissa values, and adding the partial results together
+ // to form the total product.
+ //
+ // The two mantissa values are preshifted such that the final
+ // result is properly aligned.
+ //
+
+ case FLOAT_MULTIPLY:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ SingleOperand3 = SingleOperand2;
+ SingleOperand2 = SingleOperand1;
+ SingleOperand1 = SingleOperand3;
+ }
+
+ //
+ // If the first operand is infinite and the second operand is
+ // zero, then an invalid operation is specified.
+ //
+
+ if ((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ }
+
+ //
+ // Preshift the operand mantissas so the result will be a
+ // properly aligned 64-bit value and then unsigned multiply
+ // the two mantissa values. The single result is the high part
+ // of the 64-bit product and the sticky bits are the low part
+ // of the 64-bit product.
+ //
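+ // With 26-bit mantissas (leading bit in bit 25, as assumed from
+ // the shift counts used here), the pre-shifts of (32 - 26) and 1
+ // bits scale the product by 2^7, so the 51- or 52-bit raw product
+ // lands with its leading bit in bit 57 or 58 of the 64-bit result;
+ // the high 32 bits then hold a mantissa with its leading bit in
+ // bit 25 or 26, which, for normalized operands, KiNormalizeSingle
+ // adjusts by at most one position.
+ //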
+
+ LargeResult.QuadPart = UInt32x32To64(SingleOperand1.Mantissa << (32 - 26),
+ SingleOperand2.Mantissa << 1);
+
+ SingleOperand1.Mantissa = LargeResult.HighPart;
+ StickyBits = LargeResult.LowPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ SingleOperand1.Exponent +=
+ SingleOperand2.Exponent - SINGLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ DoubleOperand3 = DoubleOperand2;
+ DoubleOperand2 = DoubleOperand1;
+ DoubleOperand1 = DoubleOperand3;
+ }
+
+ //
+ // If the first operand is infinite and the second operand is
+ // zero, then an invalid operation is specified.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ }
+
+ //
+ // Preshift the operand mantissas so the result will be a
+ // properly aligned 128-bit value and then unsigned multiply
+ // the two mantissa values. The double result is the high part
+ // of the 128-bit product and the sticky bits are the low part
+ // of the 128-bit product.
+ //
+
+ DoubleOperand1.MantissaHigh =
+ (DoubleOperand1.MantissaHigh << 1) |
+ (DoubleOperand1.MantissaLow >> 31);
+
+ DoubleOperand1.MantissaLow <<= 1;
+ DoubleOperand2.MantissaHigh =
+ (DoubleOperand2.MantissaHigh << (64 - 55)) |
+ (DoubleOperand2.MantissaLow >> (32 - (64 - 55)));
+
+ DoubleOperand2.MantissaLow <<= (64 - 55);
+
+ //
+ // The 128-bit product is formed by multiplying and adding
+ // all the cross product values.
+ //
+ // Consider the operands (A and B) as being composed of two
+ // parts Ahigh, Alow, Bhigh, and Blow. The cross product sum
+ // is then:
+ //
+ // Ahigh * Bhigh * 2^64 +
+ // Ahigh * Blow * 2^32 +
+ // Alow * Bhigh * 2^32 +
+ // Alow * Blow
+ //
+
+ AhighBhigh.QuadPart = UInt32x32To64(DoubleOperand1.MantissaHigh,
+ DoubleOperand2.MantissaHigh);
+
+ AhighBlow.QuadPart = UInt32x32To64(DoubleOperand1.MantissaHigh,
+ DoubleOperand2.MantissaLow);
+
+ AlowBhigh.QuadPart = UInt32x32To64(DoubleOperand1.MantissaLow,
+ DoubleOperand2.MantissaHigh);
+
+ AlowBlow.QuadPart = UInt32x32To64(DoubleOperand1.MantissaLow,
+ DoubleOperand2.MantissaLow);
+
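+ //
+ // Sum the 32-bit partial products. After each unsigned add, a
+ // result smaller than one of its addends means the add wrapped
+ // past 2^32, so a carry is accumulated into the next higher word.
+ //
+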
+ AlowBlow.HighPart += AhighBlow.LowPart;
+ if (AlowBlow.HighPart < AhighBlow.LowPart) {
+ Carry1 = 1;
+
+ } else {
+ Carry1 = 0;
+ }
+
+ AlowBlow.HighPart += AlowBhigh.LowPart;
+ if (AlowBlow.HighPart < AlowBhigh.LowPart) {
+ Carry1 += 1;
+ }
+
+ DoubleOperand1.MantissaLow = AhighBlow.HighPart + Carry1;
+ if (DoubleOperand1.MantissaLow < Carry1) {
+ Carry2 = 1;
+
+ } else {
+ Carry2 = 0;
+ }
+
+ DoubleOperand1.MantissaLow += AlowBhigh.HighPart;
+ if (DoubleOperand1.MantissaLow < AlowBhigh.HighPart) {
+ Carry2 += 1;
+ }
+
+ DoubleOperand1.MantissaLow += AhighBhigh.LowPart;
+ if (DoubleOperand1.MantissaLow < AhighBhigh.LowPart) {
+ Carry2 += 1;
+ }
+
+ DoubleOperand1.MantissaHigh = AhighBhigh.HighPart + Carry2;
+ StickyBits = AlowBlow.HighPart | AlowBlow.LowPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ DoubleOperand1.Exponent +=
+ DoubleOperand2.Exponent - DOUBLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating divide operation.
+ //
+ // Floating division is accomplished by repeated subtract using
+ // a single one-bit-at-a-time algorithm. The number of division
+ // steps performed is equal to the mantissa size plus one guard
+ // bit.
+ //
+ // The sticky bits are the remainder after the specified number
+ // of division steps.
+ //
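+ // Assuming normalized operands, the ratio of the two mantissas
+ // lies between 1/2 and 2, so the quotient produced by the loop
+ // occupies either the full mantissa width or one bit less; the
+ // normalize routine then shifts by at most one position, and the
+ // remainder left in the dividend supplies the sticky bits.
+ //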
+
+ case FLOAT_DIVIDE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the first operand is infinite and the second operand
+ // is infinite, or both operands are zero, then an invalid
+ // operation is specified.
+ //
+
+ if (((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity != FALSE)) ||
+ ((SingleOperand1.Infinity == FALSE) &&
+ (SingleOperand1.Mantissa == 0) &&
+ (SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0))) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ }
+
+ //
+ // If the second operand is zero, then a divide by zero
+ // operation is specified.
+ //
+
+ if ((SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ return KiDivideByZeroSingle(&ContextBlock,
+ &SingleOperand1,
+ &SingleOperand2);
+ }
+
+ //
+ // If the first operand is infinite, then the result is
+ // infinite. Otherwise, if the second operand is infinite,
+ // then the result is zero (note that both operands cannot
+ // be infinite).
+ //
+
+ if (SingleOperand1.Infinity != FALSE) {
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (SingleOperand2.Infinity != FALSE) {
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ SingleOperand1.Exponent = 0;
+ SingleOperand1.Mantissa = 0;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ }
+
+ //
+ // Perform divide operation by repeating a single bit
+ // divide step 26 iterations.
+ //
+
+ SingleOperand3.Mantissa = 0;
+ for (Index = 0; Index < 26; Index += 1) {
+ SingleOperand3.Mantissa <<= 1;
+ if (SingleOperand1.Mantissa >= SingleOperand2.Mantissa) {
+ SingleOperand1.Mantissa -= SingleOperand2.Mantissa;
+ SingleOperand3.Mantissa |= 1;
+ }
+
+ SingleOperand1.Mantissa <<= 1;
+ }
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ SingleOperand3.Sign = SingleOperand1.Sign ^ SingleOperand2.Sign;
+ SingleOperand3.Exponent = SingleOperand1.Exponent -
+ SingleOperand2.Exponent + SINGLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ SingleOperand3.Infinity = FALSE;
+ SingleOperand3.Nan = FALSE;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand3,
+ SingleOperand1.Mantissa);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the first operand is infinite and the second operand
+ // is infinite, or both operands are zero, then an invalid
+ // operation is specified.
+ //
+
+ if (((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity != FALSE)) ||
+ ((DoubleOperand1.Infinity == FALSE) &&
+ (DoubleOperand1.MantissaHigh == 0) &&
+ (DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0))) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ }
+
+ //
+ // If the second operand is zero, then a divide by zero
+ // operation is specified.
+ //
+
+ if ((DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ return KiDivideByZeroDouble(&ContextBlock,
+ &DoubleOperand1,
+ &DoubleOperand2);
+ }
+
+ //
+ // If the first operand is infinite, then the result is
+ // infinite. Otherwise, if the second operand is infinite,
+ // then the result is zero (note that both operands cannot
+ // be infinite).
+ //
+
+ if (DoubleOperand1.Infinity != FALSE) {
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else if (DoubleOperand2.Infinity != FALSE) {
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ DoubleOperand1.Exponent = 0;
+ DoubleOperand1.MantissaHigh = 0;
+ DoubleOperand1.MantissaLow = 0;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ }
+
+ //
+ // Perform divide operation by repeating a single bit
+ // divide step 55 iterations.
+ //
+
+ DoubleDividend.LowPart = DoubleOperand1.MantissaLow;
+ DoubleDividend.HighPart = DoubleOperand1.MantissaHigh;
+ DoubleDivisor.LowPart = DoubleOperand2.MantissaLow;
+ DoubleDivisor.HighPart = DoubleOperand2.MantissaHigh;
+ DoubleQuotient.LowPart = 0;
+ DoubleQuotient.HighPart = 0;
+ for (Index = 0; Index < 55; Index += 1) {
+ DoubleQuotient.HighPart =
+ (DoubleQuotient.HighPart << 1) |
+ DoubleQuotient.LowPart >> 31;
+
+ DoubleQuotient.LowPart <<= 1;
+ if (DoubleDividend.QuadPart >= DoubleDivisor.QuadPart) {
+ DoubleDividend.QuadPart -= DoubleDivisor.QuadPart;
+ DoubleQuotient.LowPart |= 1;
+ }
+
+ DoubleDividend.HighPart =
+ (DoubleDividend.HighPart << 1) |
+ DoubleDividend.LowPart >> 31;
+
+ DoubleDividend.LowPart <<= 1;
+ }
+
+ DoubleOperand3.MantissaLow = DoubleQuotient.LowPart;
+ DoubleOperand3.MantissaHigh = DoubleQuotient.HighPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ DoubleOperand3.Sign = DoubleOperand1.Sign ^ DoubleOperand2.Sign;
+ DoubleOperand3.Exponent = DoubleOperand1.Exponent -
+ DoubleOperand2.Exponent + DOUBLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ DoubleOperand3.Infinity = FALSE;
+ DoubleOperand3.Nan = FALSE;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand3,
+ DoubleDividend.LowPart | DoubleDividend.HighPart);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating square root.
+ //
+
+ case FLOAT_SQUARE_ROOT:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is plus infinity, then the result is
+ // plus infinity, or if the operand is plus or minus
+ // zero, then the result is plus or minus zero.
+ //
+
+ if (((SingleOperand1.Sign == 0) &&
+ (SingleOperand1.Infinity != FALSE)) ||
+ (SingleOperand1.Mantissa == 0)) {
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+ }
+
+ //
+ // If the operand is negative, then the operation is
+ // invalid.
+ //
+
+ if (SingleOperand1.Sign != 0) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand1);
+ }
+
+ //
+ // The only case remaining that could cause an exception
+ // is a denormalized source value. The square root of a
+ // denormalized value is computed by:
+ //
+ // 1. Converting the value to a normalized value with
+ // an exponent equal to the denormalization shift count
+ // plus the bias of the exponent plus one.
+ //
+ // 2. Computing the square root of the value and unpacking
+ // the result.
+ //
+ // 3. Converting the shift count back to a normalization
+ // shift count.
+ //
+ // 4. Rounding and packing the resultant value.
+ //
+ // N.B. The square root of all denormalized numbers is a
+ // normalized number.
+ //
+
+ SingleOperand1.Exponent = (SINGLE_EXPONENT_BIAS + 1 +
+ SingleOperand1.Exponent) << 23;
+
+ SingleValue = (SingleOperand1.Mantissa & ~(1 << 25)) >> 2;
+ SingleValue |= SingleOperand1.Exponent;
+ StickyBits = KiSquareRootSingle(&SingleValue);
+ SingleOperand1.Exponent = (SingleValue >> 23) -
+ ((SINGLE_EXPONENT_BIAS + 1) / 2);
+
+ SingleOperand1.Mantissa = ((SingleValue &
+ 0x7fffff) | 0x800000) << 2;
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is plus infinity, then the result is
+ // plus infinity, or if the operand is plus or minus
+ // zero, then the result is plus or minus zero.
+ //
+
+ if (((DoubleOperand1.Sign == 0) &&
+ (DoubleOperand1.Infinity != FALSE)) ||
+ (DoubleOperand1.MantissaHigh == 0)) {
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+ }
+
+ //
+ // If the operand is negative, then the operation is
+ // invalid.
+ //
+
+ if (DoubleOperand1.Sign != 0) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand1);
+ }
+
+ //
+ // The only case remaining that could cause an exception
+ // is a denormalized source value. The square root of a
+ // denormalized value is computed by:
+ //
+ // 1. Converting the value to a normalized value with
+ // an exponent equal to the denormalization shift count
+ // plus the bias of the exponent plus one.
+ //
+ // 2. Computing the square root of the value and unpacking
+ // the result.
+ //
+ // 3. Converting the shift count back to a normalization
+ // shift count.
+ //
+ // 4. Rounding and packing the resultant value.
+ //
+ // N.B. The square root of all denormalized numbers is a
+ // normalized number.
+ //
+
+ DoubleOperand1.Exponent = (DOUBLE_EXPONENT_BIAS + 1 +
+ DoubleOperand1.Exponent) << 20;
+
+ DoubleValue.HighPart = (DoubleOperand1.MantissaHigh & ~(1 << 22)) >> 2;
+ DoubleValue.LowPart = (DoubleOperand1.MantissaHigh << 30) |
+ (DoubleOperand1.MantissaLow >> 2);
+
+ DoubleValue.HighPart |= DoubleOperand1.Exponent;
+ StickyBits = KiSquareRootDouble(&DoubleValue);
+ DoubleOperand1.Exponent = (DoubleValue.HighPart >> 20) -
+ ((DOUBLE_EXPONENT_BIAS + 1) / 2);
+
+ DoubleOperand1.MantissaLow = DoubleValue.LowPart << 2;
+ DoubleOperand1.MantissaHigh = ((DoubleValue.HighPart &
+ 0xfffff) | 0x100000) << 2;
+
+ DoubleOperand1.MantissaHigh |= (DoubleValue.LowPart >> 30);
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating absolute operation.
+ //
+ // Floating absolute is accomplished by clearing the sign
+ // of the floating value.
+ //
+
+ case FLOAT_ABSOLUTE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Clear the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ SingleOperand1.Sign = 0;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Clear the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ DoubleOperand1.Sign = 0;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating move operation.
+ //
+ // Floating move is accomplished by moving the source operand
+ // to the destination register.
+ //
+
+ case FLOAT_MOVE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Normalize the result and store in the destination
+ // register.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Normalize the result and store in the destination
+ // register.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating negate operation.
+ //
+ // Floating negate is accomplished by complementing the sign
+ // of the floating value.
+ //
+
+ case FLOAT_NEGATE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Complement the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ SingleOperand1.Sign ^= 0x1;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Complement the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ DoubleOperand1.Sign ^= 0x1;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating compare single.
+ //
+ // This operation is performed after having separated out NaNs,
+ // and therefore the only comparison predicates left are equal
+ // and less.
+ //
+ // Floating compare single is accomplished by comparing signs,
+ // then exponents, and finally the mantissa if necessary.
+ //
+ // N.B. The sign of zero is ignored.
+ //
+
+ case FLOAT_COMPARE_SINGLE:
+
+ //
+ // If either operand is zero, then set the sign of the operand
+ // positive.
+ //
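+ // Forcing the sign positive makes +0 and -0 compare equal; the
+ // artificial exponent is presumably chosen to keep zero below
+ // the smallest nonzero unpacked magnitude.
+ //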
+
+ if ((SingleOperand1.Infinity == FALSE) &&
+ (SingleOperand1.Mantissa == 0)) {
+ SingleOperand1.Sign = 0;
+ SingleOperand1.Exponent = - 23;
+ }
+
+ if ((SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ SingleOperand2.Sign = 0;
+ SingleOperand2.Exponent = - 23;
+ }
+
+ //
+ // Compare signs first.
+ //
+
+ if (SingleOperand1.Sign < SingleOperand2.Sign) {
+
+ //
+ // The first operand is greater than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand1.Sign > SingleOperand2.Sign) {
+
+ //
+ // The first operand is less than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+
+ //
+ // The operand signs are equal.
+ //
+ // If the sign of the operand is negative, then the sense of
+ // the comparison is reversed.
+ //
+
+ if (SingleOperand1.Sign == 0) {
+
+ //
+ // Compare positive operand with positive operand.
+ //
+
+ if (SingleOperand1.Exponent > SingleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand1.Exponent < SingleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (SingleOperand1.Mantissa > SingleOperand2.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand1.Mantissa < SingleOperand2.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+
+ } else {
+
+ //
+ // Compare negative operand with negative operand.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand2.Exponent < SingleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (SingleOperand2.Mantissa > SingleOperand1.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand2.Mantissa < SingleOperand1.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+ }
+
+ //
+ // Form the condition code using the comparison information
+ // and the compare function predicate bits.
+ //
+
+ if (((CompareLess != FALSE) &&
+ ((CompareFunction & COMPARE_LESS_MASK) != 0)) ||
+ ((CompareEqual != FALSE) &&
+ ((CompareFunction & COMPARE_EQUAL_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ TrapFrame->Fir = ContextBlock.BranchAddress;
+ return TRUE;
+
+ //
+ // Floating compare double.
+ //
+ // This operation is performed after having separated out NaNs,
+ // and therefore the only comparison predicates left are equal
+ // and less.
+ //
+ // Floating compare double is accomplished by comparing signs,
+ // then exponents, and finally the mantissa if necessary.
+ //
+ // N.B. The sign of zero is ignored.
+ //
+
+ case FLOAT_COMPARE_DOUBLE:
+
+ //
+ // If either operand is zero, then set the sign of the operand
+ // positive.
+ //
+
+ if ((DoubleOperand1.Infinity == FALSE) &&
+ (DoubleOperand1.MantissaHigh == 0)) {
+ DoubleOperand1.Sign = 0;
+ DoubleOperand1.Exponent = - 52;
+ }
+
+ if ((DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ DoubleOperand2.Sign = 0;
+ DoubleOperand2.Exponent = - 52;
+ }
+
+ //
+ // Compare signs first.
+ //
+
+ if (DoubleOperand1.Sign < DoubleOperand2.Sign) {
+
+ //
+ // The first operand is greater than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.Sign > DoubleOperand2.Sign) {
+
+ //
+ // The first operand is less than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+
+ //
+ // The operand signs are equal.
+ //
+ // If the sign of the operand is negative, then the sense of
+ // the comparison is reversed.
+ //
+
+ if (DoubleOperand1.Sign == 0) {
+
+ //
+ // Compare positive operand with positive operand.
+ //
+
+ if (DoubleOperand1.Exponent > DoubleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.Exponent < DoubleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand1.MantissaHigh >
+ DoubleOperand2.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.MantissaHigh <
+ DoubleOperand2.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand1.MantissaLow >
+ DoubleOperand2.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.MantissaLow <
+ DoubleOperand2.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+
+ } else {
+
+ //
+ // Compare negative operand with negative operand.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.Exponent < DoubleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand2.MantissaHigh >
+ DoubleOperand1.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.MantissaHigh <
+ DoubleOperand1.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand2.MantissaLow >
+ DoubleOperand1.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.MantissaLow <
+ DoubleOperand1.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // Form the condition code using the comparison information
+ // and the compare function predicate bits.
+ //
+
+ if (((CompareLess != FALSE) &&
+ ((CompareFunction & COMPARE_LESS_MASK) != 0)) ||
+ ((CompareEqual != FALSE) &&
+ ((CompareFunction & COMPARE_EQUAL_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ TrapFrame->Fir = ContextBlock.BranchAddress;
+ return TRUE;
+
+ //
+ // Floating convert to single.
+ //
+ // This operation is only legal for conversion from quadword,
+ // longword, and double formats to single format. This operation
+ // cannot be used to convert from a single format to a single format.
+ //
+ // Floating conversion to single is accomplished by forming a
+ // single floating operand and then normalizing and storing the
+ // result value.
+ //
+
+ case FLOAT_CONVERT_SINGLE:
+ if (Format == FORMAT_SINGLE) {
+ break;
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is a NaN, then store a quiet NaN if the
+ // invalid operation trap is disabled, or raise an exception
+ // if the invalid operation trap is enabled and the operand
+ // is a signaling NaN.
+ //
+
+ if (DoubleOperand1.Nan != FALSE) {
+ SingleOperand1.Mantissa =
+ (DoubleOperand1.MantissaHigh << (26 - (55 - 32))) |
+ (DoubleOperand1.MantissaLow >> (32 - (26 - (55 - 32))));
+ SingleOperand1.Exponent = SINGLE_MAXIMUM_EXPONENT;
+ SingleOperand1.Sign = DoubleOperand1.Sign;
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = TRUE;
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand1);
+
+ }
+
+ //
+ // Transform the double operand to single format.
+ //
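+ // The 55-bit double mantissa is split 23/32 across the high and
+ // low words, so its top 26 bits are (high << 3) | (low >> 29);
+ // the 29 low-order bits shifted out become the sticky bits.
+ //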
+
+ SingleOperand1.Mantissa =
+ (DoubleOperand1.MantissaHigh << (26 - (55 - 32))) |
+ (DoubleOperand1.MantissaLow >> (32 - (26 - (55 - 32))));
+ StickyBits = DoubleOperand1.MantissaLow << (26 - (55 - 32));
+ SingleOperand1.Exponent = DoubleOperand1.Exponent +
+ SINGLE_EXPONENT_BIAS - DOUBLE_EXPONENT_BIAS;
+ SingleOperand1.Sign = DoubleOperand1.Sign;
+ SingleOperand1.Infinity = DoubleOperand1.Infinity;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_LONGWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (Longword < 0) {
+ SingleOperand1.Sign = 0x1;
+ Longword = -Longword;
+
+ } else {
+ SingleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the longword
+ // value.
+ //
+
+ if (Longword != 0) {
+ SingleOperand1.Exponent = SINGLE_EXPONENT_BIAS + 31;
+ while (Longword > 0) {
+ Longword <<= 1;
+ SingleOperand1.Exponent -= 1;
+ }
+
+ SingleOperand1.Mantissa = (ULONG)Longword >> (32 - 26);
+ StickyBits = Longword << 26;
+
+ } else {
+ SingleOperand1.Mantissa = 0;
+ StickyBits = 0;
+ SingleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_QUADWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (u.Quadword < 0) {
+ SingleOperand1.Sign = 0x1;
+ u.Quadword = -u.Quadword;
+
+ } else {
+ SingleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the quadword
+ // value.
+ //
+
+ if (u.Quadword != 0) {
+ SingleOperand1.Exponent = SINGLE_EXPONENT_BIAS + 63;
+ while (u.Quadword > 0) {
+ u.Quadword <<= 1;
+ SingleOperand1.Exponent -= 1;
+ }
+
+ SingleOperand1.Mantissa = (LONG)((ULONGLONG)u.Quadword >> (64 - 26));
+ StickyBits = (u.Quadword << 26) ? 1 : 0;
+
+ } else {
+ SingleOperand1.Mantissa = 0;
+ StickyBits = 0;
+ SingleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating convert to double.
+ //
+ // This operation is only legal for conversion from quadword,
+ // longword, and single formats to double format. This operation
+ // cannot be used to convert from a double format to a double
+ // format.
+ //
+ // Floating conversion to double is accomplished by forming
+ // a double floating operand and then normalizing and storing
+ // the result value.
+ //
+
+ case FLOAT_CONVERT_DOUBLE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is a NaN, then store a quiet NaN if the
+ // invalid operation trap is disabled, or raise an exception
+ // if the invalid operation trap is enabled and the operand
+ // is a signaling NaN.
+ //
+
+ if (SingleOperand1.Nan != FALSE) {
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow = (0xffffffff >> (26 - 2 - (55 - 32))) |
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = DOUBLE_MAXIMUM_EXPONENT;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = TRUE;
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand1);
+
+ }
+
+ //
+ // Transform the single operand to double format.
+ //
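+ // The 26-bit single mantissa is widened to the 55-bit double
+ // layout by placing its top 23 bits in the high word (m >> 3)
+ // and its low 3 bits at the top of the low word (m << 29); the
+ // conversion is exact, so no sticky bits are produced.
+ //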
+
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow =
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = SingleOperand1.Exponent +
+ DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = SingleOperand1.Infinity;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+ break;
+
+ } else if (Format == FORMAT_LONGWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (Longword < 0) {
+ DoubleOperand1.Sign = 0x1;
+ Longword = -Longword;
+
+ } else {
+ DoubleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the longword
+ // value.
+ //
+
+ if (Longword != 0) {
+ DoubleOperand1.Exponent = DOUBLE_EXPONENT_BIAS + 31;
+ while (Longword > 0) {
+ Longword <<= 1;
+ DoubleOperand1.Exponent -= 1;
+ }
+
+ DoubleOperand1.Mantissa = (ULONGLONG)Longword >> (64 - 55);
+
+ } else {
+ DoubleOperand1.Mantissa = 0;
+ DoubleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_QUADWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (u.Quadword < 0) {
+ DoubleOperand1.Sign = 0x1;
+ u.Quadword = -u.Quadword;
+
+ } else {
+ DoubleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the quadword
+ // value.
+ //
+
+ if (u.Quadword != 0) {
+ DoubleOperand1.Exponent = DOUBLE_EXPONENT_BIAS + 63;
+ while (u.Quadword > 0) {
+ u.Quadword <<= 1;
+ DoubleOperand1.Exponent -= 1;
+ }
+
+ DoubleOperand1.Mantissa = (ULONGLONG)u.Quadword >> (64 - 55);
+ StickyBits = (u.Quadword << 55) ? 1 : 0;
+
+ } else {
+ DoubleOperand1.Mantissa = 0;
+ StickyBits = 0;
+ DoubleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating convert to quadword.
+ //
+ // This operation is only legal for conversion from double
+ // and single formats to quadword format. This operation
+ // cannot be used to convert from a quadword format to a
+ // longword or quadword format.
+ //
+ // Floating conversion to quadword is accomplished by forming
+ // a quadword value from a single or double floating value.
+ //
+ // There is one general conversion operation and four directed
+ // rounding operations.
+ //
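+ // For example, with a source value of -1.5 the rounded results
+ // are -2 (nearest, ties to even), -1 (truncate), -1 (ceiling),
+ // and -2 (floor).
+ //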
+
+ case FLOAT_ROUND_QUADWORD:
+ ContextBlock.Round = ROUND_TO_NEAREST;
+ goto ConvertQuadword;
+
+ case FLOAT_TRUNC_QUADWORD:
+ ContextBlock.Round = ROUND_TO_ZERO;
+ goto ConvertQuadword;
+
+ case FLOAT_CEIL_QUADWORD:
+ ContextBlock.Round = ROUND_TO_PLUS_INFINITY;
+ goto ConvertQuadword;
+
+ case FLOAT_FLOOR_QUADWORD:
+ ContextBlock.Round = ROUND_TO_MINUS_INFINITY;
+ goto ConvertQuadword;
+
+ case FLOAT_CONVERT_QUADWORD:
+ ConvertQuadword:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((SingleOperand1.Infinity != FALSE) ||
+ (SingleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationQuadword(&ContextBlock,
+ SingleOperand1.Infinity,
+ SingleOperand1.Sign);
+ }
+
+ //
+ // Transform the single operand to double format.
+ //
+
+ DoubleOperand1.Mantissa = (LONGLONG)SingleOperand1.Mantissa << (55 - 26);
+ DoubleOperand1.Exponent = SingleOperand1.Exponent +
+ DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS;
+
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Convert double to quadword and store the result value.
+ //
+
+ return KiNormalizeQuadword(&ContextBlock, &DoubleOperand1);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) ||
+ (DoubleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationQuadword(&ContextBlock,
+ DoubleOperand1.Infinity,
+ DoubleOperand1.Sign);
+ }
+
+ //
+ // Convert double to quadword and store the result value.
+ //
+
+ return KiNormalizeQuadword(&ContextBlock, &DoubleOperand1);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating convert to longword.
+ //
+ // This operation is only legal for conversion from double
+ // and single formats to longword format. This operation
+ // cannot be used to convert from a longword format to a
+ // longword format.
+ //
+ // Floating conversion to longword is accomplished by forming
+ // a longword value from a single or double floating value.
+ //
+ // There is one general conversion operation and four directed
+ // rounding operations.
+ //
+
+ case FLOAT_ROUND_LONGWORD:
+ ContextBlock.Round = ROUND_TO_NEAREST;
+ goto ConvertLongword;
+
+ case FLOAT_TRUNC_LONGWORD:
+ ContextBlock.Round = ROUND_TO_ZERO;
+ goto ConvertLongword;
+
+ case FLOAT_CEIL_LONGWORD:
+ ContextBlock.Round = ROUND_TO_PLUS_INFINITY;
+ goto ConvertLongword;
+
+ case FLOAT_FLOOR_LONGWORD:
+ ContextBlock.Round = ROUND_TO_MINUS_INFINITY;
+ goto ConvertLongword;
+
+ case FLOAT_CONVERT_LONGWORD:
+ ConvertLongword:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((SingleOperand1.Infinity != FALSE) ||
+ (SingleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationLongword(&ContextBlock,
+ SingleOperand1.Infinity,
+ SingleOperand1.Sign);
+ }
+
+ //
+ // Transform the single operand to double format.
+ //
+
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow =
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = SingleOperand1.Exponent +
+ DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Convert double to longword and store the result value.
+ //
+
+ return KiNormalizeLongword(&ContextBlock, &DoubleOperand1);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) ||
+ (DoubleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationLongword(&ContextBlock,
+ DoubleOperand1.Infinity,
+ DoubleOperand1.Sign);
+ }
+
+ //
+ // Convert double to longword and store the result value.
+ //
+
+ return KiNormalizeLongword(&ContextBlock, &DoubleOperand1);
+
+ } else {
+ break;
+ }
+
+ //
+ // An illegal function, format value, or field value.
+ //
+
+ default :
+ break;
+ }
+
+ //
+ // An illegal function, format value, or field value was encountered.
+ // Generate an illegal instruction exception.
+ //
+
+ ExceptionRecord->ExceptionCode = STATUS_ILLEGAL_INSTRUCTION;
+ return FALSE;
+
+ //
+ // If an exception occurs, then copy the new exception information to the
+ // original exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address and branch destination.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+ return FALSE;
+ }
+}
+
+BOOLEAN
+KiDivideByZeroDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN or properly signed infinity for a divide by zero double
+ floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+ DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the divide by zero trap is enabled and the dividend is not infinite,
+ then a value of FALSE is returned. Otherwise, a quiet NaN or a properly
+ signed infinity is stored as the destination result and a value of TRUE
+ is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultSign;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // The result value is a properly signed infinity.
+ //
+
+ ResultSign = DoubleOperand1->Sign ^ DoubleOperand2->Sign;
+ ResultValueHigh = DOUBLE_INFINITY_VALUE_HIGH | (ResultSign << 31);
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+
+ //
+ // If the first operand is not infinite and the divide by zero trap is
+ // enabled, then store the proper exception code and exception flags
+ // and return a value of FALSE. Otherwise, store the appropriately signed
+ // infinity and return a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (DoubleOperand1->Infinity == FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SZ = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EZ != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ ((PFSR)&TrapFrame->Fsr)->XZ = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValueLow,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ ResultValueHigh,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiDivideByZeroSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN or properly signed infinity for a divide by zero single
+ floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+ SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the divide by zero trap is enabled and the dividend is not infinite,
+ then a value of FALSE is returned. Otherwise, a quiet NaN or a properly
+ signed infinity is stored as the destination result and a value of TRUE
+ is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultSign;
+ ULONG ResultValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // The result value is a properly signed infinity.
+ //
+
+ ResultSign = SingleOperand1->Sign ^ SingleOperand2->Sign;
+ ResultValue = SINGLE_INFINITY_VALUE | (ResultSign << 31);
+
+ //
+ // If the first operand is not infinite and the divide by zero trap is
+ // enabled, then store the proper exception code and exception flags
+ // and return a value of FALSE. Otherwise, store the appropriately signed
+ // infinity and return a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (SingleOperand1->Infinity == FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SZ = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EZ != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ ((PFSR)&TrapFrame->Fsr)->XZ = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidCompareDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to determine whether an invalid operation
+ exception should be raised for a double compare operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+ DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, no operation is performed and a value
+ of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, perform no operation and return a
+ // value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((DoubleOperand1->Nan != FALSE) &&
+ ((DoubleOperand1->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((DoubleOperand2->Nan != FALSE) &&
+ ((DoubleOperand2->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.CompareValue = FpCompareUnordered;
+ return FALSE;
+ }
+ }
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidCompareSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to determine whether an invalid operation
+ exception should be raised for a single compare operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+ SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, no operation is performed and a value
+ of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, perform no operation and return a
+ // value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((SingleOperand1->Nan != FALSE) &&
+ ((SingleOperand1->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((SingleOperand2->Nan != FALSE) &&
+ ((SingleOperand2->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.CompareValue = FpCompareUnordered;
+ return FALSE;
+ }
+ }
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidOperationDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid double floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+ DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, a quiet NaN is stored as the destination
+ result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG MantissaHigh;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the first operand is a NaN, then compute a quiet NaN from its
+ // value. Otherwise, if the second operand is a NaN, then compute a
+ // quiet NaN from its value. Otherwise, the result value is a quiet
+ // NaN.
+
+ if (DoubleOperand1->Nan != FALSE) {
+ MantissaHigh = DoubleOperand1->MantissaHigh & ~DOUBLE_SIGNAL_NAN_MASK;
+ if ((DoubleOperand1->MantissaLow | MantissaHigh) != 0) {
+ ResultValueLow = DoubleOperand1->MantissaLow >> 2;
+ ResultValueLow |= DoubleOperand1->MantissaHigh << 30;
+ ResultValueHigh = DoubleOperand1->MantissaHigh >> 2;
+ ResultValueHigh |= DOUBLE_QUIET_NAN_PREFIX;
+ ResultValueHigh &= ~DOUBLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValueLow = DOUBLE_NAN_LOW;
+ ResultValueHigh = DOUBLE_QUIET_NAN;
+ }
+
+ } else if (DoubleOperand2->Nan != FALSE) {
+ MantissaHigh = DoubleOperand2->MantissaHigh & ~DOUBLE_SIGNAL_NAN_MASK;
+ if ((DoubleOperand2->MantissaLow | MantissaHigh) != 0) {
+ ResultValueLow = DoubleOperand2->MantissaLow >> 2;
+ ResultValueLow |= DoubleOperand2->MantissaHigh << 30;
+ ResultValueHigh = DoubleOperand2->MantissaHigh >> 2;
+ ResultValueHigh |= DOUBLE_QUIET_NAN_PREFIX;
+ ResultValueHigh &= ~DOUBLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValueLow = DOUBLE_NAN_LOW;
+ ResultValueHigh = DOUBLE_QUIET_NAN;
+ }
+
+ } else {
+ ResultValueLow = DOUBLE_NAN_LOW;
+ ResultValueHigh = DOUBLE_QUIET_NAN;
+ }
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, store a quiet NaN as the destination
+ // result and return a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((DoubleOperand1->Nan != FALSE) &&
+ ((DoubleOperand1->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((DoubleOperand2->Nan != FALSE) &&
+ ((DoubleOperand2->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValueLow,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ ResultValueHigh,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidOperationLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid conversion to longword.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ Infinity - Supplies a boolean variable that specifies whether the
+ invalid operand is infinite.
+
+ Sign - Supplies the infinity sign if the invalid operand is infinite.
+
+Return Value:
+
+ If the invalid operation trap is enabled, then a value of FALSE is
+ returned. Otherwise, an appropriate longword value is stored as the
+ destination result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the value is infinite, then the result is a properly signed value
+ // whose magnitude is the largest that will fit in 32-bits. Otherwise,
+ // the result is an integer NaN.
+ //
+
+ if (Infinity != FALSE) {
+ if (Sign == 0) {
+ ResultValue = 0x7fffffff;
+
+ } else {
+ ResultValue = 0x80000000;
+ }
+
+ } else {
+ ResultValue = SINGLE_INTEGER_NAN;
+ }
+
+ //
+ // If the invalid operation trap is enabled then store the proper
+ // exception code and exception flags and return a value of FALSE.
+ // Otherwise, store a quiet NaN as the destination result and return
+ // a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U32Value = ResultValue;
+ return FALSE;
+
+ } else {
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+}
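The selection above follows the usual convention for an invalid floating-point to 32-bit integer conversion: an infinity saturates to the largest magnitude of the appropriate sign, and any other invalid operand produces a fixed integer NaN pattern. A minimal standalone sketch of that selection, in which INT32_MIN merely stands in for the module's SINGLE_INTEGER_NAN constant:

#include <stdint.h>

/*
 * Illustrative sketch: choose the 32-bit result for an invalid
 * conversion. Infinities saturate according to their sign; any other
 * invalid operand (for example a NaN) yields a fixed "integer NaN"
 * pattern, for which INT32_MIN is assumed purely for illustration.
 */
static int32_t
InvalidConversionResult32 (
    int IsInfinity,
    int SignIsNegative
    )
{
    if (IsInfinity) {
        return SignIsNegative ? INT32_MIN : INT32_MAX;
    }

    return INT32_MIN;
}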
+
+BOOLEAN
+KiInvalidOperationQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid conversion to quadword.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ Infinity - Supplies a boolean variable that specifies whether the
+ invalid operand is infinite.
+
+ Sign - Supplies the infinity sign if the invalid operand is infinite.
+
+Return Value:
+
+ If the invalid operation trap is enabled, then a value of FALSE is
+ returned. Otherwise, an appropriate quadword value is stored as the
+ destination result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ union {
+ ULONGLONG ResultValue;
+ ULARGE_INTEGER LargeValue;
+ }u;
+
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the value is infinite, then the result is a properly signed value
+ // whose magnitude is the largest that will fit in 64-bits. Otherwise,
+ // the result is an integer NaN.
+ //
+
+ if (Infinity != FALSE) {
+ if (Sign == 0) {
+ u.ResultValue = 0x7fffffffffffffff;
+
+ } else {
+ u.ResultValue = 0x8000000000000000;
+ }
+
+ } else {
+ u.ResultValue = DOUBLE_INTEGER_NAN;
+ }
+
+ //
+ // If the invalid operation trap is enabled then store the proper
+ // exception code and exception flags and return a value of FALSE.
+ // Otherwise, store a quiet NaN as the destination result and return
+ // a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U64Value.QuadPart = u.ResultValue;
+ return FALSE;
+
+ } else {
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ u.LargeValue.LowPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 33,
+ u.LargeValue.HighPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+}
+
+BOOLEAN
+KiInvalidOperationSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid single floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+ SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, a quiet NaN is stored as the destination
+ result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the first operand is a NaN, then compute a quiet NaN from its
+ // value. Otherwise, if the second operand is a NaN, then compute a
+ // quiet NaN from its value. Otherwise, the result value is a quiet
+ // NaN.
+ //
+
+ if (SingleOperand1->Nan != FALSE) {
+ if ((SingleOperand1->Mantissa & ~SINGLE_SIGNAL_NAN_MASK) != 0) {
+ ResultValue = SingleOperand1->Mantissa >> 2;
+ ResultValue |= SINGLE_QUIET_NAN_PREFIX;
+ ResultValue &= ~SINGLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValue = SINGLE_QUIET_NAN;
+ }
+
+ } else if (SingleOperand2->Nan != FALSE) {
+ if ((SingleOperand2->Mantissa & ~SINGLE_SIGNAL_NAN_MASK) != 0) {
+ ResultValue = SingleOperand2->Mantissa >> 2;
+ ResultValue |= SINGLE_QUIET_NAN_PREFIX;
+ ResultValue &= ~SINGLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValue = SINGLE_QUIET_NAN;
+ }
+
+ } else {
+ ResultValue = SINGLE_QUIET_NAN;
+ }
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, store a quiet NaN as the destination
+ // result and return a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((SingleOperand1->Nan != FALSE) &&
+ ((SingleOperand1->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((SingleOperand2->Nan != FALSE) &&
+ ((SingleOperand2->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to normalize a double floating result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and a possible overflow bit.
+ The result format is:
+
+ <63:56> - zero
+ <55> - overflow bit
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ The sticky bits specify bits that were lost during the computation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+ StickyBits - Supplies the value of the sticky bits.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG DenormalizeShift;
+ PEXCEPTION_RECORD ExceptionRecord;
+ ULONG ExceptionResultHigh;
+ ULONG ExceptionResultLow;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ ULONG RoundBit;
+ PKTRAP_FRAME TrapFrame;
+ BOOLEAN Underflow;
+
+ //
+ // If the result is infinite, then store a properly signed infinity
+ // in the destination register and return a value of TRUE. Otherwise,
+ // round and normalize the result and check for overflow and underflow.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (ResultOperand->Infinity != FALSE) {
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ DOUBLE_INFINITY_VALUE_LOW,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ DOUBLE_INFINITY_VALUE_HIGH | (ResultOperand->Sign << 31),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+
+ //
+ // If the overflow bit is set, then right shift the mantissa one bit,
+ // accumulate the lost bit with the sticky bits, and adjust the exponent
+ // value.
+ //
+
+ if ((ResultOperand->MantissaHigh & (1 << (55 - 32))) != 0) {
+ StickyBits |= (ResultOperand->MantissaLow & 0x1);
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the mantissa is not zero, then normalize the mantissa by left
+ // shifting one bit at a time until there is a one bit in bit 54.
+ //
+
+ if ((ResultOperand->MantissaLow != 0) || (ResultOperand->MantissaHigh != 0)) {
+ while ((ResultOperand->MantissaHigh & (1 << (54 - 32))) == 0) {
+ ResultOperand->MantissaHigh =
+ (ResultOperand->MantissaHigh << 1) |
+ (ResultOperand->MantissaLow >> 31);
+
+ ResultOperand->MantissaLow <<= 1;
+ ResultOperand->Exponent -= 1;
+ }
+ }
+
+ //
+ // Right shift the mantissa one bit and accumulate the lost bit with the
+ // sticky bits.
+ //
+
+ StickyBits |= (ResultOperand->MantissaLow & 0x1);
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+
+ //
+ // Round the result value using the mantissa and the sticky bits.
+ //
+
+ RoundBit = ResultOperand->MantissaLow & 0x1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((ResultOperand->MantissaLow & 0x2) != 0)) {
+ ResultOperand->MantissaLow += 2;
+ if (ResultOperand->MantissaLow < 2) {
+ ResultOperand->MantissaHigh += 1;
+ }
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->MantissaLow += 2;
+ if (ResultOperand->MantissaLow < 2) {
+ ResultOperand->MantissaHigh += 1;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->MantissaLow += 2;
+ if (ResultOperand->MantissaLow < 2) {
+ ResultOperand->MantissaHigh += 1;
+ }
+ }
+
+ break;
+ }
+
+ //
+ // If rounding resulted in a carry into bit 54, then right shift the
+ // mantissa one bit and adjust the exponent.
+ //
+
+ if ((ResultOperand->MantissaHigh & (1 << (54 - 32))) != 0) {
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // Right shift the mantissa one bit to normalize the final result.
+ //
+
+ StickyBits |= ResultOperand->MantissaLow & 0x1;
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+
+ //
+ // If the exponent value is greater than or equal to the maximum
+ // exponent value, then overflow has occurred. This results in both
+ // the inexact and overflow sticky bits being set in FSR.
+ //
+ // If the exponent value is less than or equal to the minimum exponent
+ // value, the mantissa is nonzero, and the result is inexact or the
+ // denormalized result causes loss of accuracy, then underflow has
+ // occurred. If denormals are being flushed to zero, then a result of
+ // zero is returned. Otherwise, both the inexact and underflow sticky
+ // bits are set in FSR.
+ //
+ // Otherwise, a normal result can be delivered, but it may be inexact.
+ // If the result is inexact, then the inexact sticky bit is set in FSR.
+ //
+
+ if (ResultOperand->Exponent >= DOUBLE_MAXIMUM_EXPONENT) {
+ Inexact = TRUE;
+ Overflow = TRUE;
+ Underflow = FALSE;
+
+ //
+ // The overflow value is dependent on the rounding mode.
+ //
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+ // The result value is infinity with the sign of the result.
+ //
+
+ case ROUND_TO_NEAREST:
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+ ResultValueHigh =
+ DOUBLE_INFINITY_VALUE_HIGH | (ResultOperand->Sign << 31);
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+ // The result is the maximum number with the sign of the result.
+ //
+
+ case ROUND_TO_ZERO:
+ ResultValueLow = DOUBLE_MAXIMUM_VALUE_LOW;
+ ResultValueHigh =
+ DOUBLE_MAXIMUM_VALUE_HIGH | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+ // If the sign of the result is positive, then the result is
+ // plus infinity. Otherwise, the result is the maximum negative
+ // number.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if (ResultOperand->Sign == 0) {
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+ ResultValueHigh = DOUBLE_INFINITY_VALUE_HIGH;
+
+ } else {
+ ResultValueLow = DOUBLE_MAXIMUM_VALUE_LOW;
+ ResultValueHigh = (ULONG)(DOUBLE_MAXIMUM_VALUE_HIGH | (1 << 31));
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+ // If the sign of the result is negative, then the result is
+ // negative infinity. Otherwise, the result is the maximum
+ // positive number.
+ //
+
+
+ case ROUND_TO_MINUS_INFINITY:
+ if (ResultOperand->Sign != 0) {
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+ ResultValueHigh = (ULONG)(DOUBLE_INFINITY_VALUE_HIGH | (1 << 31));
+
+ } else {
+ ResultValueLow = DOUBLE_MAXIMUM_VALUE_LOW;
+ ResultValueHigh = DOUBLE_MAXIMUM_VALUE_HIGH;
+ }
+
+ break;
+ }
+
+ //
+ // Compute the overflow exception result value by subtracting 1536
+ // from the exponent.
+ //
+
+ ExceptionResultLow = ResultOperand->MantissaLow;
+ ExceptionResultHigh = ResultOperand->MantissaHigh & ((1 << (52 - 32)) - 1);
+ ExceptionResultHigh |= ((ResultOperand->Exponent - 1536) << (52 - 32));
+ ExceptionResultHigh |= (ResultOperand->Sign << 31);
+
+ } else {
+ if ((ResultOperand->Exponent <= DOUBLE_MINIMUM_EXPONENT) &&
+ (ResultOperand->MantissaHigh != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->FS == 0) {
+ DenormalizeShift = 1 - ResultOperand->Exponent;
+ if (DenormalizeShift >= 53) {
+ DenormalizeShift = 53;
+ }
+
+ if (DenormalizeShift >= 32) {
+ DenormalizeShift -= 32;
+ StickyBits |= ResultOperand->MantissaLow |
+ (ResultOperand->MantissaHigh & ((1 << DenormalizeShift) - 1));
+
+ ResultValueLow = ResultOperand->MantissaHigh >> DenormalizeShift;
+ ResultValueHigh = 0;
+
+ } else if (DenormalizeShift > 0) {
+ StickyBits |=
+ ResultOperand->MantissaLow & ((1 << DenormalizeShift) - 1);
+
+ ResultValueLow =
+ (ResultOperand->MantissaLow >> DenormalizeShift) |
+ (ResultOperand->MantissaHigh << (32 - DenormalizeShift));
+
+ ResultValueHigh =
+ (ResultOperand->MantissaHigh >> DenormalizeShift);
+
+ } else {
+ ResultValueLow = ResultOperand->MantissaLow;
+ ResultValueHigh = ResultOperand->MantissaHigh;
+ }
+
+ ResultValueHigh |= (ResultOperand->Sign << 31);
+ if (StickyBits != 0) {
+ Inexact = TRUE;
+ Overflow = FALSE;
+ Underflow = TRUE;
+
+ //
+ // Compute the underflow exception result value by adding
+ // 1536 to the exponent.
+ //
+
+ ExceptionResultLow = ResultOperand->MantissaLow;
+ ExceptionResultHigh = ResultOperand->MantissaHigh & ((1 << (52 - 32)) - 1);
+ ExceptionResultHigh |= ((ResultOperand->Exponent + 1536) << (52 - 32));
+ ExceptionResultHigh |= (ResultOperand->Sign << 31);
+
+ } else {
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ ResultValueLow = 0;
+ ResultValueHigh = 0;
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ if (ResultOperand->MantissaHigh == 0) {
+ ResultOperand->Exponent = 0;
+ }
+
+ ResultValueLow = ResultOperand->MantissaLow;
+ ResultValueHigh = ResultOperand->MantissaHigh & ((1 << (52 - 32)) - 1);
+ ResultValueHigh |= (ResultOperand->Exponent << (52 - 32));
+ ResultValueHigh |= (ResultOperand->Sign << 31);
+ Inexact = StickyBits ? TRUE : FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SO = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EO != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EO != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ExceptionResultLow;
+ IeeeValue->Value.Fp64Value.W[1] = ExceptionResultHigh;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XO = 1;
+ return FALSE;
+ }
+
+ } else if (Underflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SU = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EU != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EU != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ExceptionResultLow;
+ IeeeValue->Value.Fp64Value.W[1] = ExceptionResultHigh;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XU = 1;
+ return FALSE;
+ }
+
+ } else if (Inexact != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValueLow,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ ResultValueHigh,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
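The rounding performed above is the classic guard/round/sticky scheme: in the default round-to-nearest mode the result is bumped when the round bit is set and either a sticky bit is set or the low-order result bit is already odd, so that ties round to even. A compact standalone statement of that rule; the routine above adds 2 rather than 1 only because it keeps the round bit in place while rounding:

#include <stdint.h>

/*
 * Illustrative sketch of round-to-nearest-even. Mantissa carries one
 * extra round bit on the right; Sticky is nonzero if any lower-order
 * bits were lost earlier. Returns the rounded mantissa with the extra
 * bit removed; a carry out of the top bit is left for the caller to
 * renormalize.
 */
static uint64_t
RoundToNearestEven (
    uint64_t Mantissa,
    uint32_t Sticky
    )
{
    uint64_t RoundBit = Mantissa & 1;

    Mantissa >>= 1;
    if ((RoundBit != 0) && ((Sticky != 0) || ((Mantissa & 1) != 0))) {
        Mantissa += 1;
    }

    return Mantissa;
}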
+
+BOOLEAN
+KiNormalizeLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to convert a result value to a longword result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and an overflow bit of zero.
+ The result format is:
+
+ <63:55> - zero
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ There are no sticky bits.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ LONG ExponentShift;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ ULONG ResultValue;
+ ULONG RoundBit;
+ ULONG StickyBits;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // Subtract out the exponent bias and divide the cases into right
+ // and left shifts.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ExponentShift = ResultOperand->Exponent - DOUBLE_EXPONENT_BIAS;
+ if (ExponentShift < 23) {
+
+ //
+ // The integer value is less than 2**23 and a right shift must
+ // be performed.
+ //
+
+ ExponentShift = 22 - ExponentShift;
+ if (ExponentShift > 24) {
+ ExponentShift = 24;
+ }
+
+ StickyBits =
+ (ResultOperand->MantissaLow >> 2) |
+ (ResultOperand->MantissaHigh << (32 - ExponentShift));
+
+ ResultValue = ResultOperand->MantissaHigh >> ExponentShift;
+ Overflow = FALSE;
+
+ } else {
+
+ //
+ // The integer value is two or greater and a left shift must be
+ // performed.
+ //
+
+ ExponentShift -= 22;
+ if (ExponentShift <= (31 - 22)) {
+ StickyBits = ResultOperand->MantissaLow << ExponentShift;
+ ResultValue =
+ (ResultOperand->MantissaHigh << ExponentShift) |
+ (ResultOperand->MantissaLow >> (32 - ExponentShift));
+
+ Overflow = FALSE;
+
+ } else {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Round the result value using the mantissa and the sticky bits.
+ //
+
+ RoundBit = StickyBits >> 31;
+ StickyBits <<= 1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((ResultValue & 0x1) != 0)) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) && (StickyBits != 0)) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) && (StickyBits != 0)) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+ }
+
+ //
+ // If the result is positive but the computed value has its sign bit
+ // set, then overflow has occurred. Otherwise, negate the computed
+ // value; if the sign bit is clear after negation, then overflow has
+ // occurred.
+ //
+
+ if (ResultOperand->Sign == 0) {
+ if ((ResultValue >> 31) != 0) {
+ Overflow = TRUE;
+ }
+
+ } else {
+ ResultValue = ~ResultValue + 1;
+ if ((ResultValue >> 31) == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ return KiInvalidOperationLongword(ContextBlock,
+ FALSE,
+ 0);
+
+ } else if ((StickyBits | RoundBit) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U32Value = ResultValue;
+ return FALSE;
+ }
+
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
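The final sign check above detects conversion overflow by inspecting bit 31 of the rounded magnitude: a positive result must not have it set, and a negative result must have it set after two's-complement negation. A standalone sketch of that check, with a zero magnitude accepted explicitly:

#include <stdint.h>

/*
 * Illustrative sketch: combine an unsigned 32-bit magnitude and a sign
 * into a signed 32-bit result, reporting overflow. Returns 1 on success
 * and 0 on overflow.
 */
static int
MakeSigned32 (
    uint32_t Magnitude,
    int SignIsNegative,
    int32_t *Result
    )
{
    if (!SignIsNegative) {
        if ((Magnitude >> 31) != 0) {
            return 0;
        }

        *Result = (int32_t)Magnitude;

    } else {
        Magnitude = ~Magnitude + 1;
        if (((Magnitude >> 31) == 0) && (Magnitude != 0)) {
            return 0;
        }

        *Result = (int32_t)Magnitude;
    }

    return 1;
}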
+
+BOOLEAN
+KiNormalizeQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to convert a result value to a quadword result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and an overflow bit of zero.
+ The result format is:
+
+ <63:55> - zero
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ There are no sticky bits.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ LONG ExponentShift;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ union {
+ ULONGLONG ResultValue;
+ ULARGE_INTEGER LargeValue;
+ }u;
+
+ ULONG RoundBit;
+ ULONG StickyBits;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // Subtract out the exponent bias and divide the cases into right
+ // and left shifts.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ExponentShift = ResultOperand->Exponent - DOUBLE_EXPONENT_BIAS;
+ if (ExponentShift < 54) {
+
+ //
+ // The integer value is less than 2**52 and a right shift must
+ // be performed.
+ //
+
+ ExponentShift = 54 - ExponentShift;
+ if (ExponentShift > 54) {
+ ExponentShift = 54;
+ }
+
+ StickyBits = (ULONG)(ResultOperand->Mantissa << (32 - ExponentShift));
+ u.ResultValue = ResultOperand->Mantissa >> ExponentShift;
+ Overflow = FALSE;
+
+ } else {
+
+ //
+ // The integer value is two or greater and a left shift must be
+ // performed.
+ //
+
+ ExponentShift -= 54;
+ if (ExponentShift <= (63 - 54)) {
+ StickyBits = 0;
+ u.ResultValue = ResultOperand->Mantissa << ExponentShift;
+ Overflow = FALSE;
+
+ } else {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Round the result value using the mantissa and the sticky bits.
+ //
+
+ RoundBit = StickyBits >> 31;
+ StickyBits <<= 1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((u.ResultValue & 0x1) != 0)) {
+ u.ResultValue += 1;
+ if (u.ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) && (StickyBits != 0)) {
+ u.ResultValue += 1;
+ if (u.ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) && (StickyBits != 0)) {
+ u.ResultValue += 1;
+ if (u.ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+ }
+
+ //
+ // If the result is positive but the computed value has its sign bit
+ // set, then overflow has occurred. Otherwise, negate the computed
+ // value; if the sign bit is clear after negation, then overflow has
+ // occurred.
+ //
+
+ if (ResultOperand->Sign == 0) {
+ if ((u.ResultValue >> 63) != 0) {
+ Overflow = TRUE;
+ }
+
+ } else {
+ u.ResultValue = ~u.ResultValue + 1;
+ if ((u.ResultValue >> 63) == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ return KiInvalidOperationQuadword(ContextBlock,
+ FALSE,
+ 0);
+
+ } else if ((StickyBits | RoundBit) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U64Value.QuadPart = u.ResultValue;
+ return FALSE;
+ }
+
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ u.LargeValue.LowPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 33,
+ u.LargeValue.HighPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to normalize a single floating result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and a possible overflow bit.
+ The result format is:
+
+ <31:27> - zero
+ <26> - overflow bit
+ <25> - hidden bit
+ <24:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ The sticky bits specify bits that were lost during the computation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+ StickyBits - Supplies the value of the sticky bits.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG DenormalizeShift;
+ PEXCEPTION_RECORD ExceptionRecord;
+ ULONG ExceptionResult;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ ULONG ResultValue;
+ ULONG RoundBit;
+ PKTRAP_FRAME TrapFrame;
+ BOOLEAN Underflow;
+
+ //
+ // If the result is infinite, then store a properly signed infinity
+ // in the destination register and return a value of TRUE. Otherwise,
+ // round and normalize the result and check for overflow and underflow.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (ResultOperand->Infinity != FALSE) {
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ SINGLE_INFINITY_VALUE | (ResultOperand->Sign << 31),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+
+ //
+ // If the overflow bit is set, then right shift the mantissa one bit,
+ // accumulate the lost bit with the sticky bits, and adjust the exponent
+ // value.
+ //
+
+ if ((ResultOperand->Mantissa & (1 << 26)) != 0) {
+ StickyBits |= (ResultOperand->Mantissa & 0x1);
+ ResultOperand->Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the mantissa is not zero, then normalize the mantissa by left
+ // shifting one bit at a time until there is a one bit in bit 25.
+ //
+
+ if (ResultOperand->Mantissa != 0) {
+ while ((ResultOperand->Mantissa & (1 << 25)) == 0) {
+ ResultOperand->Mantissa <<= 1;
+ ResultOperand->Exponent -= 1;
+ }
+ }
+
+ //
+ // Right shift the mantissa one bit and accumulate the lost bit with the
+ // sticky bits.
+ //
+
+ StickyBits |= (ResultOperand->Mantissa & 0x1);
+ ResultOperand->Mantissa >>= 1;
+
+ //
+ // Round the result value using the mantissa, the round bit, and the
+ // sticky bits.
+ //
+
+ RoundBit = ResultOperand->Mantissa & 0x1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((ResultOperand->Mantissa & 0x2) != 0)) {
+ ResultOperand->Mantissa += 2;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->Mantissa += 2;
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->Mantissa += 2;
+ }
+
+ break;
+ }
+
+ //
+ // If rounding resulted in a carry into bit 25, then right shift the
+ // mantissa one bit and adjust the exponent.
+ //
+
+ if ((ResultOperand->Mantissa & (1 << 25)) != 0) {
+ ResultOperand->Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // Right shift the mantissa one bit to normalize the final result.
+ //
+
+ StickyBits |= RoundBit;
+ ResultOperand->Mantissa >>= 1;
+
+ //
+ // If the exponent value is greater than or equal to the maximum
+ // exponent value, then overflow has occurred. This results in both
+ // the inexact and overflow sticky bits being set in FSR.
+ //
+ // If the exponent value is less than or equal to the minimum exponent
+ // value, the mantissa is nonzero, and the result is inexact or the
+ // denormalized result causes loss of accuracy, then underflow has
+ // occurred. If denormals are being flushed to zero, then a result of
+ // zero is returned. Otherwise, both the inexact and underflow sticky
+ // bits are set in FSR.
+ //
+ // Otherwise, a normal result can be delivered, but it may be inexact.
+ // If the result is inexact, then the inexact sticky bit is set in FSR.
+ //
+
+ if (ResultOperand->Exponent >= SINGLE_MAXIMUM_EXPONENT) {
+ Inexact = TRUE;
+ Overflow = TRUE;
+ Underflow = FALSE;
+
+ //
+ // The overflow value is dependent on the rounding mode.
+ //
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+ // The result value is infinity with the sign of the result.
+ //
+
+ case ROUND_TO_NEAREST:
+ ResultValue = SINGLE_INFINITY_VALUE | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward zero.
+ //
+ // The result is the maximum number with the sign of the result.
+ //
+
+ case ROUND_TO_ZERO:
+ ResultValue = SINGLE_MAXIMUM_VALUE | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+ // If the sign of the result is positive, then the result is
+ // plus infinity. Otherwise, the result is the maximum negative
+ // number.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if (ResultOperand->Sign == 0) {
+ ResultValue = SINGLE_INFINITY_VALUE;
+
+ } else {
+ ResultValue = (ULONG)(SINGLE_MAXIMUM_VALUE | (1 << 31));
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+ // If the sign of the result is negative, then the result is
+ // negative infinity. Otherwise, the result is the maximum
+ // positive number.
+ //
+
+
+ case ROUND_TO_MINUS_INFINITY:
+ if (ResultOperand->Sign != 0) {
+ ResultValue = (ULONG)(SINGLE_INFINITY_VALUE | (1 << 31));
+
+ } else {
+ ResultValue = SINGLE_MAXIMUM_VALUE;
+ }
+
+ break;
+ }
+
+ //
+ // Compute the overflow exception result value by subtracting 192
+ // from the exponent.
+ //
+
+ ExceptionResult = ResultOperand->Mantissa & ((1 << 23) - 1);
+ ExceptionResult |= ((ResultOperand->Exponent - 192) << 23);
+ ExceptionResult |= (ResultOperand->Sign << 31);
+
+ } else {
+ if ((ResultOperand->Exponent <= SINGLE_MINIMUM_EXPONENT) &&
+ (ResultOperand->Mantissa != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->FS == 0) {
+ DenormalizeShift = 1 - ResultOperand->Exponent;
+ if (DenormalizeShift >= 24) {
+ DenormalizeShift = 24;
+ }
+
+ ResultValue = ResultOperand->Mantissa >> DenormalizeShift;
+ ResultValue |= (ResultOperand->Sign << 31);
+ if ((StickyBits != 0) ||
+ ((ResultOperand->Mantissa & ((1 << DenormalizeShift) - 1)) != 0)) {
+ Inexact = TRUE;
+ Overflow = FALSE;
+ Underflow = TRUE;
+
+ //
+ // Compute the underflow exception result value by adding
+ // 192 to the exponent.
+ //
+
+ ExceptionResult = ResultOperand->Mantissa & ((1 << 23) - 1);
+ ExceptionResult |= ((ResultOperand->Exponent + 192) << 23);
+ ExceptionResult |= (ResultOperand->Sign << 31);
+
+ } else {
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ ResultValue = 0;
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ if (ResultOperand->Mantissa == 0) {
+ ResultOperand->Exponent = 0;
+ }
+
+ ResultValue = ResultOperand->Mantissa & ((1 << 23) - 1);
+ ResultValue |= (ResultOperand->Exponent << 23);
+ ResultValue |= (ResultOperand->Sign << 31);
+ Inexact = StickyBits ? TRUE : FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SO = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EO != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EO != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XO = 1;
+ return FALSE;
+ }
+
+ } else if (Underflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SU = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EU != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EU != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XU = 1;
+ return FALSE;
+ }
+
+ } else if (Inexact != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
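For the normal, no-exception case the routine above packs a sign bit, a biased 8-bit exponent, and a 23-bit fraction into the single-precision format. The packing itself is just the three shifted fields ORed together, as in this standalone sketch:

#include <stdint.h>

/*
 * Illustrative sketch: pack sign, biased exponent, and fraction into a
 * single-precision bit pattern, mirroring the normal-result store made
 * by KiNormalizeSingle.
 */
static uint32_t
PackSingle (
    uint32_t Sign,
    uint32_t Exponent,
    uint32_t Fraction
    )
{
    uint32_t Value;

    Value = Fraction & ((1UL << 23) - 1);
    Value |= (Exponent & 0xff) << 23;
    Value |= (Sign & 1) << 31;
    return Value;
}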
+
+VOID
+KiUnpackDouble (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_DOUBLE_OPERAND DoubleOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to unpack a double floating value from the
+ specified source register.
+
+ N.B. The unpacked mantissa value is returned with a guard bit and a
+ round bit on the right and the hidden bit inserted if appropriate.
+ The format of the returned value is:
+
+ <63:55> - zero
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+Arguments:
+
+ Source - Supplies the number of the register that contains the operand.
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ DoubleOperand - Supplies a pointer to a structure that is to receive the
+ operand value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Value1;
+ ULONG Value2;
+
+ //
+ // Get the source register value and unpack the sign, exponent, and
+ // mantissa value.
+ //
+
+ Value1 = KiGetRegisterValue(Source + 32,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ Value2 = KiGetRegisterValue(Source + 32 + 1,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ DoubleOperand->Sign = Value2 >> 31;
+ DoubleOperand->Exponent = (Value2 >> (52 - 32)) & 0x7ff;
+ DoubleOperand->MantissaHigh = Value2 & 0xfffff;
+ DoubleOperand->MantissaLow = Value1;
+
+ //
+ // If the exponent is the largest possible value, then the number is
+ // either a NaN or an infinity.
+ //
+
+ if (DoubleOperand->Exponent == DOUBLE_MAXIMUM_EXPONENT) {
+ if ((DoubleOperand->MantissaLow | DoubleOperand->MantissaHigh) != 0) {
+ DoubleOperand->Infinity = FALSE;
+ DoubleOperand->Nan = TRUE;
+
+ } else {
+ DoubleOperand->Infinity = TRUE;
+ DoubleOperand->Nan = FALSE;
+ }
+
+ } else {
+ DoubleOperand->Infinity = FALSE;
+ DoubleOperand->Nan = FALSE;
+ if (DoubleOperand->Exponent == DOUBLE_MINIMUM_EXPONENT) {
+ if ((DoubleOperand->MantissaHigh | DoubleOperand->MantissaLow) != 0) {
+ DoubleOperand->Exponent += 1;
+ while ((DoubleOperand->MantissaHigh & (1 << 20)) == 0) {
+ DoubleOperand->MantissaHigh =
+ (DoubleOperand->MantissaHigh << 1) |
+ (DoubleOperand->MantissaLow >> 31);
+ DoubleOperand->MantissaLow <<= 1;
+ DoubleOperand->Exponent -= 1;
+ }
+ }
+
+ } else {
+ DoubleOperand->MantissaHigh |= (1 << 20);
+ }
+ }
+
+ //
+ // Left shift the mantissa 2-bits to provide for a guard bit and a round
+ // bit.
+ //
+
+ DoubleOperand->MantissaHigh =
+ (DoubleOperand->MantissaHigh << 2) | (DoubleOperand->MantissaLow >> 30);
+ DoubleOperand->MantissaLow <<= 2;
+ return;
+}
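KiUnpackDouble works on the operand as two 32-bit register words. On a machine with a native 64-bit integer type the same field extraction reduces to the standalone sketch below; the two-bit guard/round shift and the full denormal and NaN handling above are deliberately omitted:

#include <stdint.h>

typedef struct _UNPACKED_DOUBLE {
    uint32_t Sign;
    int32_t Exponent;
    uint64_t Mantissa;
} UNPACKED_DOUBLE;

/*
 * Illustrative sketch: split a raw 64-bit double into sign, biased
 * exponent, and fraction, inserting the hidden bit for normal numbers.
 * Denormals, NaNs, and infinities still need the classification and
 * renormalization performed by KiUnpackDouble.
 */
static void
UnpackDoubleBits (
    uint64_t Bits,
    UNPACKED_DOUBLE *Operand
    )
{
    Operand->Sign = (uint32_t)(Bits >> 63);
    Operand->Exponent = (int32_t)((Bits >> 52) & 0x7ff);
    Operand->Mantissa = Bits & 0x000fffffffffffffULL;
    if ((Operand->Exponent != 0) && (Operand->Exponent != 0x7ff)) {
        Operand->Mantissa |= (1ULL << 52);
    }
}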
+
+VOID
+KiUnpackSingle (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_SINGLE_OPERAND SingleOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to unpack a single floating value from the
+ specified source register.
+
+ N.B. The unpacked mantissa value is returned with a guard bit and a
+ round bit on the right and the hidden bit inserted if appropriate.
+ The format of the returned value is:
+
+ <31:26> - zero
+ <25> - hidden bit
+ <24:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+Arguments:
+
+ Source - Supplies the number of the register that contains the operand.
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ SingleOperand - Supplies a pointer to a structure that is to receive the
+ operand value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Value;
+
+ //
+ // Get the source register value and unpack the sign, exponent, and
+ // mantissa value.
+ //
+
+ Value = KiGetRegisterValue(Source + 32,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ SingleOperand->Sign = Value >> 31;
+ SingleOperand->Exponent = (Value >> 23) & 0xff;
+ SingleOperand->Mantissa = Value & 0x7fffff;
+
+ //
+ // If the exponent is the largest possible value, then the number is
+ // either a NaN or an infinity.
+ //
+
+ if (SingleOperand->Exponent == SINGLE_MAXIMUM_EXPONENT) {
+ if (SingleOperand->Mantissa != 0) {
+ SingleOperand->Infinity = FALSE;
+ SingleOperand->Nan = TRUE;
+
+ } else {
+ SingleOperand->Infinity = TRUE;
+ SingleOperand->Nan = FALSE;
+ }
+
+ } else {
+ SingleOperand->Infinity = FALSE;
+ SingleOperand->Nan = FALSE;
+ if (SingleOperand->Exponent == SINGLE_MINIMUM_EXPONENT) {
+ if (SingleOperand->Mantissa != 0) {
+ SingleOperand->Exponent += 1;
+ while ((SingleOperand->Mantissa & (1 << 23)) == 0) {
+ SingleOperand->Mantissa <<= 1;
+ SingleOperand->Exponent -= 1;
+ }
+ }
+
+ } else {
+ SingleOperand->Mantissa |= (1 << 23);
+ }
+ }
+
+ //
+ // Left shift the mantissa 2-bits to provide for a guard bit and a round
+ // bit.
+ //
+
+ SingleOperand->Mantissa <<= 2;
+ return;
+}
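Both unpack routines renormalize a denormal input (zero exponent field, nonzero fraction) by shifting the fraction left until the hidden-bit position is occupied, decrementing the working exponent for every shift. The single-precision case reduces to this standalone sketch, which assumes a nonzero fraction exactly as the code above does:

#include <stdint.h>

/*
 * Illustrative sketch: renormalize a single-precision denormal whose
 * 23-bit fraction is nonzero. On return, bit 23 (the hidden-bit
 * position) is set and the exponent has been adjusted to match.
 */
static void
NormalizeSingleDenormal (
    uint32_t *Fraction,
    int32_t *Exponent
    )
{
    *Exponent += 1;
    while ((*Fraction & (1UL << 23)) == 0) {
        *Fraction <<= 1;
        *Exponent -= 1;
    }
}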
diff --git a/private/ntos/ke/mips/flush.c b/private/ntos/ke/mips/flush.c
new file mode 100644
index 000000000..4aa394099
--- /dev/null
+++ b/private/ntos/ke/mips/flush.c
@@ -0,0 +1,820 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ flush.c
+
+Abstract:
+
+ This module implements MIPS machine dependent kernel functions to flush
+ the data and instruction caches and to flush I/O buffers.
+
+Author:
+
+ David N. Cutler (davec) 26-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+ULONG ChangeColor;
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiChangeColorPageTarget (
+ IN PULONG SignalDone,
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN PVOID PageFrame
+ );
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiSweepIcacheRangeTarget (
+ IN PULONG SignalDone,
+ IN PVOID BaseAddress,
+ IN PVOID Length,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ );
+
+VOID
+KeChangeColorPage (
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN ULONG PageFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine changes the color of a page.
+
+Arguments:
+
+ NewColor - Supplies the page aligned virtual address of the new color
+ of the page to change.
+
+ OldColor - Supplies the page aligned virtual address of the old color
+ of the page to change.
+
+ PageFrame - Supplies the page frame number of the page that is changed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ ChangeColor += 1;
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the change color
+ // parameters to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiChangeColorPageTarget,
+ (PVOID)NewColor,
+ (PVOID)OldColor,
+ (PVOID)PageFrame);
+ }
+
+#endif
+
+ //
+ // Change the color of the page on the current processor.
+ //
+
+ HalChangeColorPage(NewColor, OldColor, PageFrame);
+
+ //
+ // Wait until all target processors have finished changing the color
+ // of the page.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
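KeChangeColorPage and the other exported routines in this module share one multiprocessor shape: raise IRQL to synchronization level, hand an IPI packet to every other active processor, perform the same operation locally through the HAL, stall until every target has signaled completion, and lower IRQL. A schematic sketch of that skeleton, using the same kernel helpers the routines above call and a DoLocalWork placeholder for the HAL operation being broadcast:

typedef
VOID
(*TARGET_ROUTINE) (
    IN PULONG SignalDone,
    IN PVOID Parameter1,
    IN PVOID Parameter2,
    IN PVOID Parameter3
    );

/*
 * Illustrative sketch of the broadcast-and-wait pattern used by the
 * routines in this module. TargetFunction runs on the other processors
 * and DoLocalWork is the equivalent HAL call on the current processor.
 */
VOID
KiBroadcastSketch (
    IN TARGET_ROUTINE TargetFunction,
    IN VOID (*DoLocalWork)(VOID),
    IN PVOID Parameter1,
    IN PVOID Parameter2,
    IN PVOID Parameter3
    )
{
    KIRQL OldIrql;
    KAFFINITY TargetProcessors;

    //
    // Raise IRQL to synchronization level to prevent a context switch.
    //

    OldIrql = KeRaiseIrqlToSynchLevel();

    //
    // Compute the set of target processors and send the request to the
    // target processors, if any, for execution.
    //

    TargetProcessors = KeActiveProcessors & PCR->NotMember;
    if (TargetProcessors != 0) {
        KiIpiSendPacket(TargetProcessors,
                        TargetFunction,
                        Parameter1,
                        Parameter2,
                        Parameter3);
    }

    //
    // Perform the same operation on the current processor.
    //

    DoLocalWork();

    //
    // Wait until all target processors have signaled completion, then
    // lower IRQL to its previous level.
    //

    if (TargetProcessors != 0) {
        KiIpiStallOnPacketTargets();
    }

    KeLowerIrql(OldIrql);
    return;
}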
+
+VOID
+KiChangeColorPageTarget (
+ IN PULONG SignalDone,
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN PVOID PageFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for changing the color of a page.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ NewColor - Supplies the page aligned virtual address of the new color
+ of the page to change.
+
+ OldColor - Supplies the page aligned virtual address of the old color
+ of the page to change.
+
+ PageFrame - Supplies the page frame number of the page that is changed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+
+ //
+ // Change the color of the page on the current processor and clear
+ // the change color packet address to signal the source to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalChangeColorPage(NewColor, OldColor, (ULONG)PageFrame);
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepDcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the data cache on all processors that are currently
+ running threads which are children of the current process or flushes the
+ data cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which data
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepDcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Sweep the data cache on the current processor.
+ //
+
+ HalSweepDcache();
+
+ //
+ // Wait until all target processors have finished sweeping their
+ // data cache.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the data cache on target
+ processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the data cache on the current processor and clear the sweep
+ // data cache packet address to signal the source to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalSweepDcache();
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the instruction cache on all processors that are
+ currently running threads which are children of the current process or
+ flushes the instruction cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepIcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Sweep the instruction cache on the current processor.
+ //
+
+ HalSweepIcache();
+ HalSweepDcache();
+
+ //
+ // Wait until all target processors have finished sweeping their
+ // instruction cache.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the instruction cache on
+ target processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the instruction cache on the current processor and clear
+ // the sweep instruction cache packet address to signal the source
+ // to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalSweepIcache();
+ HalSweepDcache();
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcacheRange (
+ IN BOOLEAN AllProcessors,
+ IN PVOID BaseAddress,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes a range of virtual addresses from the primary
+ instruction cache on all processors that are currently running threads
+ which are children of the current process or flushes the range of virtual
+ addresses from the primary instruction cache on all processors in the host
+ configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Offset;
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // If the length of the range is greater than the size of the primary
+ // instruction cache, then set the length of the flush to the size of
+ // the primary instruction cache and set the base address to zero.
+ //
+ // N.B. It is assumed that the size of the primary instruction and
+ // data caches are the same.
+ //
+
+ if (Length > PCR->FirstLevelIcacheSize) {
+ BaseAddress = (PVOID)0;
+ Length = PCR->FirstLevelIcacheSize;
+ }
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors, and send the sweep range
+ // parameters to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepIcacheRangeTarget,
+ (PVOID)BaseAddress,
+ (PVOID)Length,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush the specified range of virtual addresses from the primary
+ // instruction cache.
+ //
+
+ Offset = (ULONG)BaseAddress & PCR->IcacheAlignment;
+ HalSweepIcacheRange((PVOID)((ULONG)BaseAddress & ~PCR->IcacheAlignment),
+ (Offset + Length + PCR->IcacheAlignment) & ~PCR->IcacheAlignment);
+
+ Offset = (ULONG)BaseAddress & PCR->DcacheAlignment;
+ HalSweepDcacheRange((PVOID)((ULONG)BaseAddress & ~PCR->DcacheAlignment),
+ (Offset + Length + PCR->DcacheAlignment) & ~PCR->DcacheAlignment);
+
+ //
+ // Wait until all target processors have finished sweeping the specified
+ // range of addresses from the instruction cache.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
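The two HalSweep*Range calls above round the requested span outward to whole cache lines: the base is masked down to a line boundary and the length is padded by the leading offset and rounded up, so the partial lines at both ends are swept. PCR->IcacheAlignment and PCR->DcacheAlignment hold the respective line sizes minus one. A standalone sketch of just that arithmetic:

#include <stdint.h>

/*
 * Illustrative sketch: expand [Base, Base + Length) to whole cache
 * lines. AlignmentMask is the cache line size minus one, matching the
 * way PCR->IcacheAlignment is used above.
 */
static void
AlignSweepRange (
    uintptr_t Base,
    uint32_t Length,
    uint32_t AlignmentMask,
    uintptr_t *AlignedBase,
    uint32_t *AlignedLength
    )
{
    uint32_t Offset = (uint32_t)(Base & AlignmentMask);

    *AlignedBase = Base & ~(uintptr_t)AlignmentMask;
    *AlignedLength = (Offset + Length + AlignmentMask) & ~AlignmentMask;
}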
+
+VOID
+KiSweepIcacheRangeTarget (
+ IN PULONG SignalDone,
+ IN PVOID BaseAddress,
+ IN PVOID Length,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping a range of addresses from the
+ instruction cache on target processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Offset;
+
+ //
+ // Sweep the specified instruction cache range on the current processor.
+ //
+
+#if !defined(NT_UP)
+
+ Offset = (ULONG)(BaseAddress) & PCR->IcacheAlignment;
+ HalSweepIcacheRange((PVOID)((ULONG)(BaseAddress) & ~PCR->IcacheAlignment),
+ (Offset + (ULONG)Length + PCR->IcacheAlignment) & ~PCR->IcacheAlignment);
+
+ Offset = (ULONG)(BaseAddress) & PCR->DcacheAlignment;
+ HalSweepDcacheRange((PVOID)((ULONG)(BaseAddress) & ~PCR->DcacheAlignment),
+ (Offset + (ULONG)Length + PCR->DcacheAlignment) & ~PCR->DcacheAlignment);
+
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeFlushIoBuffers (
+ IN PMDL Mdl,
+ IN BOOLEAN ReadOperation,
+ IN BOOLEAN DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the I/O buffer specified by the memory descriptor
+ list from the data cache on all processors.
+
+Arguments:
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // If the operation is a DMA operation, then check if the flush
+ // can be avoided because the host system supports the right set
+ // of cache coherency attributes. Otherwise, the flush can also
+ // be avoided if the operation is a programmed I/O and not a page
+ // read.
+ //
+
+ if (DmaOperation != FALSE) {
+ if (ReadOperation != FALSE) {
+ if ((KiDmaIoCoherency & DMA_READ_ICACHE_INVALIDATE) != 0) {
+
+ ASSERT((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0);
+
+ return;
+
+ } else if (((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) &&
+ ((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0)) {
+ return;
+ }
+
+ } else if ((KiDmaIoCoherency & DMA_WRITE_DCACHE_SNOOP) != 0) {
+ return;
+ }
+
+ } else if ((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) {
+ return;
+ }
+
+ //
+ // Either the operation is a DMA operation and the right coherency
+ // attributes are not supported by the host system, or the operation
+ // is programmed I/O and a page read.
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors, and send the flush I/O
+ // parameters to the target processors, if any, for execution.
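+    //
+    // N.B. The boolean arguments are passed through the packet as PVOID
+    //      sized parameters and are converted back to boolean values by
+    //      KiFlushIoBuffersTarget.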
+ //
+
+#if !defined(NT_UP)
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushIoBuffersTarget,
+ (PVOID)Mdl,
+ (PVOID)((ULONG)ReadOperation),
+ (PVOID)((ULONG)DmaOperation));
+ }
+
+#endif
+
+ //
+ // Flush I/O buffer on current processor.
+ //
+
+ HalFlushIoBuffers(Mdl, ReadOperation, DmaOperation);
+
+ //
+ // Wait until all target processors have finished flushing the
+ // specified I/O buffer.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+ return;
+}
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing an I/O buffer on target
+ processors.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the specified I/O buffer on the current processor.
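+    //
+    // The ReadOperation and DmaOperation arguments arrive as the PVOID
+    // packet parameters supplied by KeFlushIoBuffers and are converted
+    // back to boolean values here.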
+ //
+
+#if !defined(NT_UP)
+
+ HalFlushIoBuffers((PMDL)Mdl,
+ (BOOLEAN)((ULONG)ReadOperation),
+ (BOOLEAN)((ULONG)DmaOperation));
+
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
diff --git a/private/ntos/ke/mips/genmips.c b/private/ntos/ke/mips/genmips.c
new file mode 100644
index 000000000..81003ea32
--- /dev/null
+++ b/private/ntos/ke/mips/genmips.c
@@ -0,0 +1,1015 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ genmips.c
+
+Abstract:
+
+ This module implements a program which generates MIPS machine dependent
+ structure offset definitions for kernel structures that are accessed in
+ assembly code.
+
+Author:
+
+ David N. Cutler (davec) 27-Mar-1990
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#define HEADER_FILE
+#include "excpt.h"
+#include "ntdef.h"
+#include "ntkeapi.h"
+#include "ntmips.h"
+#include "ntimage.h"
+#include "ntseapi.h"
+#include "ntobapi.h"
+#include "ntlpcapi.h"
+#include "ntioapi.h"
+#include "ntmmapi.h"
+#include "ntldr.h"
+#include "ntpsapi.h"
+#include "ntexapi.h"
+#include "ntnls.h"
+#include "nturtl.h"
+#include "ntcsrmsg.h"
+#include "ntcsrsrv.h"
+#include "ntxcapi.h"
+#include "arc.h"
+#include "ntstatus.h"
+#include "kxmips.h"
+#include "stdarg.h"
+#include "setjmp.h"
+
+//
+// Define architecture specific generation macros.
+//
+
+#define genAlt(Name, Type, Member) \
+ dumpf("#define " #Name " 0x%lx\n", OFFSET(Type, Member))
+
+#define genCom(Comment) \
+ dumpf("\n"); \
+ dumpf("//\n"); \
+ dumpf("// " Comment "\n"); \
+ dumpf("//\n"); \
+ dumpf("\n")
+
+#define genDef(Prefix, Type, Member) \
+ dumpf("#define " #Prefix #Member " 0x%lx\n", OFFSET(Type, Member))
+
+#define genVal(Name, Value) \
+ dumpf("#define " #Name " 0x%lx\n", Value)
+
+#define genSpc() dumpf("\n");
+
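+//
+// For example, genDef(Pb, KPRCB, CurrentThread) emits a line of the form
+// "#define PbCurrentThread 0x<offset>" into each enabled header file, and
+// genVal(FALSE, FALSE) emits "#define FALSE 0x0".
+//
+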
+//
+// Define member offset computation macro.
+//
+
+#define OFFSET(type, field) ((LONG)(&((type *)0)->field))
+
+FILE *KsMips;
+FILE *HalMips;
+
+//
+// EnableInc(a) - Enables output to the specified include file
+//
+
+#define EnableInc(a) OutputEnabled |= a;
+
+//
+// DisableInc(a) - Disables output to the specified include file
+//
+
+#define DisableInc(a) OutputEnabled &= ~a;
+
+ULONG OutputEnabled;
+
+#define KSMIPS 0x01
+#define HALMIPS 0x02
+
+#define KERNEL KSMIPS
+#define HAL HALMIPS
+
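+//
+// For example, EnableInc(KSMIPS | HALMIPS) directs subsequent dumpf output
+// to both ksmips.h and halmips.h; DisableInc(HALMIPS) then restricts output
+// to ksmips.h only.
+//
+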
+VOID dumpf (const char *format, ...);
+
+
+//
+// This routine returns the bit number, counting right to left, of the lowest
+// set bit in a field.
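+// For example, t(0x10) returns 4 and t(1) returns 0; if no bit is set, the
+// routine returns 32.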
+//
+
+LONG
+t (
+ IN ULONG z
+ )
+
+{
+ LONG i;
+
+ for (i = 0; i < 32; i += 1) {
+ if ((z >> i) & 1) {
+ break;
+ }
+ }
+ return i;
+}
+
+//
+// This program generates the MIPS machine dependent assembler offset
+// definitions.
+//
+
+VOID
+main (argc, argv)
+ int argc;
+ char *argv[];
+{
+
+ char *outName;
+
+ //
+ // Create file for output.
+ //
+
+    outName = argc >= 2 ? argv[1] : "\\nt\\public\\sdk\\inc\\ksmips.h";
+ KsMips = fopen( outName, "w" );
+ if (KsMips == NULL) {
+ fprintf( stderr, "GENMIPS: Cannot open %s for writing.\n", outName);
+ perror("GENMIPS");
+ exit(1);
+ }
+
+ fprintf(stderr, "GENMIPS: Writing %s header file.\n", outName);
+ outName = argc >= 3 ? argv[2] : "\\nt\\private\\ntos\\inc\\halmips.h";
+ HalMips = fopen( outName, "w" );
+ if (HalMips == NULL) {
+ fprintf( stderr, "GENMIPS: Cannot open %s for writing.\n", outName);
+ perror("GENMIPS");
+ exit(1);
+ }
+
+ fprintf(stderr, "GENMIPS: Writing %s header file.\n", outName);
+
+ //
+ // Include statement for MIPS architecture static definitions.
+ //
+
+ EnableInc (KSMIPS | HALMIPS);
+ dumpf("#include \"kxmips.h\"\n");
+ DisableInc (HALMIPS);
+
+ //
+ // Include architecture independent definitions.
+ //
+
+#include "..\genxx.inc"
+
+ //
+ // Generate architecture dependent definitions.
+ //
+ // Processor block structure definitions.
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Processor Block Structure Offset Definitions");
+
+ genVal(PRCB_MINOR_VERSION, PRCB_MINOR_VERSION);
+ genVal(PRCB_MAJOR_VERSION, PRCB_MAJOR_VERSION);
+
+ genSpc();
+
+ genDef(Pb, KPRCB, MinorVersion);
+ genDef(Pb, KPRCB, MajorVersion);
+ genDef(Pb, KPRCB, CurrentThread);
+ genDef(Pb, KPRCB, NextThread);
+ genDef(Pb, KPRCB, IdleThread);
+ genDef(Pb, KPRCB, Number);
+ genDef(Pb, KPRCB, SetMember);
+ genDef(Pb, KPRCB, RestartBlock);
+ genDef(Pb, KPRCB, SystemReserved);
+ genDef(Pb, KPRCB, HalReserved);
+
+ DisableInc(HALMIPS);
+
+ genDef(Pb, KPRCB, DpcTime);
+ genDef(Pb, KPRCB, InterruptTime);
+ genDef(Pb, KPRCB, KernelTime);
+ genDef(Pb, KPRCB, UserTime);
+ genDef(Pb, KPRCB, AdjustDpcThreshold);
+ genDef(Pb, KPRCB, InterruptCount);
+ genDef(Pb, KPRCB, ApcBypassCount);
+ genDef(Pb, KPRCB, DpcBypassCount);
+ genDef(Pb, KPRCB, IpiFrozen);
+ genDef(Pb, KPRCB, ProcessorState);
+ genAlt(PbAlignmentFixupCount, KPRCB, KeAlignmentFixupCount);
+ genAlt(PbContextSwitches, KPRCB, KeContextSwitches);
+ genAlt(PbDcacheFlushCount, KPRCB, KeDcacheFlushCount);
+ genAlt(PbExceptionDispatchCount, KPRCB, KeExceptionDispatchCount);
+ genAlt(PbFirstLevelTbFills, KPRCB, KeFirstLevelTbFills);
+ genAlt(PbFloatingEmulationCount, KPRCB, KeFloatingEmulationCount);
+ genAlt(PbIcacheFlushCount, KPRCB, KeIcacheFlushCount);
+ genAlt(PbSecondLevelTbFills, KPRCB, KeSecondLevelTbFills);
+ genAlt(PbSystemCalls, KPRCB, KeSystemCalls);
+ genDef(Pb, KPRCB, CurrentPacket);
+ genDef(Pb, KPRCB, TargetSet);
+ genDef(Pb, KPRCB, WorkerRoutine);
+ genDef(Pb, KPRCB, RequestSummary);
+ genDef(Pb, KPRCB, SignalDone);
+ genDef(Pb, KPRCB, DpcInterruptRequested);
+ genDef(Pb, KPRCB, MaximumDpcQueueDepth);
+ genDef(Pb, KPRCB, MinimumDpcRate);
+ genDef(Pb, KPRCB, IpiCounts);
+ genDef(Pb, KPRCB, StartCount);
+ genDef(Pb, KPRCB, DpcLock);
+ genDef(Pb, KPRCB, DpcListHead);
+ genDef(Pb, KPRCB, DpcQueueDepth);
+ genDef(Pb, KPRCB, DpcCount);
+ genDef(Pb, KPRCB, DpcLastCount);
+ genDef(Pb, KPRCB, DpcRequestRate);
+ genDef(Pb, KPRCB, DpcRoutineActive);
+ genVal(ProcessorBlockLength, ((sizeof(KPRCB) + 15) & ~15));
+
+ //
+ // Processor control register structure definitions.
+ //
+
+#if defined(_MIPS_)
+
+ EnableInc(HALMIPS);
+
+ genCom("Processor Control Registers Structure Offset Definitions");
+
+ genVal(PCR_MINOR_VERSION, PCR_MINOR_VERSION);
+ genVal(PCR_MAJOR_VERSION, PCR_MAJOR_VERSION);
+
+ genSpc();
+
+ genDef(Pc, KPCR, MinorVersion);
+ genDef(Pc, KPCR, MajorVersion);
+ genDef(Pc, KPCR, InterruptRoutine);
+ genDef(Pc, KPCR, XcodeDispatch);
+ genDef(Pc, KPCR, FirstLevelDcacheSize);
+ genDef(Pc, KPCR, FirstLevelDcacheFillSize);
+ genDef(Pc, KPCR, FirstLevelIcacheSize);
+ genDef(Pc, KPCR, FirstLevelIcacheFillSize);
+ genDef(Pc, KPCR, SecondLevelDcacheSize);
+ genDef(Pc, KPCR, SecondLevelDcacheFillSize);
+ genDef(Pc, KPCR, SecondLevelIcacheSize);
+ genDef(Pc, KPCR, SecondLevelIcacheFillSize);
+ genDef(Pc, KPCR, Prcb);
+ genDef(Pc, KPCR, Teb);
+ genDef(Pc, KPCR, TlsArray);
+ genDef(Pc, KPCR, DcacheFillSize);
+ genDef(Pc, KPCR, IcacheAlignment);
+ genDef(Pc, KPCR, IcacheFillSize);
+ genDef(Pc, KPCR, ProcessorId);
+ genDef(Pc, KPCR, ProfileInterval);
+ genDef(Pc, KPCR, ProfileCount);
+ genDef(Pc, KPCR, StallExecutionCount);
+ genDef(Pc, KPCR, StallScaleFactor);
+ genDef(Pc, KPCR, Number);
+ genDef(Pc, KPCR, DataBusError);
+ genDef(Pc, KPCR, InstructionBusError);
+ genDef(Pc, KPCR, CachePolicy);
+ genDef(Pc, KPCR, IrqlMask);
+ genDef(Pc, KPCR, IrqlTable);
+ genDef(Pc, KPCR, CurrentIrql);
+ genDef(Pc, KPCR, SetMember);
+ genDef(Pc, KPCR, CurrentThread);
+ genDef(Pc, KPCR, AlignedCachePolicy);
+ genDef(Pc, KPCR, NotMember);
+ genDef(Pc, KPCR, SystemReserved);
+ genDef(Pc, KPCR, DcacheAlignment);
+ genDef(Pc, KPCR, HalReserved);
+
+ DisableInc(HALMIPS);
+
+ genDef(Pc, KPCR, FirstLevelActive);
+ genDef(Pc, KPCR, DpcRoutineActive);
+ genDef(Pc, KPCR, CurrentPid);
+ genDef(Pc, KPCR, OnInterruptStack);
+ genDef(Pc, KPCR, SavedInitialStack);
+ genDef(Pc, KPCR, SavedStackLimit);
+ genDef(Pc, KPCR, SystemServiceDispatchStart);
+ genDef(Pc, KPCR, SystemServiceDispatchEnd);
+ genDef(Pc, KPCR, InterruptStack);
+ genDef(Pc, KPCR, PanicStack);
+ genDef(Pc, KPCR, BadVaddr);
+ genDef(Pc, KPCR, InitialStack);
+ genDef(Pc, KPCR, StackLimit);
+ genDef(Pc, KPCR, SavedEpc);
+ genDef(Pc, KPCR, SavedT7);
+ genDef(Pc, KPCR, SavedT8);
+ genDef(Pc, KPCR, SavedT9);
+ genDef(Pc, KPCR, SystemGp);
+ genDef(Pc, KPCR, QuantumEnd);
+ genVal(ProcessorControlRegisterLength, ((sizeof(KPCR) + 15) & ~15));
+
+ genSpc();
+
+ genDef(Pc2, KUSER_SHARED_DATA, TickCountLow);
+ genDef(Pc2, KUSER_SHARED_DATA, TickCountMultiplier);
+ genDef(Pc2, KUSER_SHARED_DATA, InterruptTime);
+ genDef(Pc2, KUSER_SHARED_DATA, SystemTime);
+
+#endif
+
+ //
+ // TB entry structure offset definitions.
+ //
+
+#if defined(_MIPS_)
+
+ genCom("TB Entry Structure Offset Definitions");
+
+ genDef(Tb, TB_ENTRY, Entrylo0);
+ genDef(Tb, TB_ENTRY, Entrylo1);
+ genDef(Tb, TB_ENTRY, Entryhi);
+ genDef(Tb, TB_ENTRY, Pagemask);
+
+#endif
+
+    //
+ // Interprocessor command definitions.
+ //
+
+ genCom("Immediate Interprocessor Command Definitions");
+
+ genVal(IPI_APC, IPI_APC);
+ genVal(IPI_DPC, IPI_DPC);
+ genVal(IPI_FREEZE, IPI_FREEZE);
+ genVal(IPI_PACKET_READY, IPI_PACKET_READY);
+
+ //
+ // Interprocessor interrupt count structure offset definitions.
+ //
+
+ genCom("Interprocessor Interrupt Count Structure Offset Definitions");
+
+ genDef(Ic, KIPI_COUNTS, Freeze);
+ genDef(Ic, KIPI_COUNTS, Packet);
+ genDef(Ic, KIPI_COUNTS, DPC);
+ genDef(Ic, KIPI_COUNTS, APC);
+ genDef(Ic, KIPI_COUNTS, FlushSingleTb);
+ genDef(Ic, KIPI_COUNTS, FlushMultipleTb);
+ genDef(Ic, KIPI_COUNTS, FlushEntireTb);
+ genDef(Ic, KIPI_COUNTS, GenericCall);
+ genDef(Ic, KIPI_COUNTS, ChangeColor);
+ genDef(Ic, KIPI_COUNTS, SweepDcache);
+ genDef(Ic, KIPI_COUNTS, SweepIcache);
+ genDef(Ic, KIPI_COUNTS, SweepIcacheRange);
+ genDef(Ic, KIPI_COUNTS, FlushIoBuffers);
+ genDef(Ic, KIPI_COUNTS, GratuitousDPC);
+
+ //
+ // Context frame offset definitions and flag definitions.
+ //
+
+ EnableInc (HALMIPS);
+
+ genCom("Context Frame Offset and Flag Definitions");
+
+ genVal(CONTEXT_FULL, CONTEXT_FULL);
+ genVal(CONTEXT_CONTROL, CONTEXT_CONTROL);
+ genVal(CONTEXT_FLOATING_POINT, CONTEXT_FLOATING_POINT);
+ genVal(CONTEXT_INTEGER, CONTEXT_INTEGER);
+ genVal(CONTEXT_EXTENDED_FLOAT, CONTEXT_EXTENDED_FLOAT);
+ genVal(CONTEXT_EXTENDED_INTEGER, CONTEXT_EXTENDED_INTEGER);
+
+ genCom("32-bit Context Frame Offset Definitions");
+
+ genDef(Cx, CONTEXT, FltF0);
+ genDef(Cx, CONTEXT, FltF1);
+ genDef(Cx, CONTEXT, FltF2);
+ genDef(Cx, CONTEXT, FltF3);
+ genDef(Cx, CONTEXT, FltF4);
+ genDef(Cx, CONTEXT, FltF5);
+ genDef(Cx, CONTEXT, FltF6);
+ genDef(Cx, CONTEXT, FltF7);
+ genDef(Cx, CONTEXT, FltF8);
+ genDef(Cx, CONTEXT, FltF9);
+ genDef(Cx, CONTEXT, FltF10);
+ genDef(Cx, CONTEXT, FltF11);
+ genDef(Cx, CONTEXT, FltF12);
+ genDef(Cx, CONTEXT, FltF13);
+ genDef(Cx, CONTEXT, FltF14);
+ genDef(Cx, CONTEXT, FltF15);
+ genDef(Cx, CONTEXT, FltF16);
+ genDef(Cx, CONTEXT, FltF17);
+ genDef(Cx, CONTEXT, FltF18);
+ genDef(Cx, CONTEXT, FltF19);
+ genDef(Cx, CONTEXT, FltF20);
+ genDef(Cx, CONTEXT, FltF21);
+ genDef(Cx, CONTEXT, FltF22);
+ genDef(Cx, CONTEXT, FltF23);
+ genDef(Cx, CONTEXT, FltF24);
+ genDef(Cx, CONTEXT, FltF25);
+ genDef(Cx, CONTEXT, FltF26);
+ genDef(Cx, CONTEXT, FltF27);
+ genDef(Cx, CONTEXT, FltF28);
+ genDef(Cx, CONTEXT, FltF29);
+ genDef(Cx, CONTEXT, FltF30);
+ genDef(Cx, CONTEXT, FltF31);
+ genDef(Cx, CONTEXT, IntZero);
+ genDef(Cx, CONTEXT, IntAt);
+ genDef(Cx, CONTEXT, IntV0);
+ genDef(Cx, CONTEXT, IntV1);
+ genDef(Cx, CONTEXT, IntA0);
+ genDef(Cx, CONTEXT, IntA1);
+ genDef(Cx, CONTEXT, IntA2);
+ genDef(Cx, CONTEXT, IntA3);
+ genDef(Cx, CONTEXT, IntT0);
+ genDef(Cx, CONTEXT, IntT1);
+ genDef(Cx, CONTEXT, IntT2);
+ genDef(Cx, CONTEXT, IntT3);
+ genDef(Cx, CONTEXT, IntT4);
+ genDef(Cx, CONTEXT, IntT5);
+ genDef(Cx, CONTEXT, IntT6);
+ genDef(Cx, CONTEXT, IntT7);
+ genDef(Cx, CONTEXT, IntS0);
+ genDef(Cx, CONTEXT, IntS1);
+ genDef(Cx, CONTEXT, IntS2);
+ genDef(Cx, CONTEXT, IntS3);
+ genDef(Cx, CONTEXT, IntS4);
+ genDef(Cx, CONTEXT, IntS5);
+ genDef(Cx, CONTEXT, IntS6);
+ genDef(Cx, CONTEXT, IntS7);
+ genDef(Cx, CONTEXT, IntT8);
+ genDef(Cx, CONTEXT, IntT9);
+ genDef(Cx, CONTEXT, IntK0);
+ genDef(Cx, CONTEXT, IntK1);
+ genDef(Cx, CONTEXT, IntGp);
+ genDef(Cx, CONTEXT, IntSp);
+ genDef(Cx, CONTEXT, IntS8);
+ genDef(Cx, CONTEXT, IntRa);
+ genDef(Cx, CONTEXT, IntLo);
+ genDef(Cx, CONTEXT, IntHi);
+ genDef(Cx, CONTEXT, Fsr);
+ genDef(Cx, CONTEXT, Fir);
+ genDef(Cx, CONTEXT, Psr);
+ genDef(Cx, CONTEXT, ContextFlags);
+
+ genCom("64-bit Context Frame Offset Definitions");
+
+ genDef(Cx, CONTEXT, XFltF0);
+ genDef(Cx, CONTEXT, XFltF1);
+ genDef(Cx, CONTEXT, XFltF2);
+ genDef(Cx, CONTEXT, XFltF3);
+ genDef(Cx, CONTEXT, XFltF4);
+ genDef(Cx, CONTEXT, XFltF5);
+ genDef(Cx, CONTEXT, XFltF6);
+ genDef(Cx, CONTEXT, XFltF7);
+ genDef(Cx, CONTEXT, XFltF8);
+ genDef(Cx, CONTEXT, XFltF9);
+ genDef(Cx, CONTEXT, XFltF10);
+ genDef(Cx, CONTEXT, XFltF11);
+ genDef(Cx, CONTEXT, XFltF12);
+ genDef(Cx, CONTEXT, XFltF13);
+ genDef(Cx, CONTEXT, XFltF14);
+ genDef(Cx, CONTEXT, XFltF15);
+ genDef(Cx, CONTEXT, XFltF16);
+ genDef(Cx, CONTEXT, XFltF17);
+ genDef(Cx, CONTEXT, XFltF18);
+ genDef(Cx, CONTEXT, XFltF19);
+ genDef(Cx, CONTEXT, XFltF20);
+ genDef(Cx, CONTEXT, XFltF21);
+ genDef(Cx, CONTEXT, XFltF22);
+ genDef(Cx, CONTEXT, XFltF23);
+ genDef(Cx, CONTEXT, XFltF24);
+ genDef(Cx, CONTEXT, XFltF25);
+ genDef(Cx, CONTEXT, XFltF26);
+ genDef(Cx, CONTEXT, XFltF27);
+ genDef(Cx, CONTEXT, XFltF28);
+ genDef(Cx, CONTEXT, XFltF29);
+ genDef(Cx, CONTEXT, XFltF30);
+ genDef(Cx, CONTEXT, XFltF31);
+ genDef(Cx, CONTEXT, XFsr);
+ genDef(Cx, CONTEXT, XFir);
+ genDef(Cx, CONTEXT, XPsr);
+ genDef(Cx, CONTEXT, XContextFlags);
+ genDef(Cx, CONTEXT, XIntZero);
+ genDef(Cx, CONTEXT, XIntAt);
+ genDef(Cx, CONTEXT, XIntV0);
+ genDef(Cx, CONTEXT, XIntV1);
+ genDef(Cx, CONTEXT, XIntA0);
+ genDef(Cx, CONTEXT, XIntA1);
+ genDef(Cx, CONTEXT, XIntA2);
+ genDef(Cx, CONTEXT, XIntA3);
+ genDef(Cx, CONTEXT, XIntT0);
+ genDef(Cx, CONTEXT, XIntT1);
+ genDef(Cx, CONTEXT, XIntT2);
+ genDef(Cx, CONTEXT, XIntT3);
+ genDef(Cx, CONTEXT, XIntT4);
+ genDef(Cx, CONTEXT, XIntT5);
+ genDef(Cx, CONTEXT, XIntT6);
+ genDef(Cx, CONTEXT, XIntT7);
+ genDef(Cx, CONTEXT, XIntS0);
+ genDef(Cx, CONTEXT, XIntS1);
+ genDef(Cx, CONTEXT, XIntS2);
+ genDef(Cx, CONTEXT, XIntS3);
+ genDef(Cx, CONTEXT, XIntS4);
+ genDef(Cx, CONTEXT, XIntS5);
+ genDef(Cx, CONTEXT, XIntS6);
+ genDef(Cx, CONTEXT, XIntS7);
+ genDef(Cx, CONTEXT, XIntT8);
+ genDef(Cx, CONTEXT, XIntT9);
+ genDef(Cx, CONTEXT, XIntK0);
+ genDef(Cx, CONTEXT, XIntK1);
+ genDef(Cx, CONTEXT, XIntGp);
+ genDef(Cx, CONTEXT, XIntSp);
+ genDef(Cx, CONTEXT, XIntS8);
+ genDef(Cx, CONTEXT, XIntRa);
+ genDef(Cx, CONTEXT, XIntLo);
+ genDef(Cx, CONTEXT, XIntHi);
+ genVal(ContextFrameLength, sizeof(CONTEXT));
+
+ //
+ // Exception frame offset definitions.
+ //
+
+ genCom("Exception Frame Offset Definitions and Length");
+
+ genAlt(ExArgs, KEXCEPTION_FRAME, Argument);
+
+ genCom("32-bit Nonvolatile Floating State");
+
+ genDef(Ex, KEXCEPTION_FRAME, FltF20);
+ genDef(Ex, KEXCEPTION_FRAME, FltF21);
+ genDef(Ex, KEXCEPTION_FRAME, FltF22);
+ genDef(Ex, KEXCEPTION_FRAME, FltF23);
+ genDef(Ex, KEXCEPTION_FRAME, FltF24);
+ genDef(Ex, KEXCEPTION_FRAME, FltF25);
+ genDef(Ex, KEXCEPTION_FRAME, FltF26);
+ genDef(Ex, KEXCEPTION_FRAME, FltF27);
+ genDef(Ex, KEXCEPTION_FRAME, FltF28);
+ genDef(Ex, KEXCEPTION_FRAME, FltF29);
+ genDef(Ex, KEXCEPTION_FRAME, FltF30);
+ genDef(Ex, KEXCEPTION_FRAME, FltF31);
+
+ genCom("64-bit Nonvolatile Floating State");
+
+ genDef(Ex, KEXCEPTION_FRAME, XFltF20);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF22);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF24);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF26);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF28);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF30);
+
+ genCom("32-bit Nonvolatile Integer State");
+
+ genDef(Ex, KEXCEPTION_FRAME, IntS0);
+ genDef(Ex, KEXCEPTION_FRAME, IntS1);
+ genDef(Ex, KEXCEPTION_FRAME, IntS2);
+ genDef(Ex, KEXCEPTION_FRAME, IntS3);
+ genDef(Ex, KEXCEPTION_FRAME, IntS4);
+ genDef(Ex, KEXCEPTION_FRAME, IntS5);
+ genDef(Ex, KEXCEPTION_FRAME, IntS6);
+ genDef(Ex, KEXCEPTION_FRAME, IntS7);
+ genDef(Ex, KEXCEPTION_FRAME, IntS8);
+ genDef(Ex, KEXCEPTION_FRAME, SwapReturn);
+ genDef(Ex, KEXCEPTION_FRAME, IntRa);
+ genVal(ExceptionFrameLength, sizeof(KEXCEPTION_FRAME));
+
+ //
+ // Jump buffer offset definitions.
+ //
+
+ DisableInc (HALMIPS);
+
+ genCom("Jump Offset Definitions and Length");
+
+ genDef(Jb, _JUMP_BUFFER, FltF20);
+ genDef(Jb, _JUMP_BUFFER, FltF21);
+ genDef(Jb, _JUMP_BUFFER, FltF22);
+ genDef(Jb, _JUMP_BUFFER, FltF23);
+ genDef(Jb, _JUMP_BUFFER, FltF24);
+ genDef(Jb, _JUMP_BUFFER, FltF25);
+ genDef(Jb, _JUMP_BUFFER, FltF26);
+ genDef(Jb, _JUMP_BUFFER, FltF27);
+ genDef(Jb, _JUMP_BUFFER, FltF28);
+ genDef(Jb, _JUMP_BUFFER, FltF29);
+ genDef(Jb, _JUMP_BUFFER, FltF30);
+ genDef(Jb, _JUMP_BUFFER, FltF31);
+ genDef(Jb, _JUMP_BUFFER, IntS0);
+ genDef(Jb, _JUMP_BUFFER, IntS1);
+ genDef(Jb, _JUMP_BUFFER, IntS2);
+ genDef(Jb, _JUMP_BUFFER, IntS3);
+ genDef(Jb, _JUMP_BUFFER, IntS4);
+ genDef(Jb, _JUMP_BUFFER, IntS5);
+ genDef(Jb, _JUMP_BUFFER, IntS6);
+ genDef(Jb, _JUMP_BUFFER, IntS7);
+ genDef(Jb, _JUMP_BUFFER, IntS8);
+ genDef(Jb, _JUMP_BUFFER, IntSp);
+ genDef(Jb, _JUMP_BUFFER, Type);
+ genDef(Jb, _JUMP_BUFFER, Fir);
+
+ //
+ // Trap frame offset definitions.
+ //
+
+ EnableInc (HALMIPS);
+
+ genCom("Trap Frame Offset Definitions and Length");
+
+ genAlt(TrArgs, KTRAP_FRAME, Argument);
+
+ genCom("32-bit Volatile Floating State");
+
+ genDef(Tr, KTRAP_FRAME, FltF0);
+ genDef(Tr, KTRAP_FRAME, FltF1);
+ genDef(Tr, KTRAP_FRAME, FltF2);
+ genDef(Tr, KTRAP_FRAME, FltF3);
+ genDef(Tr, KTRAP_FRAME, FltF4);
+ genDef(Tr, KTRAP_FRAME, FltF5);
+ genDef(Tr, KTRAP_FRAME, FltF6);
+ genDef(Tr, KTRAP_FRAME, FltF7);
+ genDef(Tr, KTRAP_FRAME, FltF8);
+ genDef(Tr, KTRAP_FRAME, FltF9);
+ genDef(Tr, KTRAP_FRAME, FltF10);
+ genDef(Tr, KTRAP_FRAME, FltF11);
+ genDef(Tr, KTRAP_FRAME, FltF12);
+ genDef(Tr, KTRAP_FRAME, FltF13);
+ genDef(Tr, KTRAP_FRAME, FltF14);
+ genDef(Tr, KTRAP_FRAME, FltF15);
+ genDef(Tr, KTRAP_FRAME, FltF16);
+ genDef(Tr, KTRAP_FRAME, FltF17);
+ genDef(Tr, KTRAP_FRAME, FltF18);
+ genDef(Tr, KTRAP_FRAME, FltF19);
+
+ genCom("64-bit Volatile Floating State");
+
+ genDef(Tr, KTRAP_FRAME, XFltF0);
+ genDef(Tr, KTRAP_FRAME, XFltF1);
+ genDef(Tr, KTRAP_FRAME, XFltF2);
+ genDef(Tr, KTRAP_FRAME, XFltF3);
+ genDef(Tr, KTRAP_FRAME, XFltF4);
+ genDef(Tr, KTRAP_FRAME, XFltF5);
+ genDef(Tr, KTRAP_FRAME, XFltF6);
+ genDef(Tr, KTRAP_FRAME, XFltF7);
+ genDef(Tr, KTRAP_FRAME, XFltF8);
+ genDef(Tr, KTRAP_FRAME, XFltF9);
+ genDef(Tr, KTRAP_FRAME, XFltF10);
+ genDef(Tr, KTRAP_FRAME, XFltF11);
+ genDef(Tr, KTRAP_FRAME, XFltF12);
+ genDef(Tr, KTRAP_FRAME, XFltF13);
+ genDef(Tr, KTRAP_FRAME, XFltF14);
+ genDef(Tr, KTRAP_FRAME, XFltF15);
+ genDef(Tr, KTRAP_FRAME, XFltF16);
+ genDef(Tr, KTRAP_FRAME, XFltF17);
+ genDef(Tr, KTRAP_FRAME, XFltF18);
+ genDef(Tr, KTRAP_FRAME, XFltF19);
+ genDef(Tr, KTRAP_FRAME, XFltF21);
+ genDef(Tr, KTRAP_FRAME, XFltF23);
+ genDef(Tr, KTRAP_FRAME, XFltF25);
+ genDef(Tr, KTRAP_FRAME, XFltF27);
+ genDef(Tr, KTRAP_FRAME, XFltF29);
+ genDef(Tr, KTRAP_FRAME, XFltF31);
+
+ genCom("64-bit Volatile Integer State");
+
+ genDef(Tr, KTRAP_FRAME, XIntZero);
+ genDef(Tr, KTRAP_FRAME, XIntAt);
+ genDef(Tr, KTRAP_FRAME, XIntV0);
+ genDef(Tr, KTRAP_FRAME, XIntV1);
+ genDef(Tr, KTRAP_FRAME, XIntA0);
+ genDef(Tr, KTRAP_FRAME, XIntA1);
+ genDef(Tr, KTRAP_FRAME, XIntA2);
+ genDef(Tr, KTRAP_FRAME, XIntA3);
+ genDef(Tr, KTRAP_FRAME, XIntT0);
+ genDef(Tr, KTRAP_FRAME, XIntT1);
+ genDef(Tr, KTRAP_FRAME, XIntT2);
+ genDef(Tr, KTRAP_FRAME, XIntT3);
+ genDef(Tr, KTRAP_FRAME, XIntT4);
+ genDef(Tr, KTRAP_FRAME, XIntT5);
+ genDef(Tr, KTRAP_FRAME, XIntT6);
+ genDef(Tr, KTRAP_FRAME, XIntT7);
+ genDef(Tr, KTRAP_FRAME, XIntS0);
+ genDef(Tr, KTRAP_FRAME, XIntS1);
+ genDef(Tr, KTRAP_FRAME, XIntS2);
+ genDef(Tr, KTRAP_FRAME, XIntS3);
+ genDef(Tr, KTRAP_FRAME, XIntS4);
+ genDef(Tr, KTRAP_FRAME, XIntS5);
+ genDef(Tr, KTRAP_FRAME, XIntS6);
+ genDef(Tr, KTRAP_FRAME, XIntS7);
+ genDef(Tr, KTRAP_FRAME, XIntT8);
+ genDef(Tr, KTRAP_FRAME, XIntT9);
+ genDef(Tr, KTRAP_FRAME, XIntGp);
+ genDef(Tr, KTRAP_FRAME, XIntSp);
+ genDef(Tr, KTRAP_FRAME, XIntS8);
+ genDef(Tr, KTRAP_FRAME, XIntRa);
+ genDef(Tr, KTRAP_FRAME, XIntLo);
+ genDef(Tr, KTRAP_FRAME, XIntHi);
+
+ genSpc();
+
+ genDef(Tr, KTRAP_FRAME, Fir);
+ genDef(Tr, KTRAP_FRAME, Fsr);
+ genDef(Tr, KTRAP_FRAME, Psr);
+ genDef(Tr, KTRAP_FRAME, ExceptionRecord);
+ genDef(Tr, KTRAP_FRAME, OldIrql);
+ genDef(Tr, KTRAP_FRAME, PreviousMode);
+ genDef(Tr, KTRAP_FRAME, SavedFlag);
+ genAlt(TrOnInterruptStack, KTRAP_FRAME, u.OnInterruptStack);
+ genAlt(TrTrapFrame, KTRAP_FRAME, u.TrapFrame);
+
+ genVal(TrapFrameLength, sizeof(KTRAP_FRAME));
+ genVal(TrapFrameArguments, KTRAP_FRAME_ARGUMENTS);
+
+ //
+ // Usermode callout kernel frame definitions
+ //
+
+ DisableInc(HALMIPS);
+
+ genCom("Usermode callout kernel frame definitions");
+
+ genDef(Cu, KCALLOUT_FRAME, F20);
+ genDef(Cu, KCALLOUT_FRAME, F21);
+ genDef(Cu, KCALLOUT_FRAME, F22);
+ genDef(Cu, KCALLOUT_FRAME, F23);
+ genDef(Cu, KCALLOUT_FRAME, F24);
+ genDef(Cu, KCALLOUT_FRAME, F25);
+ genDef(Cu, KCALLOUT_FRAME, F26);
+ genDef(Cu, KCALLOUT_FRAME, F27);
+ genDef(Cu, KCALLOUT_FRAME, F28);
+ genDef(Cu, KCALLOUT_FRAME, F29);
+ genDef(Cu, KCALLOUT_FRAME, F30);
+ genDef(Cu, KCALLOUT_FRAME, F31);
+ genDef(Cu, KCALLOUT_FRAME, S0);
+ genDef(Cu, KCALLOUT_FRAME, S1);
+ genDef(Cu, KCALLOUT_FRAME, S2);
+ genDef(Cu, KCALLOUT_FRAME, S3);
+ genDef(Cu, KCALLOUT_FRAME, S4);
+ genDef(Cu, KCALLOUT_FRAME, S5);
+ genDef(Cu, KCALLOUT_FRAME, S6);
+ genDef(Cu, KCALLOUT_FRAME, S7);
+ genDef(Cu, KCALLOUT_FRAME, S8);
+ genDef(Cu, KCALLOUT_FRAME, CbStk);
+ genDef(Cu, KCALLOUT_FRAME, TrFr);
+ genDef(Cu, KCALLOUT_FRAME, Fsr);
+ genDef(Cu, KCALLOUT_FRAME, InStk);
+ genDef(Cu, KCALLOUT_FRAME, Ra);
+ genVal(CuFrameLength, OFFSET(KCALLOUT_FRAME, A0));
+ genDef(Cu, KCALLOUT_FRAME, A0);
+ genDef(Cu, KCALLOUT_FRAME, A1);
+
+ //
+ // Usermode callout user frame definitions.
+ //
+
+ genCom("Usermode callout user frame definitions");
+
+ genDef(Ck, UCALLOUT_FRAME, Buffer);
+ genDef(Ck, UCALLOUT_FRAME, Length);
+ genDef(Ck, UCALLOUT_FRAME, ApiNumber);
+ genDef(Ck, UCALLOUT_FRAME, Sp);
+ genDef(Ck, UCALLOUT_FRAME, Ra);
+
+ EnableInc(HALMIPS);
+
+ //
+ // Loader Parameter Block offset definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Loader Parameter Block Offset Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define LpbLoadOrderListHead 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, LoadOrderListHead));
+
+ dumpf("#define LpbMemoryDescriptorListHead 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, MemoryDescriptorListHead));
+
+ dumpf("#define LpbKernelStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, KernelStack));
+
+ dumpf("#define LpbPrcb 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Prcb));
+
+ dumpf("#define LpbProcess 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Process));
+
+ dumpf("#define LpbThread 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Thread));
+
+ dumpf("#define LpbInterruptStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.InterruptStack));
+
+ dumpf("#define LpbFirstLevelDcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelDcacheSize));
+
+ dumpf("#define LpbFirstLevelDcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelDcacheFillSize));
+
+ dumpf("#define LpbFirstLevelIcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelIcacheSize));
+
+ dumpf("#define LpbFirstLevelIcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelIcacheFillSize));
+
+ dumpf("#define LpbGpBase 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.GpBase));
+
+ dumpf("#define LpbPanicStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PanicStack));
+
+ dumpf("#define LpbPcrPage 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PcrPage));
+
+ dumpf("#define LpbPdrPage 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PdrPage));
+
+ dumpf("#define LpbSecondLevelDcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelDcacheSize));
+
+ dumpf("#define LpbSecondLevelDcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelDcacheFillSize));
+
+ dumpf("#define LpbSecondLevelIcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelIcacheSize));
+
+ dumpf("#define LpbSecondLevelIcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelIcacheFillSize));
+
+ dumpf("#define LpbPcrPage2 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PcrPage2));
+
+ dumpf("#define LpbRegistryLength 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, RegistryLength));
+
+ dumpf("#define LpbRegistryBase 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, RegistryBase));
+
+ DisableInc (HALMIPS);
+
+ //
+ // Define Client/Server data structure definitions.
+ //
+
+ genCom("Client/Server Structure Definitions");
+
+ genDef(Cid, CLIENT_ID, UniqueProcess);
+ genDef(Cid, CLIENT_ID, UniqueThread);
+
+ //
+ // Address space layout definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Address Space Layout Definitions");
+
+ genVal(KUSEG_BASE, KUSEG_BASE);
+ genVal(KSEG0_BASE, KSEG0_BASE);
+ genVal(KSEG1_BASE, KSEG1_BASE);
+ genVal(KSEG2_BASE, KSEG2_BASE);
+
+ DisableInc(HALMIPS);
+
+ genVal(CACHE_ERROR_VECTOR, CACHE_ERROR_VECTOR);
+ genVal(SYSTEM_BASE, SYSTEM_BASE);
+ genVal(PDE_BASE, PDE_BASE);
+ genVal(PTE_BASE, PTE_BASE);
+
+ //
+ // Page table and page directory entry definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Page Table and Directory Entry Definitions");
+
+ genVal(PAGE_SIZE, PAGE_SIZE);
+ genVal(PAGE_SHIFT, PAGE_SHIFT);
+ genVal(PDI_SHIFT, PDI_SHIFT);
+ genVal(PTI_SHIFT, PTI_SHIFT);
+
+ //
+ // Software interrupt request mask definitions
+ //
+
+ genCom("Software Interrupt Request Mask Definitions");
+
+ genVal(APC_INTERRUPT, (1 << (APC_LEVEL + CAUSE_INTPEND - 1)));
+ genVal(DISPATCH_INTERRUPT, (1 << (DISPATCH_LEVEL + CAUSE_INTPEND - 1)));
+
+ DisableInc(HALMIPS);
+
+ //
+ // Breakpoint instruction definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Breakpoint Definitions");
+
+ genVal(USER_BREAKPOINT, USER_BREAKPOINT);
+ genVal(KERNEL_BREAKPOINT, KERNEL_BREAKPOINT);
+ genVal(BREAKIN_BREAKPOINT, BREAKIN_BREAKPOINT);
+
+ DisableInc(HALMIPS);
+
+ genVal(BRANCH_TAKEN_BREAKPOINT, BRANCH_TAKEN_BREAKPOINT);
+ genVal(BRANCH_NOT_TAKEN_BREAKPOINT, BRANCH_NOT_TAKEN_BREAKPOINT);
+ genVal(SINGLE_STEP_BREAKPOINT, SINGLE_STEP_BREAKPOINT);
+ genVal(DIVIDE_OVERFLOW_BREAKPOINT, DIVIDE_OVERFLOW_BREAKPOINT);
+ genVal(DIVIDE_BY_ZERO_BREAKPOINT, DIVIDE_BY_ZERO_BREAKPOINT);
+ genVal(RANGE_CHECK_BREAKPOINT, RANGE_CHECK_BREAKPOINT);
+ genVal(STACK_OVERFLOW_BREAKPOINT, STACK_OVERFLOW_BREAKPOINT);
+ genVal(MULTIPLY_OVERFLOW_BREAKPOINT, MULTIPLY_OVERFLOW_BREAKPOINT);
+ genVal(DEBUG_PRINT_BREAKPOINT, DEBUG_PRINT_BREAKPOINT);
+ genVal(DEBUG_PROMPT_BREAKPOINT, DEBUG_PROMPT_BREAKPOINT);
+ genVal(DEBUG_STOP_BREAKPOINT, DEBUG_STOP_BREAKPOINT);
+ genVal(DEBUG_LOAD_SYMBOLS_BREAKPOINT, DEBUG_LOAD_SYMBOLS_BREAKPOINT);
+ genVal(DEBUG_UNLOAD_SYMBOLS_BREAKPOINT, DEBUG_UNLOAD_SYMBOLS_BREAKPOINT);
+
+ //
+ // Miscellaneous definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Miscellaneous Definitions");
+
+ genVal(Executive, Executive);
+ genVal(KernelMode, KernelMode);
+ genVal(FALSE, FALSE);
+ genVal(TRUE, TRUE);
+ genVal(UNCACHED_POLICY, UNCACHED_POLICY);
+ genVal(KiPcr, KIPCR);
+ genVal(KiPcr2, KIPCR2);
+
+ DisableInc(HALMIPS);
+
+ genVal(UsPcr, USPCR);
+ genVal(UsPcr2, USPCR2);
+ genVal(BASE_PRIORITY_THRESHOLD, BASE_PRIORITY_THRESHOLD);
+ genVal(EVENT_PAIR_INCREMENT, EVENT_PAIR_INCREMENT);
+ genVal(LOW_REALTIME_PRIORITY, LOW_REALTIME_PRIORITY);
+ genVal(KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
+ genVal(KERNEL_LARGE_STACK_COMMIT, KERNEL_LARGE_STACK_COMMIT);
+ genVal(XCODE_VECTOR_LENGTH, XCODE_VECTOR_LENGTH);
+ genVal(MM_USER_PROBE_ADDRESS, MM_USER_PROBE_ADDRESS);
+ genVal(ROUND_TO_NEAREST, ROUND_TO_NEAREST);
+ genVal(ROUND_TO_ZERO, ROUND_TO_ZERO);
+ genVal(ROUND_TO_PLUS_INFINITY, ROUND_TO_PLUS_INFINITY);
+ genVal(ROUND_TO_MINUS_INFINITY, ROUND_TO_MINUS_INFINITY);
+ genVal(CLOCK_QUANTUM_DECREMENT, CLOCK_QUANTUM_DECREMENT);
+ genVal(READY_SKIP_QUANTUM, READY_SKIP_QUANTUM);
+ genVal(THREAD_QUANTUM, THREAD_QUANTUM);
+ genVal(WAIT_QUANTUM_DECREMENT, WAIT_QUANTUM_DECREMENT);
+ genVal(ROUND_TRIP_DECREMENT_COUNT, ROUND_TRIP_DECREMENT_COUNT);
+
+ //
+    // Finished writing the header files.
+ //
+
+ fprintf(stderr, " Finished\n");
+ return;
+}
+
+VOID
+dumpf(
+ const char *format,
+ ...
+ )
+
+{
+
+ va_list(arglist);
+
+ va_start(arglist, format);
+
+ if (OutputEnabled & KSMIPS) {
+ vfprintf (KsMips, format, arglist);
+ }
+
+ if (OutputEnabled & HALMIPS) {
+ vfprintf (HalMips, format, arglist);
+ }
+
+ va_end(arglist);
+}
diff --git a/private/ntos/ke/mips/getsetrg.c b/private/ntos/ke/mips/getsetrg.c
new file mode 100644
index 000000000..88e967454
--- /dev/null
+++ b/private/ntos/ke/mips/getsetrg.c
@@ -0,0 +1,1179 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ getsetrg.c
+
+Abstract:
+
+    This module implements the code necessary to get and set register values.
+ These routines are used during the emulation of unaligned data references
+ and floating point exceptions.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+ULONG
+KiGetRegisterValue (
+ IN ULONG Register,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to get the 32-bit value of a register from the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ returned. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The value of the specified register is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
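+    //
+    // Integer registers 1 - 31 are read from the 64-bit register image in
+    // the trap frame and truncated to 32 bits. Floating registers f0 - f19
+    // are volatile and are read from the trap frame; f20 - f31 are
+    // nonvolatile and are read from the exception frame.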
+ //
+
+ if (Register == 0) {
+ return 0;
+
+ } else if (Register < 32) {
+ return (ULONG)(&TrapFrame->XIntZero)[Register];
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ return TrapFrame->FltF0;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ return TrapFrame->FltF1;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ return TrapFrame->FltF2;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ return TrapFrame->FltF3;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ return TrapFrame->FltF4;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ return TrapFrame->FltF5;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ return TrapFrame->FltF6;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ return TrapFrame->FltF7;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ return TrapFrame->FltF8;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ return TrapFrame->FltF9;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ return TrapFrame->FltF10;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ return TrapFrame->FltF11;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ return TrapFrame->FltF12;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ return TrapFrame->FltF13;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ return TrapFrame->FltF14;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ return TrapFrame->FltF15;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ return TrapFrame->FltF16;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ return TrapFrame->FltF17;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ return TrapFrame->FltF18;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ return TrapFrame->FltF19;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ return ExceptionFrame->FltF20;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ return ExceptionFrame->FltF21;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ return ExceptionFrame->FltF22;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ return ExceptionFrame->FltF23;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ return ExceptionFrame->FltF24;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ return ExceptionFrame->FltF25;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ return ExceptionFrame->FltF26;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ return ExceptionFrame->FltF27;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ return ExceptionFrame->FltF28;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ return ExceptionFrame->FltF29;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ return ExceptionFrame->FltF30;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ return ExceptionFrame->FltF31;
+ }
+ }
+}
+
+ULONGLONG
+KiGetRegisterValue64 (
+ IN ULONG Register,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to get the 64-bit value of a register from the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ returned. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The value of the specified register is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ if (Register == 0) {
+ return 0;
+
+ } else if (Register < 32) {
+ return (&TrapFrame->XIntZero)[Register];
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ return TrapFrame->XFltF0;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ return TrapFrame->XFltF1;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ return TrapFrame->XFltF2;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ return TrapFrame->XFltF3;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ return TrapFrame->XFltF4;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ return TrapFrame->XFltF5;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ return TrapFrame->XFltF6;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ return TrapFrame->XFltF7;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ return TrapFrame->XFltF8;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ return TrapFrame->XFltF9;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ return TrapFrame->XFltF10;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ return TrapFrame->XFltF11;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ return TrapFrame->XFltF12;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ return TrapFrame->XFltF13;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ return TrapFrame->XFltF14;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ return TrapFrame->XFltF15;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ return TrapFrame->XFltF16;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ return TrapFrame->XFltF17;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ return TrapFrame->XFltF18;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ return TrapFrame->XFltF19;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ return ExceptionFrame->XFltF20;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ return TrapFrame->XFltF21;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ return ExceptionFrame->XFltF22;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ return TrapFrame->XFltF23;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ return ExceptionFrame->XFltF24;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ return TrapFrame->XFltF25;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ return ExceptionFrame->XFltF26;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ return TrapFrame->XFltF27;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ return ExceptionFrame->XFltF28;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ return TrapFrame->XFltF29;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ return ExceptionFrame->XFltF30;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ return TrapFrame->XFltF31;
+ }
+ }
+}
+
+VOID
+KiSetRegisterValue (
+ IN ULONG Register,
+ IN ULONG Value,
+ OUT PKEXCEPTION_FRAME ExceptionFrame,
+ OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the 32-bit value of a register in the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ stored. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ Value - Supplies the value to be stored in the specified register.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ if (Register < 32) {
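+
+        //
+        // The cast to LONG sign extends the 32-bit value into the 64-bit
+        // integer register image in the trap frame.
+        //
+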
+ (&TrapFrame->XIntZero)[Register] = (LONG)Value;
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ TrapFrame->FltF0 = Value;
+ return;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ TrapFrame->FltF1 = Value;
+ return;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ TrapFrame->FltF2 = Value;
+ return;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ TrapFrame->FltF3 = Value;
+ return;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ TrapFrame->FltF4 = Value;
+ return;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ TrapFrame->FltF5 = Value;
+ return;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ TrapFrame->FltF6 = Value;
+ return;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ TrapFrame->FltF7 = Value;
+ return;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ TrapFrame->FltF8 = Value;
+ return;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ TrapFrame->FltF9 = Value;
+ return;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ TrapFrame->FltF10 = Value;
+ return;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ TrapFrame->FltF11 = Value;
+ return;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ TrapFrame->FltF12 = Value;
+ return;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ TrapFrame->FltF13 = Value;
+ return;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ TrapFrame->FltF14 = Value;
+ return;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ TrapFrame->FltF15 = Value;
+ return;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ TrapFrame->FltF16 = Value;
+ return;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ TrapFrame->FltF17 = Value;
+ return;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ TrapFrame->FltF18 = Value;
+ return;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ TrapFrame->FltF19 = Value;
+ return;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ ExceptionFrame->FltF20 = Value;
+ return;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ ExceptionFrame->FltF21 = Value;
+ return;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ ExceptionFrame->FltF22 = Value;
+ return;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ ExceptionFrame->FltF23 = Value;
+ return;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ ExceptionFrame->FltF24 = Value;
+ return;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ ExceptionFrame->FltF25 = Value;
+ return;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ ExceptionFrame->FltF26 = Value;
+ return;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ ExceptionFrame->FltF27 = Value;
+ return;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ ExceptionFrame->FltF28 = Value;
+ return;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ ExceptionFrame->FltF29 = Value;
+ return;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ ExceptionFrame->FltF30 = Value;
+ return;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ ExceptionFrame->FltF31 = Value;
+ return;
+ }
+ }
+}
+
+VOID
+KiSetRegisterValue64 (
+ IN ULONG Register,
+ IN ULONGLONG Value,
+ OUT PKEXCEPTION_FRAME ExceptionFrame,
+ OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the 64-bit value of a register in the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ stored. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ Value - Supplies the value to be stored in the specified register.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ if (Register < 32) {
+ (&TrapFrame->XIntZero)[Register] = Value;
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ TrapFrame->XFltF0 = Value;
+ return;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ TrapFrame->XFltF1 = Value;
+ return;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ TrapFrame->XFltF2 = Value;
+ return;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ TrapFrame->XFltF3 = Value;
+ return;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ TrapFrame->XFltF4 = Value;
+ return;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ TrapFrame->XFltF5 = Value;
+ return;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ TrapFrame->XFltF6 = Value;
+ return;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ TrapFrame->XFltF7 = Value;
+ return;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ TrapFrame->XFltF8 = Value;
+ return;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ TrapFrame->XFltF9 = Value;
+ return;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ TrapFrame->XFltF10 = Value;
+ return;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ TrapFrame->XFltF11 = Value;
+ return;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ TrapFrame->XFltF12 = Value;
+ return;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ TrapFrame->XFltF13 = Value;
+ return;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ TrapFrame->XFltF14 = Value;
+ return;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ TrapFrame->XFltF15 = Value;
+ return;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ TrapFrame->XFltF16 = Value;
+ return;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ TrapFrame->XFltF17 = Value;
+ return;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ TrapFrame->XFltF18 = Value;
+ return;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ TrapFrame->XFltF19 = Value;
+ return;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ ExceptionFrame->XFltF20 = Value;
+ return;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ TrapFrame->XFltF21 = Value;
+ return;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ ExceptionFrame->XFltF22 = Value;
+ return;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ TrapFrame->XFltF23 = Value;
+ return;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ ExceptionFrame->XFltF24 = Value;
+ return;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ TrapFrame->XFltF25 = Value;
+ return;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ ExceptionFrame->XFltF26 = Value;
+ return;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ TrapFrame->XFltF27 = Value;
+ return;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ ExceptionFrame->XFltF28 = Value;
+ return;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ TrapFrame->XFltF29 = Value;
+ return;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ ExceptionFrame->XFltF30 = Value;
+ return;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ TrapFrame->XFltF31 = Value;
+ return;
+ }
+ }
+}
diff --git a/private/ntos/ke/mips/initkr.c b/private/ntos/ke/mips/initkr.c
new file mode 100644
index 000000000..1c6f11baf
--- /dev/null
+++ b/private/ntos/ke/mips/initkr.c
@@ -0,0 +1,463 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ initkr.c
+
+Abstract:
+
+ This module contains the code to initialize the kernel data structures
+ and to initialize the idle thread, its process, and the processor control
+ block.
+
+Author:
+
+ David N. Cutler (davec) 11-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Put all code for kernel initialization in the INIT section. It will be
+// deallocated by memory management when phase 1 initialization is completed.
+//
+
+#if defined(ALLOC_PRAGMA)
+
+#pragma alloc_text(INIT, KiInitializeKernel)
+
+#endif
+
+VOID
+KiInitializeKernel (
+ IN PKPROCESS Process,
+ IN PKTHREAD Thread,
+ IN PVOID IdleStack,
+ IN PKPRCB Prcb,
+ IN CCHAR Number,
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function gains control after the system has been bootstrapped and
+ before the system has been initialized. Its function is to initialize
+ the kernel data structures, initialize the idle thread and process objects,
+ initialize the processor control block, call the executive initialization
+ routine, and then return to the system startup routine. This routine is
+ also called to initialize the processor specific structures when a new
+ processor is brought on line.
+
+Arguments:
+
+ Process - Supplies a pointer to a control object of type process for
+ the specified processor.
+
+ Thread - Supplies a pointer to a dispatcher object of type thread for
+ the specified processor.
+
+    IdleStack - Supplies a pointer to the base of the real kernel stack for
+        the idle thread on the specified processor.
+
+ Prcb - Supplies a pointer to a processor control block for the specified
+ processor.
+
+ Number - Supplies the number of the processor that is being
+ initialized.
+
+ LoaderBlock - Supplies a pointer to the loader parameter block.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Index;
+ KIRQL OldIrql;
+ PRESTART_BLOCK RestartBlock;
+
+ //
+ // Perform platform dependent processor initialization.
+ //
+
+ HalInitializeProcessor(Number);
+
+ //
+ // Save the address of the loader parameter block.
+ //
+
+ KeLoaderBlock = LoaderBlock;
+
+ //
+ // Initialize the processor block.
+ //
+
+ Prcb->MinorVersion = PRCB_MINOR_VERSION;
+ Prcb->MajorVersion = PRCB_MAJOR_VERSION;
+ Prcb->BuildType = 0;
+
+#if DBG
+
+ Prcb->BuildType |= PRCB_BUILD_DEBUG;
+
+#endif
+
+#if defined(NT_UP)
+
+ Prcb->BuildType |= PRCB_BUILD_UNIPROCESSOR;
+
+#endif
+
+ Prcb->CurrentThread = Thread;
+ Prcb->NextThread = (PKTHREAD)NULL;
+ Prcb->IdleThread = Thread;
+ Prcb->Number = Number;
+ Prcb->SetMember = 1 << Number;
+ Prcb->PcrPage = LoaderBlock->u.Mips.PcrPage;
+
+#if !defined(NT_UP)
+
+ Prcb->TargetSet = 0;
+ Prcb->WorkerRoutine = NULL;
+ Prcb->RequestSummary = 0;
+ Prcb->IpiFrozen = 0;
+
+#if NT_INST
+
+ Prcb->IpiCounts = &KiIpiCounts[Number];
+
+#endif
+
+#endif
+
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+
+ //
+ // Initialize DPC listhead and lock.
+ //
+
+ InitializeListHead(&Prcb->DpcListHead);
+ KeInitializeSpinLock(&Prcb->DpcLock);
+
+ //
+ // Set address of processor block.
+ //
+
+ KiProcessorBlock[Number] = Prcb;
+
+ //
+ // Set global processor architecture, level and revision. The
+ // latter two are the least common denominator on an MP system.
+ //
+
+ KeProcessorArchitecture = PROCESSOR_ARCHITECTURE_MIPS;
+ KeFeatureBits = 0;
+ if (KeProcessorLevel == 0 ||
+ KeProcessorLevel > (USHORT)(PCR->ProcessorId >> 8)) {
+ KeProcessorLevel = (USHORT)(PCR->ProcessorId >> 8);
+ }
+
+ if (KeProcessorRevision == 0 ||
+ KeProcessorRevision > (USHORT)(PCR->ProcessorId & 0xff)) {
+ KeProcessorRevision = (USHORT)(PCR->ProcessorId & 0xff);
+ }
+
+ //
+ // Initialize the address of the bus error routines.
+ //
+
+ PCR->DataBusError = KeBusError;
+ PCR->InstructionBusError = KeBusError;
+
+ //
+ // Initialize the idle thread initial kernel stack and limit address value.
+ //
+
+ PCR->InitialStack = IdleStack;
+ PCR->StackLimit = (PVOID)((ULONG)IdleStack - KERNEL_STACK_SIZE);
+
+ //
+ // Initialize all interrupt vectors to transfer control to the unexpected
+ // interrupt routine.
+ //
+ // N.B. This interrupt object is never actually "connected" to an interrupt
+ // vector via KeConnectInterrupt. It is initialized and then connected
+ // by simply storing the address of the dispatch code in the interrupt
+ // vector.
+ //
+
+ if (Number == 0) {
+
+ //
+        // Initialize the address of the interrupt dispatch routine.
+ //
+
+ KxUnexpectedInterrupt.DispatchAddress = KiUnexpectedInterrupt;
+
+ //
+ // Copy the interrupt dispatch code template into the interrupt object
+ // and flush the dcache on all processors that the current thread can
+ // run on to ensure that the code is actually in memory.
+ //
+
+ for (Index = 0; Index < DISPATCH_LENGTH; Index += 1) {
+ KxUnexpectedInterrupt.DispatchCode[Index] = KiInterruptTemplate[Index];
+ }
+
+ //
+ // Set the default DMA I/O coherency attributes.
+ //
+
+ KiDmaIoCoherency = 0;
+
+ //
+ // Initialize the context swap spinlock.
+ //
+
+ KeInitializeSpinLock(&KiContextSwapLock);
+
+ //
+ // Sweep the data cache to make sure the dispatch code is flushed
+ // to memory on the current processor.
+ //
+
+ HalSweepDcache();
+ }
+
+ for (Index = 0; Index < MAXIMUM_VECTOR; Index += 1) {
+ PCR->InterruptRoutine[Index] =
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+
+ //
+ // Initialize the profile count and interval.
+ //
+
+ PCR->ProfileCount = 0;
+ PCR->ProfileInterval = 0x200000;
+
+ //
+ // Initialize the passive release, APC, and DPC interrupt vectors.
+ //
+
+ PCR->InterruptRoutine[0] = KiPassiveRelease;
+ PCR->InterruptRoutine[APC_LEVEL] = KiApcInterrupt;
+ PCR->InterruptRoutine[DISPATCH_LEVEL] = KiDispatchInterrupt;
+ PCR->ReservedVectors = (1 << PASSIVE_LEVEL) | (1 << APC_LEVEL) |
+ (1 << DISPATCH_LEVEL) | (1 << IPI_LEVEL);
+
+ //
+ // Initialize the set member for the current processor, set IRQL to
+ // APC_LEVEL, and set the processor number.
+ //
+
+ PCR->CurrentIrql = APC_LEVEL;
+ PCR->SetMember = 1 << Number;
+ PCR->NotMember = ~PCR->SetMember;
+ PCR->Number = Number;
+
+ //
+ // Set the initial stall execution scale factor. This value will be
+ // recomputed later by the HAL.
+ //
+
+ PCR->StallScaleFactor = 50;
+
+ //
+ // Set address of process object in thread object.
+ //
+
+ Thread->ApcState.Process = Process;
+
+ //
+ // Set the appropriate member in the active processors set.
+ //
+
+ SetMember(Number, KeActiveProcessors);
+
+ //
+ // Set the number of processors based on the maximum of the current
+ // number of processors and the current processor number.
+ //
+
+ if ((Number + 1) > KeNumberProcessors) {
+ KeNumberProcessors = Number + 1;
+ }
+
+ //
+ // If the initial processor is being initialized, then initialize the
+ // per system data structures.
+ //
+
+ if (Number == 0) {
+
+ //
+ // Initialize the address of the restart block for the boot master.
+ //
+
+ Prcb->RestartBlock = SYSTEM_BLOCK->RestartBlock;
+
+ //
+ // Initialize the kernel debugger.
+ //
+
+ if (KdInitSystem(LoaderBlock, FALSE) == FALSE) {
+ KeBugCheck(PHASE0_INITIALIZATION_FAILED);
+ }
+
+ //
+ // Initialize processor block array.
+ //
+
+ for (Index = 1; Index < MAXIMUM_PROCESSORS; Index += 1) {
+ KiProcessorBlock[Index] = (PKPRCB)NULL;
+ }
+
+ //
+ // Perform architecture independent initialization.
+ //
+
+ KiInitSystem();
+
+ //
+ // Initialize idle thread process object and then set:
+ //
+ // 1. all the quantum values to the maximum possible.
+ // 2. the process in the balance set.
+ // 3. the active processor mask to the specified processor.
+ //
+
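+        //
+        // N.B. The directory base argument below is the virtual address of
+        //      the page directory entry that maps the page directory itself
+        //      (the recursive self map entry derived from PDE_BASE).
+        //
+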
+ KeInitializeProcess(Process,
+ (KPRIORITY)0,
+ (KAFFINITY)(0xffffffff),
+                            (PULONG)(PDE_BASE + ((PDE_BASE >> (PDI_SHIFT - 2)) & 0xffc)),
+ FALSE);
+
+ Process->ThreadQuantum = MAXCHAR;
+
+ }
+
+ //
+ // Initialize idle thread object and then set:
+ //
+ // 1. the initial kernel stack to the specified idle stack.
+ // 2. the next processor number to the specified processor.
+ // 3. the thread priority to the highest possible value.
+ // 4. the state of the thread to running.
+ // 5. the thread affinity to the specified processor.
+ // 6. the specified processor member in the process active processors
+ // set.
+ //
+
+ KeInitializeThread(Thread, (PVOID)((ULONG)IdleStack - PAGE_SIZE),
+ (PKSYSTEM_ROUTINE)NULL, (PKSTART_ROUTINE)NULL,
+ (PVOID)NULL, (PCONTEXT)NULL, (PVOID)NULL, Process);
+
+ Thread->InitialStack = IdleStack;
+ Thread->StackBase = IdleStack;
+ Thread->StackLimit = (PVOID)((ULONG)IdleStack - KERNEL_STACK_SIZE);
+ Thread->NextProcessor = Number;
+ Thread->Priority = HIGH_PRIORITY;
+ Thread->State = Running;
+ Thread->Affinity = (KAFFINITY)(1 << Number);
+ Thread->WaitIrql = DISPATCH_LEVEL;
+
+ //
+ // If the current processor is 0, then set the appropriate bit in the
+ // active summary of the idle process.
+ //
+
+ if (Number == 0) {
+ SetMember(Number, Process->ActiveProcessors);
+ }
+
+ //
+ // Execute the executive initialization.
+ //
+
+ try {
+ ExpInitializeExecutive(Number, LoaderBlock);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ KeBugCheck (PHASE0_EXCEPTION);
+ }
+
+ //
+ // If the initial processor is being initialized, then compute the
+ // timer table reciprocal value and reset the PRCB values for the
+ // controllable DPC behavior in order to reflect any registry
+ // overrides.
+ //
+
+ if (Number == 0) {
+ KiTimeIncrementReciprocal = KiComputeReciprocal((LONG)KeMaximumIncrement,
+ &KiTimeIncrementShiftCount);
+
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+ }
+
+ //
+ // Raise IRQL to dispatch level and set the priority of the idle thread
+ // to zero. This will have the effect of immediately causing the phase
+ // one initialization thread to get scheduled for execution. The idle
+ // thread priority is then set to the lowest realtime priority. This is
+    // necessary so that mutexes acquired at DPC level do not cause the active
+ // matrix to get corrupted.
+ //
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeSetPriorityThread(Thread, (KPRIORITY)0);
+ Thread->Priority = LOW_REALTIME_PRIORITY;
+
+ //
+ // Raise IRQL to the highest level.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ //
+    // If a restart block exists for the current processor, then set boot
+ // completed.
+ //
+ // N.B. Firmware on uniprocessor machines configured for MP operation
+ // can have a restart block address of NULL.
+ //
+
+#if !defined(NT_UP)
+
+ RestartBlock = Prcb->RestartBlock;
+ if (RestartBlock != NULL) {
+ RestartBlock->BootStatus.BootFinished = 1;
+ }
+
+ //
+ // If the current processor is not 0, then set the appropriate bit in
+ // idle summary.
+ //
+
+ if (Number != 0) {
+ SetMember(Number, KiIdleSummary);
+ }
+
+#endif
+
+ return;
+}
diff --git a/private/ntos/ke/mips/intobj.c b/private/ntos/ke/mips/intobj.c
new file mode 100644
index 000000000..3e97853be
--- /dev/null
+++ b/private/ntos/ke/mips/intobj.c
@@ -0,0 +1,434 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ intobj.c
+
+Abstract:
+
+ This module implements the kernel interrupt object. Functions are provided
+ to initialize, connect, and disconnect interrupt objects.
+
+Author:
+
+ David N. Cutler (davec) 3-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+VOID
+KeInitializeInterrupt (
+ IN PKINTERRUPT Interrupt,
+ IN PKSERVICE_ROUTINE ServiceRoutine,
+ IN PVOID ServiceContext,
+ IN PKSPIN_LOCK SpinLock OPTIONAL,
+ IN ULONG Vector,
+ IN KIRQL Irql,
+ IN KIRQL SynchronizeIrql,
+ IN KINTERRUPT_MODE InterruptMode,
+ IN BOOLEAN ShareVector,
+ IN CCHAR ProcessorNumber,
+ IN BOOLEAN FloatingSave
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel interrupt object. The service routine,
+ service context, spin lock, vector, IRQL, Synchronized IRQL, and floating
+ context save flag are initialized.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+ ServiceRoutine - Supplies a pointer to a function that is to be
+ executed when an interrupt occurs via the specified interrupt
+ vector.
+
+ ServiceContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the ServiceRoutine parameter.
+
+ SpinLock - Supplies an optional pointer to an executive spin lock.
+
+ Vector - Supplies the index of the entry in the Interrupt Dispatch Table
+ that is to be associated with the ServiceRoutine function.
+
+ Irql - Supplies the request priority of the interrupting source.
+
+ SynchronizeIrql - The request priority that the interrupt should be
+ synchronized with.
+
+ InterruptMode - Supplies the mode of the interrupt; LevelSensitive or
+ Latched.
+
+ ShareVector - Supplies a boolean value that specifies whether the
+ vector can be shared with other interrupt objects or not. If FALSE,
+ then the vector may not be shared; if TRUE, it may be.
+
+ ProcessorNumber - Supplies the number of the processor to which the
+ interrupt will be connected.
+
+ FloatingSave - Supplies a boolean value that determines whether the
+ floating point registers and pipe line are to be saved before calling
+ the ServiceRoutine function.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Index;
+
+ //
+ // Initialize standard control object header.
+ //
+
+ Interrupt->Type = InterruptObject;
+ Interrupt->Size = sizeof(KINTERRUPT);
+
+ //
+ // Initialize the address of the service routine, the service context,
+ // the address of the spin lock, the address of the actual spin lock
+ // that will be used, the vector number, the IRQL of the interrupting
+ // source, the Synchronized IRQL of the interrupt object, the interrupt
+ // mode, the processor number, and the floating context save flag.
+ //
+
+ Interrupt->ServiceRoutine = ServiceRoutine;
+ Interrupt->ServiceContext = ServiceContext;
+
+ if (ARGUMENT_PRESENT(SpinLock)) {
+ Interrupt->ActualLock = SpinLock;
+
+ } else {
+ Interrupt->SpinLock = 0;
+ Interrupt->ActualLock = &Interrupt->SpinLock;
+ }
+
+ Interrupt->Vector = Vector;
+ Interrupt->Irql = Irql;
+ Interrupt->SynchronizeIrql = SynchronizeIrql;
+ Interrupt->Mode = InterruptMode;
+ Interrupt->ShareVector = ShareVector;
+ Interrupt->Number = ProcessorNumber;
+ Interrupt->FloatingSave = FloatingSave;
+
+ //
+ // Copy the interrupt dispatch code template into the interrupt object
+ // and flush the dcache on all processors that the current thread can
+ // run on to ensure that the code is actually in memory.
+ //
+
+ for (Index = 0; Index < DISPATCH_LENGTH; Index += 1) {
+ Interrupt->DispatchCode[Index] = KiInterruptTemplate[Index];
+ }
+
+ KeSweepIcache(FALSE);
+
+ //
+ // Set the connected state of the interrupt object to FALSE.
+ //
+
+ Interrupt->Connected = FALSE;
+ return;
+}
+
+BOOLEAN
+KeConnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function connects an interrupt object to the interrupt vector
+ specified by the interrupt object. If the interrupt object is already
+ connected, or an attempt is made to connect to an interrupt that cannot
+ be connected, then a value of FALSE is returned. Else the specified
+ interrupt object is connected to the interrupt vector, the connected
+ state is set to TRUE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is already connected or an attempt is made to
+ connect to an interrupt vector that cannot be connected, then a value
+ of FALSE is returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ KIRQL Irql;
+ CHAR Number;
+ KIRQL OldIrql;
+ KIRQL PreviousIrql;
+ ULONG Vector;
+
+ //
+ // If the interrupt object is already connected, the interrupt vector
+ // number is invalid, an attempt is being made to connect to a vector
+ // that cannot be connected, the interrupt request level is invalid,
+ // the processor number is invalid, or the interrupt vector is less
+ // than or equal to the highest level and is not equal to the specified
+ // IRQL, then do not connect the interrupt object. Else connect the interrupt
+ // object to the specified vector and establish the proper interrupt
+ // dispatcher.
+ //
+
+ Connected = FALSE;
+ Irql = Interrupt->Irql;
+ Number = Interrupt->Number;
+ Vector = Interrupt->Vector;
+ if ((((Vector >= MAXIMUM_VECTOR) || (Irql > HIGH_LEVEL) ||
+ ((Vector <= HIGH_LEVEL) &&
+ ((((1 << Vector) & PCR->ReservedVectors) != 0) || (Vector != Irql))) ||
+ (Number >= KeNumberProcessors))) == FALSE) {
+
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1 << Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the specified interrupt vector is not connected, then
+ // connect the interrupt vector to the interrupt object dispatch
+ // code, establish the dispatcher address, and set the new
+ // interrupt mode and enable masks. Else if the interrupt is
+ // already chained, then add the new interrupt object at the end
+ // of the chain. If the interrupt vector is not chained, then
+ // start a chain with the previous interrupt object at the front
+ // of the chain. The interrupt mode of all interrupt objects in
+ // a chain must be the same.
+ //
+
+ if (Interrupt->Connected == FALSE) {
+ if (PCR->InterruptRoutine[Vector] ==
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode)) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ if (Interrupt->FloatingSave != FALSE) {
+ Interrupt->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupt->Irql == Interrupt->SynchronizeIrql) {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+
+ } else {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interrupt->DispatchCode);
+
+ HalEnableSystemInterrupt(Vector, Irql, Interrupt->Mode);
+
+ } else {
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+
+ if (Interrupt->Mode == Interruptx->Mode) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ KeRaiseIrql(max(Irql, (KIRQL)KiSynchIrql), &PreviousIrql);
+ if (Interruptx->DispatchAddress != KiChainedDispatch) {
+ InitializeListHead(&Interruptx->InterruptListEntry);
+ Interruptx->DispatchAddress = KiChainedDispatch;
+ }
+
+ InsertTailList(&Interruptx->InterruptListEntry,
+ &Interrupt->InterruptListEntry);
+
+ KeLowerIrql(PreviousIrql);
+ }
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+ }
+
+ //
+ // Return whether interrupt was connected to the specified vector.
+ //
+
+ return Connected;
+}
+
+BOOLEAN
+KeDisconnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function disconnects an interrupt object from the interrupt vector
+ specified by the interrupt object. If the interrupt object is not
+ connected, then a value of FALSE is returned. Else the specified interrupt
+ object is disconnected from the interrupt vector, the connected state is
+ set to FALSE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is not connected, then a value of FALSE is
+ returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ PKINTERRUPT Interrupty;
+ KIRQL Irql;
+ KIRQL OldIrql;
+ KIRQL PreviousIrql;
+ ULONG Vector;
+
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1 << Interrupt->Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the interrupt object is connected, then disconnect it from the
+ // specified vector.
+ //
+
+ Connected = Interrupt->Connected;
+ if (Connected != FALSE) {
+ Irql = Interrupt->Irql;
+ Vector = Interrupt->Vector;
+
+ //
+ // If the specified interrupt vector is not connected to the chained
+ // interrupt dispatcher, then disconnect it by setting its dispatch
+ // address to the unexpected interrupt routine. Else remove the
+ // interrupt object from the interrupt chain. If there is only
+ // one entry remaining in the list, then reestablish the dispatch
+ // address.
+ //
+
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+
+ if (Interruptx->DispatchAddress == KiChainedDispatch) {
+ KeRaiseIrql(max(Irql, (KIRQL)KiSynchIrql), &PreviousIrql);
+ if (Interrupt == Interruptx) {
+ Interruptx = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT, InterruptListEntry);
+ Interruptx->DispatchAddress = KiChainedDispatch;
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interruptx->DispatchCode);
+ }
+
+ RemoveEntryList(&Interrupt->InterruptListEntry);
+ Interrupty = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT,
+ InterruptListEntry);
+
+ if (Interruptx == Interrupty) {
+ if (Interrupty->FloatingSave != FALSE) {
+ Interrupty->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupty->Irql == Interrupty->SynchronizeIrql) {
+ Interrupty->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+
+ } else {
+ Interrupty->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interrupty->DispatchCode);
+ }
+
+ KeLowerIrql(PreviousIrql);
+
+ } else {
+ HalDisableSystemInterrupt(Vector, Irql);
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+
+ KeSweepIcache(TRUE);
+ Interrupt->Connected = FALSE;
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+
+ //
+ // Return whether interrupt was disconnected from the specified vector.
+ //
+
+ return Connected;
+}
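As a usage illustration only (not part of this module), the sketch below shows how a
hypothetical driver might call the routines above. SampleIsr, SampleConnect,
SampleVector, and SampleIrql are assumed names chosen for the example, not values
taken from this source.

/* Hypothetical usage sketch for KeInitializeInterrupt/KeConnectInterrupt. */

BOOLEAN
SampleIsr (
    IN PKINTERRUPT Interrupt,
    IN PVOID ServiceContext
    )
{

    /* Service the (hypothetical) device; return TRUE if it was interrupting. */

    return TRUE;
}

VOID
SampleConnect (
    IN PKINTERRUPT Interrupt,
    IN ULONG SampleVector,
    IN KIRQL SampleIrql
    )
{
    KeInitializeInterrupt(Interrupt,            /* caller-allocated, nonpaged storage */
                          SampleIsr,
                          NULL,                 /* service context */
                          NULL,                 /* use the built-in spin lock */
                          SampleVector,
                          SampleIrql,
                          SampleIrql,           /* synchronize at the device IRQL */
                          LevelSensitive,
                          TRUE,                 /* allow vector sharing */
                          0,                    /* connect on processor zero */
                          FALSE);               /* no floating state save */

    if (KeConnectInterrupt(Interrupt) == FALSE) {

        /* The vector was reserved, out of range, or incompatibly shared. */

    }
}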
diff --git a/private/ntos/ke/mips/services.stb b/private/ntos/ke/mips/services.stb
new file mode 100644
index 000000000..7c2f19871
--- /dev/null
+++ b/private/ntos/ke/mips/services.stb
@@ -0,0 +1,64 @@
+//++
+//
+// Copyright (c) 1989 Microsoft Corporation
+//
+// Module Name:
+//
+// sysstubs.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch stub procedures.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// User or kernel mode.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+#define STUBS_BEGIN1( t )
+#define STUBS_BEGIN2( t )
+#define STUBS_BEGIN3( t )
+#define STUBS_BEGIN4( t )
+#define STUBS_BEGIN5( t )
+#define STUBS_BEGIN6( t )
+#define STUBS_BEGIN7( t )
+#define STUBS_BEGIN8( t )
+
+#define STUBS_END
+
+#define SYSSTUBS_ENTRY1( ServiceNumber, Name, NumArgs ) SYSTEM_ENTRY(Zw##Name)
+#define SYSSTUBS_ENTRY2( ServiceNumber, Name, NumArgs ) li v0, ServiceNumber
+#define SYSSTUBS_ENTRY3( ServiceNumber, Name, NumArgs ) syscall
+#define SYSSTUBS_ENTRY4( ServiceNumber, Name, NumArgs ) .end Zw##Name ;
+#define SYSSTUBS_ENTRY5( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY6( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY7( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY8( ServiceNumber, Name, NumArgs )
+
+#define USRSTUBS_ENTRY1( ServiceNumber, Name, NumArgs) SYSTEM_ENTRY(Zw##Name)
+#define USRSTUBS_ENTRY2( ServiceNumber, Name, NumArgs) ALTERNATE_ENTRY(Nt##Name)
+#define USRSTUBS_ENTRY3( ServiceNumber, Name, NumArgs) li v0, ServiceNumber
+#define USRSTUBS_ENTRY4( ServiceNumber, Name, NumArgs) syscall
+#define USRSTUBS_ENTRY5( ServiceNumber, Name, NumArgs) .end Zw##Name ;
+#define USRSTUBS_ENTRY6( ServiceNumber, Name, NumArgs)
+#define USRSTUBS_ENTRY7( ServiceNumber, Name, NumArgs)
+#define USRSTUBS_ENTRY8( ServiceNumber, Name, NumArgs)
+
+
+ STUBS_BEGIN1( "System Service Stub Procedures" )
+ STUBS_BEGIN2( "System Service Stub Procedures" )
+ STUBS_BEGIN3( "System Service Stub Procedures" )
+ STUBS_BEGIN4( "System Service Stub Procedures" )
+ STUBS_BEGIN5( "System Service Stub Procedures" )
+ STUBS_BEGIN6( "System Service Stub Procedures" )
+ STUBS_BEGIN7( "System Service Stub Procedures" )
+ STUBS_BEGIN8( "System Service Stub Procedures" )
diff --git a/private/ntos/ke/mips/sources b/private/ntos/ke/mips/sources
new file mode 100644
index 000000000..d6a83c820
--- /dev/null
+++ b/private/ntos/ke/mips/sources
@@ -0,0 +1,41 @@
+!IFNDEF USE_CRTDLL
+MIPS_OPTIONS=-nodwalign -float
+GPSIZE=32
+!ENDIF
+
+MSC_WARNING_LEVEL=/W3 /WX
+
+MIPS_SOURCES=..\mips\alignem.c \
+ ..\mips\allproc.c \
+ ..\mips\apcuser.c \
+ ..\mips\branchem.c \
+ ..\mips\buserror.c \
+ ..\mips\callback.c \
+ ..\mips\callout.s \
+ ..\mips\dmpstate.c \
+ ..\mips\exceptn.c \
+ ..\mips\floatem.c \
+ ..\mips\flush.c \
+ ..\mips\getsetrg.c \
+ ..\mips\thredini.c \
+ ..\mips\timindex.s \
+ ..\mips\xxapcint.s \
+ ..\mips\xxclock.s \
+ ..\mips\xxflshtb.c \
+ ..\mips\xxintsup.s \
+ ..\mips\xxirql.s \
+ ..\mips\xxmiscs.s \
+ ..\mips\x4mpipi.s \
+ ..\mips\xxmpipi.c \
+ ..\mips\xxregsv.s \
+ ..\mips\xxspinlk.s \
+ ..\mips\x4ctxsw.s \
+ ..\mips\sysstubs.s \
+ ..\mips\systable.s \
+ ..\mips\x4trap.s \
+ ..\mips\threadbg.s \
+ ..\mips\initkr.c \
+ ..\mips\intobj.c \
+ ..\mips\x4start.s \
+ ..\mips\x4sqrt.s \
+ ..\mips\vdm.c
diff --git a/private/ntos/ke/mips/table.stb b/private/ntos/ke/mips/table.stb
new file mode 100644
index 000000000..2890df74b
--- /dev/null
+++ b/private/ntos/ke/mips/table.stb
@@ -0,0 +1,61 @@
+4 // This is the number of in register arguments
+//++
+//
+// Copyright (c) 1989 Microsoft Corporation
+//
+// Module Name:
+//
+// systable.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch table.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+//
+// To add a system service simply add the name of the service to the below
+// table. If the system service has in memory arguments, then immediately
+// follow the name of the service with a comma and following that the number
+// of bytes of in memory arguments, e.g. CreateObject,40.
+//
+
+#define TABLE_BEGIN1( t ) .rdata
+#define TABLE_BEGIN2( t ) .align 4
+#define TABLE_BEGIN3( t ) .globl KiServiceTable
+#define TABLE_BEGIN4( t ) KiServiceTable:
+#define TABLE_BEGIN5( t )
+#define TABLE_BEGIN6( t )
+#define TABLE_BEGIN7( t )
+#define TABLE_BEGIN8( t )
+
+#define TABLE_ENTRY( l,bias,numargs ) .word Nt##l+bias
+
+#define TABLE_END( n ) .sdata ; .globl KiServiceLimit ; KiServiceLimit: .word n + 1
+
+#define ARGTBL_BEGIN .rdata ; .align 4 ; .globl KiArgumentTable ; KiArgumentTable:
+
+#define ARGTBL_ENTRY( e0,e1,e2,e3,e4,e5,e6,e7 ) .byte e0,e1,e2,e3,e4,e5,e6,e7
+
+#define ARGTBL_END
+
+
+ TABLE_BEGIN1( "System Service Dispatch Table" )
+ TABLE_BEGIN2( "System Service Dispatch Table" )
+ TABLE_BEGIN3( "System Service Dispatch Table" )
+ TABLE_BEGIN4( "System Service Dispatch Table" )
+ TABLE_BEGIN5( "System Service Dispatch Table" )
+ TABLE_BEGIN6( "System Service Dispatch Table" )
+ TABLE_BEGIN7( "System Service Dispatch Table" )
+ TABLE_BEGIN8( "System Service Dispatch Table" )
+ \ No newline at end of file
diff --git a/private/ntos/ke/mips/threadbg.s b/private/ntos/ke/mips/threadbg.s
new file mode 100644
index 000000000..aaf0bdd49
--- /dev/null
+++ b/private/ntos/ke/mips/threadbg.s
@@ -0,0 +1,128 @@
+// TITLE("Thread Startup")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// threadbg.s
+//
+// Abstract:
+//
+// This module implements the MIPS machine dependent code necessary to
+// startup a thread in kernel mode.
+//
+// Author:
+//
+// David N. Cutler (davec) 28-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only, IRQL APC_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Thread Startup")
+//++
+//
+// RoutineDescription:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through thread startup
+// and to support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiThreadDispatch, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s7
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+ sw s3,ExIntS3(sp) //
+ sw s4,ExIntS4(sp) //
+ sw s5,ExIntS5(sp) //
+ sw s6,ExIntS6(sp) //
+ sw s7,ExIntS7(sp) //
+ sw s8,ExIntS8(sp) //
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f30
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// This routine is called at thread startup. Its function is to call the
+// initial thread procedure. If control returns from the initial thread
+// procedure and a user mode context was established when the thread
+// was initialized, then the user mode context is restored and control
+// is transferred to user mode. Otherwise a bug check will occur.
+//
+//
+// Arguments:
+//
+// s0 (saved) - Supplies a boolean value that specifies whether a user
+// mode thread context was established when the thread was initialized.
+//
+// s1 (saved) - Supplies the starting context parameter for the initial
+// thread procedure.
+//
+// s2 (saved) - Supplies the starting address of the initial thread routine.
+//
+// s3 - Supplies the starting address of the initial system routine.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiThreadStartup)
+
+ lw s0,ExIntS0(sp) // get user context flag
+ lw s1,ExIntS1(sp) // get context parameter value
+ lw s2,ExIntS2(sp) // get initial routine address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ bne zero,s0,10f // if ne, user context specified
+ subu sp,sp,TrapFrameArguments // allocate argument space
+
+ .set noreorder
+ .set noat
+10: ctc1 zero,fsr // clear floating status
+ .set at
+ .set reorder
+
+ li a0,APC_LEVEL // lower IRQL to APC level
+ jal KeLowerIrql //
+ move a0,s2 // set address of thread routine
+ move a1,s1 // set startup context parameter
+ jal s3 // call system startup routine
+ beq zero,s0,20f // if eq, no user context
+
+//
+// Finish in common exception exit code which will restore the nonvolatile
+// registers and exit to user mode.
+//
+
+ j KiExceptionExit // finish in exception exit code
+
+//
+// An attempt was made to enter user mode for a thread that has no user mode
+// context. Generate a bug check.
+//
+
+20: li a0,NO_USER_MODE_CONTEXT // set bug check code
+ jal KeBugCheck // call bug check routine
+
+ .end KiThreadDispatch
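For orientation, the following is a rough C rendering of the startup sequence above;
it is a sketch under assumptions, not compilable kernel code, and SketchThreadStartup
is an illustrative name. The values in s0 - s3 are the ones stored into the context
switch frame by KiInitializeContextThread in thredini.c later in this diff (the
context record pointer doubles as the user context flag).

/* Sketch only: s0 = ContextRecord, s1 = StartContext, s2 = StartRoutine,
   s3 = SystemRoutine. */

VOID
SketchThreadStartup (
    IN PVOID ContextRecord,
    IN PVOID StartContext,
    IN PKSTART_ROUTINE StartRoutine,
    IN PKSYSTEM_ROUTINE SystemRoutine
    )
{
    KeLowerIrql(APC_LEVEL);                     /* lower from the context switch IRQL */
    SystemRoutine(StartRoutine, StartContext);
    if (ContextRecord != NULL) {

        /* Restore the nonvolatile state and exit to user mode through the
           common exception exit path (KiExceptionExit). */

    } else {
        KeBugCheck(NO_USER_MODE_CONTEXT);
    }
}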
diff --git a/private/ntos/ke/mips/thredini.c b/private/ntos/ke/mips/thredini.c
new file mode 100644
index 000000000..24c3a47be
--- /dev/null
+++ b/private/ntos/ke/mips/thredini.c
@@ -0,0 +1,285 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ thredini.c
+
+Abstract:
+
+ This module implements the machine dependent functions to set the initial
+ context and data alignment handling mode for a process or thread object.
+
+Author:
+
+ David N. Cutler (davec) 1-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macros are used to check that an input object is
+// really the proper type.
+//
+
+#define ASSERT_PROCESS(E) { \
+ ASSERT((E)->Header.Type == ProcessObject); \
+}
+
+#define ASSERT_THREAD(E) { \
+ ASSERT((E)->Header.Type == ThreadObject); \
+}
+
+VOID
+KiInitializeContextThread (
+ IN PKTHREAD Thread,
+ IN PKSYSTEM_ROUTINE SystemRoutine,
+ IN PKSTART_ROUTINE StartRoutine OPTIONAL,
+ IN PVOID StartContext OPTIONAL,
+ IN PCONTEXT ContextRecord OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the machine dependent context of a thread object.
+
+ N.B. This function does not check the accessibility of the context record.
+ It is assumed that the caller of this routine is either prepared to
+ handle access violations or has probed and copied the context record
+ as appropriate.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ SystemRoutine - Supplies a pointer to the system function that is to be
+ called when the thread is first scheduled for execution.
+
+ StartRoutine - Supplies an optional pointer to a function that is to be
+ called after the system has finished initializing the thread. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ StartContext - Supplies an optional pointer to an arbitrary data structure
+ which will be passed to the StartRoutine as a parameter. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ ContextRecord - Supplies an optional pointer to a context frame which
+ contains the initial user mode state of the thread. This parameter is
+ specified if the thread is a user thread and will execute in user mode.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKEXCEPTION_FRAME CxFrame;
+ PKEXCEPTION_FRAME ExFrame;
+ ULONG InitialStack;
+ PKTRAP_FRAME TrFrame;
+
+ //
+ // If a context frame is specified, then initialize a trap frame and
+ // an exception frame with the specified user mode context.
+ //
+
+ InitialStack = (LONG)Thread->InitialStack;
+ if (ARGUMENT_PRESENT(ContextRecord)) {
+ TrFrame = (PKTRAP_FRAME)(InitialStack - sizeof(KTRAP_FRAME));
+ ExFrame = (PKEXCEPTION_FRAME)((ULONG)TrFrame - sizeof(KEXCEPTION_FRAME));
+ CxFrame = (PKEXCEPTION_FRAME)((ULONG)ExFrame - sizeof(KEXCEPTION_FRAME));
+
+ //
+ // Zero the exception and trap frames and copy information from the
+ // specified context frame to the trap and exception frames.
+ //
+
+ RtlZeroMemory((PVOID)ExFrame, sizeof(KEXCEPTION_FRAME));
+ RtlZeroMemory((PVOID)TrFrame, sizeof(KTRAP_FRAME));
+ KeContextToKframes(TrFrame,
+ ExFrame,
+ ContextRecord,
+ ContextRecord->ContextFlags | CONTEXT_CONTROL,
+ UserMode);
+
+ //
+ // Set the saved previous processor mode in the trap frame and the
+ // previous processor mode in the thread object to user mode.
+ //
+
+ TrFrame->PreviousMode = UserMode;
+ Thread->PreviousMode = UserMode;
+
+ //
+ // Initialize the return address in the exception frame.
+ //
+
+ ExFrame->IntRa = 0;
+
+ } else {
+ ExFrame = NULL;
+ CxFrame = (PKEXCEPTION_FRAME)(InitialStack - sizeof(KEXCEPTION_FRAME));
+
+ //
+ // Set the previous mode in thread object to kernel.
+ //
+
+ Thread->PreviousMode = KernelMode;
+ }
+
+ //
+ // Initialize context switch frame and set thread start up parameters.
+ //
+
+ CxFrame->SwapReturn = (ULONG)KiThreadStartup;
+ if (ExFrame == NULL) {
+ CxFrame->IntS8 = (ULONG)ExFrame;
+
+ } else {
+ CxFrame->IntS8 = (ULONG)TrFrame;
+ }
+
+ CxFrame->IntS0 = (ULONG)ContextRecord;
+ CxFrame->IntS1 = (ULONG)StartContext;
+ CxFrame->IntS2 = (ULONG)StartRoutine;
+ CxFrame->IntS3 = (ULONG)SystemRoutine;
+ Thread->KernelStack = (PVOID)CxFrame;
+ return;
+}
+
+BOOLEAN
+KeSetAutoAlignmentProcess (
+ IN PRKPROCESS Process,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ process and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the process. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_PROCESS(Process);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Process->AutoAlignment;
+ Process->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
+
+BOOLEAN
+KeSetAutoAlignmentThread (
+ IN PKTHREAD Thread,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ thread and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the thread. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_THREAD(Thread);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Thread->AutoAlignment;
+ Thread->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
diff --git a/private/ntos/ke/mips/timindex.s b/private/ntos/ke/mips/timindex.s
new file mode 100644
index 000000000..cf00f07be
--- /dev/null
+++ b/private/ntos/ke/mips/timindex.s
@@ -0,0 +1,111 @@
+// TITLE("Compute Timer Table Index")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// timindex.s
+//
+// Abstract:
+//
+// This module implements the code necessary to compute the timer table
+// index for a timer.
+//
+// Author:
+//
+// David N. Cutler (davec) 17-May-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KiTimeIncrementReciprocal 2 * 4
+ .extern KiTimeIncrementShiftCount 1
+
+ SBTTL("Compute Timer Table Index")
+//++
+//
+// ULONG
+// KiComputeTimerTableIndex (
+// IN LARGE_INTEGER Interval,
+// IN LARGE_INTEGER CurrentTime,
+// IN PKTIMER Timer
+// )
+//
+// Routine Description:
+//
+// This function computes the timer table index for the specified timer
+// object and stores the due time in the timer object.
+//
+// N.B. The interval parameter is guaranteed to be negative since it is
+// expressed as relative time.
+//
+// The formula for due time calculation is:
+//
+// Due Time = Current time - Interval
+//
+// The formula for the index calculation is:
+//
+// Index = (Due Time / Maximum Time) & (Table Size - 1)
+//
+// The due time division is performed using reciprocal multiplication.
+//
+// Arguments:
+//
+// Interval (a0, a1) - Supplies the relative time at which the timer is
+// to expire.
+//
+// CurrentTime (a2, a3) - Supplies the current interrupt time.
+//
+// Timer (4 * 4(sp)) - Supplies a pointer to a dispatcher object of type timer.
+//
+// Return Value:
+//
+// The time table index is returned as the function value and the due
+// time is stored in the timer object.
+//
+//--
+
+ LEAF_ENTRY(KiComputeTimerTableIndex)
+
+ subu t0,a2,a0 // subtract low parts
+ subu t1,a3,a1 // subtract high parts
+ sltu t2,a2,a0 // generate borrow from high part
+ subu t1,t1,t2 // subtract borrow
+ lw a0,4 * 4(sp) // get address of timer object
+ ld t2,KiTimeIncrementReciprocal // get 64-bit magic divisor
+ dsll t0,t0,32 // isolate low 32-bits of due time
+ dsrl t0,t0,32 //
+ dsll t1,t1,32 // isolate high 32-bits of due time
+ or t3,t1,t0 // merge low and high parts of due time
+ sd t3,TiDueTime(a0) // set due time of timer object
+
+//
+// Compute the product of the due time with the magic divisor.
+//
+
+ dmultu t2,t3 // compute 128-bit product
+ lbu v1,KiTimeIncrementShiftCount // get shift count
+ mfhi v0 // get high 64-bits of product
+
+//
+// Right shift the result by the specified shift count and isolate the timer
+// table index.
+//
+
+ dsrl v0,v0,v1 // shift low half right count bits
+ and v0,v0,TIMER_TABLE_SIZE - 1 // compute index value
+ j ra // return
+
+ .end KiComputeTimerTableIndex
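A rough C equivalent of the computation above, offered as a sketch only. It assumes a
compiler 128-bit integer extension (unsigned __int128) purely for illustration; the
reciprocal and shift count are the values computed by KiComputeReciprocal from
KeMaximumIncrement, and SketchComputeTimerTableIndex is an illustrative name.

ULONG
SketchComputeTimerTableIndex (
    IN LONGLONG Interval,                       /* negative relative time */
    IN ULONGLONG CurrentTime,
    IN ULONGLONG Reciprocal,                    /* KiTimeIncrementReciprocal */
    IN ULONG ShiftCount                         /* KiTimeIncrementShiftCount */
    )
{
    ULONGLONG DueTime;
    unsigned __int128 Product;

    DueTime = CurrentTime - Interval;           /* Due Time = Current Time - Interval */
    Product = (unsigned __int128)DueTime * Reciprocal;
    return (ULONG)(((ULONGLONG)(Product >> 64) >> ShiftCount) &
                   (TIMER_TABLE_SIZE - 1));
}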
diff --git a/private/ntos/ke/mips/vdm.c b/private/ntos/ke/mips/vdm.c
new file mode 100644
index 000000000..a92acd9bd
--- /dev/null
+++ b/private/ntos/ke/mips/vdm.c
@@ -0,0 +1,52 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ VDM.C
+
+Abstract:
+
+ This routine has a stub for the x86 only api NtStartVdmExecution.
+
+Author:
+
+ Dave Hastings (daveh) 2 Apr 1991
+
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+NtInitializeVDM(
+ VOID
+ )
+{
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+NtVdmStartExecution (
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns STATUS_NOT_IMPLEMENTED
+
+Arguments:
+
+Return Value:
+
+ STATUS_NOT_IMPLEMENTED
+--*/
+{
+
+ return STATUS_NOT_IMPLEMENTED;
+
+}
diff --git a/private/ntos/ke/mips/x4ctxsw.s b/private/ntos/ke/mips/x4ctxsw.s
new file mode 100644
index 000000000..fcda60c50
--- /dev/null
+++ b/private/ntos/ke/mips/x4ctxsw.s
@@ -0,0 +1,1497 @@
+// TITLE("Context Swap")
+//++
+//
+// Copyright (c) 1991 - 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// x4ctxsw.s
+//
+// Abstract:
+//
+// This module implements the MIPS machine dependent code necessary to
+// field the dispatch interrupt and to perform kernel initiated context
+// switching.
+//
+// Author:
+//
+// David N. Cutler (davec) 1-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+//#define _COLLECT_SWITCH_DATA_ 1
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeNumberProcessIds 4
+ .extern KeTickCount 3 * 4
+ .extern KiContextSwapLock 4
+ .extern KiDispatcherLock 4
+ .extern KiIdleSummary 4
+ .extern KiReadySummary 4
+ .extern KiSynchIrql 4
+ .extern KiWaitInListHead 2 * 4
+ .extern KiWaitOutListHead 2 * 4
+
+ SBTTL("Switch To Thread")
+//++
+//
+// NTSTATUS
+// KiSwitchToThread (
+// IN PKTHREAD NextThread
+// IN ULONG WaitReason,
+// IN ULONG WaitMode,
+// IN PKEVENT WaitObject
+// )
+//
+// Routine Description:
+//
+// This function performs an optimal switch to the specified target thread
+// if possible. No timeout is associated with the wait, thus the issuing
+// thread will wait until the wait event is signaled or an APC is delivered.
+//
+// N.B. This routine is called with the dispatcher database locked.
+//
+// N.B. The wait IRQL is assumed to be set for the current thread and the
+// wait status is assumed to be set for the target thread.
+//
+// N.B. It is assumed that if a queue is associated with the target thread,
+// then the concurrency count has been incremented.
+//
+// N.B. Control is returned from this function with the dispatcher database
+// unlocked.
+//
+// Arguments:
+//
+// NextThread - Supplies a pointer to a dispatcher object of type thread.
+//
+// WaitReason - Supplies the reason for the wait operation.
+//
+// WaitMode - Supplies the processor wait mode.
+//
+// WaitObject - Supplies a pointer to a dispatcher object of type event
+// or semaphore.
+//
+// Return Value:
+//
+// The wait completion status. A value of STATUS_SUCCESS is returned if
+// the specified object satisfied the wait. A value of STATUS_USER_APC is
+// returned if the wait was aborted to deliver a user APC to the current
+// thread.
+//--
+
+ NESTED_ENTRY(KiSwitchToThread, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s2
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+
+ PROLOGUE_END
+
+//
+// Save the wait reason, the wait mode, and the wait object address.
+//
+
+ sw a1,ExceptionFrameLength + (1 * 4)(sp) // save wait reason
+ sw a2,ExceptionFrameLength + (2 * 4)(sp) // save wait mode
+ sw a3,ExceptionFrameLength + (3 * 4)(sp) // save wait object address
+
+//
+// If the target thread's kernel stack is resident, the target thread's
+// process is in the balance set, the target thread can run on the
+// current processor, and another thread has not already been selected
+// to run on the current processor, then do a direct dispatch to the
+// target thread bypassing all the general wait logic, thread priorities
+// permitting.
+//
+
+ lw t9,ThApcState + AsProcess(a0) // get target process address
+ lbu v0,ThKernelStackResident(a0) // get kernel stack resident
+ lw s0,KiPcr + PcPrcb(zero) // get address of PRCB
+ lbu v1,PrState(t9) // get target process state
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ beq zero,v0,LongWay // if eq, kernel stack not resident
+ xor v1,v1,ProcessInMemory // check if process in memory
+ move s2,a0 // set target thread address
+ bne zero,v1,LongWay // if ne, process not in memory
+
+#if !defined(NT_UP)
+
+ lw t0,PbNextThread(s0) // get address of next thread
+ lbu t1,ThNextProcessor(s1) // get current processor number
+ lw t2,ThAffinity(s2) // get target thread affinity
+ lw t3,KiPcr + PcSetMember(zero) // get processor set member
+ bne zero,t0,LongWay // if ne, next thread selected
+ and t3,t3,t2 // check for compatible affinity
+ beq zero,t3,LongWay // if eq, affinity not compatible
+
+#endif
+
+//
+// Compute the new thread priority.
+//
+
+ lbu t4,ThPriority(s1) // get client thread priority
+ lbu t5,ThPriority(s2) // get server thread priority
+ sltu v0,t4,LOW_REALTIME_PRIORITY // check if realtime client
+ sltu v1,t5,LOW_REALTIME_PRIORITY // check if realtime server
+ beq zero,v0,60f // if eq, realtime client
+ lbu t6,ThPriorityDecrement(s2) // get priority decrement value
+ lbu t7,ThBasePriority(s2) // get client base priority
+ beq zero,v1,50f // if eq, realtime server
+ addu t8,t7,1 // compute boosted priority
+ bne zero,t6,30f // if ne, server boost active
+
+//
+// Both the client and the server are not realtime and a priority boost
+// is not currently active for the server. Under these conditions an
+// optimal switch to the server can be performed if the base priority
+// of the server is above a minimum threshold or the boosted priority
+// of the server is not less than the client priority.
+//
+
+ sltu v0,t8,t4 // check if high enough boost
+ sltu v1,t8,LOW_REALTIME_PRIORITY // check if less than realtime
+ bne zero,v0,20f // if ne, boosted priority less
+ sb t8,ThPriority(s2) // assume boosted priority is okay
+ bne zero,v1,70f // if ne, less than realtime
+ li t8,LOW_REALTIME_PRIORITY - 1 // set high server priority
+ sb t8,ThPriority(s2) //
+ b 70f //
+
+//
+// The boosted priority of the server is less than the current priority of
+// the client. If the server base priority is above the required threshold,
+// then an optimal switch to the server can be performed by temporarily
+// raising the priority of the server to that of the client.
+//
+
+20: sltu v0,t7,BASE_PRIORITY_THRESHOLD // check if above threshold
+ subu t8,t4,t7 // compute priority decrement value
+ bne zero,v0,LongWay // if ne, priority below threshold
+ li t7,ROUND_TRIP_DECREMENT_COUNT // get system decrement count value
+ sb t8,ThPriorityDecrement(s2) // set priority decrement value
+ sb t4,ThPriority(s2) // set current server priority
+ sb t7,ThDecrementCount(s2) // set server decrement count
+ b 70f //
+
+//
+// A server boost has previously been applied to the server thread. Count
+// down the decrement count to determine if another optimal server switch
+// is allowed.
+//
+
+30: lbu t8,ThDecrementCount(s2) // decrement server count value
+ subu t8,t8,1 //
+ sb t8,ThDecrementCount(s2) // store updated decrement count
+ beq zero,t8,40f // if eq, no more switches allowed
+
+//
+// Another optimal switch to the server is allowed provided that the
+// server priority is not less than the client priority.
+//
+
+ sltu v0,t5,t4 // check if server lower priority
+ beq zero,v0,70f // if eq, server not lower priority
+ b LongWay //
+
+//
+// The server has exhausted the number of times an optimal switch may
+// be performed without reducing its priority. Reduce the priority of
+// the server to its original unboosted value minus one.
+//
+
+40: sb zero,ThPriorityDecrement(s2) // clear server priority decrement
+ sb t7,ThPriority(s2) // set server priority to base
+ b LongWay //
+
+//
+// The client is not realtime and the server is realtime. An optimal switch
+// to the server can be performed.
+//
+
+50: lb t8,PrThreadQuantum(t9) // get process quantum value
+ b 65f //
+
+//
+// The client is realtime. In order for an optimal switch to occur, the
+// server must also be realtime and run at a high or equal priority.
+//
+
+60: sltu v0,t5,t4 // check if server is lower priority
+ lb t8,PrThreadQuantum(t9) // get process quantum value
+ bne zero,v0,LongWay // if ne, server is lower priority
+65: sb t8,ThQuantum(s2) // set server thread quantum
+
+//
+// Set the next processor for the server thread.
+//
+
+70: //
+
+#if !defined(NT_UP)
+
+ sb t1,ThNextProcessor(s2) // set server next processor number
+
+#endif
+
+//
+// Set the address of the wait block list in the client thread, initialize
+// the event wait block, and insert the wait block in the client event wait list.
+//
+
+ addu t0,s1,EVENT_WAIT_BLOCK_OFFSET // compute wait block address
+ sw t0,ThWaitBlockList(s1) // set address of wait block list
+ sw zero,ThWaitStatus(s1) // set initial wait status
+ sw a3,WbObject(t0) // set address of wait object
+ sw t0,WbNextWaitBlock(t0) // set next wait block address
+ lui t1,WaitAny // get wait type and wait key
+ sw t1,WbWaitKey(t0) // set wait key and wait type
+ addu t1,a3,EvWaitListHead // compute wait object listhead address
+ lw t2,LsBlink(t1) // get backward link of listhead
+ addu t3,t0,WbWaitListEntry // compute wait block list entry address
+ sw t3,LsBlink(t1) // set backward link of listhead
+ sw t3,LsFlink(t2) // set forward link in last entry
+ sw t1,LsFlink(t3) // set forward link in wait entry
+ sw t2,LsBlink(t3) // set backward link in wait entry
+
+//
+// Set the client thread wait parameters, set the thread state to Waiting,
+// and insert the thread in the proper wait list.
+//
+
+ sb zero,ThAlertable(s1) // set alertable FALSE.
+ sb a1,ThWaitReason(s1) // set wait reason
+ sb a2,ThWaitMode(s1) // set the wait mode
+ lb a3,ThEnableStackSwap(s1) // get kernel stack swap enable
+ lw t1,KeTickCount + 0 // get low part of tick count
+ sw t1,ThWaitTime(s1) // set thread wait time
+ li t0,Waiting // set thread state
+ sb t0,ThState(s1) //
+ la t1,KiWaitInListHead // get address of wait in listhead
+ beq zero,a2,75f // if eq, wait mode is kernel
+ beq zero,a3,75f // if eq, kernel stack swap disabled
+ sltu t0,t4,LOW_REALTIME_PRIORITY + 9 // check if priority in range
+ bne zero,t0,76f // if ne, thread priority in range
+75: la t1,KiWaitOutListHead // get address of wait out listhead
+76: lw t2,LsBlink(t1) // get backlink of wait listhead
+ addu t3,s1,ThWaitListEntry // compute wait list entry address
+ sw t3,LsBlink(t1) // set backward link of listhead
+ sw t3,LsFlink(t2) // set forward link in last entry
+ sw t1,LsFlink(t3) // set forward link in wait entry
+ sw t2,LsBlink(t3) // set backward link in wait entry
+
+//
+// If the current thread is processing a queue entry, then attempt to
+// activate another thread that is blocked on the queue object.
+//
+// N.B. The next thread address can change if the routine to activate
+// a queue waiter is called.
+//
+
+77: lw a0,ThQueue(s1) // get queue object address
+ beq zero,a0,78f // if eq, no queue object attached
+ sw s2,PbNextThread(s0) // set next thread address
+ jal KiActivateWaiterQueue // attempt to activate a blocked thread
+ lw s2,PbNextThread(s0) // get next thread address
+ sw zero,PbNextThread(s0) // set next thread address to NULL
+78: sw s2,PbCurrentThread(s0) // set address of current thread object
+ jal SwapContext // swap context
+
+//
+// Lower IRQL to its previous level.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+// N.B. The register s2 contains the address of the new thread on return.
+//
+
+ lw v0,ThWaitStatus(s2) // get wait completion status
+ lbu a0,ThWaitIrql(s2) // get original IRQL
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+//
+// If the wait was not interrupted to deliver a kernel APC, then return the
+// completion status.
+//
+
+ xor v1,v0,STATUS_KERNEL_APC // check if awakened for kernel APC
+ bne zero,v1,90f // if ne, normal wait completion
+
+//
+// Disable interrupts and attempt to acquire the dispatcher database lock.
+//
+
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu s2,KiSynchIrql // get new IRQL level
+
+79: DISABLE_INTERRUPTS(t4) // disable interrupts
+
+#if !defined(NT_UP)
+
+80: ll t0,KiDispatcherLock // get current lock value
+ move t1,s1 // set ownership value
+ bne zero,t0,85f // if ne, spin lock owned
+ sc t1,KiDispatcherLock // set spin lock owned
+ beq zero,t1,80b // if eq, store conditional failure
+
+#endif
+
+//
+// Raise IRQL to synchronization level and save wait IRQL.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ lbu t1,KiPcr + PcIrqlTable(s2) // get translation table entry value
+ li t2,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t1,t1,PSR_INTMASK // shift table entry into position
+ lbu t3,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ and t4,t4,t2 // clear current interrupt enables
+ or t4,t4,t1 // set new interrupt enables
+ sb s2,KiPcr + PcCurrentIrql(zero) // set new IRQL level
+
+ ENABLE_INTERRUPTS(t4) // enable interrupts
+
+ sb t3,ThWaitIrql(s1) // set client wait IRQL
+ b ContinueWait //
+
+#if !defined(NT_UP)
+
+85: ENABLE_INTERRUPTS(t4) // enable interrupts
+
+ b 79b // try again
+
+#endif
+
+//
+// Ready the target thread for execution and wait on the specified wait
+// object.
+//
+
+LongWay: //
+ jal KiReadyThread // ready thread for execution
+
+//
+// Continue the wait and return the wait completion status.
+//
+// N.B. The wait continuation routine is called with the dispatcher
+// database locked.
+//
+
+ContinueWait: //
+ lw a0,ExceptionFrameLength + (3 * 4)(sp) // get wait object address
+ lw a1,ExceptionFrameLength + (1 * 4)(sp) // get wait reason
+ lw a2,ExceptionFrameLength + (2 * 4)(sp) // get wait mode
+ jal KiContinueClientWait // continue client wait
+90: lw s0,ExIntS0(sp) // restore register s0 - s2
+ lw s1,ExIntS1(sp) //
+ lw s2,ExIntS2(sp) //
+ lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+ .end KiSwitchToThread
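The priority and boost rules described in the comments of KiSwitchToThread above are
condensed below as a hedged C sketch. Only the priority decision is rendered; the
kernel stack residency, process residency, affinity, and next-thread checks are
omitted, and SketchOkToSwitchDirectly is an illustrative name, not the implementation.

BOOLEAN
SketchOkToSwitchDirectly (
    IN PKTHREAD Client,
    IN PKTHREAD Server,
    IN PKPROCESS ServerProcess
    )
{
    LONG NewPriority;

    if (Client->Priority >= LOW_REALTIME_PRIORITY) {

        /* Realtime client: switch only to an equal or higher priority server. */

        if (Server->Priority < Client->Priority) {
            return FALSE;
        }

        Server->Quantum = ServerProcess->ThreadQuantum;
        return TRUE;
    }

    if (Server->Priority >= LOW_REALTIME_PRIORITY) {

        /* Realtime server, non-realtime client: switch is always allowed. */

        Server->Quantum = ServerProcess->ThreadQuantum;
        return TRUE;
    }

    if (Server->PriorityDecrement != 0) {

        /* A previous boost is still active: allow a limited number of round
           trips, and only while the server is not lower priority. */

        Server->DecrementCount -= 1;
        if (Server->DecrementCount == 0) {
            Server->PriorityDecrement = 0;
            Server->Priority = Server->BasePriority;
            return FALSE;
        }

        return (BOOLEAN)(Server->Priority >= Client->Priority);
    }

    NewPriority = Server->BasePriority + 1;
    if (NewPriority >= Client->Priority) {

        /* A one-level boost is enough; cap it below the realtime range. */

        if (NewPriority >= LOW_REALTIME_PRIORITY) {
            NewPriority = LOW_REALTIME_PRIORITY - 1;
        }

        Server->Priority = (SCHAR)NewPriority;
        return TRUE;
    }

    if (Server->BasePriority >= BASE_PRIORITY_THRESHOLD) {

        /* Temporarily raise the server to the client's priority. */

        Server->PriorityDecrement = (SCHAR)(Client->Priority - Server->BasePriority);
        Server->DecrementCount = ROUND_TRIP_DECREMENT_COUNT;
        Server->Priority = Client->Priority;
        return TRUE;
    }

    return FALSE;
}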
+
+ SBTTL("Unlock Dispatcher Database")
+//++
+//
+// VOID
+// KiUnlockDispatcherDatabase (
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This routine is entered at synchronization level with the dispatcher
+// database locked. Its function is to either unlock the dispatcher
+// database and return or initiate a context switch if another thread
+// has been selected for execution.
+//
+// N.B. This code merges with the following swap context code.
+//
+// N.B. A context switch CANNOT be initiated if the previous IRQL
+// is greater than or equal to DISPATCH_LEVEL.
+//
+// N.B. This routine is carefully written to be a leaf function. If,
+// however, a context swap should be performed, the routine is
+// switched to a nested function.
+//
+// Arguments:
+//
+// OldIrql (a0) - Supplies the IRQL when the dispatcher database
+// lock was acquired.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiUnlockDispatcherDatabase)
+
+//
+// Check if a thread has been scheduled to execute on the current processor.
+//
+
+ lw t0,KiPcr + PcPrcb(zero) // get address of PRCB
+ and a0,a0,0xff // isolate old IRQL
+ sltu t1,a0,DISPATCH_LEVEL // check if IRQL below dispatch level
+ lw t2,PbNextThread(t0) // get next thread address
+ bne zero,t2,30f // if ne, a new thread selected
+
+//
+// A new thread has not been selected to run on the current processor.
+// Release the dispatcher database lock and restore IRQL to its previous
+// level.
+//
+
+10: //
+
+#if !defined(NT_UP)
+
+ sw zero,KiDispatcherLock // set spin lock not owned
+
+#endif
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+//
+// A new thread has been selected to run on the current processor, but
+// the new IRQL is not below dispatch level. If the current processor is
+// not executing a DPC, then request a dispatch interrupt on the current
+// processor before releasing the dispatcher lock and restoring IRQL.
+//
+
+
+20: bne zero,t3,10b // if ne, DPC routine active
+
+#if !defined(NT_UP)
+
+ sw zero,KiDispatcherLock // set spin lock not owned
+
+#endif
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,cause // get exception cause register
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ or t3,t3,DISPATCH_INTERRUPT // set dispatch interrupt request
+ mtc0 t3,cause // set exception cause register
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+//
+// A new thread has been selected to run on the current processor.
+//
+// If the new IRQL is less than dispatch level, then switch to the new
+// thread.
+//
+// N.B. The jump to the code that switches to the next thread is required.
+//
+
+30: lw t3,PbDpcRoutineActive(t0) // get DPC active flag
+ beq zero,t1,20b // if eq, IRQL not below dispatch
+ j KxUnlockDispatcherDatabase //
+
+ .end KiUnlockDispatcherDatabase
+
+//
+// N.B. This routine is carefully written as a nested function. Control
+// drops into this function from above.
+//
+
+ NESTED_ENTRY(KxUnlockDispatcherDatabase, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s2
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+
+ PROLOGUE_END
+
+ move s0,t0 // set address of PRCB
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ move s2,t2 // set next thread address
+ sb a0,ThWaitIrql(s1) // save previous IRQL
+ sw zero,PbNextThread(s0) // clear next thread address
+
+//
+// Reready current thread for execution and swap context to the selected
+// thread.
+//
+// N.B. The return from the call to swap context is directly to the swap
+// thread exit.
+//
+
+ move a0,s1 // set address of previous thread object
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+ jal KiReadyThread // reready thread for execution
+ la ra,KiSwapThreadExit // set return address
+ j SwapContext // swap context
+
+ .end KxUnlockDispatcherDatabase
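For readability, the decision implemented by KiUnlockDispatcherDatabase and its
fall-through into KxUnlockDispatcherDatabase can be summarized as the sketch below.
The PRCB field names are the ones used above; the three helper calls are hypothetical
stand-ins for the inline lock release, IRQL restore, cause register write, and the
SwapContext path.

VOID
SketchUnlockDispatcherDatabase (
    IN KIRQL OldIrql,
    IN PKPRCB Prcb
    )
{
    if (Prcb->NextThread == NULL) {
        ReleaseDispatcherLockAndLowerIrql(OldIrql);     /* nothing selected to run */

    } else if (OldIrql >= DISPATCH_LEVEL) {
        if (Prcb->DpcRoutineActive == FALSE) {
            RequestDispatchInterrupt();                 /* defer switch to the dispatch interrupt */
        }

        ReleaseDispatcherLockAndLowerIrql(OldIrql);

    } else {
        RereadyCurrentThreadAndSwapContext(Prcb, OldIrql);  /* KxUnlockDispatcherDatabase path */
    }
}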
+
+ SBTTL("Swap Thread")
+//++
+//
+// VOID
+// KiSwapThread (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This routine is called to select the next thread to run on the
+// current processor and to perform a context switch to the thread.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Wait completion status (v0).
+//
+//--
+
+ NESTED_ENTRY(KiSwapThread, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s2
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+
+ PROLOGUE_END
+
+ .set noreorder
+ .set noat
+ lw s0,KiPcr + PcPrcb(zero) // get address of PRCB
+ lw t0,KiReadySummary // get ready summary
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw s2,PbNextThread(s0) // get address of next thread
+
+#if !defined(NT_UP)
+
+ lw t1,KiPcr + PcSetMember(zero) // get processor affinity mask
+ lbu v0,PbNumber(s0) // get current processor number
+ lw v1,KeTickCount + 0 // get low part of tick count
+
+#endif
+
+ srl t3,t0,16 // isolate bits <31:16> of summary
+ li t2,16 // set base bit number
+ bnel zero,s2,120f // if ne, next thread selected
+ sw zero,PbNextThread(s0) // zero address of next thread
+
+//
+// Find the highest nibble in the ready summary that contains a set bit
+// and left justify so the nibble is in bits <31:28>.
+//
+
+ bne zero,t3,10f // if ne, bits <31:16> are nonzero
+ srl t3,t3,8 // isolate bits <31:24> of summary
+ li t2,0 // set base bit number
+ srl t3,t0,8 // isolate bits <15:8> of summary
+10: bnel zero,t3,20f // if ne, bits <15:8> are nonzero
+ addu t2,t2,8 // add bit offset to nonzero byte
+20: srl t3,t0,t2 // isolate highest nonzero byte
+ addu t2,t2,3 // adjust to high bit in nibble
+ sltu t4,t3,0x10 // check if high nibble nonzero
+ xor t4,t4,1 // complement less than indicator
+ sll t4,t4,2 // multiply by nibble width
+ addu t2,t2,t4 // compute ready queue priority
+ la t3,KiDispatcherReadyListHead // get ready listhead base address
+ nor t4,t2,zero // compute left justify shift count
+ sll t4,t0,t4 // left justify ready summary to nibble
+
+//
+// If the next bit is set in the ready summary, then scan the corresponding
+// dispatcher ready queue.
+//
+
+30: bltz t4,50f // if ltz, queue contains an entry
+ sll t4,t4,1 // position next ready summary bit
+ bne zero,t4,30b // if ne, more queues to scan
+ subu t2,t2,1 // decrement ready queue priority
+
+//
+// All ready queues were scanned without finding a runnable thread so
+// default to the idle thread and set the appropriate bit in idle summary.
+//
+
+40: //
+
+#if defined(_COLLECT_SWITCH_DATA_)
+
+ la t0,KeThreadSwitchCounters // get switch counters address
+ lw v0,TwSwitchToIdle(t0) // increment switch to idle count
+ addu v0,v0,1 //
+ sw v0,TwSwitchToIdle(t0) //
+
+#endif
+
+#if defined(NT_UP)
+
+ li t0,1 // get current idle summary
+#else
+
+ lw t0,KiIdleSummary // get current idle summary
+ or t0,t0,t1 // set member bit in idle summary
+
+#endif
+
+ sw t0,KiIdleSummary // set new idle summary
+ b 120f //
+ lw s2,PbIdleThread(s0) // set address of idle thread
+
+//
+// If the thread can execute on the current processor, then remove it from
+// the dispatcher ready queue.
+//
+
+50: sll t5,t2,3 // compute ready listhead offset
+ addu t5,t5,t3 // compute ready queue address
+ lw t6,LsFlink(t5) // get address of first queue entry
+ subu s2,t6,ThWaitListEntry // compute address of thread object
+
+#if !defined(NT_UP)
+
+60: lw t7,ThAffinity(s2) // get thread affinity
+ lw t8,ThWaitTime(s2) // get time of thread ready
+ lbu t9,ThNextProcessor(s2) // get last processor number
+ and t7,t7,t1 // check for compatible thread affinity
+ bne zero,t7,70f // if ne, thread affinity compatible
+ subu t8,v1,t8 // compute length of wait
+ lw t6,LsFlink(t6) // get address of next entry
+ bne t5,t6,60b // if ne, not end of list
+ subu s2,t6,ThWaitListEntry // compute address of thread object
+ bne zero,t4,30b // if ne, more queues to scan
+ subu t2,t2,1 // decrement ready queue priority
+ b 40b //
+ nop // fill
+
+//
+// If the thread last ran on the current processor, the processor is the
+// ideal processor for the thread, the thread has been waiting for longer
+// than a quantum, or its priority is greater than low realtime plus 9,
+// then select the thread. Otherwise, an attempt is made to find a more
+// appropriate candidate.
+//
+
+70: lbu a0,ThIdealProcessor(s2) // get ideal processor number
+ beq v0,t9,110f // if eq, last processor number match
+ sltu t7,t2,LOW_REALTIME_PRIORITY + 9 // check if priority in range
+ beq v0,a0,100f // if eq, ideal processor number match
+ sltu t8,t8,READY_SKIP_QUANTUM + 1 // check if wait time exceeded
+ and t8,t8,t7 // check if priority and time match
+ beql zero,t8,110f // if eq, priority or time mismatch
+ sb v0,ThNextProcessor(s2) // set next processor number
+
+//
+// Search forward in the ready queue until the end of the list is reached
+// or a more appropriate thread is found.
+//
+
+ lw t7,LsFlink(t6) // get address of next entry
+80: beq t5,t7,100f // if eq, end of list
+ subu a1,t7,ThWaitListEntry // compute address of thread object
+ lw a2,ThAffinity(a1) // get thread affinity
+ lw t8,ThWaitTime(a1) // get time of thread ready
+ lbu t9,ThNextProcessor(a1) // get last processor number
+ lbu a0,ThIdealProcessor(a1) // get ideal processor number
+ and a2,a2,t1 // check for compatible thread affinity
+ subu t8,v1,t8 // compute length of wait
+ beq zero,a2,85f // if eq, thread affinity not compatible
+ sltu t8,t8,READY_SKIP_QUANTUM + 1 // check if wait time exceeded
+ beql v0,t9,90f // if eq, processor number match
+ move s2,a1 // set thread address
+ beql v0,a0,90f // if eq, processor number match
+ move s2,a1 // set thread address
+85: bne zero,t8,80b // if ne, wait time not exceeded
+ lw t7,LsFlink(t7) // get address of next entry
+ b 110f //
+ sb v0,ThNextProcessor(s2) // set next processor number
+
+90: move t6,t7 // set list entry address
+100: sb v0,ThNextProcessor(s2) // set next processor number
+ .set at
+ .set reorder
+
+110: //
+
+#if defined(_COLLECT_SWITCH_DATA_)
+
+ la v1,KeThreadSwitchCounters + TwFindIdeal// get counter address
+ lbu a0,ThIdealProcessor(s2) // get ideal processor number
+ lbu t9,ThLastprocessor(s2) // get last processor number
+ beq v0,a0,115f // if eq, processor number match
+ addu v1,v1,TwFindLast - TwFindIdeal // compute counter address
+ beq v0,t9,115f // if eq, processor number match
+ addu v1,v1,TwFindAny - TwFindLast // compute counter address
+115: lw v0,0(v1) // increment appropriate counter
+ addu v0,v0,1 //
+ sw v0,0(v1) //
+
+#endif
+
+#endif
+
+//
+// Remove the selected thread from the ready queue.
+//
+
+ lw t7,LsFlink(t6) // get list entry forward link
+ lw t8,LsBlink(t6) // get list entry backward link
+ li t1,1 // set bit for mask generation
+ sw t7,LsFlink(t8) // set forward link in previous entry
+ sw t8,LsBlink(t7) // set backward link in next entry
+ bne t7,t8,120f // if ne, list is not empty
+ sll t1,t1,t2 // compute ready summary set member
+ xor t1,t1,t0 // clear ready summary bit
+ sw t1,KiReadySummary //
+
+//
+// Swap context to the next thread.
+//
+
+ .set noreorder
+ .set noat
+120: jal SwapContext // swap context
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+ .set at
+ .set reorder
+
+//
+// Lower IRQL, deallocate context frame, and return wait completion status.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+// N.B. The register v0 contains the kernel APC pending state on return.
+//
+// N.B. The register s2 contains the address of the new thread on return.
+//
+
+ ALTERNATE_ENTRY(KiSwapThreadExit)
+
+ lw s1,ThWaitStatus(s2) // get wait completion status
+ lbu a0,ThWaitIrql(s2) // get original wait IRQL
+ sltu v1,a0,APC_LEVEL // check if wait IRQL is zero
+ and v1,v1,v0 // check if IRQL and APC pending set
+ beq zero,v1,10f // if eq, IRQL or pending not set
+
+//
+// Lower IRQL to APC level and dispatch APC interrupt.
+//
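+//
+// N.B. A rough C sketch of the sequence below, assuming hypothetical
+//      ReadPsr/WritePsr helpers for the mfc0/mtc0 pair:
+//
+//          Psr = ReadPsr();
+//          Psr = (Psr & ~(0xFF << PSR_INTMASK)) |
+//                ((ULONG)PCR->IrqlTable[APC_LEVEL] << PSR_INTMASK);
+//          PCR->CurrentIrql = APC_LEVEL;       // APC pending is also cleared in cause
+//          WritePsr(Psr);                      // enables interrupts above APC_LEVEL
+//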
+
+ .set noreorder
+ .set noat
+ li a0,APC_LEVEL // set new IRQL level
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ li t2,1 << PSR_CU1 // get coprocessor 1 enable bit
+ mfc0 t3,psr // get current PSR
+ mtc0 t2,psr
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ mfc0 t4,cause // get exception cause register
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ and t4,t4,DISPATCH_INTERRUPT // clear APC interrupt pending
+ mtc0 t4,cause //
+ mtc0 t3,psr // enable interrupts
+ .set at
+ .set reorder
+
+ lw t0,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t1,PbApcBypassCount(t0) // increment the APC bypass count
+ addu t1,t1,1 //
+ sw t1,PbApcBypassCount(t0) // store result
+ move a0,zero // set previous mode to kernel
+ move a1,zero // set exception frame address
+ move a2,zero // set trap frame address
+ jal KiDeliverApc // deliver kernel mode APC
+ move a0,zero // set original wait IRQL
+
+//
+// Lower IRQL to wait level, set return status, restore registers, and
+// return.
+//
+
+ .set noreorder
+ .set noat
+10: lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ li t2,1 << PSR_CU1 // get coprocessor 1 enable bit
+ mfc0 t3,psr // get current PSR
+ mtc0 t2,psr
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ mtc0 t3,psr // enable interrupts
+ .set at
+ .set reorder
+
+ move v0,s1 // set return status
+ lw s0,ExIntS0(sp) // restore register s0 - s2
+ lw s1,ExIntS1(sp) //
+ lw s2,ExIntS2(sp) //
+ lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+ .end KiSwapThread
+
+ SBTTL("Dispatch Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a software interrupt generated
+// at DISPATCH_LEVEL. Its function is to process the Deferred Procedure Call
+// (DPC) list, and then perform a context switch if a new thread has been
+// selected for execution on the processor.
+//
+// This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+// database unlocked. When a return to the caller finally occurs, the
+// IRQL remains at DISPATCH_LEVEL, and the dispatcher database is still
+// unlocked.
+//
+// N.B. On entry to this routine all integer registers and the volatile
+// floating registers have been saved.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to the base of a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
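+//
+// N.B. A condensed, illustrative C sketch of the flow below (helper names are
+//      not actual kernel routines):
+//
+//          PollDpcList:
+//              KiRetireDpcList();                      // on the interrupt stack
+//              if (PCR->QuantumEnd) {
+//                  NewThread = KiQuantumEnd();         // lock held if non-NULL
+//              } else if ((NewThread = Prcb->NextThread) != NULL) {
+//                  if (!TryLock(&KiDispatcherLock)) goto PollDpcList;
+//                  RaiseIrqlToSynchLevel();
+//              }
+//              if (NewThread != NULL) {
+//                  KiReadyThread(OldThread);           // reready old thread
+//                  SwapContext(OldThread, NewThread);
+//              }
+//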
+
+ NESTED_ENTRY(KiDispatchInterrupt, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+ lw s0,KiPcr + PcPrcb(zero) // get address of PRCB
+
+//
+// Process the deferred procedure call list.
+//
+
+PollDpcList: //
+
+ DISABLE_INTERRUPTS(s1) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t0,cause // get exception cause register
+ and t0,t0,APC_INTERRUPT // clear dispatch interrupt pending
+ mtc0 t0,cause // set exception cause register
+ .set at
+ .set reorder
+
+ addu a1,s0,PbDpcListHead // compute DPC listhead address
+ lw a0,LsFlink(a1) // get address of next entry
+ beq a0,a1,20f // if eq, DPC list is empty
+
+//
+// Switch to interrupt stack to process the DPC list.
+//
+
+ lw t1,KiPcr + PcInterruptStack(zero) // get interrupt stack address
+ subu t2,t1,ExceptionFrameLength // allocate exception frame
+ sw sp,ExIntS4(t2) // save old stack pointer
+ sw zero,ExIntRa(t2) // clear return address
+ sw t1,KiPcr + PcInitialStack(zero) // set initial stack address
+ subu t1,t1,KERNEL_STACK_SIZE // compute and set stack limit
+ sw t1,KiPcr + PcStackLimit(zero) //
+ move sp,t2 // set new stack pointer
+ sw sp,KiPcr + PcOnInterruptStack(zero) // set stack indicator
+ move v0,s1 // set previous PSR value
+ jal KiRetireDpcList // process the DPC list
+
+//
+// Switch back to previous stack and restore the initial stack limit.
+//
+
+ lw t1,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t2,ThInitialStack(t1) // get initial stack address
+ lw t3,ThStackLimit(t1) // get stack limit
+ lw sp,ExIntS4(sp) // restore stack pointer
+ sw t2,KiPcr + PcInitialStack(zero) // set initial stack address
+ sw t3,KiPcr + PcStackLimit(zero) // set stack limit
+ sw zero,KiPcr + PcOnInterruptStack(zero) // clear stack indicator
+
+20: ENABLE_INTERRUPTS(s1) // enable interrupts
+
+//
+// Check to determine if quantum end has occurred.
+//
+// N.B. If a new thread is selected as a result of processing a quantum
+// end request, then the new thread is returned with the dispatcher
+// database locked. Otherwise, NULL is returned with the dispatcher
+// database unlocked.
+//
+
+ lw t0,KiPcr + PcQuantumEnd(zero) // get quantum end indicator
+ bne zero,t0,70f // if ne, quantum end request
+
+//
+// Check to determine if a new thread has been selected for execution on
+// this processor.
+//
+
+ lw s2,PbNextThread(s0) // get address of next thread object
+ beq zero,s2,50f // if eq, no new thread selected
+
+//
+// Disable interrupts and attempt to acquire the dispatcher database lock.
+//
+
+ lbu a0,KiSynchIrql // get new IRQL value
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+#if !defined(NT_UP)
+
+30: ll t0,KiDispatcherLock // get current lock value
+ move t1,s2 // set lock ownership value
+ bne zero,t0,60f // if ne, spin lock owned
+ sc t1,KiDispatcherLock // set spin lock owned
+ beq zero,t1,30b // if eq, store conditional failed
+
+#endif
+
+//
+// Raise IRQL to synchronization level.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+40: lw s1,KiPcr + PcCurrentThread(zero) // get current thread object address
+ lw s2,PbNextThread(s0) // get address of next thread object
+ sw zero,PbNextThread(s0) // clear address of next thread object
+
+//
+// Reready current thread for execution and swap context to the selected thread.
+//
+
+ move a0,s1 // set address of previous thread object
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+ jal KiReadyThread // reready thread for execution
+ jal SwapContext // swap context
+
+//
+// Restore saved registers, deallocate stack frame, and return.
+//
+
+50: lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+//
+// Enable interrupts and check DPC queue.
+//
+
+#if !defined(NT_UP)
+
+60: ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ j PollDpcList //
+
+#endif
+
+//
+// Process quantum end event.
+//
+// N.B. If the quantum end code returns a NULL value, then no next thread
+// has been selected for execution. Otherwise, a next thread has been
+// selected and the dispatcher database is locked.
+//
+
+70: sw zero,KiPcr + PcQuantumEnd(zero) // clear quantum end indicator
+ jal KiQuantumEnd // process quantum end request
+ bne zero,v0,40b // if ne, next thread selected
+ lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+ .end KiDispatchInterrupt
+
+ SBTTL("Swap Context to Next Thread")
+//++
+//
+// Routine Description:
+//
+// This routine is called to swap context from one thread to the next.
+//
+// Arguments:
+//
+// s0 - Address of Processor Control Block.
+// s1 - Address of previous thread object.
+// s2 - Address of next thread object.
+// sp - Pointer to a exception frame.
+//
+// Return value:
+//
+// v0 - Kernel APC pending.
+// s0 - Address of Processor Control Block.
+// s2 - Address of current thread object.
+//
+//--
+
+ NESTED_ENTRY(SwapContext, 0, zero)
+
+//
+// Set the thread state to running.
+//
+
+ li t0,Running // set thread state to running
+ sb t0,ThState(s2) //
+
+//
+// Acquire the context swap lock so the address space of the old process
+// cannot be deleted and then release the dispatcher database lock.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
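+//
+// N.B. The ll/sc sequence below is the standard MIPS interlocked acquire; in
+//      rough C (LoadLinked/StoreConditional stand in for the ll/sc pair):
+//
+//          do {
+//              Owner = LoadLinked(&KiContextSwapLock);
+//          } while ((Owner != 0) ||                    // already owned, retry
+//                   !StoreConditional(&KiContextSwapLock, NewThread));
+//          KiDispatcherLock = 0;                       // release dispatcher lock
+//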
+
+#if !defined(NT_UP)
+
+10: ll t0,KiContextSwapLock // get current lock value
+ move t1,s2 // set ownership value
+ bne zero,t0,10b // if ne, lock already owned
+ sc t1,KiContextSwapLock // set lock ownership value
+ beq zero,t1,10b // if eq, store conditional failed
+ sw zero,KiDispatcherLock // set lock not owned
+
+#endif
+
+//
+// Save old thread nonvolatile context.
+//
+
+ sw ra,ExSwapReturn(sp) // save return address
+ sw s3,ExIntS3(sp) // save integer registers s3 - s8.
+ sw s4,ExIntS4(sp) //
+ sw s5,ExIntS5(sp) //
+ sw s6,ExIntS6(sp) //
+ sw s7,ExIntS7(sp) //
+ sw s8,ExIntS8(sp) //
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+ PROLOGUE_END
+
+//
+// Accumulate the total time spent in a thread.
+//
+
+#if defined(PERF_DATA)
+
+ addu a0,sp,ExFltF20 // compute address of result
+ move a1,zero // set address of optional frequency
+ jal KeQueryPerformanceCounter // query performance counter
+ lw t0,ExFltF20(sp) // get current cycle count
+ lw t1,ExFltF20 + 4(sp) //
+ lw t2,PbStartCount(s0) // get starting cycle count
+ lw t3,PbStartCount + 4(s0) //
+ sw t0,PbStartCount(s0) // set starting cycle count
+ sw t1,PbStartCount + 4(s0) //
+ lw t4,EtPerformanceCountLow(s1) // get accumulated cycle count
+ lw t5,EtPerformanceCountHigh(s1) //
+ subu t6,t0,t2 // subtract low parts
+ subu t7,t1,t3 // subtract high parts
+ sltu v0,t0,t2 // generate borrow from high part
+ subu t7,t7,v0 // subtract borrow
+ addu t6,t6,t4 // add low parts
+ addu t7,t7,t5 // add high parts
+ sltu v0,t6,t4 // generate carry into high part
+ addu t7,t7,v0 // add carry
+ sw t6,EtPerformanceCountLow(s1) // set accumulated cycle count
+ sw t7,EtPerformanceCountHigh(s1) //
+
+#endif
+
+//
+// The following entry point is used to switch from the idle thread to
+// another thread.
+//
+
+ ALTERNATE_ENTRY(SwapFromIdle)
+
+#if DBG
+
+ lw t0,ThInitialStack(s1) // get initial stack address
+ lw t1,ThStackLimit(s1) // get stack limit
+ sltu t2,sp,t0 // stack within limits?
+ sltu t3,sp,t1 //
+ xor t3,t3,t2 //
+ bne zero,t3,5f // if ne, stack within limits
+ li a0,PANIC_STACK_SWITCH // set bug check code
+ move a1,t0 // set initial stack address
+ move a2,t1 // set stack limit
+ move a3,sp // set stack address
+ jal KeBugCheckEx // bug check
+
+#endif
+
+//
+// Get the old and new process object addresses.
+//
+
+5: lw s3,ThApcState + AsProcess(s2) // get new process address
+ lw s4,ThApcState + AsProcess(s1) // get old process address
+
+//
+// Save the processor state, swap stack pointers, and set the new stack
+// limits.
+//
+
+ .set noreorder
+ .set noat
+ mfc0 s7,psr // save current PSR
+ li t1,1 << PSR_CU1 // disable interrupts
+ mtc0 t1,psr // 3 cycle hazard
+ lw t2,ThInitialStack(s2) // get new initial stack pointer
+ lw t3,ThStackLimit(s2) // get new stack limit
+ sw sp,ThKernelStack(s1) // save old kernel stack pointer
+ lw sp,ThKernelStack(s2) // get new kernel stack pointer
+ ld t1,ThTeb(s2) // get user TEB and TLS array addresses
+ sw t2,KiPcr + PcInitialStack(zero) // set initial stack pointer
+ sw t3,KiPcr + PcStackLimit(zero) // set stack limit
+ sd t1,KiPcr + PcTeb(zero) // set user TEB and TLS array addresses
+
+//
+// If the new process is not the same as the old process, then swap the
+// address space to the new process.
+//
+// N.B. The context swap lock cannot be dropped until all references to the
+// old process address space are complete. This includes any possible
+// TB Misses that could occur referencing the new address space while
+// still executing in the old address space.
+//
+// N.B. The process address space swap is executed with interrupts disabled.
+//
+
+#if defined(NT_UP)
+
+ beq s3,s4,20f // if eq, old and new process match
+
+#else
+
+ beql s3,s4,20f // if eq, old and new process match
+ sw zero,KiContextSwapLock // set spin lock not owned
+
+//
+// Update the processor set masks.
+//
+
+ lw t0,KiPcr + PcSetMember(zero) // get processor set member
+ lw t2,PrActiveProcessors(s3) // get new active processor set
+ lw t1,PrActiveProcessors(s4) // get old active processor set
+ or t2,t2,t0 // set processor member in set
+ xor t1,t1,t0 // clear processor member in set
+ sw t2,PrActiveProcessors(s3) // set new active processor set
+ sw t1,PrActiveProcessors(s4) // set old active processor set
+ sw zero,KiContextSwapLock // set spin lock not owned
+
+#endif
+
+ lw s5,PrDirectoryTableBase(s3) // get page directory PDE
+ lw s6,PrDirectoryTableBase + 4(s3) // get hyper space PDE
+ .set at
+ .set reorder
+
+//
+// Allocate a new process PID. If the new PID number is greater than the
+// number of PIDs supported on the host processor, then flush the entire
+// TB and reset the PID number to zero.
+//
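+//
+// N.B. In rough C, and assuming a hypothetical WriteWiredPdrEntry helper for
+//      the tlbwi sequence that follows, the PID recycling below is:
+//
+//          Pid = PCR->CurrentPid + (1 << ENTRYHI_PID);
+//          if (Pid >= KeNumberProcessIds) {            // PIDs exhausted
+//              KiFlushRandomTb();                      // discard stale translations
+//              Pid = 0;
+//          }
+//          PCR->CurrentPid = Pid;
+//          WriteWiredPdrEntry(PDE_BASE | Pid, PageDirectoryPde, HyperSpacePde);
+//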
+
+ lw v1,KiPcr + PcCurrentPid(zero) // get current processor PID
+ lw t2,KeNumberProcessIds // get number of process id's
+ addu v1,v1,1 << ENTRYHI_PID // increment master system PID
+ sltu t2,v1,t2 // any more PIDs to allocate
+ bne zero,t2,10f // if ne, more PIDs to allocate
+
+//
+// Flush the random part of the TB.
+//
+
+ jal KiFlushRandomTb // flush random part of TB
+ move v1,zero // set next PID value
+
+//
+// Swap address space to the specified process.
+//
+
+10: sw v1,KiPcr + PcCurrentPid(zero) // set current processor PID
+ li t3,PDE_BASE // get virtual address of PDR
+ or t3,t3,v1 // merge process PID
+ li t4,PDR_ENTRY << INDEX_INDEX // set entry index for PDR
+
+ .set noreorder
+ .set noat
+ mtc0 t3,entryhi // set VPN2 and PID of TB entry
+ mtc0 s5,entrylo0 // set first PDE value
+ mtc0 s6,entrylo1 // set second PDE value
+ mtc0 t4,index // set index of PDR entry
+ nop // 1 cycle hazard
+ tlbwi // write system PDR TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+//
+// If the new thread has a kernel mode APC pending, then request an APC
+// interrupt.
+//
+
+ .set noreorder
+ .set noat
+20: lbu v0,ThApcState + AsKernelApcPending(s2) // get kernel APC pending
+ mfc0 t3,cause // get cause register contents
+ sll t2,v0,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t3,t3,t2 // merge possible APC interrupt request
+ mtc0 t3,cause // write exception cause register
+ mtc0 s7,psr // set new PSR
+ .set at
+ .set reorder
+
+//
+// Update the number of context switches for the current processor and the
+// new thread and save the address of the new thread object in the PCR.
+//
+
+ lw t0,PbContextSwitches(s0) // increment processor context switches
+ addu t0,t0,1 //
+ sw t0,PbContextSwitches(s0) //
+ lw t1,ThContextSwitches(s2) // increment thread context switches
+ addu t1,t1,1 //
+ sw t1,ThContextSwitches(s2) //
+ sw s2,KiPcr + PcCurrentThread(zero) // set address of new thread
+
+//
+// Restore new thread nonvolatile context.
+//
+
+ ldc1 f20,ExFltF20(sp) // restore floating registers f20 - f31
+ ldc1 f22,ExFltF22(sp) //
+ ldc1 f24,ExFltF24(sp) //
+ ldc1 f26,ExFltF26(sp) //
+ ldc1 f28,ExFltF28(sp) //
+ ldc1 f30,ExFltF30(sp) //
+ lw s3,ExIntS3(sp) // restore integer registers s3 - s8.
+ lw s4,ExIntS4(sp) //
+ lw s5,ExIntS5(sp) //
+ lw s6,ExIntS6(sp) //
+ lw s7,ExIntS7(sp) //
+ lw s8,ExIntS8(sp) //
+
+//
+// Set address of current thread object and return.
+//
+// N.B. The register s2 contains the address of the new thread on return.
+//
+
+ lw ra,ExSwapReturn(sp) // get return address
+ j ra // return
+
+ .end SwapContext
+
+ SBTTL("Swap Process")
+//++
+//
+// BOOLEAN
+// KiSwapProcess (
+// IN PKPROCESS NewProcess,
+// IN PKPROCESS OldProcess
+// )
+//
+// Routine Description:
+//
+// This function swaps the address space from one process to another by
+// assigning a new process id, if necessary, and loading the fixed entry
+// in the TB that maps the process page directory page.
+//
+// Arguments:
+//
+// NewProcess (a0) - Supplies a pointer to a control object of type process
+// which represents the new process that is switched to.
+//
+// OldProcess (a1) - Supplies a pointer to a control object of type process
+// which represents the old process that is switched from.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+SpArg: .space 4 * 4 // argument register save area
+ .space 4 * 3 // fill for alignment
+SpRa: .space 4 // saved return address
+SpFrameLength: // length of stack frame
+SpA0: .space 4 // saved argument register a0
+
+ NESTED_ENTRY(KiSwapProcess, SpFrameLength, zero)
+
+ subu sp,sp,SpFrameLength // allocate stack frame
+ sw ra,SpRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Acquire the context swap lock, clear the processor set member in the old
+// process, set the processor member in the new process, and release the
+// context swap lock.
+//
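+//
+// N.B. In rough C (the lock helpers stand in for the ll/sc sequence on
+//      KiContextSwapLock), the set update below is:
+//
+//          AcquireContextSwapLock();
+//          NewProcess->ActiveProcessors |= PCR->SetMember;
+//          OldProcess->ActiveProcessors &= ~PCR->SetMember;
+//          ReleaseContextSwapLock();
+//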
+
+#if !defined(NT_UP)
+
+10: ll t0,KiContextSwapLock // get current lock value
+ move t1,a0 // set ownership value
+ bne zero,t0,10b // if ne, lock already owned
+ sc t1,KiContextSwapLock // set lock ownership value
+ beq zero,t1,10b // if eq, store conditional failed
+ lw t0,KiPcr + PcSetMember(zero) // get processor set member
+ lw t2,PrActiveProcessors(a0) // get new active processor set
+ lw t1,PrActiveProcessors(a1) // get old active processor set
+ or t2,t2,t0 // set processor member in set
+ xor t1,t1,t0 // clear processor member in set
+ sw t2,PrActiveProcessors(a0) // set new active processor set
+ sw t1,PrActiveProcessors(a1) // set old active processor set
+ sw zero,KiContextSwapLock // clear lock value
+
+#endif
+
+//
+// Allocate a new process PID. If the new PID number is greater than the
+// number of PIDs supported on the host processor, then flush the entire
+// TB and reset the PID number to zero.
+//
+
+ lw v1,KiPcr + PcCurrentPid(zero) // get current processor PID
+ lw t2,KeNumberProcessIds // get number of process id's
+ addu v1,v1,1 << ENTRYHI_PID // increment master system PID
+ sltu t2,v1,t2 // any more PIDs to allocate
+ bne zero,t2,15f // if ne, more PIDs to allocate
+
+//
+// Flush the random part of the TB.
+//
+
+ sw a0,SpA0(sp) // save process object address
+ jal KiFlushRandomTb // flush random part of TB
+ lw a0,SpA0(sp) // restore process object address
+ move v1,zero // set next PID value
+
+//
+// Swap address space to the specified process.
+//
+
+15: sw v1,KiPcr + PcCurrentPid(zero) // set current processor PID
+ lw t1,PrDirectoryTableBase(a0) // get page directory PDE
+ lw t2,PrDirectoryTableBase + 4(a0) // get hyper space PDE
+ li t3,PDE_BASE // get virtual address of PDR
+ or t3,t3,v1 // merge process PID
+ li t4,PDR_ENTRY << INDEX_INDEX // set entry index for PDR
+
+ DISABLE_INTERRUPTS(t5) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mtc0 t3,entryhi // set VPN2 and PID of TB entry
+ mtc0 t1,entrylo0 // set first PDE value
+ mtc0 t2,entrylo1 // set second PDE value
+ mtc0 t4,index // set index of PDR entry
+ nop // 1 cycle hazard
+ tlbwi // write system PDR TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t5) // enable interrupts
+
+ lw ra,SpRa(sp) // restore return address
+ addu sp,sp,SpFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiSwapProcess
diff --git a/private/ntos/ke/mips/x4mpipi.s b/private/ntos/ke/mips/x4mpipi.s
new file mode 100644
index 000000000..9b5a4bc7a
--- /dev/null
+++ b/private/ntos/ke/mips/x4mpipi.s
@@ -0,0 +1,451 @@
+// TITLE("Interprocessor Interrupt support routines")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// x4mpipi.s
+//
+// Abstract:
+//
+// This module implements the MIPS specific functions required to
+// support multiprocessor systems.
+//
+// Author:
+//
+// David N. Cutler (davec) 22-Apr-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Interprocess Interrupt Processing")
+//++
+//
+// VOID
+// KeIpiInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// );
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interprocessor interrupt.
+// It's function is to process all interprocess immediate and packet
+// requests.
+//
+// Arguments:
+//
+// TrapFrame (s8) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KeIpiInterrupt, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Process all interprocessor requests.
+//
+
+ jal KiIpiProcessRequests // process requests
+ andi v1,v0,IPI_FREEZE // check if freeze is requested
+ beq zero,v1,10f // if eq, no freeze requested
+
+//
+// Save the floating state.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Freeze the execution of the current processor.
+//
+
+ move a0,s8 // set address of trap frame
+ move a1,sp // set address of exception frame
+ jal KiFreezeTargetExecution // freeze current processor execution
+
+//
+// Restore the volatile floating state.
+//
+
+ RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+ ldc1 f20,ExFltF20(sp) // restore floating registers f20 - f31
+ ldc1 f22,ExFltF22(sp) //
+ ldc1 f24,ExFltF24(sp) //
+ ldc1 f26,ExFltF26(sp) //
+ ldc1 f28,ExFltF28(sp) //
+ ldc1 f30,ExFltF30(sp) //
+
+10: lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate exception frame
+ j ra // return
+
+ .end KeIpiInterrupt
+
+ SBTTL("Processor Request")
+//++
+//
+// ULONG
+// KiIpiProcessRequests (
+// VOID
+// );
+//
+// Routine Description:
+//
+// This routine processes interprocessor requests and returns a summary
+// of the requests that were processed.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The request summary is returned as the function value.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument save area
+PrS0: .space 4 // saved integer register s0
+PrS1: .space 4 // saved integer register s1
+ .space 4 // fill
+PrRa: .space 4 // saved return address
+PrFrameLength: // frame length
+
+ NESTED_ENTRY(KiIpiProcessRequests, PrFrameLength, zero)
+
+ subu sp,sp,PrFrameLength // allocate exception frame
+ sw s0,PrS0(sp) // save integer register s0
+
+#if NT_INST
+
+ sw s1,PrS1(sp) // save integer register s1
+
+#endif
+
+ sw ra,PrRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Read request summary and write a zero result interlocked.
+//
+
+ lw t0,KiPcr + PcPrcb(zero) // get current processor block address
+10: lld t1,PbRequestSummary(t0) // get request summary and entry address
+ move t2,zero // set zero value for store
+ scd t2,PbRequestSummary(t0) // zero request summary
+ beq zero,t2,10b // if eq, store conditional failed
+ dsra a0,t1,32 // shift entry address to low 32-bits
+ move s0,t1 // copy request summary
+
+//
+// Check for Packet ready.
+//
+// If a packet is ready, then get the address of the requested function
+// and call the function passing the address of the packet address as a
+// parameter.
+//
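+//
+// N.B. PbRequestSummary is a 64-bit value manipulated with lld/scd: the low
+//      32 bits hold the request flags (IPI_PACKET_READY, IPI_APC, IPI_DPC,
+//      IPI_FREEZE) and the high 32 bits hold the address of the requesting
+//      processor's PRCB. In rough C (helper name is illustrative):
+//
+//          Summary = InterlockedExchange64(&Prcb->RequestSummary, 0);
+//          Source = (PKPRCB)(Summary >> 32);
+//          if (Summary & IPI_PACKET_READY) {
+//              (Source->WorkerRoutine)(Source,
+//                                      Source->CurrentPacket[0],
+//                                      Source->CurrentPacket[1],
+//                                      Source->CurrentPacket[2]);
+//          }
+//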
+
+ and t1,s0,IPI_PACKET_READY // check for packet ready
+ beq zero,t1,20f // if eq, packet not ready
+ lw t2,PbWorkerRoutine(a0) // get address of worker function
+ lw a1,PbCurrentPacket(a0) // get request parameters
+ lw a2,PbCurrentPacket + 4(a0) //
+ lw a3,PbCurrentPacket + 8(a0) //
+ jal t2 // call work routine
+
+#if NT_INST
+
+ lw s1,PbIpiCounts(t0) // get interrupt count structure
+ lw t1,IcPacket(s1) // increment number of packet requests
+ addu t1,t1,1 //
+ sw t1,IcPacket(s1) //
+
+#endif
+
+//
+// Check for APC interrupt request.
+//
+// If an APC interrupt is requested, then request a software interrupt at
+// APC level on the current processor.
+//
+
+20: and t1,s0,IPI_APC // check for APC interrupt request
+ beq zero,t1,25f // if eq, no APC interrupt requested
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ or t1,t1,APC_INTERRUPT // set APC interrupt request
+ mtc0 t1,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+#if NT_INST
+
+ lw t1,IcAPC(s1) // increment number of APC requests
+ addu t1,t1,1 //
+ sw t1,IcAPC(s1) //
+
+#endif
+
+//
+// Check for DPC interrupt request.
+//
+// If a DPC interrupt is requested, then request a software interrupt at
+// DPC level on the current processor.
+//
+
+25: and t1,s0,IPI_DPC // check for DPC interrupt request
+ beq zero,t1,30f // if eq, no DPC interrupt requested
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ or t1,t1,DISPATCH_INTERRUPT // set dispatch interrupt request
+ mtc0 t1,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+#if NT_INST
+
+ lw t1,IcDPC(s1) // increment number of DPC requests
+ addu t1,t1,1 //
+ sw t1,IcDPC(s1) //
+
+#endif
+
+//
+// Set function return value, restore registers, and return.
+//
+
+30: move v0,s0 // set function return value
+ lw s0,PrS0(sp) // restore integer register s0
+
+#if NT_INST
+
+ and t1,v0,IPI_FREEZE // check if freeze requested
+ beq zero,t1,40f // if eq, no freeze request
+ lw t1,IcFreeze(s1) // increment number of freeze requests
+ addu t1,t1,1 //
+ sw t1,IcFreeze(s1) //
+40: lw s1,PrS1(sp) // restore integer register s1
+
+#endif
+
+ lw ra,PrRa(sp) // restore return address
+ addu sp,sp,PrFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiIpiProcessRequests
+
+ SBTTL("Send Interprocess Request")
+//++
+//
+// VOID
+// KiIpiSend (
+// IN KAFINITY TargetProcessors,
+// IN KIPI_REQUEST IpiRequest
+// );
+//
+// Routine Description:
+//
+// This routine requests the specified operation on the target set of
+// processors.
+//
+// Arguments:
+//
+// TargetProcessors (a0) - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// IpiRequest (a1) - Supplies the request operation mask.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiIpiSend)
+
+#if !defined(NT_UP)
+
+ move v0,a0 // copy target processor set
+ la v1,KiProcessorBlock // get processor block array address
+
+//
+// Loop through the target processors and send the request to the specified
+// recipients.
+//
+
+10: and t1,v0,1 // check if target bit set
+ srl v0,v0,1 // shift out target processor
+ beq zero,t1,30f // if eq, target not specified
+ lw t1,0(v1) // get target processor block address
+20: lld t3,PbRequestSummary(t1) // get request summary of target
+ or t3,t3,a1 // merge current request with summary
+ scd t3,PbRequestSummary(t1) // store updated request summary
+ beq zero,t3,20b // if eq, store conditional failed
+30: add v1,v1,4 // advance to next array element
+ bne zero,v0,10b // if ne, more targets requested
+ lw t0,__imp_HalRequestIpi // request IPI interrupt on targets
+ j t0 //
+#else
+
+ j ra // return
+
+#endif
+
+ .end KiIpiSend
+
+ SBTTL("Send Interprocess Request Packet")
+//++
+//
+// VOID
+// KiIpiSendPacket (
+// IN KAFINITY TargetProcessors,
+// IN PKIPI_WORKER WorkerFunction,
+// IN PVOID Parameter1,
+// IN PVOID Parameter2,
+// IN PVOID Parameter3
+// );
+//
+// Routine Description:
+//
+// This routine executes the specified worker function on the specified
+// set of processors.
+//
+// Arguments:
+//
+// TargetProcessors (a0) - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// WorkerFunction (a1) - Supplies the address of the worker function.
+//
+// Parameter1 - Parameter3 (a2, a3, 4 * 4(sp)) - Supplies worker
+// function specific parameters.
+//
+// Return Value:
+//
+// None.
+//
+//--
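+//
+// N.B. A rough C sketch of the send protocol below (LoadLinked64 and
+//      StoreConditional64 stand in for the lld/scd pair):
+//
+//          for (each target processor Target in TargetProcessors) {
+//              do {
+//                  Summary = LoadLinked64(&Target->RequestSummary);
+//              } while ((Summary & IPI_PACKET_READY) || // previous packet busy
+//                       !StoreConditional64(&Target->RequestSummary,
+//                                           Summary | ((ULONGLONG)SourcePrcb << 32)
+//                                                   | IPI_PACKET_READY));
+//          }
+//          HalRequestIpi(TargetProcessors);            // interrupt the targets
+//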
+
+ LEAF_ENTRY(KiIpiSendPacket)
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcPrcb(zero) // get current processor block address
+ move v0,a0 // copy target processor set
+ la v1,KiProcessorBlock // get processor block array address
+
+//
+// Store function address and parameters in the packet area of the PRCB on
+// the current processor.
+//
+
+ lw t9,4 * 4(sp) // get parameter3 value
+ sw a0,PbTargetSet(t0) // set target processor set
+ sw a1,PbWorkerRoutine(t0) // set worker function address
+ sw a2,PbCurrentPacket(t0) // store worker function parameters
+ sw a3,PbCurrentPacket + 4(t0) //
+ sw t9,PbCurrentPacket + 8(t0) //
+
+//
+// Loop through the target processors and send the packet to the specified
+// recipients.
+//
+
+10: and t1,v0,1 // check if target bit set
+ srl v0,v0,1 // shift out target processor
+ beq zero,t1,30f // if eq, target not specified
+ lw t1,0(v1) // get target processor block address
+ dsll t3,t0,32 // shift entry address to upper 32-bits
+ or t3,t3,IPI_PACKET_READY // set packet ready in lower 32-bits
+20: lld t4,PbRequestSummary(t1) // get request summary of target
+ and t5,t4,IPI_PACKET_READY // check if target packet busy
+ or t4,t4,t3 // set entry address in request summary
+ bne zero,t5,20b // if ne, target packet busy
+ scd t4,PbRequestSummary(t1) // store request summary and entry address
+ beq zero,t4,20b // if eq, store conditional failed
+30: addu v1,v1,4 // advance to next array element
+ bne zero,v0,10b // if ne, more targets requested
+ lw t0,__imp_HalRequestIpi // request IPI interrupt on targets
+ j t0 //
+
+#else
+
+ j ra // return
+
+#endif
+
+ .end KiIpiSendPacket
+
+ SBTTL("Signal Packet Done")
+//++
+//
+// VOID
+// KeIpiSignalPacketDone (
+// IN PVOID SignalDone
+// );
+//
+// Routine Description:
+//
+// This routine signals that a processor has completed a packet by
+// clearing the calling processor's set member of the requesting
+// processor's packet.
+//
+// Arguments:
+//
+// SignalDone (a0) - Supplies a pointer to the processor block of the
+// sending processor.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiIpiSignalPacketDone)
+
+ lw a1,KiPcr + PcNotMember(zero) // get complement of processor set member
+10: ll a2,PbTargetSet(a0) // get request target set
+ and a2,a2,a1 // clear processor set member
+ sc a2,PbTargetSet(a0) // store target set
+ beq zero,a2,10b // if eq, store conditional failed
+ j ra // return
+
+ .end KiIpiSignalPacketDone
diff --git a/private/ntos/ke/mips/x4sqrt.s b/private/ntos/ke/mips/x4sqrt.s
new file mode 100644
index 000000000..71061bdf8
--- /dev/null
+++ b/private/ntos/ke/mips/x4sqrt.s
@@ -0,0 +1,113 @@
+// TITLE("Square Root")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// x4sqrt.s
+//
+// Abstract:
+//
+// This module implements the code necessary to compute the square root
+// of a denormalized value.
+//
+// Author:
+//
+// David N. Cutler (davec) 20-Apr-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Double Square Root")
+//++
+//
+// ULONG
+// KiSquareRootDouble (
+// IN PULONG DoubleValue
+// )
+//
+// Routine Description:
+//
+// This routine is called to compute the square root of a double
+// precision denormalized value.
+//
+// N.B. The denormalized value has been converted to a normalized
+// value with an exponent equal to the denormalization shift
+// count prior to calling this routine.
+//
+// Arguments:
+//
+// DoubleValue (a0) - Supplies a pointer to the double precision denormalized
+// value.
+//
+// Return Value:
+//
+// The inexact bit is returned as the function value.
+//
+//--
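+//
+// N.B. In rough C, and assuming hypothetical ReadFsr/WriteFsr helpers for the
+//      cfc1/ctc1 pair, the sequence below is:
+//
+//          Fsr = ReadFsr();
+//          WriteFsr(Fsr & 0x3);                        // keep rounding mode only
+//          *DoubleValue = sqrt(*DoubleValue);          // hardware sqrt.d
+//          return (ReadFsr() >> 2) & 1;                // inexact sticky bit
+//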
+
+ LEAF_ENTRY(KiSquareRootDouble)
+
+ ldc1 f0,0(a0) // get double value
+ cfc1 t0,fsr // get current floating status
+ and t0,t0,0x3 // isolate rounding mode
+ ctc1 t0,fsr // set current floating status
+ sqrt.d f0,f0 // compute double square root
+ cfc1 v0,fsr // get result floating status
+ srl v0,v0,2 // isolate inexact bit
+ and v0,v0,1 //
+ sdc1 f0,0(a0) // store result value
+ j ra //
+
+ .end KiSquareRootDouble
+
+ SBTTL("Single Square Root")
+//++
+//
+// ULONG
+// KiSquareRootSingle (
+// IN PULONG SingleValue
+// )
+//
+// Routine Description:
+//
+// This routine is called to compute the square root of a single
+// precision denormalized value.
+//
+// N.B. The denormalized value has been converted to a normalized
+// value with an exponent equal to the denormalization shift
+// count prior to calling this routine.
+//
+// Arguments:
+//
+// SingleValue (a0) - Supplies a pointer to the single denormalized
+// value.
+//
+// Return Value:
+//
+// The inexact bit is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiSquareRootSingle)
+
+ lwc1 f0,0(a0) // get single value
+ cfc1 t0,fsr // get current floating status
+ and t0,t0,0x3 // isolate rounding mode
+ ctc1 t0,fsr // set current floating status
+ sqrt.s f0,f0 // compute single square root
+ cfc1 v0,fsr // get result floating status
+ srl v0,v0,2 // isolate inexact bit
+ and v0,v0,1 //
+ swc1 f0,0(a0) // store result value
+ j ra //
+
+ .end KiSquareRootSingle
diff --git a/private/ntos/ke/mips/x4start.s b/private/ntos/ke/mips/x4start.s
new file mode 100644
index 000000000..842e9056f
--- /dev/null
+++ b/private/ntos/ke/mips/x4start.s
@@ -0,0 +1,968 @@
+// TITLE("System Initialization")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// x4start.s
+//
+// Abstract:
+//
+// This module implements the code necessary to initially startup the
+// NT system.
+//
+// Author:
+//
+// David N. Cutler (davec) 5-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KdDebuggerEnabled 1
+ .extern KeNumberProcessIds 4
+ .extern KeNumberProcessors 1
+ .extern KeNumberTbEntries 4
+ .extern KiBarrierWait 4
+ .extern KiContextSwapLock 4
+ .extern KiDispatcherLock 4
+ .extern KiSynchIrql 4
+
+ SBTTL("System Initialization")
+//++
+//
+// Routine Description:
+//
+// This routine is called when the NT system begins execution.
+// Its function is to initialize system hardware state, call the
+// kernel initialization routine, and then fall into code that
+// represents the idle thread for all processors.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the loader parameter block.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+SsArgA0:.space 4 // process address argument (a0)
+SsArgA1:.space 4 // thread address argument (a1)
+SsArgA2:.space 4 // idle stack argument (a2)
+SsArgA3:.space 4 // processor block address argument (a3)
+SsPrNum:.space 4 // processor number argument
+SsLdPrm:.space 4 // loader parameter block address
+SsPte: .space 2 * 4 // Pte values
+ .space 4 // fill
+SsRa: .space 4 // saved return address
+
+SsFrameLength: // length of stack frame
+
+ NESTED_ENTRY_S(KiSystemBegin, SsFrameLength, zero, INIT)
+
+ subu sp,sp,SsFrameLength // allocate stack frame
+ sw ra,SsRa(sp) // save return address
+
+ PROLOGUE_END
+
+ ALTERNATE_ENTRY(KiInitializeSystem)
+
+ lw sp,LpbKernelStack(a0) // get address of idle thread stack
+ subu sp,sp,SsFrameLength // allocate stack frame
+ lw gp,LpbGpBase(a0) // get global pointer base address
+ sw zero,SsRa(sp) // zero return address
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+//
+// Get page frame numbers for the PCR and PDR pages that were allocated by
+// the OS loader.
+//
+
+ lw s0,LpbPdrPage(a0) // set PDR page number
+ lw s1,LpbPcrPage(a0) // set PCR page number
+ move s2,a0 // save loader parameter block address
+ lw s3,LpbPrcb(s2) // get processor block address
+ lbu s3,PbNumber(s3) // get processor number
+ lw s6,LpbPcrPage2(a0) // set second PCR page
+
+//
+// Initialize the configuration, context, page mask, watch, and wired
+// registers.
+//
+// N.B. The base virtual address of the page table pages is left shifted by
+// one because of the way VPN2 is inserted into the context register
+// when a TB miss occurs. The TB miss routine right arithmetic shifts
+// the address by one to obtain the real virtual address. Note that it
+// is assumed that bits <31:30> of PTE_BASE are set.
+//
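+//
+// N.B. Roughly, on a TB miss the hardware forms
+//
+//          Context = (PTE_BASE << 1) | (BadVpn2 << 4)
+//
+//      and the miss handler recovers the address of the PTE pair with a
+//      single arithmetic shift, which also restores bit 31 of PTE_BASE:
+//
+//          PtePair = (LONG)Context >> 1;               // PTE_BASE + (BadVpn2 * 8)
+//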
+
+ li t0,PTE_BASE << 1 // set base virtual page table address
+ li t1,FIXED_ENTRIES // set number of fixed TB entries
+ li t2,0xf000 // set frame mask register value
+
+ .set noreorder
+ .set noat
+ mfc0 s7,config // get processor configuration
+ mfc0 s8,prid // get processor id
+ mtc0 t0,context // initialize the context register
+ mtc0 zero,pagemask // initialize the page mask register
+ mtc0 zero,taglo // initialize the tag low register
+ mtc0 zero,watchlo // initialize the watch address register
+ mtc0 zero,watchhi //
+ mtc0 t1,wired // initialize the wired register
+ and s4,s7,0x7 // isolate KSEG0 cache policy
+ and t3,s8,0xff00 // isolate processor id
+ xor t3,t3,0x900 // check if r10000 processor
+ bne zero,t3,5f // if ne, not r10000 processor
+ sll s5,s4,ENTRYLO_C // shift cache policy into position
+ mtc0 t2,framemask // set frame mask register
+ .set at
+ .set reorder
+
+//
+// Clear the translation buffer.
+//
+
+5: bne zero,s3,20f // if ne, not processor zero
+ li t0,48 // set number of TB entries for r4x00
+ and t1,s8,0xff00 // isolate processor id
+ xor t1,t1,0x900 // check if r10000 processor
+ bne zero,t1,10f // if ne, not r10000 processor
+ li t0,64 // set number of TB entries for r10000
+10: sw t0,KeNumberTbEntries // store number of TB entries
+ li t0,256 // set number of process id's
+ sw t0,KeNumberProcessIds //
+20: jal KiFlushFixedTb // flush fixed TB entries
+ jal KiFlushRandomTb // flush random TB entries
+
+//
+// Initialize fixed entries that map the PCR into system and user space.
+//
+
+ sll t0,s6,ENTRYLO_PFN // shift PFN into position
+ or t0,t0,1 << ENTRYLO_G // Set G, V, D, and the cache policy
+ or t0,t0,1 << ENTRYLO_V //
+ or t0,t0,1 << ENTRYLO_D //
+ or t0,t0,s5 //
+ sll t1,s1,ENTRYLO_PFN // shift PFN into position
+ or t1,t1,1 << ENTRYLO_G // Set G, V, D, and the cache policy
+ or t1,t1,1 << ENTRYLO_V //
+ or t1,t1,1 << ENTRYLO_D //
+ or t1,t1,s5 //
+ sw t0,SsPte(sp) // set first PTE value
+ sw t1,SsPte + 4(sp) // set second PTE value
+ addu a0,sp,SsPte // compute address of PTE values
+ li a1,KiPcr & ~(1 << PAGE_SHIFT) // set virtual address/2 of PCR
+ li a2,PCR_ENTRY // set index of system PCR entry
+ jal KeFillFixedEntryTb // fill fixed TB entry
+
+ sll t0,s6,ENTRYLO_PFN // shift PFN into position
+ or t0,t0,1 << ENTRYLO_G // set G, V, and the cache policy
+ or t0,t0,1 << ENTRYLO_V //
+ or t0,t0,s5 //
+ sll t1,s1,ENTRYLO_PFN // shift PFN into position
+ or t1,t1,1 << ENTRYLO_G // set G, V, and cache policy
+ or t1,t1,1 << ENTRYLO_V //
+ or t1,t1,s5 //
+ sw t0,SsPte(sp) // set first PTE value
+ sw t1,SsPte + 4(sp) // set second PTE value
+ addu a0,sp,SsPte // compute address of PTE values
+ li a1,UsPcr & ~(1 << PAGE_SHIFT) // set virtual address/2 of PCR
+ li a2,PCR_ENTRY + 1 // set index of user PCR entry
+ jal KeFillFixedEntryTb // fill fixed TB entry
+
+//
+// Set the cache policy for cached memory.
+//
+
+ li t1,KiPcr // get PCR address
+ sw s4,PcCachePolicy(t1) // set cache policy for cached memory
+ sw s5,PcAlignedCachePolicy(t1) //
+
+//
+// Set the first level data and instruction cache fill size and size.
+//
+
+ lw t2,LpbFirstLevelDcacheSize(s2) //
+ sw t2,PcFirstLevelDcacheSize(t1) //
+ lw t2,LpbFirstLevelDcacheFillSize(s2) //
+ sw t2,PcFirstLevelDcacheFillSize(t1) //
+ lw t2,LpbFirstLevelIcacheSize(s2) //
+ sw t2,PcFirstLevelIcacheSize(t1) //
+ lw t2,LpbFirstLevelIcacheFillSize(s2) //
+ sw t2,PcFirstLevelIcacheFillSize(t1) //
+
+//
+// Set the second level data and instruction cache fill size and size.
+//
+
+ lw t2,LpbSecondLevelDcacheSize(s2) //
+ sw t2,PcSecondLevelDcacheSize(t1) //
+ lw t2,LpbSecondLevelDcacheFillSize(s2) //
+ sw t2,PcSecondLevelDcacheFillSize(t1) //
+ lw t2,LpbSecondLevelIcacheSize(s2) //
+ sw t2,PcSecondLevelIcacheSize(t1) //
+ lw t2,LpbSecondLevelIcacheFillSize(s2) //
+ sw t2,PcSecondLevelIcacheFillSize(t1) //
+
+//
+// Set the data cache fill size and alignment values.
+//
+
+ lw t2,PcSecondLevelDcacheSize(t1) // get second level dcache size
+ lw t3,PcSecondLevelDcacheFillSize(t1) // get second level fill size
+ bne zero,t2,30f // if ne, second level cache present
+ lw t3,PcFirstLevelDcacheFillSize(t1) // get first level fill size
+30: subu t4,t3,1 // compute dcache alignment value
+ sw t3,PcDcacheFillSize(t1) // set dcache fill size
+ sw t4,PcDcacheAlignment(t1) // set dcache alignment value
+
+//
+// Set the instruction cache fill size and alignment values.
+//
+
+ lw t2,PcSecondLevelIcacheSize(t1) // get second level icache size
+ lw t3,PcSecondLevelIcacheFillSize(t1) // get second level fill size
+ bne zero,t2,40f // if ne, second level cache present
+ lw t3,PcFirstLevelIcacheFillSize(t1) // get first level fill size
+40: subu t4,t3,1 // compute icache alignment value
+ sw t3,PcIcacheFillSize(t1) // set icache fill size
+ sw t4,PcIcacheAlignment(t1) // set icache alignment value
+
+//
+// Sweep the data and instruction caches.
+//
+
+ jal HalSweepIcache // sweep the instruction cache
+ jal HalSweepDcache // sweep the data cache
+
+//
+// Initialize the fixed entries that map the PDR pages.
+//
+
+ sll t0,s0,ENTRYLO_PFN // shift PFN into position
+ or t0,t0,1 << ENTRYLO_V // set V, D, and cache policy
+ or t0,t0,1 << ENTRYLO_D //
+ or t0,t0,s5 //
+ addu t1,t0,1 << ENTRYLO_PFN // compute PTE for second PDR page
+ sw t0,SsPte(sp) // set first PTE value
+ sw t1,SsPte + 4(sp) // set second PTE value
+ addu a0,sp,SsPte // compute address of PTE values
+ li a1,PDE_BASE // set system virtual address/2 of PDR
+ li a2,PDR_ENTRY // set index of system PDR entry
+ jal KeFillFixedEntryTb // fill fixed TB entry
+ li t2,PDE_BASE // set virtual address of PDR
+ lw t0,SsPte(sp) // get first PTE value
+ lw t1,SsPte + 4(sp) // get second PTE value
+ sw t0,((PDE_BASE >> (PDI_SHIFT - 2)) & 0xffc)(t2) // set recursive PDE
+ sw t1,((PDE_BASE >> (PDI_SHIFT - 2)) & 0xffc) + 4(t2) // set hyper PDE
+
+//
+// Initialize the Processor Control Registers (PCR).
+//
+
+ li t1,KiPcr // get PCR address
+
+//
+// Initialize the minor and major version numbers.
+//
+
+ li t2,PCR_MINOR_VERSION // set minor version number
+ sh t2,PcMinorVersion(t1) //
+ li t2,PCR_MAJOR_VERSION // set major version number
+ sh t2,PcMajorVersion(t1) //
+
+//
+// Set address of processor block.
+//
+
+ lw t2,LpbPrcb(s2) // set processor block address
+ sw t2,PcPrcb(t1) //
+
+//
+// Initialize the routine addresses in the exception dispatch table.
+//
+
+ la t2,KiInvalidException // set address of invalid exception
+ li t3,XCODE_VECTOR_LENGTH // set length of dispatch vector
+ la t4,PcXcodeDispatch(t1) // compute address of dispatch vector
+50: sw t2,0(t4) // fill dispatch vector
+ subu t3,t3,1 // decrement number of entries
+ addu t4,t4,4 // advance to next vector entry
+ bgtz t3,50b // if gtz, more to fill
+
+ la t2,KiInterruptException // Initialize exception dispatch table
+ sw t2,PcXcodeDispatch + XCODE_INTERRUPT(t1) //
+ la t2,KiModifyException //
+ sw t2,PcXcodeDispatch + XCODE_MODIFY(t1) //
+ la t2,KiReadMissException // set read miss address for r4x00
+ and t3,s8,0xff00 // isolate processor id
+ xor t3,t3,0x900 // check if r10000 processor
+ bne zero,t3,55f // if ne, not r10000 processor
+ la t2,KiReadMissException9.x // set read miss address for r10000
+55: sw t2,PcXcodeDispatch + XCODE_READ_MISS(t1) //
+ la t2,KiWriteMissException //
+ sw t2,PcXcodeDispatch + XCODE_WRITE_MISS(t1) //
+ la t2,KiReadAddressErrorException //
+ sw t2,PcXcodeDispatch + XCODE_READ_ADDRESS_ERROR(t1) //
+ la t2,KiWriteAddressErrorException //
+ sw t2,PcXcodeDispatch + XCODE_WRITE_ADDRESS_ERROR(t1) //
+ la t2,KiInstructionBusErrorException //
+ sw t2,PcXcodeDispatch + XCODE_INSTRUCTION_BUS_ERROR(t1) //
+ la t2,KiDataBusErrorException //
+ sw t2,PcXcodeDispatch + XCODE_DATA_BUS_ERROR(t1) //
+ la t2,KiSystemServiceException //
+ sw t2,PcXcodeDispatch + XCODE_SYSTEM_CALL(t1) //
+ la t2,KiBreakpointException //
+ sw t2,PcXcodeDispatch + XCODE_BREAKPOINT(t1) //
+ la t2,KiIllegalInstructionException //
+ sw t2,PcXcodeDispatch + XCODE_ILLEGAL_INSTRUCTION(t1) //
+ la t2,KiCoprocessorUnusableException //
+ sw t2,PcXcodeDispatch + XCODE_COPROCESSOR_UNUSABLE(t1) //
+ la t2,KiIntegerOverflowException //
+ sw t2,PcXcodeDispatch + XCODE_INTEGER_OVERFLOW(t1) //
+ la t2,KiTrapException //
+ sw t2,PcXcodeDispatch + XCODE_TRAP(t1) //
+ la t2,KiInstructionCoherencyException //
+ sw t2,PcXcodeDispatch + XCODE_VIRTUAL_INSTRUCTION(t1) //
+ la t2,KiFloatingException //
+ sw t2,PcXcodeDispatch + XCODE_FLOATING_EXCEPTION(t1) //
+ la t2,KiUserAddressErrorException //
+ sw t2,PcXcodeDispatch + XCODE_INVALID_USER_ADDRESS(t1)
+ la t2,KiPanicException //
+ sw t2,PcXcodeDispatch + XCODE_PANIC(t1) //
+ la t2,KiDataCoherencyException //
+ sw t2,PcXcodeDispatch + XCODE_VIRTUAL_DATA(t1) //
+
+//
+// Initialize the addresses of various data structures that are referenced
+// from the exception and interrupt handling code.
+//
+// N.B. The panic stack is a separate stack that is used when the current
+// kernel stack overflows.
+//
+// N.B. The interrupt stack is a separate stack and is used to process all
+// interrupts that run at IRQL 3 and above.
+//
+
+ lw t2,LpbKernelStack(s2) // set initial stack address
+ sw t2,PcInitialStack(t1) //
+ lw t2,LpbPanicStack(s2) // set panic stack address
+ sw t2,PcPanicStack(t1) //
+ lw t2,LpbInterruptStack(s2) // set interrupt stack address
+ sw t2,PcInterruptStack(t1) //
+ sw gp,PcSystemGp(t1) // set system global pointer address
+ lw t2,LpbThread(s2) // set current thread address
+ sw t2,PcCurrentThread(t1) //
+
+//
+// Set current IRQL to highest value.
+//
+
+ li t2,HIGH_LEVEL // set current IRQL
+ sb t2,PcCurrentIrql(t1) //
+
+//
+// Set processor id and configuration.
+//
+
+ sw s7,PcSystemReserved(t1) // save processor configuration
+ sw s8,PcProcessorId(t1) // save processor id
+
+//
+// Clear floating status and zero the count and compare registers.
+//
+
+ .set noreorder
+ .set noat
+ ctc1 zero,fsr // clear floating status
+ mtc0 zero,count // initialize the count register
+ mtc0 zero,compare // initialize the compare register
+ .set at
+ .set reorder
+
+//
+// Set system dispatch address limits used by get and set context.
+//
+
+ la t2,KiSystemServiceDispatchStart // set starting address of range
+ sw t2,PcSystemServiceDispatchStart(t1) //
+ la t2,KiSystemServiceDispatchEnd // set ending address of range
+ sw t2,PcSystemServiceDispatchEnd(t1) //
+
+//
+// Copy the TB miss, XTB miss, cache parity, and general exception handlers to
+// low memory.
+//
+
+ bne zero,s3,100f // if ne, not processor zero
+
+//
+// Copy TB Miss Handler.
+//
+
+ la t2,KiTbMissStartAddress2.x // get user TB miss start address
+ la t3,KiTbMissEndAddress3.x // get user TB miss end address
+ and a0,s8,0xfff0 // isolate id and major chip version
+ xor a0,a0,0x420 // test if id 4 and version 2.0 chip
+ beq zero,a0,60f // if eq, version 2.0 chip
+ la t2,KiTbMissStartAddress3.x // get user TB miss start address
+ and a0,s8,0xff00 // isolate processor id
+ xor a0,a0,0x900 // check if r10000 processor
+ bne zero,a0,60f // if ne, not r10000 processor
+ la t2,KiTbMissStartAddress9.x // get user TB miss start address
+ la t3,KiTbMissEndAddress9.x // get user TB miss end address
+60: li t4,KSEG0_BASE // get copy address
+70: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,70b // if ne, more to copy
+
+//
+// Copy XTB Miss Handler.
+//
+
+ la t2,KiXTbMissStartAddress2.x // get user TB miss start address
+ la t3,KiXTbMissEndAddress3.x // get user TB miss end address
+ and a0,s8,0xfff0 // isolate id and major chip version
+ xor a0,a0,0x420 // test if id 4 and version 2.0 chip
+ beq zero,a0,73f // if eq, version 2.0 chip
+ la t2,KiXTbMissStartAddress3.x // get user TB miss start address
+ and a0,s8,0xff00 // isolate processor id
+ xor a0,a0,0x900 // check if r10000 processor
+ bne zero,a0,73f // if ne, not r10000 processor
+ la t2,KiXTbMissStartAddress9.x // get user TB miss start address
+ la t3,KiXTbMissEndAddress9.x // get user TB miss end address
+73: li t4,KSEG0_BASE + 0x80 // get copy address
+77: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,77b // if ne, more to copy
+
+//
+// Copy Cache Error Handler.
+//
+
+ la t2,KiCacheErrorStartAddress // get cache error start address
+ la t3,KiCacheErrorEndAddress // get cache error end address
+ li t4,KSEG1_BASE + 0x100 // get copy address
+80: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,80b // if ne, more to copy
+
+//
+// Copy General Exception Handler.
+//
+
+ la t2,KiGeneralExceptionStartAddress // get general exception start address
+ la t3,KiGeneralExceptionEndAddress // get general exception end address
+ li t4,KSEG0_BASE + 0x180 // get copy address
+90: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,90b // if ne, more to copy
+
+//
+// Set the default cache error routine address.
+//
+
+ la t0,SOFT_RESET_VECTOR // get soft reset vector address
+ la t1,CACHE_ERROR_VECTOR // get cache error vector address
+ sw t0,0(t1) // set default cache error routine
+
+//
+// Sweep the data and instruction caches.
+//
+
+100: jal HalSweepIcache // sweep the instruction cache
+ jal HalSweepDcache // sweep the data cache
+
+// ****** temp ******
+//
+// Setup watch registers to catch write to location 0.
+//
+// ****** temp ******
+
+// .set noreorder
+// .set noat
+// li t0,1 // set to watch writes to location 0
+// mtc0 t0,watchlo //
+// mtc0 zero,watchhi //
+// .set at
+// .set reorder
+
+//
+// Setup arguments and call kernel initialization routine.
+//
+
+ lw s0,LpbProcess(s2) // get idle process address
+ lw s1,LpbThread(s2) // get idle thread address
+ move a0,s0 // set idle process address
+ move a1,s1 // set idle thread address
+ lw a2,LpbKernelStack(s2) // set idle thread stack address
+ lw a3,LpbPrcb(s2) // get processor block address
+ sw s3,SsPrNum(sp) // set processor number
+ sw s2,SsLdPrm(sp) // set loader parameter block address
+ jal KiInitializeKernel // initialize system data structures
+
+//
+// Control is returned to the idle thread with IRQL at HIGH_LEVEL. Lower IRQL
+// to DISPATCH_LEVEL, set wait IRQL of idle thread, load global register values,
+// and enter idle loop.
+//
+
+ move s7,s3 // set processor number
+ lw s0,KiPcr + PcPrcb(zero) // get processor control block address
+ addu s3,s0,PbDpcListHead // compute DPC listhead address
+ li a0,DISPATCH_LEVEL // get dispatch level IRQL
+ sb a0,ThWaitIrql(s1) // set wait IRQL of idle thread
+ jal KeLowerIrql // lower IRQL
+
+ DISABLE_INTERRUPTS(s8) // disable interrupts
+
+ or s8,s8,1 << PSR_IE // set interrupt enable bit
+ subu s6,s8,1 << PSR_IE // clear interrupt enable bit
+
+ ENABLE_INTERRUPTS(s8) // enable interrupts
+
+ move s4,zero // clear breakin loop counter
+ lbu a0,KiSynchIrql // get new IRQL value
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and s5,s8,t1 // clear current interrupt enables
+ or s5,s5,t0 // set new interrupt enables
+
+//
+// In a multiprocessor system the boot processor proceeds directly into
+// the idle loop. As other processors start executing, however, they do
+// not directly enter the idle loop; instead, they spin until all processors
+// have been started and the boot master allows them to proceed.
+//
+
+#if !defined(NT_UP)
+
+110: lw t0,KiBarrierWait // get the current barrier wait value
+ bne zero,t0,110b // if ne, spin until allowed to proceed
+ lbu t1,KiPcr + PcNumber(zero) // get current processor number
+ beq zero,t1,120f // if eq, processor zero
+ jal HalAllProcessorsStarted // perform platform specific operations
+ bne zero,v0,120f // if ne, initialization succeeded
+ li a0,HAL1_INITIALIZATION_FAILED // set bug check reason
+ jal KeBugCheck // bug check
+
+#endif
+
+//
+// Allocate an exception frame and store the nonvolatile register and
+// return address in the frame so when a context switch from the idle
+// thread to another thread occurs, context does not have to be saved
+// and the special switch from idle entry point in the context swap
+// code can be called.
+//
+// Registers s0 - s8 have the following contents:
+//
+// s0 - Address of the current processor block.
+// s1 - Not used.
+// s2 - Not used.
+// s3 - Address of DPC listhead for current processor.
+// s4 - Debugger breakin poll counter.
+// s5 - Saved PSR with interrupts enabled and IRQL of synchronization level.
+// s6 - Saved PSR with interrupts disabled and an IRQL of DISPATCH_LEVEL.
+// s7 - Number of the current processor.
+// s8 - Saved PSR with interrupts enabled and IRQL of DISPATCH_LEVEL.
+//
+
+120: subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw s3,ExIntS3(sp) // save register s3 - s8
+ sw s4,ExIntS4(sp) //
+ sw s5,ExIntS5(sp) //
+ sw s6,ExIntS6(sp) //
+ sw s7,ExIntS7(sp) //
+ sw s8,ExIntS8(sp) //
+ la ra,KiIdleLoop // set address of swap return
+ sw ra,ExSwapReturn(sp) //
+ j KiIdleLoop //
+
+ .end KiSystemBegin
+
+//
+// The following code represents the idle thread for a processor. The idle
+// thread executes at IRQL DISPATCH_LEVEL and continually polls for work to
+// do. Control may be given to this loop either as a result of a return from
+// the system initialize routine or as the result of starting up another
+// processor in a multiprocessor configuration.
+//
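+//
+// N.B. A condensed, illustrative C sketch of the loop below (helper names are
+//      not actual kernel routines):
+//
+//          for (;;) {
+//              PollDebuggerBreakin();                  // processor zero, checked builds
+//              EnableInterrupts();                     // brief window for pending interrupts
+//              DisableInterrupts();
+//              if (!IsListEmpty(&Prcb->DpcListHead)) {
+//                  KiRetireDpcList();
+//              }
+//              if ((NewThread = Prcb->NextThread) != NULL &&
+//                  TryLock(&KiDispatcherLock)) {
+//                  RaiseIrqlToSynchLevel();
+//                  SwapFromIdle();                     // switch to the selected thread
+//              }
+//          }
+//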
+
+ LEAF_ENTRY(KiIdleLoop)
+
+#if DBG
+
+ move s4,zero // clear breakin loop counter
+
+#endif
+
+//
+// Lower IRQL to DISPATCH_LEVEL and enable interrupts.
+//
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ li a0,DISPATCH_LEVEL // get new IRQL value
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(s8) // enable interrupts
+
+//
+// Check if the debugger is enabled, the current processor is zero, and
+// whether it is time to poll for a debugger breakin.
+//
+
+KiIdleTop: //
+
+#if DBG
+
+#if !defined(NT_UP)
+
+ bne zero,s7,CheckDpcList // if ne, not processor zero
+
+#endif
+
+ subu s4,s4,1 // decrement poll counter
+ bgtz s4,CheckDpcList // if gtz, then not time to poll
+ lbu t0,KdDebuggerEnabled // check if debugger is enabled
+ li s4,200 * 1000 // set breakin loop counter
+ beq zero,t0,CheckDpcList // if eq, debugger not enabled
+ jal KdPollBreakIn // check if breakin is requested
+ beq zero,v0,CheckDpcList // if eq, no breakin requested
+ li a0,DBG_STATUS_CONTROL_C // break in and send
+ jal DbgBreakPointWithStatus // status to the debugger
+
+#endif
+
+//
+// Enable interrupts to allow any outstanding interrupts to occur, then
+// disable interrupts and check if there is any work in the DPC list of
+// the current processor.
+//
+
+CheckDpcList: //
+
+//
+// N.B. The following code enables interrupts for a few cycles, then
+// disables them again for the subsequent DPC and next thread
+// checks.
+//
+
+ .set noreorder
+ .set noat
+ mtc0 s8,psr // enable interrupts
+ nop //
+ nop //
+ nop //
+ nop // allow interrupts to occur
+ nop //
+ mtc0 s6,psr // disable interrupts
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+//
+// Process the deferred procedure call list for the current processor.
+//
+
+ lw a0,LsFlink(s3) // get address of next entry
+ beq a0,s3,CheckNextThread // if eq, DPC list is empty
+
+ .set noreorder
+ .set noat
+ mfc0 t0,cause // get exception cause register
+ and t0,t0,APC_INTERRUPT // clear dispatch interrupt pending
+ mtc0 t0,cause // set exception cause register
+ .set at
+ .set reorder
+
+ move v0,s8 // set previous PSR value
+ jal KiRetireDpcList // process the DPC list
+
+#if DBG
+
+ move s4,zero // clear breakin loop counter
+
+#endif
+
+//
+// Check if a thread has been selected to run on the current processor.
+//
+
+CheckNextThread: //
+ lw s2,PbNextThread(s0) // get address of next thread object
+ beq zero,s2,20f // if eq, no thread selected
+
+//
+// A thread has been selected for execution on this processor. Acquire
+// dispatcher database lock, get the thread address again (it may have
+// changed), clear the address of the next thread in the processor block,
+// and call swap context to start execution of the selected thread.
+//
+// N.B. If the dispatcher database lock cannot be obtained immediately,
+// then attempt to process another DPC rather than spinning on the
+// dispatcher database lock.
+//
+
+ lbu a0,KiSynchIrql // get new IRQL value
+
+#if !defined(NT_UP)
+
+10: ll t0,KiDispatcherLock // get current lock value
+ move t1,s2 // set lock ownership value
+ bne zero,t0,CheckDpcList // if ne, spin lock owned
+ sc t1,KiDispatcherLock // set spin lock owned
+ beq zero,t1,10b // if eq, store conditional failed
+
+#endif
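+
+//
+// N.B. The load linked/store conditional sequence above is logically an
+//      atomic test and set of the dispatcher lock; as a sketch only:
+//
+//          if (KiDispatcherLock != 0) goto CheckDpcList;  // lock owned, retire DPCs
+//          KiDispatcherLock = Thread;                     // atomic via the ll/sc pair
+//
+//      If the conditional store fails because another processor intervened,
+//      the sequence is retried from the load linked instruction.
+//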
+
+//
+// Raise IRQL to synchronization level and enable interrupts.
+//
+
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(s5) // enable interrupts
+
+ lw s1,PbCurrentThread(s0) // get address of current thread
+ lw s2,PbNextThread(s0) // get address of next thread object
+ sw zero,PbNextThread(s0) // clear next thread address
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+
+//
+// Set the thread state to running.
+//
+
+ li t0,Running // set thread state to running
+ sb t0,ThState(s2) //
+
+//
+// Acquire the context swap lock so the address space of the old process
+// cannot be deleted and then release the dispatcher database lock. In
+// this case the old process is the system process, but the context swap
+// code releases the context swap lock so it must be acquired.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
+
+#if !defined(NT_UP)
+
+15: ll t0,KiContextSwapLock // get current lock value
+ move t1,s2 // set ownership value
+ bne zero,t0,15b // if ne, lock already owned
+ sc t1,KiContextSwapLock // set lock ownership value
+ beq zero,t1,15b // if eq, store conditional failed
+ sw zero,KiDispatcherLock // set lock not owned
+
+#endif
+
+ j SwapFromIdle // swap context to new thread
+
+//
+// There are no entries in the DPC list and a thread has not been selected
+// for execution on this processor. Call the HAL so power management can be
+// performed.
+//
+// N.B. The HAL is called with interrupts disabled. The HAL will return
+// with interrupts enabled.
+//
+
+20: la ra,KiIdleTop // set return address
+ j HalProcessorIdle // notify HAL of idle state
+
+ .end KiIdleLoop
+
+ SBTTL("Retire Deferred Procedure Call List")
+//++
+//
+// Routine Description:
+//
+// This routine is called to retire the specified deferred procedure
+// call list. DPC routines are called using the idle thread (current)
+// stack.
+//
+// N.B. Interrupts must be disabled on entry to this routine. Control
+// is returned to the caller with the same conditions true.
+//
+// Arguments:
+//
+// v0 - Previous PSR value.
+// s0 - Address of the current PRCB.
+//
+// Return value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument save area
+DpRa: .space 4 // return address
+ .space 4 // fill
+
+#if DBG
+
+DpStart:.space 4 // DPC start time in ticks
+DpFunct:.space 4 // DPC function address
+DpCount:.space 4 // interrupt count at start of DPC
+DpTime: .space 4 // interrupt time at start of DPC
+
+#endif
+
+DpcFrameLength: // DPC frame length
+
+ NESTED_ENTRY(KiRetireDpcList, DpcFrameLength, zero)
+
+ subu sp,sp,DpcFrameLength // allocate stack frame
+ sw ra,DpRa(sp) // save return address
+
+ PROLOGUE_END
+
+5: sw sp,PbDpcRoutineActive(s0) // set DPC routine active
+ sw sp,KiPcr + PcDpcRoutineActive(zero) //
+
+//
+// Process the DPC list.
+//
+
+10: addu a1,s0,PbDpcListHead // compute DPC listhead address
+ lw a0,LsFlink(a1) // get address of next entry
+ beq a0,a1,60f // if eq, DPC list is empty
+
+#if !defined(NT_UP)
+
+20: ll t1,PbDpcLock(s0) // get current lock value
+ move t2,s0 // set lock ownership value
+ bne zero,t1,20b // if ne, spin lock owned
+ sc t2,PbDpcLock(s0) // set spin lock owned
+ beq zero,t2,20b // if eq, store conditional failed
+ lw a0,LsFlink(a1) // get address of next entry
+ beq a0,a1,50f // if eq, DPC list is empty
+
+#endif
+
+ lw t1,LsFlink(a0) // get address of next entry
+ subu a0,a0,DpDpcListEntry // compute address of DPC Object
+ sw t1,LsFlink(a1) // set address of next in header
+ sw a1,LsBlink(t1) // set address of previous in next
+ lw a1,DpDeferredContext(a0) // get deferred context argument
+ lw a2,DpSystemArgument1(a0) // get first system argument
+ lw a3,DpSystemArgument2(a0) // get second system argument
+ lw t1,DpDeferredRoutine(a0) // get deferred routine address
+ sw zero,DpLock(a0) // clear DPC inserted state
+ lw t2,PbDpcQueueDepth(s0) // decrement the DPC queue depth
+ subu t2,t2,1 //
+ sw t2,PbDpcQueueDepth(s0) //
+
+#if !defined(NT_UP)
+
+ sw zero,PbDpcLock(s0) // set spin lock not owned
+
+#endif
+
+ ENABLE_INTERRUPTS(v0) // enable interrupts
+
+#if DBG
+
+ sw t1,DpFunct(sp) // save DPC function address
+ lw t2,KeTickCount // save current tick count
+ sw t2,DpStart(sp) //
+ lw t3,PbInterruptCount(s0) // get current interrupt count
+ lw t4,PbInterruptTime(s0) // get current interrupt time
+ sw t3,DpCount(sp) // save interrupt count at start of DPC
+ sw t4,DpTime(sp) // save interrupt time at start of DPC
+
+#endif
+
+ jal t1 // call DPC routine
+
+#if DBG
+
+ lbu t0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ sltu t1,t0,DISPATCH_LEVEL // check if less than dispatch level
+ beq zero,t1,30f // if eq, not less than dispatch level
+ lw t1,DpFunct(sp) // get DPC function address
+ jal DbgBreakPoint // execute debug breakpoint
+30: lw t0,KeTickCount // get current tick count
+ lw t1,DpStart(sp) // get starting tick count
+ lw t2,DpFunct(sp) // get DPC function address
+ subu t3,t0,t1 // compute time in DPC function
+ sltu t3,t3,100 // check if less than one second
+ bne zero,t3,40f // if ne, less than one second
+ lw t3,PbInterruptCount(s0) // get current interrupt count
+ lw t4,PbInterruptTime(s0) // get current interrupt time
+ lw t5,DpCount(sp) // get starting interrupt count
+ lw t6,DpTime(sp) // get starting interrupt time
+ subu t3,t3,t5 // compute number of interrupts
+ subu t4,t4,t6 // compute time of interrupts
+ jal DbgBreakPoint // execute debug breakpoint
+
+#endif
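+
+//
+// N.B. The checked-build code above breaks into the debugger if the DPC
+//      routine returned with the IRQL lowered below DISPATCH_LEVEL, or if
+//      the routine ran for 100 or more clock ticks (roughly a second); in
+//      the latter case the interrupt count and interrupt time deltas are
+//      left in registers for inspection at the breakpoint.
+//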
+
+40: DISABLE_INTERRUPTS(v0) // disable interrupts
+
+ b 10b //
+
+//
+// Unlock DPC list and clear DPC active.
+//
+
+50:
+
+#if !defined(NT_UP)
+
+ sw zero,PbDpcLock(s0) // set spin lock not owned
+
+#endif
+
+60: sw zero,PbDpcRoutineActive(s0) // clear DPC routine active
+ sw zero,KiPcr + PcDpcRoutineActive(zero) //
+ sw zero,PbDpcInterruptRequested(s0) // clear DPC interrupt requested
+
+//
+// Check one last time that the DPC list is empty. This is required to
+// close a race condition with the DPC queuing code where it appears that
+// a DPC routine is active (and thus an interrupt is not requested), but
+// this code has decided the DPC list is empty and is clearing the DPC
+// active flag.
+//
+
+ addu a1,s0,PbDpcListHead // compute DPC listhead address
+ lw a0,LsFlink(a1) // get address of next entry
+ bne a0,a1,5b // if ne, DPC list is not empty
+ lw ra,DpRa(sp) // restore return address
+ addu sp,sp,DpcFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiRetireDpcList
diff --git a/private/ntos/ke/mips/x4trap.s b/private/ntos/ke/mips/x4trap.s
new file mode 100644
index 000000000..dce5e158d
--- /dev/null
+++ b/private/ntos/ke/mips/x4trap.s
@@ -0,0 +1,4622 @@
+// TITLE("Interrupt and Exception Processing")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// x4trap.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process MIPS
+// interrupt and exception conditions.
+//
+// N.B. This module executes in KSEG0 or KSEG1 and, in general, cannot
+// tolerate a TB Miss. Registers k0 and k1 are used for argument
+// passing during the initial stage of interrupt and exception
+// processing, and therefore, extreme care must be exercised when
+// modifying this module.
+//
+// Author:
+//
+// David N. Cutler (davec) 4-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Constant Value Definitions")
+//++
+//
+// The following are definitions of constants used in this module.
+//
+//--
+
+#define PSR_ENABLE_MASK ((0xff << PSR_INTMASK) | (0x3 << PSR_KSU) | (1 << PSR_EXL))
+
+#define PSR_MASK (~((0x3 << PSR_KSU) | (1 << PSR_EXL))) // PSR exception mask
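+
+//
+// N.B. PSR_ENABLE_MASK selects the interrupt enable, KSU (mode), and EXL
+//      fields of the PSR; PSR_MASK clears only the KSU and EXL fields while
+//      preserving all other PSR bits.
+//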
+
+//
+// Define exception handler frame structure.
+//
+
+ .struct 0
+ .space 4 * 4 // argument save area
+HdRa: .space 4 // return address
+ .space 3 * 4 //
+HandlerFrameLength: // handler frame length
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KdpOweBreakpoint 1
+ .extern KeGdiFlushUserBatch 4
+ .extern KeNumberTbEntries 4
+ .extern PsWatchEnabled 1
+
+//
+// Define set of load/store instructions.
+//
+// This set has a one bit for each of the possible load/store instructions.
+//
+// These include: ldl, ldr, lb, lh, lwl, lw, lbu, lhu, lwr, lwu, sb, sh, swl,
+// sw, sdl, sdr, swr, ll, lwc1, lwc2, lld, ldc1, ldc2, ld, sc,
+// swc1, swc2, sdc, sdc1, sdc2, sd.
+//
+// N.B. The set is biased by a base of 0x20 which is the opcode for lb.
+//
+
+ .sdata
+ .align 3
+ .globl KiLoadInstructionSet
+KiLoadInstructionSet: // load instruction set
+ .word 0x0c000000 //
+ .word 0xf7f77fff //
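+
+//
+// N.B. A membership test against this set (sketch only) computes
+//      index = opcode - 0x20, selects word (index >> 5) of the table, and
+//      tests bit (index & 0x1f); a one bit indicates a load or store
+//      instruction.
+//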
+
+//
+// Define count of bad virtual address register cases.
+//
+
+#if DBG
+
+ .globl KiBadVaddrCount
+KiBadVaddrCount: // count of bad virtual
+ .word 0 //
+
+ .globl KiMismatchCount
+KiMismatchCount: // count of read miss address mismatches
+ .word 0 //
+
+#endif
+
+
+ SBTTL("System Startup")
+//++
+//
+// Routine Description:
+//
+// Control is transferred to this routine when the system is booted. Its
+// function is to transfer control to the real system startup routine.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiSystemStartup)
+
+ j KiInitializeSystem // initialize system
+
+ .end KiSystemStartup
+
+ SBTTL("TB Miss Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a TB miss on a reference
+// to any part of the 32-bit address space from kernel mode. Interrupts
+// are disabled when this routine is entered.
+//
+// The function of this routine is to load a pair of second level PTEs
+// from the current page table into the TB. The context register is
+// loaded by hardware with the virtual address of the PTE * 2. In addition,
+// the entryhi register is loaded with the virtual tag, such that the PTEs
+// can be loaded directly into the TB. The badvaddr register is loaded by
+// hardware with the virtual address of the fault and is saved in case the
+// page table page is not currently mapped by the TB.
+//
+// If a fault occurs when attempting to load the specified PTEs from the
+// current page table, then it is vectored through the general exception
+// vector at KSEG0_BASE + 0x180.
+//
+// This routine is copied to address KSEG0_BASE at system startup.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+// N.B. This routine saves the contents of the badvaddr register in k1
+// so that it can be used by the general exception vector routine
+// if an exception occurs while trying to load the first PTE from
+// memory.
+//
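+// N.B. The context register contains the virtual address of the first PTE of
+//      an even/odd pair multiplied by two; the code below shifts this value
+//      right by one to recover the PTE address and then loads the adjacent
+//      pair of PTEs into entrylo0 and entrylo1.
+//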
+//--
+
+ LEAF_ENTRY(KiTbMiss)
+
+//
+// The following code is required on 2.x R4000 chips to work around a
+// chip bug. The work around is not needed for 3.0 and later chips.
+//
+
+ START_REGION(KiTbMissStartAddress2.x)
+
+ .set noreorder
+ .set noat
+ nop // ****** r4000 errata ******
+ mfc0 k0,psr // ****** r4000 errata ******
+ mtc0 zero,psr // ****** r4000 errata ******
+ mtc0 k0,psr // ****** r4000 errata ******
+ nop // ****** r4000 errata ******
+ .set at
+ .set reorder
+
+ START_REGION(KiTbMissStartAddress3.x)
+
+ .set noreorder
+ .set noat
+
+//
+// The following code is required on all MP systems to work around a problem
+// where the hardware reports a TB miss even when the entry is really in the
+// TB.
+//
+
+#if defined(NT_UP)
+
+ mfc0 k0,context // get virtual address * 2 of PTE
+ mfc0 k1,badvaddr // get bad virtual address
+ sra k0,k0,1 // compute virtual address of PTE
+
+#else
+
+ tlbp // ****** r4400 errata ******
+ mfc0 k0,context // ****** r4400 errata ******
+ nop // ****** r4400 errata ******
+ mfc0 k1,index // ****** r4400 errata ******
+ sra k0,k0,1 // compute virtual address of PTE
+ bgez k1,20f // ****** r4400 errata ******
+ mfc0 k1,badvaddr // get bad virtual address
+
+#endif
+
+ mtc0 k0,taglo // set first level active flag
+ lw k1,0(k0) // get first PTE - may fault
+ lw k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+20: eret //
+ .set at
+ .set reorder
+
+ END_REGION(KiTbMissEndAddress3.x)
+
+//
+// The r10000 TB miss routine is different since the fine designers of the
+// chip didn't understand what the frame mask register was really for and
+// only masked PFN bits. Unfortunately, they didn't mask the UC bits, which
+// therefore must be masked manually.
+//
+
+ START_REGION(KiTbMissStartAddress9.x)
+
+ .set noreorder
+ .set noat
+ mfc0 k0,context // get virtual address * 2 of PTE
+ mfc0 k1,badvaddr // get bad virtual address
+ sra k0,k0,1 // compute virtual address of PTE
+ mtc0 k0,taglo // set first level active flag
+ lwu k1,0(k0) // get first PTE - may fault
+ lwu k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+20: eret //
+ .set at
+ .set reorder
+
+ END_REGION(KiTbMissEndAddress9.x)
+
+ .end KiTbMiss
+
+ SBTTL("XTB Miss Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a TB miss on a reference
+// to any part of the 64-bit address space from user mode. Interrupts
+// are disabled when this routine is entered.
+//
+// The function of this routine is to load a pair of second level PTEs
+// from the current page table into the TB. The context register is
+// loaded by hardware with the virtual address of the PTE * 2. In addition,
+// the entryhi register is loaded with the virtual tag, such that the PTEs
+// can be loaded directly into the TB. The badvaddr register is loaded by
+// hardware with the virtual address of the fault and is saved in case the
+// page table page is not currently mapped by the TB.
+//
+// If a fault occurs when attempting to load the specified PTEs from the
+// current page table, then it is vectored through the general exception
+// vector at KSEG0_BASE + 0x180.
+//
+// This routine is copied to address KSEG0_BASE + 0x80 at system startup.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+// N.B. This routine saves the contents of the badvaddr register in k1
+// so that it can be used by the general exception vector routine
+// if an exception occurs while trying to load the first PTE from
+// memory.
+//
+//--
+
+ LEAF_ENTRY(KiXTbMiss)
+
+//
+// The following code is required on 2.x R4000 chips to work around a
+// chip bug. The work around is not needed for 3.0 and later chips.
+//
+
+ START_REGION(KiXTbMissStartAddress2.x)
+
+ .set noreorder
+ .set noat
+ nop // ****** r4000 errata ******
+ mfc0 k0,psr // ****** r4000 errata ******
+ mtc0 zero,psr // ****** r4000 errata ******
+ mtc0 k0,psr // ****** r4000 errata ******
+ nop // ****** r4000 errata ******
+ .set at
+ .set reorder
+
+ START_REGION(KiXTbMissStartAddress3.x)
+
+ .set noreorder
+ .set noat
+
+//
+// The following code is required on all MP systems to work around a problem
+// where the hardware reports a TB miss even when the entry is really in the
+// TB.
+//
+
+#if defined(NT_UP)
+
+ mfc0 k0,context // get virtual address * 2 of PTE
+ dmfc0 k1,xcontext // get extended context register
+ sra k0,k0,1 // compute virtual address of PTE
+ dsrl k1,k1,22 // isolate bits 63:62 and 39:31 of address
+ and k1,k1,0x7ff // check if valid user address
+ beq zero,k1,5f // if eq, valid user address
+ xor k1,k1,0x7ff // check if valid kernel address
+ bne zero,k1,30f // if ne, invalid kernel address
+5: mfc0 k1,badvaddr // get bad virtual address
+
+#else
+
+//
+// ****** r4400 errata ******
+//
+
+ dmfc0 k1,xcontext // get extended context register
+ tlbp // probe TB for miss address
+ mfc0 k0,context // get virtual address * 2 of PTE
+ dsrl k1,k1,22 // isolate bits 63:62 and 39:31 of
+ and k1,k1,0x7ff // virtual address
+ beq zero,k1,5f // if eq, valid user address
+ xor k1,k1,0x7ff // check if valid kernel address
+ bne zero,k1,30f // if ne, invalid kernel address
+5: mfc0 k1,index // get index register
+ sra k0,k0,1 // compute virtual address of PTE
+ bgez k1,20f // if gez, address already in TB
+ mfc0 k1,badvaddr // get bad virtual address
+
+#endif
+
+ mtc0 k0,taglo // set first level active flag
+ lw k1,0(k0) // get first PTE - may fault
+ lw k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+20: eret //
+
+//
+// The user address is greater than 32-bits.
+//
+
+30: j KiInvalidUserAddress //
+ nop //
+ .set at
+ .set reorder
+
+ END_REGION(KiXTbMissEndAddress3.x)
+
+//
+// The r10000 TB miss routine is different since the fine designers of the
+// chip didn't understand what the frame mask register was really for and
+// only masked PFN bits. Unfortunately, they didn't mask the UC bits, which
+// therefore must be masked manually.
+//
+
+ START_REGION(KiXTbMissStartAddress9.x)
+
+ .set noreorder
+ .set noat
+ mfc0 k0,context // get virtual address * 2 of PTE
+ dmfc0 k1,xcontext // get extended context register
+ sra k0,k0,1 // compute virtual address of PTE
+ dsrl k1,k1,22 // isolate bits 63:62 and 43:31 of
+ and k1,k1,0x7ff // check if valid user address
+ beq zero,k1,5f // if eq, valid user address
+ xor k1,k1,0x7ff // check if valid kernel address
+ bne zero,k1,30f // if ne, invalid kernel address
+5: mfc0 k1,badvaddr // get bad virtual address
+ mtc0 k0,taglo // set first level active flag
+ lwu k1,0(k0) // get first PTE - may fault
+ lwu k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+ eret //
+
+//
+// The user address is greater than 32-bits.
+//
+
+30: j KiInvalidUserAddress //
+ nop //
+ .set at
+ .set reorder
+
+ END_REGION(KiXTbMissEndAddress9.x)
+
+ .end KiXTbMiss
+
+ SBTTL("Cache Parity Error Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a cache parity error and runs
+// uncached. Its function is to remap the PCR uncached and call the cache
+// parity routine to save all pertinent cache error information, establish
+// an error stack frame, and call the system cache parity error routine.
+//
+// N.B. The cache parity error routine runs uncached and must be
+// extremely careful not to access any cached addresses.
+//
+// N.B. If a second exception occurs while cache error handling is in
+// progress, then a soft reset is performed by the hardware.
+//
+// N.B. While ERL is set in the PSR, the user address space is replaced
+// by an uncached, unmapped, address that corresponds to physical
+// memory.
+//
+// N.B. There is room for up to 32 instructions in the vectored cache
+// parity error routine.
+//
+// This routine is copied to address KSEG1_BASE + 0x100 at system startup.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiCacheError)
+
+ START_REGION(KiCacheErrorStartAddress)
+
+ .set noreorder
+ .set noat
+ nop // fill
+ nop // fill
+ la k0,CACHE_ERROR_VECTOR // get cache error vector address
+ lw k0,0(k0) // get cache error routine address
+ nop // fill
+ j k0 // dispatch to cache error routine
+ nop // fill
+ .set at
+ .set reorder
+
+ END_REGION(KiCacheErrorEndAddress)
+
+ .end KiCacheError
+
+ SBTTL("General Exception Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a general exception. The reason
+// for the exception is contained in the cause register. When this routine
+// is entered, interrupts are disabled.
+//
+// The primary function of this routine is to route the exception to the
+// appropriate exception handling routine. If the cause of the exception
+// is a read or write TB miss and the access can be resolved, then this
+// routine performs the necessary processing and returns from the exception.
+// If the exception cannot be resolved, then it is dispatched to the proper
+// routine.
+//
+// This routine is copied to address KSEG0_BASE + 0x180 at system startup.
+//
+// N.B. This routine is very carefully written to not destroy k1 until
+// it has been determined that the exception did not occur in the
+// user TB miss vector routine.
+//
+// Arguments:
+//
+// k1 - Supplies the bad virtual address if the exception occurred from
+// the TB miss vector routine while attempting to load a PTE into the
+// TB.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiGeneralException)
+
+ START_REGION(KiGeneralExceptionStartAddress)
+
+ .set noreorder
+ .set noat
+ mfc0 k0,cause // get cause of exception
+ mtc0 k1,lladdr // save possible bad virtual address
+ li k1,XCODE_READ_MISS // get exception code for read miss
+ and k0,k0,R4000_MISS_MASK // isolate exception code
+
+//
+// The read and write miss codes differ by exactly one bit such that they
+// can be tested for by a single mask operation followed by a test for the
+// read miss code.
+//
+
+ bne k0,k1,20f // if ne, not read or write miss
+ mfc0 k1,badvaddr // get the bad virtual address
+
+//
+// The exception is either a read or a write to an address that is not mapped
+// by the TB, or a reference to an invalid entry that is in the TB. Attempt to
+// resolve the reference by loading a pair of a PDEs from the page directory
+// page.
+//
+// There are four cases to be considered:
+//
+// 1. The address specified by the badvaddr register is not in the TB.
+//
+// For this case, a pair of PDEs are loaded into the TB from the
+// page directory page and execution is resumed.
+//
+// 2. The address specified by the badvaddr register is in the TB and the
+// address is not the address of a page table page.
+//
+// For this case, an invalid translation has occurred, but since it is
+// not the address of a page table page, it could not have come
+// from the TB Miss handler. The badvaddr register contains the virtual
+// address of the exception and is passed to the appropriate exception
+// routine.
+//
+// 3. The address specified by the badvaddr register is in the TB, the
+// address is the address of a page table page, and the first level
+// TB miss routine was active when the current TB miss occurred.
+//
+// For this case, an invalid translation has occurred, but since it is
+// a page table page and the first level TB miss routine active flag
+// is set, the exception occurred in the TB Miss handler. The
+// integer register k1 contains the virtual address of the exception
+// as saved by the first level TB fill handler and is passed to the
+// appropriate exception routine.
+//
+// N.B. The virtual address that is passed to the exception routine is
+// the exact virtual address that caused the fault and is obtained
+// from integer register k1.
+//
+// 4. The address specified by the badvaddr register is in the TB, the
+// address is the address of a page table page, and the first level
+// TB miss routine was not active when the current TB miss occurred.
+//
+// For this case, an invalid translation has occurred, but since it is
+// a page table page and the first level TB miss routine active flag
+// is clear, the exception must have occurred as part of a probe
+// operation or is a page fault to an invalid page.
+//
+// N.B. The virtual address that is passed to the exception routine is
+// the exact virtual address that caused the fault and is obtained
+// from the badvaddr register.
+//
+
+ tlbp // probe TB for the faulting address
+ nop // 2 cycle hazard
+ nop //
+ mfc0 k1,index // read result of probe
+ mfc0 k0,context // get virtual address * 2 of PDE
+ bgez k1,10f // if gez, entry is in TB
+ sra k0,k0,1 // compute virtual address of PDE
+
+//
+// Case 1 - The entry is not in the TB.
+//
+// The TB miss is a reference to a page table page and a pair of PDEs are
+// loaded into the TB from the page directory page and execution is continued.
+//
+
+ lw k1,4(k0) // get second PDE value
+ lw k0,0(k0) // get first PDE value
+ mtc0 k1,entrylo1 // set second PTE value
+ mtc0 k0,entrylo0 // set first PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,5f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+5: //
+
+#endif
+
+ nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+
+#if DBG
+
+ lw k0,KiPcr + PcPrcb(zero) // get processor block address
+ nop // fill
+ lw k1,PbSecondLevelTbFills(k0) // increment number of second level
+ nop // fill
+ addu k1,k1,1 // TB fills
+ sw k1,PbSecondLevelTbFills(k0) //
+
+#endif
+
+ eret //
+ nop // errata
+ nop //
+ nop //
+ eret //
+
+//
+// Case 2, 3, or 4 - The entry is in the TB.
+//
+// Check for one of the three remaining cases.
+//
+
+10: mfc0 k1,badvaddr // get bad virtual address
+ mfc0 k0,taglo // get first level flag
+ srl k1,k1,PDI_SHIFT // isolate page directory index
+ xor k1,k1,PDE_BASE >> PDI_SHIFT // check if page table reference
+ bne zero,k1,20f // if ne, not a page table page
+ mfc0 k1,badvaddr // get bad virtual address
+
+//
+// Case 2 or 3 - The bad virtual address is the address of a page table page.
+//
+// Check for one of the two remaining cases.
+//
+
+ beq zero,k0,20f // if eq, not first level miss
+ nop // fill
+ mfc0 k1,lladdr // get actual bad virtual address
+
+//
+// Save bad virtual address in case it is needed by the exception handling
+// routine.
+//
+
+20: mfc0 k0,epc // get exception PC
+ mtc0 zero,taglo // clear first level miss flag
+ sd t7,KiPcr + PcSavedT7(zero) // save integer registers t7 - t9
+ sd t8,KiPcr + PcSavedT8(zero) //
+ sd t9,KiPcr + PcSavedT9(zero) //
+ sw k0,KiPcr + PcSavedEpc(zero) // save exception PC
+ sw k1,KiPcr + PcBadVaddr(zero) // save bad virtual address
+
+//
+// The bad virtual address is saved in the PCR in case it is needed by the
+// respective dispatch routine.
+//
+// N.B. EXL must be cleared in the current PSR so switching the stack
+// can occur with TB Misses enabled.
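+//
+// N.B. Loading the PSR with only the coprocessor 1 enable bit set clears EXL
+//      and KSU and leaves the interrupt enables zero, so execution continues
+//      in kernel mode with interrupts disabled but with TB miss exceptions
+//      enabled.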
+//
+
+ mfc0 t9,psr // get current processor status
+ li t8,1 << PSR_CU1 // set coprocessor 1 enable bit
+ mfc0 t7,cause // get cause of exception
+ mtc0 t8,psr // clear EXL and disable interrupts
+ lw k1,KiPcr + PcInitialStack(zero) // get initial kernel stack
+ and t8,t9,1 << PSR_PMODE // isolate previous processor mode
+ bnel zero,t8,30f // if ne, previous mode was user
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// If the kernel stack has overflowed, then a switch to the panic stack is
+// performed and the exception/ code is set to cause a bug check.
+//
+
+ lw k1,KiPcr + PcStackLimit(zero) // get current stack limit
+ subu t8,sp,TrapFrameLength // allocate trap frame
+ sltu k1,t8,k1 // check for stack overflow
+ beql zero,k1,30f // if eq, no stack overflow
+ nop // fill
+
+//
+// The kernel stack has overflowed. Switch to the panic stack and
+// cause a bug check to occur by setting the exception cause value to the
+// panic code.
+//
+
+ lw t7,KiPcr + PcInitialStack(zero) // ***** temp ****
+ lw t8,KiPcr + PcStackLimit(zero) // ***** temp ****
+ sw t7,KiPcr + PcSystemReserved(zero) // **** temp ****
+ sw t8,KiPcr + PcSystemReserved + 4(zero) // **** temp ****
+ lw k1,KiPcr + PcPanicStack(zero) // get address of panic stack
+ li t7,XCODE_PANIC // set cause of exception to panic
+ sw k1,KiPcr + PcInitialStack(zero) // reset initial stack pointer
+ subu t8,k1,KERNEL_STACK_SIZE // compute and set stack limit
+ sw t8,KiPcr + PcStackLimit(zero) //
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// Allocate a trap frame, save partial context, and dispatch to the appropriate
+// exception handling routine.
+//
+// N.B. At this point:
+//
+// t7 contains the cause of the exception,
+// t8 contains the new stack pointer, and
+// t9 contains the previous processor state.
+//
+// Since the kernel stack is not wired into the TB, a TB miss can occur
+// during the switch of the stack and the subsequent storing of context.
+//
+//
+
+30: sd sp,TrXIntSp(t8) // save integer register sp
+ move sp,t8 // set new stack pointer
+ cfc1 t8,fsr // get floating status register
+ sd gp,TrXIntGp(sp) // save integer register gp
+ sd s8,TrXIntS8(sp) // save integer register s8
+ sw t8,TrFsr(sp) // save current FSR
+ sw t9,TrPsr(sp) // save processor state
+ sd ra,TrXIntRa(sp) // save integer register ra
+ lw gp,KiPcr + PcSystemGp(zero) // set system general pointer
+ and t8,t7,R4000_XCODE_MASK // isolate exception code
+
+//
+// Check for system call exception.
+//
+// N.B. While k1 is being used a TB miss cannot be tolerated.
+//
+
+ xor k1,t8,XCODE_SYSTEM_CALL // check for system call exception
+ bne zero,k1,40f // if ne, not system call exception
+ move s8,sp // set address of trap frame
+
+//
+// Get the address of the current thread and form the next PSR value.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ li t8,PSR_MASK // get the PSR mask
+ and t8,t9,t8 // clear EXL and mode in PSR
+ sw ra,TrFir(s8) // set real continuation address
+ sb zero,TrSavedFlag(s8) // clear s-registers saved flag
+ j KiSystemServiceNormal // execute normal system service
+ mtc0 t8,psr // enable interrupts
+
+//
+// Save the volatile integer register state.
+//
+
+40: sd AT,TrXIntAt(s8) // save assembler temporary register
+ sd v0,TrXIntV0(s8) // save integer register v0
+ sd v1,TrXIntV1(s8) // save integer register v1
+ sd a0,TrXIntA0(s8) // save integer registers a0 - a3
+ sd a1,TrXIntA1(s8) //
+ sd a2,TrXIntA2(s8) //
+ sd a3,TrXIntA3(s8) //
+ sd t0,TrXIntT0(s8) // save integer registers t0 - t2
+ sd t1,TrXIntT1(s8) //
+ sd t2,TrXIntT2(s8) //
+ ld t0,KiPcr + PcSavedT7(zero) // get saved registers t7 - t9
+ ld t1,KiPcr + PcSavedT8(zero) //
+ ld t2,KiPcr + PcSavedT9(zero) //
+ sd t3,TrXIntT3(s8) // save integer register t3 - t7
+ sd t4,TrXIntT4(s8) //
+ sd t5,TrXIntT5(s8) //
+ sd t6,TrXIntT6(s8) //
+ sd t0,TrXIntT7(s8) //
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ sd t1,TrXIntT8(s8) // save integer registers t8 - t9
+ sd t2,TrXIntT9(s8) //
+ mflo t3 // get multiplier/quotient lo and hi
+ mfhi t4 //
+ lw t5,KiPcr + PcXcodeDispatch(t8) // get exception routine address
+ xor t6,t8,XCODE_INTERRUPT // check for interrupt exception
+ lw t8,KiPcr + PcSavedEpc(zero) // get exception PC
+ sd t3,TrXIntLo(s8) // save multiplier/quotient lo and hi
+ sd t4,TrXIntHi(s8) //
+ beq zero,t6,50f // if eq, interrupt exception
+ sw t8,TrFir(s8) // save exception PC
+
+//
+// Save the volatile floating register state.
+//
+
+ sdc1 f0,TrFltF0(s8) // save floating register f0 - f19
+ sdc1 f2,TrFltF2(s8) //
+ sdc1 f4,TrFltF4(s8) //
+ sdc1 f6,TrFltF6(s8) //
+ sdc1 f8,TrFltF8(s8) //
+ sdc1 f10,TrFltF10(s8) //
+ sdc1 f12,TrFltF12(s8) //
+ sdc1 f14,TrFltF14(s8) //
+ sdc1 f16,TrFltF16(s8) //
+ sdc1 f18,TrFltF18(s8) //
+ srl t6,t9,PSR_PMODE // isolate previous mode
+ and t6,t6,1 //
+ li t0,PSR_MASK // clear EXL and mode in PSR
+ and t9,t9,t0 //
+
+//
+// Dispatch to exception handling routine with:
+//
+// t5 - Address of the exception handling routine.
+// t6 - If not an interrupt, then the previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - If not an interrupt, then the new PSR with EXL and mode clear.
+// Otherwise the previous PSR with EXL and mode set.
+//
+
+50: li t4,TRUE // get saved s-registers flag
+ bltzl t7,60f // if ltz, exception in delay slot
+ addu t8,t8,4 // compute address of exception
+60: j t5 // dispatch to exception routine
+ sb t4,TrSavedFlag(s8) // set s-registers saved flag
+ .set at
+ .set reorder
+
+ END_REGION(KiGeneralExceptionEndAddress)
+
+ .end KiGeneralException
+
+ SBTTL("Invalid User Address")
+//++
+//
+// Routine Description:
+//
+// This routine is entered when an invalid user address is encountered
+// in the XTB Miss handler. When this routine is entered, interrupts
+// are disabled.
+//
+// The primary function of this routine is to route the exception to the
+// invalid user 64-bit address exception handling routine.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiInvalidUserAddress)
+
+ .set noreorder
+ .set noat
+ dmfc0 k1,badvaddr // get the bad virtual address
+ dmfc0 k0,epc // get exception PC
+ sd k1,KiPcr + PcSystemReserved(zero) // **** temp ****
+ dmfc0 k1,xcontext // **** temp ****
+ sd k0,KiPcr + PcSystemReserved + 8(zero) // **** temp ****
+ sd k1,KiPcr + PcSystemReserved + 16(zero) // **** temp ****
+ ld k1,KiPcr + PcSystemReserved(zero) // **** temp ****
+ sd t7,KiPcr + PcSavedT7(zero) // save integer registers t7 - t9
+ sd t8,KiPcr + PcSavedT8(zero) //
+ sd t9,KiPcr + PcSavedT9(zero) //
+ sw k0,KiPcr + PcSavedEpc(zero) // save exception PC
+ sw k1,KiPcr + PcBadVaddr(zero) // save bad virtual address
+
+//
+// The bad virtual address is saved in the PCR in case it is needed by the
+// respective dispatch routine.
+//
+// N.B. EXL must be cleared in the current PSR so switching the stack
+// can occur with TB Misses enabled.
+//
+
+ mfc0 t9,psr // get current processor status
+ li t8,1 << PSR_CU1 // set coprocessor 1 enable bit
+ mfc0 t7,cause // get cause of exception
+ mtc0 t8,psr // clear EXL and disable interrupts
+ lw k1,KiPcr + PcInitialStack(zero) // get initial kernel stack
+ and t8,t9,1 << PSR_PMODE // isolate previous processor mode
+ bnel zero,t8,10f // if ne, previous mode was user
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// If the kernel stack has overflowed, then a switch to the panic stack is
+// performed and the exception code is set to cause a bug check.
+//
+
+ lw k1,KiPcr + PcStackLimit(zero) // get current stack limit
+ subu t8,sp,TrapFrameLength // allocate trap frame
+ sltu k1,t8,k1 // check for stack overflow
+ beql zero,k1,10f // if eq, no stack overflow
+ nop // fill
+
+//
+// The kernel stack has overflowed. Switch to the panic stack and
+// cause a bug check to occur by setting the exception cause value to the
+// panic code.
+//
+
+ lw k1,KiPcr + PcPanicStack(zero) // get address of panic stack
+ li t7,XCODE_PANIC // set cause of exception to panic
+ sw k1,KiPcr + PcInitialStack(zero) // reset initial stack pointer
+ subu t8,k1,KERNEL_STACK_SIZE // compute and set stack limit
+ sw t8,KiPcr + PcStackLimit(zero) //
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// Allocate a trap frame, save partial context, and dispatch to the appropriate
+// exception handling routine.
+//
+// N.B. At this point:
+//
+// t7 contains the cause of the exception,
+// t8 contains the new stack pointer, and
+// t9 contains the previous processor state.
+//
+// Since the kernel stack is not wired into the TB, a TB miss can occur
+// during the switch of the stack and the subsequent storing of context.
+//
+//
+
+10: sd sp,TrXIntSp(t8) // save integer register sp
+ move sp,t8 // set new stack pointer
+ cfc1 t8,fsr // get floating status register
+ sd gp,TrXIntGp(sp) // save integer register gp
+ sd s8,TrXIntS8(sp) // save integer register s8
+ sw t8,TrFsr(sp) // save current FSR
+ sw t9,TrPsr(sp) // save processor state
+ sd ra,TrXIntRa(sp) // save integer register ra
+ lw gp,KiPcr + PcSystemGp(zero) // set system general pointer
+ and t8,t7,R4000_XCODE_MASK // isolate exception code
+
+//
+// Check for panic stack switch.
+//
+// N.B. While k1 is being used a TB miss cannot be tolerated.
+//
+
+ xor k1,t8,XCODE_PANIC // check for panic stack switch
+ bnel zero,k1,20f // if ne, invalid user address
+ li t8,XCODE_INVALID_USER_ADDRESS // set exception dispatch code
+
+//
+// Save the volatile integer register state.
+//
+
+20: move s8,sp // set address of trap frame
+ sd AT,TrXIntAt(s8) // save assembler temporary register
+ sd v0,TrXIntV0(s8) // save integer register v0
+ sd v1,TrXIntV1(s8) // save integer register v1
+ sd a0,TrXIntA0(s8) // save integer registers a0 - a3
+ sd a1,TrXIntA1(s8) //
+ sd a2,TrXIntA2(s8) //
+ sd a3,TrXIntA3(s8) //
+ sd t0,TrXIntT0(s8) // save integer registers t0 - t2
+ sd t1,TrXIntT1(s8) //
+ sd t2,TrXIntT2(s8) //
+ ld t0,KiPcr + PcSavedT7(zero) // get saved registers t7 - t9
+ ld t1,KiPcr + PcSavedT8(zero) //
+ ld t2,KiPcr + PcSavedT9(zero) //
+ sd t3,TrXIntT3(s8) // save integer register t3 - t7
+ sd t4,TrXIntT4(s8) //
+ sd t5,TrXIntT5(s8) //
+ sd t6,TrXIntT6(s8) //
+ sd t0,TrXIntT7(s8) //
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ sd t1,TrXIntT8(s8) // save integer registers t8 - t9
+ sd t2,TrXIntT9(s8) //
+ mflo t3 // get multiplier/quotient lo and hi
+ mfhi t4 //
+ lw t5,KiPcr + PcXcodeDispatch(t8) // get exception routine address
+ lw t8,KiPcr + PcSavedEpc(zero) // get exception PC
+ sd t3,TrXIntLo(s8) // save multiplier/quotient lo and hi
+ sd t4,TrXIntHi(s8) //
+ sw t8,TrFir(s8) // save exception PC
+
+//
+// Save the volatile floating register state.
+//
+
+ sdc1 f0,TrFltF0(s8) // save floating register f0 - f19
+ sdc1 f2,TrFltF2(s8) //
+ sdc1 f4,TrFltF4(s8) //
+ sdc1 f6,TrFltF6(s8) //
+ sdc1 f8,TrFltF8(s8) //
+ sdc1 f10,TrFltF10(s8) //
+ sdc1 f12,TrFltF12(s8) //
+ sdc1 f14,TrFltF14(s8) //
+ sdc1 f16,TrFltF16(s8) //
+ sdc1 f18,TrFltF18(s8) //
+ srl t6,t9,PSR_PMODE // isolate previous mode
+ and t6,t6,1 //
+ li t0,PSR_MASK // clear EXL and mode in PSR
+ and t9,t9,t0 //
+
+//
+// Dispatch to exception handling routine with:
+//
+// t5 - Address of the exception handling routine.
+// t6 - Previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+//
+
+ li t4,TRUE // get saved s-registers flag
+ bltzl t7,30f // if ltz, exception in delay slot
+ addu t8,t8,4 // compute address of exception
+30: j t5 // dispatch to exception routine
+ sb t4,TrSavedFlag(s8) // set s-registers saved flag
+ .set at
+ .set reorder
+
+ .end KiInvalidUserAddress
+
+ SBTTL("Address Error Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiAddressErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a read or write address error exception
+// code is read from the cause register. When this routine is entered,
+// interrupts are disabled.
+//
+// The function of this routine is to raise a data misalignment exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiReadAddressErrorException)
+
+ li t0,0 // set read indicator
+ b 10f // join common code
+
+ ALTERNATE_ENTRY(KiWriteAddressErrorException)
+
+ li t0,1 // set write indicator
+
+//
+// Common code for read and write address error exceptions.
+//
+
+10: addu a0,s8,TrExceptionRecord // compute exception record address
+ lw t1,KiPcr + PcBadVaddr(zero) // get bad virtual address
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw t0,ErExceptionInformation(a0) // save load/store indicator
+ sw t1,ErExceptionInformation + 4(a0) // save bad virtual address
+ sw t8,ErExceptionAddress(a0) // set exception address
+
+//
+// If the faulting instruction address is the same as the faulting virtual
+// address, then the fault is an instruction misalignment exception. Otherwise,
+// the exception is a data misalignment.
+//
+
+ li t3,STATUS_INSTRUCTION_MISALIGNMENT // set exception code
+ beq t1,t8,20f // if eq, instruction misalignment
+ li t3,STATUS_DATATYPE_MISALIGNMENT // set exception code
+
+//
+// If the faulting address is a kernel address and the previous mode was
+// user, then the address error is really an access violation since an
+// attempt was made to access kernel memory from user mode.
+//
+
+20: bgez t1,30f // if gez, KUSEG address
+ beq zero,a3,30f // if eq, previous mode was kernel
+ li t3,STATUS_ACCESS_VIOLATION // set exception code
+30: sw t3,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ li t0,2 // set number of exception parameters
+ sw t0,ErNumberParameters(a0) //
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiAddressErrorDispatch
+
+ SBTTL("Breakpoint Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiBreakpointDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a breakpoint exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to raise a breakpoint exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiBreakpointException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+ lw t0,0(t8) // get breakpoint instruction
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw t0,ErExceptionInformation(a0) // save breakpoint instruction
+ li t1,STATUS_BREAKPOINT // set exception code
+ sw t1,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+
+ ALTERNATE_ENTRY(KiKernelBreakpoint)
+
+ break KERNEL_BREAKPOINT // kernel breakpoint instruction
+
+ .end KiBreakpointDispatch
+
+ SBTTL("Bug Check Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiBugCheckDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when the following codes are read from the cause
+// register:
+//
+// Data coherency,
+// Instruction coherency,
+// Invalid exception, and
+// Panic exception.
+//
+// The function of this routine is to cause a bug check with the appropriate
+// code.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiDataCoherencyException)
+
+ li a0,DATA_COHERENCY_EXCEPTION // set bug check code
+ b 10f // finish in common code
+
+ ALTERNATE_ENTRY(KiInstructionCoherencyException)
+
+ li a0,INSTRUCTION_COHERENCY_EXCEPTION // set bug check code
+ b 10f // finish in common code
+
+ ALTERNATE_ENTRY(KiInvalidException)
+
+ li a0,TRAP_CAUSE_UNKNOWN // set bug check code
+ b 10f // finish in common code
+
+ ALTERNATE_ENTRY(KiPanicException)
+
+ li a0,PANIC_STACK_SWITCH // set bug check code
+10: lw a1,KiPcr + PcBadVaddr(zero) // get bad virtual address
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a2,t8 // set address of faulting instruction
+ .set at
+ .set reorder
+
+ move a3,t6 // set previous mode
+ jal KeBugCheckEx // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiBugCheckDispatch
+
+ SBTTL("Coprocessor Unusable Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiCoprocessorUnusableDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a coprocessor unusable exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to raise an illegal instruction exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiCoprocessorUnusableException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_ILLEGAL_INSTRUCTION // set exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiCoprocessorUnusableDispatch
+
+ SBTTL("Data Bus Error Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiDataBusErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a data bus error exception code is read from
+// the cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to capture the current machine state and
+// call the exception dispatcher which will provide special case processing
+// of this exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiDataBusErrorException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,DATA_BUS_ERROR | 0xdfff0000 // set special exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiDataBusErrorDispatch
+
+ SBTTL("Floating Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiFloatDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a floating exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to raise a floating exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiFloatingException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ cfc1 t0,fsr // get current floating status
+ li t1,~(0x3f << FSR_XI) // get exception mask value
+ and t1,t0,t1 // clear exception bits
+ ctc1 t1,fsr // set new floating status
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_FLOAT_STACK_CHECK // set floating escape code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiFloatDispatch
+
+ SBTTL("Illegal Instruction Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiIllegalInstructionDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an illegal instruction exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to raise an illegal instruction exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiIllegalInstructionException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_ILLEGAL_INSTRUCTION // set exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiIllegalInstructionDispatch
+
+ SBTTL("Instruction Bus Error Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiInstructionBusErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an instruction bus error exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to capture the current machine state and
+// call the exception dispatcher which will provide special case processing
+// of this exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiInstructionBusErrorException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,INSTRUCTION_BUS_ERROR | 0xdfff0000 // set special exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiInstructionBusErrorDispatch
+
+ SBTTL("Integer Overflow Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiIntegerOverflowDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an integer overflow exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to raise an integer overflow exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiIntegerOverflowException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_INTEGER_OVERFLOW // set exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiIntegerOverflowDispatch
+
+ SBTTL("Interrupt Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ EXCEPTION_HANDLER(KiInterruptHandler)
+
+ NESTED_ENTRY(KiInterruptDistribution, TrapFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an interrupt exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to determine the highest priority pending
+// interrupt, raise the IRQL to the level of the highest interrupt, and then
+// dispatch the interrupt to the proper service routine.
+//
+// Arguments:
+//
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The old PSR with EXL and mode set.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiInterruptException)
+
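+//
+// N.B. The interrupt pending bits from the cause register are used to index
+//      the IRQL mask table in the PCR, which yields the IRQL of the highest
+//      priority pending interrupt. That IRQL in turn indexes the IRQL table
+//      to obtain the corresponding PSR interrupt enable mask.
+//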
+ .set noreorder
+ .set noat
+ lbu t1,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ srl t2,t7,CAUSE_INTPEND + 4 // isolate high interrupt pending bits
+ and t2,t2,0xf //
+ bne zero,t2,10f // if ne, use high bits as index
+ sb t1,TrOldIrql(s8) // save old IRQL
+ srl t2,t7,CAUSE_INTPEND // isolate low interrupt pending bits
+ and t2,t2,0xf //
+ addu t2,t2,16 // bias low bits index by 16
+10: lbu t0,KiPcr + PcIrqlMask(t2) // get new IRQL from mask table
+ li t2,PSR_ENABLE_MASK // get PSR enable mask
+ nor t2,t2,zero // complement interrupt enable mask
+ lbu t3,KiPcr + PcIrqlTable(t0) // get new mask from IRQL table
+
+//
+// It is possible that the interrupt was asserted and then deasserted before
+// the interrupt dispatch code executed. Therefore, there may be an interrupt
+// pending at the current or a lower level. This interrupt is not yet valid
+// and cannot be processed until the IRQL is lowered.
+//
+
+ sltu t4,t1,t0 // check if old IRQL less than new
+ beq zero,t4,40f // if eq, no valid interrupt pending
+ subu t4,t0,DISPATCH_LEVEL + 1 // check if above dispatch level
+
+//
+// If the interrupt level is above dispatch level, then execute the service
+// routine on the interrupt stack. Otherwise, execute the service on the
+// current stack.
+//
+
+ bgezal t4,60f // if gez, above dispatch level
+ sll t3,t3,PSR_INTMASK // shift table entry into position
+
+//
+// N.B. The following code is duplicated on the control path where the stack
+// is switched to the interrupt stack. This is done to avoid branching
+// logic.
+//
+
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+ or t9,t9,t3 // merge new interrupt enable mask
+ or t9,t9,1 << PSR_IE // set interrupt enable
+ mtc0 t9,psr // enable interrupts
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ .set at
+ .set reorder
+
+ sll t0,t0,2 // compute offset in vector table
+ lw a0,KiPcr + PcInterruptRoutine(t0) // get service routine address
+
+#if DBG
+
+ sw a0,TrExceptionRecord(s8) // save service routine address
+
+#endif
+
+//
+// Increment interrupt count and call interrupt service routine.
+//
+// N.B. It is known that the interrupt is either an APC interrupt or
+// a dispatch interrupt, and therefore, the volatile floating
+// state is saved and restored to avoid saves and restores in
+// both interrupt dispatchers.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+ lw t2,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t3,PbInterruptCount(t2) // increment the count of interrupts
+ addu t3,t3,1 //
+ sw t3,PbInterruptCount(t2) // store result
+ jal a0 // call interrupt service routine
+
+ RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+//
+// Common exit point for special dispatch and APC interrupt bypass.
+//
+// Restore state and exit interrupt.
+//
+
+ ALTERNATE_ENTRY(KiInterruptExit)
+
+40: lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazard
+ ctc1 t1,fsr // restore floating status
+ lw t0,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+ lw t2,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu t3,TrOldIrql(s8) // get old IRQL
+ and t4,t0,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t4,50f // if eq, previous mode was kernel
+ sb t3,KiPcr + PcCurrentIrql(zero) // restore old IRQL
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
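+// The user APC pending flag (0 or 1) is shifted into the software interrupt
+// pending bit of the cause register that corresponds to APC_LEVEL, so the
+// interrupt is requested as soon as the IRQL permits it to be taken.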
+//
+
+ lbu t3,ThApcState + AsUserApcPending(t2) // get user APC pending
+ sb zero,ThAlerted(t2) // clear kernel mode alerted
+ mfc0 t4,cause // get exception cause register
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t4,t4,t3 // merge possible APC interrupt request
+ mtc0 t4,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB miss
+// is not possible, then restore the volatile register state.
+//
+
+50: sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiTrapExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+//
+// Switch to interrupt stack.
+//
+
+60: j KiSwitchStacks //
+
+//
+// Increment number of bypassed dispatch interrupts and check if an APC
+// interrupt is pending and the old IRQL is zero.
+//
+
+ ALTERNATE_ENTRY(KiContinueInterrupt)
+
+ .set noreorder
+ .set noat
+ lw t7,KiPcr + PcPrcb(zero) // get current PRCB
+ li t1,1 << PSR_CU1 // get coprocessor 1 enable bit
+ mfc0 t9,psr // get current PSR
+ mtc0 t1,psr // disable interrupts - 3 cycle hazard
+ lw t1,PbDpcBypassCount(t7) // increment the DPC bypass count
+ li t2,PSR_ENABLE_MASK // get PSR enable mask
+ lbu t8,TrOldIrql(s8) // get old IRQL
+ mfc0 t6,cause // get exception cause register
+ addu t1,t1,1 //
+ sw t1,PbDpcBypassCount(t7) // store result
+ and t5,t6,APC_INTERRUPT // check for an APC interrupt
+ beq zero,t5,70f // if eq, no APC interrupt
+ li t0,APC_LEVEL // set new IRQL to APC_LEVEL
+ bne zero,t8,70f // if ne, APC interrupts blocked
+ move a0,zero // set previous mode to kernel
+
+//
+// An APC interrupt is pending.
+//
+
+ lbu t3,KiPcr + PcIrqlTable(t0) // get new mask from IRQL table
+ nor t2,t2,zero // complement interrupt enable mask
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+ sll t3,t3,PSR_INTMASK // shift table entry into position
+ or t9,t9,t3 // merge new interrupt enable mask
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ and t6,t6,DISPATCH_INTERRUPT // clear APC interrupt pending
+ mtc0 t6,cause //
+ mtc0 t9,psr // enable interrupts
+ .set at
+ .set reorder
+
+ lw t1,PbApcBypassCount(t7) // increment the APC bypass count
+ addu t1,t1,1 //
+ sw t1,PbApcBypassCount(t7) //
+ move a1,zero // set exception frame address
+ move a2,zero // set trap frame address
+ jal KiDeliverApc // deliver kernel mode APC
+
+70: RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+ j KiInterruptExit //
+
+ .end KiInterruptDistribution
+
+ SBTTL("Interrupt Stack Switch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument register area
+ .space 2 * 4 // fill
+SwSp: .space 4 // saved stack pointer
+SwRa: .space 4 // saved return address
+SwFrameLength: // length of stack frame
+
+ EXCEPTION_HANDLER(KiInterruptHandler)
+
+ NESTED_ENTRY(KiInterruptStackSwitch, SwFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sw sp,SwSp(sp) // save stack pointer
+ sw ra,SwRa(sp) // save return address
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//
+// The interrupt level is above dispatch level. Execute the interrupt
+// service routine on the interrupt stack.
+//
+// N.B. The following code is duplicated on the control path where the stack
+// is not switched to the interrupt stack. This is done to avoid branching
+// logic.
+//
+
+
+ ALTERNATE_ENTRY(KiSwitchStacks)
+
+ .set noreorder
+ .set noat
+ lw t4,KiPcr + PcOnInterruptStack(zero) // get stack indicator
+ sw sp,KiPcr + PcOnInterruptStack(zero) // set new stack indicator
+ sw t4,TrOnInterruptStack(s8) // save previous stack indicator
+ move t5,sp // save current stack pointer
+ bne zero,t4,10f // if ne, already on interrupt stack
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+
+//
+// Switch to the interrupt stack.
+//
+
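+//
+// N.B. The old initial stack address and stack limit are saved in the PCR so
+//      they can be restored when control unwinds from the interrupt stack on
+//      the exit path below.
+//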
+ lw t6,KiPcr + PcInitialStack(zero) // get old initial stack address
+ lw t7,KiPcr + PcStackLimit(zero) // and stack limit
+ lw sp,KiPcr + PcInterruptStack(zero) // set interrupt stack address
+ sw t6,KiPcr + PcSavedInitialStack(zero) // save old stack address
+ sw t7,KiPcr + PcSavedStackLimit(zero) // and stack limit
+ sw sp,KiPcr + PcInitialStack(zero) // set new initial stack address
+ subu t4,sp,KERNEL_STACK_SIZE // and stack limit
+ sw t4,KiPcr + PcStackLimit(zero) //
+10: subu sp,sp,SwFrameLength // allocate stack frame
+ sw t5,SwSp(sp) // save previous stack pointer
+ sw ra,SwRa(sp) // save return address
+ or t9,t9,t3 // merge new interrupt enable mask
+ or t9,t9,1 << PSR_IE // set interrupt enable
+ mtc0 t9,psr // enable interrupts
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ .set at
+ .set reorder
+
+ sll t0,t0,2 // compute offset in vector table
+ lw a0,KiPcr + PcInterruptRoutine(t0) // get service routine address
+
+#if DBG
+
+ sw a0,TrExceptionRecord(s8) // save service routine address
+
+#endif
+
+//
+// Increment interrupt count and call interrupt service routine.
+//
+
+ lw t2,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t3,PbInterruptCount(t2) // increment the count of interrupts
+ addu t3,t3,1 //
+ sw t3,PbInterruptCount(t2) // store result
+ jal a0 // call interrupt service routine
+
+//
+// Restore state, and exit interrupt.
+//
+
+ lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazard
+ ctc1 t1,fsr // restore floating status
+ lbu t8,TrOldIrql(s8) // get old IRQL
+ lw t9,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB miss
+// is not possible later, then restore the volatile register state.
+//
+
+ lw t2,TrOnInterruptStack(s8) // get saved stack indicator
+ sb t8,KiPcr + PcCurrentIrql(zero) // restore old IRQL
+ sw t9,KiPcr + PcSavedT7(zero) // save processor status
+ bne zero,t2,KiTrapExit // if ne, stay on interrupt stack
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ lw t3,KiPcr + PcSavedInitialStack(zero) // get old initial stack
+ lw t4,KiPcr + PcSavedStackLimit(zero) // get old stack limit
+ sltu t8,t8,DISPATCH_LEVEL // check if IRQL less than dispatch
+ sw t3,KiPcr + PcInitialStack(zero) // restore old initial stack
+ sw t4,KiPcr + PcStackLimit(zero) // restore old stack limit
+ mfc0 t6,cause // get exception cause register
+ beq zero,t8,KiTrapExit // if eq, old IRQL dispatch or above
+ sw t2,KiPcr + PcOnInterruptStack(zero) // restore stack indicator
+
+//
+// Check if a DPC interrupt is pending since the old IRQL is less than
+// DISPATCH_LEVEL and it is more efficient to directly dispatch than
+// let the interrupt logic request the interrupt.
+//
+
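+//
+// N.B. The branch likely instruction below executes its delay slot only when
+//      the branch is taken, so the current thread address is loaded only on
+//      the path where no dispatch interrupt is pending.
+//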
+ and t8,t6,DISPATCH_INTERRUPT // check for dispatch interrupt
+ beql zero,t8,40f // if eq, no dispatch interrupt
+ lw t7,KiPcr + PcCurrentThread(zero) // get current thread address
+
+//
+// A dispatch interrupt is pending.
+//
+
+ move sp,s8 // set correct stack pointer
+ li t0,DISPATCH_LEVEL // set new IRQL to DISPATCH_LEVEL
+ lbu t3,KiPcr + PcIrqlTable(t0) // get new mask from IRQL table
+ li t2,PSR_ENABLE_MASK // get PSR enable mask
+ nor t2,t2,zero // complement interrupt enable mask
+ sll t3,t3,PSR_INTMASK // shift table entry into position
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+ or t9,t9,t3 // merge new interrupt enable mask
+ or t9,t9,1 << PSR_IE // set interrupt enable
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ mtc0 t9,psr // enable interrupts
+ .set at
+ .set reorder
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+//
+// N.B. The following code returns to the main interrupt dispatch so
+// get and set context APCs can virtually unwind the stack properly.
+//
+
+ la ra,KiContinueInterrupt // set return address
+ j KiDispatchInterrupt // process dispatch interrupt
+
+//
+// If the previous mode is user and a user mode APC is pending, then
+// request an APC interrupt.
+//
+
+ .set noreorder
+ .set noat
+40: and t4,t9,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t4,50f // if eq, previous mode was kernel
+ ld AT,TrXIntAt(s8) // restore integer register AT
+ lbu t3,ThApcState + AsUserApcPending(t7) // get user APC pending
+ sb zero,ThAlerted(t7) // clear kernel mode alerted
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t6,t6,t3 // merge possible APC interrupt request
+ mtc0 t6,cause // set exception cause register
+ .set at
+ .set reorder
+
+//
+// Common trap exit sequence for all traps.
+//
+
+ ALTERNATE_ENTRY(KiTrapExit)
+
+ .set noreorder
+ .set noat
+ ld AT,TrXIntAt(s8) // restore integer register AT
+50: ld v0,TrXIntV0(s8) // restore integer register v0
+ ld v1,TrXIntV1(s8) // restore integer register v1
+ ld a0,TrXIntA0(s8) // restore integer registers a0 - a3
+ ld a1,TrXIntA1(s8) //
+ ld a2,TrXIntA2(s8) //
+ ld t0,TrXIntLo(s8) // restore lo and hi integer registers
+ ld t1,TrXIntHi(s8) //
+ ld a3,TrXIntA3(s8) //
+ mtlo t0 //
+ mthi t1 //
+ ld t0,TrXIntT0(s8) // restore integer registers t0 - t7
+ ld t1,TrXIntT1(s8) //
+ ld t2,TrXIntT2(s8) //
+ ld t3,TrXIntT3(s8) //
+ ld t4,TrXIntT4(s8) //
+ ld t5,TrXIntT5(s8) //
+ ld t6,TrXIntT6(s8) //
+ ld t7,TrXIntT7(s8) //
+ ld s0,TrXIntS0(s8) // restore integer registers s0 - s7
+ ld s1,TrXIntS1(s8) //
+ ld s2,TrXIntS2(s8) //
+ ld s3,TrXIntS3(s8) //
+ ld s4,TrXIntS4(s8) //
+ ld s5,TrXIntS5(s8) //
+ ld s6,TrXIntS6(s8) //
+ ld s7,TrXIntS7(s8) //
+ ld t8,TrXIntT8(s8) // restore integer registers t8 - t9
+ ld t9,TrXIntT9(s8) //
+
+//
+// Common exit sequence for system services.
+//
+
+ ALTERNATE_ENTRY(KiServiceExit)
+
+ ld gp,TrXIntGp(s8) // restore integer register gp
+ ld sp,TrXIntSp(s8) // restore stack pointer
+ ld ra,TrXIntRa(s8) // restore return address
+ ld s8,TrXIntS8(s8) // restore integer register s8
+
+//
+// WARNING: From this point on no TB Misses can be tolerated.
+//
+
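+//
+// N.B. The trailing nops and the duplicated eret at the end of the following
+//      sequence are present as a processor errata workaround; they are not
+//      reached when the first eret completes normally.
+//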
+ li k0,1 << PSR_EXL // set EXL bit in temporary PSR
+ mtc0 k0,psr // set new PSR value - 3 cycle hazard
+ lw k0,KiPcr + PcSavedT7(zero) // get previous processor status
+ lw k1,KiPcr + PcSavedEpc(zero) // get continuation address
+ nop //
+ mtc0 k0,psr // set new PSR value - 3 cycle hazard
+ mtc0 k1,epc // set continuation PC
+ nop //
+ nop //
+ eret //
+ nop // errata
+ nop //
+ nop //
+ eret //
+ .set at
+ .set reorder
+
+ .end KiInterruptStackSwitch
+
+ SBTTL("Interrupt Exception Handler")
+//++
+//
+// EXCEPTION_DISPOSITION
+// KiInterruptHandler (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN ULONG EstablisherFrame,
+// IN OUT PCONTEXT ContextRecord,
+// IN OUT PDISPATCHER_CONTEXT DispatcherContext
+//
+// Routine Description:
+//
+// Control reaches here when an exception is not handled by an interrupt
+// service routine or an unwind is initiated in an interrupt service
+// routine that would result in an unwind through the interrupt dispatcher.
+// This is considered to be a fatal system error and bug check is called.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// EstablisherFrame (a1) - Supplies the frame pointer of the establisher
+// of this exception handler.
+//
+// N.B. This is not actually the frame pointer of the establisher of
+// this handler. It is actually the stack pointer of the caller
+// of the system service. Therefore, the establisher frame pointer
+// is not used and the address of the trap frame is determined by
+// examining the saved s8 register in the context record.
+//
+// ContextRecord (a2) - Supplies a pointer to a context record.
+//
+// DispatcherContext (a3) - Supplies a pointer to the dispatcher context
+// record.
+//
+// Return Value:
+//
+// There is no return from this routine.
+//
+//--
+
+ NESTED_ENTRY(KiInterruptHandler, HandlerFrameLength, zero)
+
+ subu sp,sp,HandlerFrameLength // allocate stack frame
+ sw ra,HdRa(sp) // save return address
+
+ PROLOGUE_END
+
+ lw t0,ErExceptionFlags(a0) // get exception flags
+ li a0,INTERRUPT_UNWIND_ATTEMPTED // assume unwind in progress
+ and t1,t0,EXCEPTION_UNWIND // check if unwind in progress
+ bne zero,t1,10f // if ne, unwind in progress
+ li a0,INTERRUPT_EXCEPTION_NOT_HANDLED // set bug check code
+10: jal KeBugCheck // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiInterruptHandler
+
+ SBTTL("Memory Management Exceptions")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiVirtualMemoryDispatch, TrapFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a modify, read miss, or write miss exception
+// code is read from the cause register. When this routine is entered,
+// interrupts are disabled.
+//
+// The function of this routine is to call memory management in an attempt
+// to resolve the problem. If memory management can resolve the problem,
+// then execution is continued. Otherwise an exception record is constructed
+// and an exception is raised.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiReadMissException)
+
+ li a0,0 // set read indicator
+ lw a1,KiPcr + PcBadVaddr(zero) // get the bad virtual address
+
+//
+// N.B. The following code is a work around for a chip bug where the bad
+// virtual address is not correct on an instruction stream TB miss.
+//
+// If the exception PC is equal to the bad virtual address, then the
+// bad virtual address is correct.
+//
+// If the instruction at the exception PC is not in the TB or the
+// TB entry is invalid, then the bad virtual address is incorrect
+// and the instruction is repeated.
+//
+// If the instruction at the exception PC is valid and is a load or
+// a store instruction, then the effective address is computed and
+// compared with the bad virtual address. If the comparison is equal,
+// then the bad virtual address is correct. Otherwise, the address is
+// incorrect and the instruction is repeated.
+//
+// If the instruction at the exception PC is valid, is not a load or
+// store instruction, and is not the last instruction in the page,
+// the bad virtual address is correct.
+//
+// If the instruction at the exception PC is valid, is not a load or
+// a store instruction, and is the last instruction in the page, then
+//
+// If the exception PC + 4 is equal to the bad virtual address,
+// then the bad virtual address is correct.
+//
+// If the instruction at the exception PC + 4 is not in the TB
+// or the TB entry is invalid, then the bad virtual address is
+// incorrect and the instruction is repeated.
+//
+// If the instruction at the exception PC + 4 is valid and is a
+// load or a store instruction, then the effective address is
+// computed and compared with the bad virtual address. If the
+// comparison is equal, then the bad virtual address is correct.
+// Otherwise, the address is incorrect and the instruction is
+// repeated.
+//
+
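+//
+// N.B. The following workaround code is assembled only for multiprocessor
+//      builds (NT_UP not defined); the uniprocessor build branches directly
+//      to the common code below.
+//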
+#if !defined(NT_UP)
+
+ lw t7,TrFir(s8) // get exception PC
+
+ .set noreorder
+ .set noat
+ srl t0,t7,30 // isolate high bits of exception PC
+ beq a1,t7,30f // if eq, addresses match
+ xor a2,t0,0x2 // check for kseg0 or kseg1 address
+
+//
+// If the instruction at the exception PC is not in the TB or the TB entry is
+// invalid, then the bad virtual address is not valid and the instruction is
+// repeated.
+//
+
+ beq zero,a2,4f // if eq, kseg0 or kseg1 address
+ srl t1,t7,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ mfc0 v0,entryhi // get current VPN2 and PID
+ sll t1,t1,ENTRYHI_VPN2 //
+ and v1,v0,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t1,t1,v1 // merge PID with VPN2 of address
+ mtc0 t1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t2,index // read result of probe
+ nop // 1 cycle hazard
+ bltzl t2,20f // if ltz, entry not in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ sll t3,t7,31 - 12 // shift page bit into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mfc0 t5,entrylo1 // read low part of TB entry
+ mfc0 t4,entrylo0 //
+ bltzl t3,3f // if ltz, check second PTE
+ and t5,t5,1 << ENTRYLO_V // check if second PTE valid
+ and t5,t4,1 << ENTRYLO_V // check if first PTE valid
+3: mtc0 zero,pagemask // restore page mask register
+ beq zero,t5,20f // if eq, PTE not valid but in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ nop // 2 cycle hazard
+ nop //
+
+//
+// If the instruction at the exception PC is a load or a store instruction,
+// then compute its effective virtual address. Otherwise, check to determine
+// if the instruction is at the end of the page.
+//
+
+4: lw t0,0(t7) // get instruction value
+ ld t1,KiLoadInstructionSet // get load/store instruction set
+ li t2,1 // compute opcode set member
+ srl t3,t0,32 - 6 // right justify opcode value
+ dsll t2,t2,t3 // shift opcode member into position
+ and t2,t2,t1 // check if load/store instruction
+ bne zero,t2,10f // if ne, load/store instruction
+ srl t1,t0,21 - 3 // extract base register number
+
+//
+// If the instruction at the exception PC + 4 is not the first instruction in
+// the next page, then the bad virtual address is correct.
+//
+
+5: addu t0,t7,4 // compute next instruction address
+ and t1,t0,0xfff // isolate offset in page
+ bne zero,t1,30f // if ne, not in next page
+ srl t1,t0,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+
+//
+// If the exception PC + 4 is equal to the bad virtual address, then the
+// bad virtual address is correct.
+//
+
+ beq a1,t0,30f // if eq, address match
+ sll t1,t1,ENTRYHI_VPN2 //
+
+//
+// If the instruction at the exception PC + 4 is not in the TB or the TB entry is
+// invalid, then the bad virtual address is not valid and the instruction is
+// repeated. Otherwise, the bad virtual address is correct.
+//
+
+ beq zero,a2,8f // if eq, kseg0 or kseg1 address
+ or t1,t1,v1 // merge PID with VPN2 of address
+ mtc0 t1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t2,index // read result of probe
+ nop // 1 cycle hazard
+ bltzl t2,20f // if ltz, entry not in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ sll t3,t0,31 - 12 // shift page bit into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mfc0 t5,entrylo1 // read low part of TB entry
+ mfc0 t4,entrylo0 //
+ bltzl t3,7f // if ltz, check second PTE
+ and t5,t5,1 << ENTRYLO_V // check if second PTE valid
+ and t5,t4,1 << ENTRYLO_V // check if first PTE valid
+7: mtc0 zero,pagemask // restore page mask register
+ beq zero,t5,20f // if eq, PTE is invalid
+ mtc0 v0,entryhi // restore VPN2 and PID
+ nop // 2 cycle hazard
+ nop //
+
+//
+// If the first instruction in the next page is a load/store, then compute
+// its effective virtual address. Otherwise, the bad virtual address is not
+// valid and the instruction at the exception PC should be repeated.
+//
+
+8: lw t0,0(t0) // get instruction value
+ ld t1,KiLoadInstructionSet // get load/store instruction set
+ li t2,1 // compute opcode set member
+ srl t3,t0,32 - 6 // right justify opcode value
+ dsll t2,t2,t3 // shift opcode member into position
+ and t2,t2,t1 // check if load/store instruction
+ beq zero,t2,20f // if eq, not load/store instruction
+ srl t1,t0,21 - 3 // extract base register number
+
+//
+// The faulting instruction was a load/store instruction.
+//
+// Compute the effective virtual address and check to determine if it is equal
+// to the bad virtual address.
+//
+
+10: and t1,t1,0x1f << 3 // isolate base register number
+ la t2,12f // get base address of load table
+ addu t2,t2,t1 // compute address of register load
+ j t2 // dispatch to register load routine
+ sll t1,t0,16 // shift displacement into position
+
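+//
+// N.B. Each entry in the following dispatch table is two instructions (eight
+//      bytes), a branch and its delay slot load, which is why the base
+//      register number extracted above is scaled by eight.
+//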
+12: b 14f // zero
+ move t2,zero //
+
+ b 14f // at
+ lw t2,TrXIntAt(s8) //
+
+ b 14f // v0
+ lw t2,TrXIntV0(s8) //
+
+ b 14f // v1
+ lw t2,TrXIntV1(s8) //
+
+ b 14f // a0
+ lw t2,TrXIntA0(s8) //
+
+ b 14f // a1
+ lw t2,TrXIntA1(s8) //
+
+ b 14f // a2
+ lw t2,TrXIntA2(s8) //
+
+ b 14f // a3
+ lw t2,TrXIntA3(s8) //
+
+ b 14f // t0
+ lw t2,TrXIntT0(s8) //
+
+ b 14f // t1
+ lw t2,TrXIntT1(s8) //
+
+ b 14f // t2
+ lw t2,TrXIntT2(s8) //
+
+ b 14f // t3
+ lw t2,TrXIntT3(s8) //
+
+ b 14f // t4
+ lw t2,TrXIntT4(s8) //
+
+ b 14f // t5
+ lw t2,TrXIntT5(s8) //
+
+ b 14f // t6
+ lw t2,TrXIntT6(s8) //
+
+ b 14f // t7
+ lw t2,TrXIntT7(s8) //
+
+ b 14f // s0
+ move t2,s0 //
+
+ b 14f // s1
+ move t2,s1 //
+
+ b 14f // s2
+ move t2,s2 //
+
+ b 14f // s3
+ move t2,s3 //
+
+ b 14f // s4
+ move t2,s4 //
+
+ b 14f // s5
+ move t2,s5 //
+
+ b 14f // s6
+ move t2,s6 //
+
+ b 14f // s7
+ move t2,s7 //
+
+ b 14f // t8
+ lw t2,TrXIntT8(s8) //
+
+ b 14f // t9
+ lw t2,TrXIntT9(s8) //
+
+ b 14f // k0
+ move t2,zero //
+
+ b 14f // k1
+ move t2,zero //
+
+ b 14f // gp
+ lw t2,TrXIntGp(s8) //
+
+ b 14f // sp
+ lw t2,TrXIntSp(s8) //
+
+ b 14f // s8
+ lw t2,TrXIntS8(s8) //
+
+ lw t2,TrXIntRa(s8) // ra
+
+//
+// If the effective virtual address matches the bad virtual address, then
+// the bad virtual address is correct. Otherwise, repeat the instruction.
+//
+
+14: sra t1,t1,16 // sign extend displacement value
+ addu t3,t2,t1 // compute effective load address
+ beq a1,t3,30f // if eq, bad virtual address is okay
+ nop // fill
+
+#if DBG
+
+ lw ra,KiMismatchCount // increment address mismatch count
+ nop // TB fills
+ addu ra,ra,1 //
+ sw ra,KiMismatchCount // store result
+
+#endif
+
+
+//
+// N.B. PSR and EPC may have changed because of TB miss and need to be
+// reloaded.
+//
+
+20: nop // 2 cycle hazard
+ nop //
+ lw t0,TrPsr(s8) // get previous processor state
+ lw t1,TrFir(s8) // get continuation address
+
+#if DBG
+
+ lw ra,KiBadVaddrCount // increment number of second level
+ nop // TB fills
+ addu ra,ra,1 //
+ sw ra,KiBadVaddrCount // store result
+
+#endif
+
+ sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiTrapExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+#else
+
+ b 30f // join common code
+
+#endif
+
+ ALTERNATE_ENTRY(KiReadMissException9.x)
+
+ li a0,0 // set read indicator
+ lw a1,KiPcr + PcBadVaddr(zero) // get the bad virtual address
+ b 30f // join common code
+
+ ALTERNATE_ENTRY(KiModifyException)
+
+ ALTERNATE_ENTRY(KiWriteMissException)
+
+ li a0,1 // set write indicator
+ lw a1,KiPcr + PcBadVaddr(zero) // get bad virtual address
+
+//
+// Common code for modify, read miss, and write miss exceptions.
+//
+
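+//
+// N.B. The exception record in the trap frame is used as temporary storage
+//      for the fault parameters across the call to the memory management
+//      fault routine. If the fault cannot be resolved, these fields become
+//      part of the exception record that is dispatched below.
+//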
+30: sw t8,TrExceptionRecord + ErExceptionAddress(s8) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a2,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw a0,TrExceptionRecord + ErExceptionInformation(s8) // save load/store indicator
+ sw a1,TrExceptionRecord + ErExceptionInformation + 4(s8) // save bad virtual address
+ sw a2,TrExceptionRecord + ErExceptionCode(s8) // save previous mode
+ jal MmAccessFault // call memory management fault routine
+
+//
+// Check if working set watch is enabled.
+//
+
+ lbu t0,PsWatchEnabled // get working set watch enable flag
+ lw t1,TrExceptionRecord + ErExceptionCode(s8) // get previous mode
+ move a0,v0 // set status of fault resolution
+ bltz v0,40f // if ltz, unsuccessful resolution
+ beq zero,t0,35f // if eq, watch not enabled
+ lw a1,TrExceptionRecord + ErExceptionAddress(s8) // get exception address
+ lw a2,TrExceptionRecord + ErExceptionInformation + 4(s8) // set bad address
+ jal PsWatchWorkingSet // record working set information
+
+//
+// Check if the debugger has any owed breakpoints.
+//
+
+35: lbu t0,KdpOweBreakpoint // get owed breakpoint flag
+ beq zero,t0,37f // if eq, no owed breakpoints
+ jal KdSetOwedBreakpoints // insert breakpoints if necessary
+37: j KiAlternateExit //
+
+//
+// The exception was not resolved. Fill in the remainder of the exception
+// record and attempt to dispatch the exception.
+//
+
+40: addu a0,s8,TrExceptionRecord // compute exception record address
+ lw a3,ErExceptionCode(a0) // restore previous mode
+ li t1,STATUS_IN_PAGE_ERROR | 0x10000000 // get special code
+ beq v0,t1,60f // if eq, special bug check code
+ li t0,2 // set number of parameters
+ li t1,STATUS_ACCESS_VIOLATION // get access violation code
+ beq v0,t1,50f // if eq, access violation
+ li t1,STATUS_GUARD_PAGE_VIOLATION // get guard page violation code
+ beq v0,t1,50f // if eq, guard page violation
+ li t1,STATUS_STACK_OVERFLOW // get stack overflow code
+ beq v0,t1,50f // if eq, stack overflow
+ li t0,3 // set number of parameters
+ sw v0,ErExceptionInformation + 8(a0) // save real status value
+ li v0,STATUS_IN_PAGE_ERROR // set in page error status
+50: sw v0,ErExceptionCode(a0) // save exception code
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw t0,ErNumberParameters(a0) //
+ jal KiExceptionDispatch // join common code
+
+//
+// Generate a bug check - A page fault has occurred at an IRQL that is greater
+// than APC_LEVEL.
+//
+
+60: li a0,IRQL_NOT_LESS_OR_EQUAL // set bug check code
+ lw a1,TrExceptionRecord + ErExceptionInformation + 4(s8) // set bad virtual address
+ lbu a2,KiPcr + PcCurrentIrql(zero) // set current IRQL
+ lw a3,TrExceptionRecord + ErExceptionInformation(s8) // set load/store indicator
+ lw t1,TrFir(s8) // set exception PC
+ sw t1,4 * 4(sp) //
+ jal KeBugCheckEx // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiVirtualMemoryDispatch
+
+ SBTTL("System Service Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ EXCEPTION_HANDLER(KiSystemServiceHandler)
+
+ NESTED_ENTRY(KiSystemServiceDispatch, TrapFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp - TrapFrameLength(sp) // save stack pointer
+ subu sp,sp,TrapFrameLength // allocate trap frame
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a system call exception code is read from
+// the cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to call the specified system service.
+//
+// N.B. The exception dispatcher jumps to the correct entry point depending
+// on whether the system service is a fast path event pair servive or
+// a normal service. The new PSR has been loaded before the respective
+// routines are entered.
+//
+// Arguments:
+//
+// v0 - Supplies the system service code.
+// t0 - Supplies the address of the current thread object.
+// t9 - Supplies the previous PSR with the EXL and mode set.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiSystemServiceException)
+
+ START_REGION(KiSystemServiceDispatchStart)
+
+ ALTERNATE_ENTRY(KiSystemServiceNormal)
+
+ srl t9,t9,PSR_PMODE // isolate previous processor mode
+ lbu t3,ThPreviousMode(t0) // get old previous mode from thread object
+ lw t4,ThTrapFrame(t0) // get current trap frame address
+ and t9,t9,0x1 // isolate previous mode
+ sb t9,ThPreviousMode(t0) // set new previous mode in thread object
+ sb t3,TrPreviousMode(s8) // save old previous mode of thread object
+ sw t4,TrTrapFrame(s8) // save current trap frame address
+
+#if DBG
+
+ lbu t7,ThKernelApcDisable(t0) // get current APC disable count
+ lbu t8,ThApcStateIndex(t0) // get current APC state index
+ sb t7,TrExceptionRecord(s8) // save APC disable count
+ sb t8,TrExceptionRecord + 1(s8) // save APC state index
+
+#endif
+
+//
+// If the specified system service number is not within range, then
+// attempt to convert the thread to a GUI thread and retry the service
+// dispatch.
+//
+// N.B. The argument registers a0-a3, the system service number in v0,
+// and the thread address in t0 must be preserved while attempting
+// to convert the thread to a GUI thread.
+//
+
+ ALTERNATE_ENTRY(KiSystemServiceRepeat)
+
+ sw s8,ThTrapFrame(t0) // save address of trap frame
+ lw t6,ThServiceTable(t0) // get service descriptor table address
+ srl t1,v0,SERVICE_TABLE_SHIFT // isolate service descriptor offset
+ and t1,t1,SERVICE_TABLE_MASK //
+ add t6,t6,t1 // compute service descriptor address
+ lw t4,SdLimit(t6) // get service number limit
+ lw t5,SdBase(t6) // get service table address
+ and t7,v0,SERVICE_NUMBER_MASK // isolate service table offset
+ sll v1,t7,2 // compute system service offset value
+ sltu t4,t7,t4 // check if invalid service number
+ addu v1,v1,t5 // compute address of service entry
+ beq zero,t4,50f // if eq, invalid service number
+ lw v1,0(v1) // get address of service routine
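+
+//
+// N.B. The low bit of the service entry address is set when the service has
+//      in-memory arguments; the bit is tested below and cleared before the
+//      service routine is called.
+//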
+
+#if DBG
+
+ lw t6,SdCount(t6) // get service count table address
+ sll t5,t7,2 // compute system service offset value
+ beq zero,t6,12f // if eq, table not defined
+ addu t6,t6,t5 // compute address of service entry
+ lw t7,0(t6) // increment system service count
+ addu t7,t7,1 //
+ sw t7,0(t6) // store result
+12: //
+
+#endif
+
+//
+// If the system service is a GUI service and the GDI user batch queue is
+// not empty, then call the appropriate service to flush the user batch.
+//
+
+ xor t2,t1,SERVICE_TABLE_TEST // check if GUI system service
+ bne zero,t2,15f // if ne, not GUI system service
+ lw t3,KiPcr + PcTeb(zero) // get current thread TEB address
+ sw v1,TrXIntV1(s8) // save service routine address
+ sw a0,TrXIntA0(s8) // save possible arguments 1 and 2
+ lw t4,TeGdiBatchCount(t3) // get number of batched GDI calls
+ sw a1,TrXIntA1(s8) //
+ sw a2,TrXIntA2(s8) // save possible third argument
+ lw t5,KeGdiFlushUserBatch // get address of flush routine
+ beq zero,t4,15f // if eq, no batched calls
+ sw a3,TrXIntA3(s8) // save possible fourth argument
+ jal t5 // flush GDI user batch
+ lw v1,TrXIntV1(s8) // restore service routine address
+ lw a0,TrXIntA0(s8) // restore possible arguments
+ lw a1,TrXIntA1(s8) //
+ lw a2,TrXIntA2(s8) //
+ lw a3,TrXIntA3(s8) //
+15: addu a0,a0,zero // make sure of sign extension
+ addu a1,a1,zero // N.B. needed for 64-bit addressing
+ and t1,v1,1 // check if any in-memory arguments
+ beq zero,t1,30f // if eq, no in-memory arguments
+
+//
+// The following code captures arguments that were passed in memory on the
+// callers stack. This is necessary to ensure that the caller does not modify
+// the arguments after they have been probed and is also necessary in kernel
+// mode because a trap frame has been allocated on the stack.
+//
+// If the previous mode is user, then the user stack is probed for readability.
+//
+// N.B. The maximum possible number of parameters is copied to avoid loop
+// and computational overhead.
+//
+
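+//
+// N.B. The following code is bounded by the KiSystemServiceStartAddress and
+//      KiSystemServiceEndAddress markers so the system service exception
+//      handler can recognize a fault that occurs while the in-memory argument
+//      list is being captured and unwind to the system service exit code.
+//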
+ START_REGION(KiSystemServiceStartAddress)
+
+ subu sp,sp,TrapFrameArguments // allocate argument list space
+ lw t0,TrXIntSp(s8) // get previous stack pointer
+ beq zero,t9,20f // if eq, previous mode was kernel
+ li t1,MM_USER_PROBE_ADDRESS // get user probe address
+ sltu t2,t0,t1 // check if stack in user region
+ bne zero,t2,20f // if ne, stack in user region
+ move t0,t1 // set invalid user stack address
+20: ld t1,16(t0) // get twelve argument values from
+ ld t2,24(t0) // callers stack
+ ld t3,32(t0) //
+ ld t4,40(t0) //
+ ld t5,48(t0) //
+ ld t6,56(t0) //
+ sd t1,16(sp) // stores arguments on kernel stack
+ sd t2,24(sp) //
+ sd t3,32(sp) //
+ sd t4,40(sp) //
+ sd t5,48(sp) //
+ sd t6,56(sp) //
+
+ END_REGION(KiSystemServiceEndAddress)
+
+ subu v1,v1,1 // clear low bit of service address
+
+//
+// Call system service.
+//
+
+30: addu a2,a2,zero // make sure of sign extension
+ addu a3,a3,zero // needed for 64-bit addressing
+ jal v1 // call system service
+
+//
+// Restore old trap frame address from the current trap frame.
+//
+
+ ALTERNATE_ENTRY(KiSystemServiceExit)
+
+ lw a0,KiPcr + PcPrcb(zero) // get processor block address
+ lw t2,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t3,TrTrapFrame(s8) // get old trap frame address
+ lw t0,PbSystemCalls(a0) // increment number of system calls
+ addu t0,t0,1 //
+ sw t0,PbSystemCalls(a0) //
+ sw t3,ThTrapFrame(t2) // restore old trap frame address
+
+//
+// Restore state and exit system service.
+//
+
+ lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazard
+ ctc1 t1,fsr // restore floating status
+ lw t0,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+ lbu t3,TrPreviousMode(s8) // get old previous mode
+
+#if DBG
+
+ lbu a2,ThKernelApcDisable(t2) // get current APC disable count
+ lbu a3,ThApcStateIndex(t2) // get current APC state index
+ lbu t5,TrExceptionRecord(s8) // get previous APC disable count
+ lbu t6,TrExceptionRecord + 1(s8) // get previous APC state index
+ xor t7,t5,a2 // compare APC disable count
+ xor t8,t6,a3 // compare APC state index
+ or t9,t8,t7 // merge comparison value
+ bne zero,t9,60f // if ne, invalid state or count
+ nop // fill
+
+#endif
+
+ and t4,t0,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t4,40f // if eq, previous mode was kernel
+ sb t3,ThPreviousMode(t2) // restore old previous mode
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+
+ lbu t3,ThApcState + AsUserApcPending(t2) // get user APC pending
+ sb zero,ThAlerted(t2) // clear kernel mode alerted
+ mfc0 t4,cause // get exception cause register
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t4,t4,t3 // merge possible APC interrupt request
+ mtc0 t4,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB miss
+// is not possible, then restore the volatile register state.
+//
+
+40: sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiServiceExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+//
+// The specified system service number is not within range. Attempt to
+// convert the thread to a GUI thread if specified system service is
+// not a base service and the thread has not already been converted to
+// a GUI thread.
+//
+// N.B. The argument registers a0-a3, the system service number in v0,
+// and the thread address in t0 must be preserved if an attempt
+// is made to convert the thread to a GUI thread.
+//
+
+50: xor t2,t1,SERVICE_TABLE_TEST // check if GUI system service
+ sw v0,TrXIntV0(s8) // save system service number
+ bne zero,t2,55f // if ne, not GUI system service
+ sw a0,TrXIntA0(s8) // save argument register a0
+ sw a1,TrXIntA1(s8) // save argument registers a1-a3
+ sw a2,TrXIntA2(s8) //
+ sw a3,TrXIntA3(s8) //
+ jal PsConvertToGuiThread // attempt to convert to GUI thread
+ move v1,v0 // save completion status
+ move s8,sp // reset trap frame address
+ lw v0,TrXIntV0(s8) // restore system service number
+ lw a0,TrXIntA0(s8) // restore argument registers a0-a3
+ lw a1,TrXIntA1(s8) //
+ lw a2,TrXIntA2(s8) //
+ lw a3,TrXIntA3(s8) //
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ beq zero,v1,KiSystemServiceRepeat // if eq, successful conversion
+
+//
+// Return invalid system service status for invalid service code.
+//
+
+55: li v0,STATUS_INVALID_SYSTEM_SERVICE // set completion status
+ b KiSystemServiceExit //
+
+//
+// An attempt is being made to exit a system service while kernel APCs are
+// disabled, or while attached to another process and the previous mode is
+// not kernel.
+//
+// a2 - Supplies the APC disable count.
+// a3 - Supplies the APC state index.
+//
+
+#if DBG
+
+60: li a0,APC_INDEX_MISMATCH // set bug check code
+ move a1,t5 // set previous APC disable
+ sw t6,4 * 4(sp) // set previous state index
+ jal KeBugCheckEx // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+#endif
+
+ START_REGION(KiSystemServiceDispatchEnd)
+
+ .end KiSystemServiceDispatch
+
+ SBTTL("System Service Exception Handler")
+//++
+//
+// EXCEPTION_DISPOSITION
+// KiSystemServiceHandler (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN ULONG EstablisherFrame,
+// IN OUT PCONTEXT ContextRecord,
+// IN OUT PDISPATCHER_CONTEXT DispatcherContext
+// )
+//
+// Routine Description:
+//
+// Control reaches here when an exception is raised in a system service
+// or the system service dispatcher, and for an unwind during a kernel
+// exception.
+//
+// If an unwind is being performed and the system service dispatcher is
+// the target of the unwind, then an exception occurred while attempting
+// to copy the user's in-memory argument list. Control is transferred to
+// the system service exit by returning a continue execution disposition
+// value.
+//
+// If an unwind is being performed and the previous mode is user, then
+// bug check is called to crash the system. It is not valid to unwind
+// out of a system service into user mode.
+//
+// If an unwind is being performed, the previous mode is kernel, the
+// system service dispatcher is not the target of the unwind, and the
+// thread does not own any mutexes, then the previous mode field from
+// the trap frame is restored to the thread object. Otherwise, bug
+// check is called to crash the system. It is invalid to unwind out of
+// a system service while owning a mutex.
+//
+// If an exception is being raised and the exception PC is within the
+// range of the system service dispatcher in-memory argument copy code,
+// then an unwind to the system service exit code is initiated.
+//
+// If an exception is being raised and the exception PC is not within
+// the range of the system service dispatcher, and the previous mode is
+// not user, then a continue search disposition value is returned. Otherwise,
+// a system service has failed to handle an exception and bug check is
+// called. It is invalid for a system service not to handle all exceptions
+// that can be raised in the service.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// EstablisherFrame (a1) - Supplies the frame pointer of the establisher
+// of this exception handler.
+//
+// N.B. This is not actually the frame pointer of the establisher of
+// this handler. It is actually the stack pointer of the caller
+// of the system service. Therefore, the establisher frame pointer
+// is not used and the address of the trap frame is determined by
+// examining the saved s8 register in the context record.
+//
+// ContextRecord (a2) - Supplies a pointer to a context record.
+//
+// DispatcherContext (a3) - Supplies a pointer to the dispatcher context
+// record.
+//
+// Return Value:
+//
+// If bug check is called, there is no return from this routine and the
+// system is crashed. If an exception occurred while attempting to copy
+// the user in-memory argument list, then there is no return from this
+// routine, and unwind is called. Otherwise, ExceptionContinueSearch is
+// returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiSystemServiceHandler)
+
+ subu sp,sp,HandlerFrameLength // allocate stack frame
+ sw ra,HdRa(sp) // save return address
+
+ PROLOGUE_END
+
+ lw t0,ErExceptionFlags(a0) // get exception flags
+ and t1,t0,EXCEPTION_UNWIND // check if unwind in progress
+ bne zero,t1,40f // if ne, unwind in progress
+
+//
+// An exception is in progress.
+//
+// If the exception PC is within the in-memory argument copy code of the
+// system service dispatcher, then call unwind to transfer control to the
+// system service exit code. Otherwise, check if the previous mode is user
+// or kernel mode.
+//
+
+ lw t0,ErExceptionAddress(a0) // get address of exception
+ la t1,KiSystemServiceStartAddress // get start address of range
+ sltu t3,t0,t1 // check if before start of range
+ la t2,KiSystemServiceEndAddress // get end address of range
+ bne zero,t3,10f // if ne, before start of range
+ sltu t3,t0,t2 // check if before end of range
+ bne zero,t3,30f // if ne, before end of range
+
+//
+// If the previous mode was kernel mode, then a continue search disposition
+// value is returned. Otherwise, the exception was raised in a system service
+// and was not handled by that service. Call bug check to crash the system.
+//
+
+10: lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu t1,ThPreviousMode(t0) // get previous mode from thread object
+ bne zero,t1,20f // if ne, previous mode was user
+
+//
+// Previous mode is kernel mode.
+//
+
+ li v0,ExceptionContinueSearch // set disposition code
+ addu sp,sp,HandlerFrameLength // deallocate stack frame
+ j ra // return
+
+//
+// Previous mode is user mode. Call bug check to crash the system.
+//
+
+20: li a0,SYSTEM_SERVICE_EXCEPTION // set bug check code
+ jal KeBugCheck // call bug check routine
+
+//
+// The exception was raised in the system service dispatcher. Unwind to
+// the system service exit code.
+//
+
+30: lw a3,ErExceptionCode(a0) // set return value
+ move a2,zero // set exception record address
+ move a0,a1 // set target frame address
+ la a1,KiSystemServiceExit // set target PC address
+ jal RtlUnwind // unwind to system service exit
+
+//
+// An unwind is in progress.
+//
+// If a target unwind is being performed, then continue execution is returned
+// to transfer control to the system service exit code. Otherwise, restore the
+// previous mode if the previous mode is not user and there are no mutexes owned
+// by the current thread.
+//
+
+40: and t1,t0,EXCEPTION_TARGET_UNWIND // check if target unwind in progress
+ bne zero,t1,60f // if ne, target unwind in progress
+
+//
+// An unwind is being performed through the system service dispatcher. If the
+// previous mode is not kernel or the current thread owns one or more mutexes,
+// then call bug check and crash the system. Otherwise, restore the previous
+// mode in the current thread object.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,CxXIntS8(a2) // get address of trap frame
+ lbu t3,ThPreviousMode(t0) // get previous mode from thread object
+ lbu t4,TrPreviousMode(t1) // get previous mode from trap frame
+ bne zero,t3,50f // if ne, previous mode was user
+
+//
+// Restore previous mode from trap frame to thread object and continue the unwind
+// operation.
+//
+
+ sb t4,ThPreviousMode(t0) // restore previous mode from trap frame
+ li v0,ExceptionContinueSearch // set disposition value
+ addu sp,sp,HandlerFrameLength // deallocate stack frame
+ j ra // return
+
+//
+// An attempt is being made to unwind into user mode. Call bug check to crash
+// the system.
+//
+
+50: li a0,SYSTEM_UNWIND_PREVIOUS_USER // set bug check code
+ jal KeBugCheck // call bug check
+
+//
+// A target unwind is being performed. Return a continue execution disposition
+// value.
+//
+
+60: li v0,ExceptionContinueSearch // set disposition value
+ addu sp,sp,HandlerFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiSystemServiceHandler
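+
+//
+// Editorial note: the dispatching decision above can be summarized by the
+// following C sketch. It is illustrative only; the type, field, and symbol
+// names mirror the assembler symbols and are assumptions, not a definitive
+// restatement of the handler.
+//
+//     EXCEPTION_DISPOSITION
+//     SketchSystemServiceHandler (
+//         IN PEXCEPTION_RECORD ExceptionRecord,
+//         IN PVOID EstablisherFrame,
+//         IN OUT PCONTEXT ContextRecord
+//         )
+//     {
+//         ULONG Address = (ULONG)ExceptionRecord->ExceptionAddress;
+//
+//         if ((ExceptionRecord->ExceptionFlags & EXCEPTION_UNWIND) == 0) {
+//
+//             // exception: unwind to the service exit code if the fault is
+//             // in the argument copy range, crash if previous mode is user
+//
+//             if ((Address >= (ULONG)&KiSystemServiceStartAddress) &&
+//                 (Address < (ULONG)&KiSystemServiceEndAddress)) {
+//                 RtlUnwind(EstablisherFrame,
+//                           (PVOID)&KiSystemServiceExit,
+//                           NULL,
+//                           (PVOID)ExceptionRecord->ExceptionCode);
+//             }
+//
+//             if (KeGetCurrentThread()->PreviousMode != KernelMode) {
+//                 KeBugCheck(SYSTEM_SERVICE_EXCEPTION);
+//             }
+//
+//         } else if ((ExceptionRecord->ExceptionFlags & EXCEPTION_TARGET_UNWIND) == 0) {
+//
+//             // general unwind: refuse to unwind into user mode, otherwise
+//             // restore the previous mode saved in the trap frame (s8)
+//
+//             PKTRAP_FRAME TrapFrame = (PKTRAP_FRAME)ContextRecord->XIntS8;
+//
+//             if (KeGetCurrentThread()->PreviousMode != KernelMode) {
+//                 KeBugCheck(SYSTEM_UNWIND_PREVIOUS_USER);
+//             }
+//
+//             KeGetCurrentThread()->PreviousMode = TrapFrame->PreviousMode;
+//         }
+//
+//         return ExceptionContinueSearch;
+//     }
+//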
+
+ SBTTL("Trap Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiTrapDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a trap exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to raise an array bounds exceeded
+// exception.
+//
+// N.B. Integer register v1 is not usable in the first instruction of the
+// routine.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiTrapException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t1,STATUS_ARRAY_BOUNDS_EXCEEDED // set exception code
+ sw t1,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiTrapDispatch
+
+ SBTTL("User Address Error Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiUserAddressErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a read or write user address error exception
+// is generated from the XTB miss handler. A user address error exception
+// occurs when an invalid 64-bit user address is generated. Interrupts are
+// disabled when this routine is entered.
+//
+// The function of this routine is to raise an access violation exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiUserAddressErrorException)
+
+ lw a1,KiPcr + PcBadVaddr(zero) // get the bad virtual address
+
+//
+// N.B. The following code is a workaround for a chip bug where the bad
+// virtual address is not correct on an instruction stream TB miss.
+//
+// If the exception PC is equal to the bad virtual address, then the
+// bad virtual address is correct.
+//
+// If the instruction at the exception PC is not in the TB or the
+// TB entry is invalid, then the bad virtual address is incorrect
+// and the instruction is repeated.
+//
+// Otherwise, the bad virtual address is correct.
+//
+
+#if !defined(NT_UP)
+
+ move t7,t8 // get address of faulting instruction
+
+ .set noreorder
+ .set noat
+ srl t0,t7,30 // isolate high bits of exception PC
+ beq a1,t7,30f // if eq, addresses match
+ xor a2,t0,0x2 // check for kseg0 or kseg1 address
+
+//
+// If the instruction at the exception PC is not in the TB or the TB entry is
+// invalid, then the bad virtual address is not valid and the instruction is
+// repeated.
+//
+
+ beq zero,a2,30f // if eq, kseg0 or kseg1 address
+ srl t1,t7,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ mfc0 v0,entryhi // get current VPN2 and PID
+ sll t1,t1,ENTRYHI_VPN2 //
+ and v1,v0,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t1,t1,v1 // merge PID with VPN2 of address
+ mtc0 t1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazzard
+ nop //
+ mfc0 t2,index // read result of probe
+ nop // 1 cycle hazzard
+ bltzl t2,20f // if ltz, entry not in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ sll t3,t7,31 - 12 // shift page bit into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ mfc0 t5,entrylo1 // read low part of TB entry
+ mfc0 t4,entrylo0 //
+ bltzl t3,10f // if ltz, check second PTE
+ and t5,t5,1 << ENTRYLO_V // check if second PTE valid
+ and t5,t4,1 << ENTRYLO_V // check if first PTE valid
+10: mtc0 zero,pagemask // restore page mask register
+ mtc0 v0,entryhi // restore VPN2 and PID
+ bne zero,t5,30f // if ne, PTE valid
+
+//
+// N.B. PSR and EPC may have changed because of TB miss and need to be
+// reloaded.
+//
+
+20: nop // 2 cycle hazzard
+ nop //
+ lw t0,TrPsr(s8) // get previous processor state
+ lw t1,TrFir(s8) // get continuation address
+ sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiTrapExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+#endif
+
+30: addu a0,s8,TrExceptionRecord // compute exception record address
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw zero,ErExceptionInformation(a0) // save load/store indicator
+ sw a1,ErExceptionInformation + 4(a0) // save bad virtual address
+ sw t8,ErExceptionAddress(a0) // set exception address
+
+//
+// If the address is a reference to the last 64k of user address space, then
+// treat the error as an address error. Otherwise, treat the error as an
+// access violation.
+//
+
+ li t3,STATUS_ACCESS_VIOLATION // set exception code
+ li t4,0x7fff0000 // get address mask value
+ and t5,t4,t1 // isolate high address bits
+ bne t4,t5,40f // if ne, invalid user address
+ li t3,STATUS_DATATYPE_MISALIGNMENT // set exception code
+40: sw t3,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ li t0,2 // set number of exception parameters
+ sw t0,ErNumberParameters(a0) //
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiUserAddressErrorDispatch
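+
+//
+// Editorial note: the BadVAddr work-around above reduces to the following C
+// sketch; tlb_maps_valid_page() is a hypothetical helper standing in for the
+// probe/read coprocessor 0 sequence and is not a real kernel interface.
+//
+//     BOOLEAN
+//     SketchBadVaddrIsReliable (
+//         IN ULONG BadVaddr,
+//         IN ULONG ExceptionPc
+//         )
+//     {
+//         if (BadVaddr == ExceptionPc) {
+//             return TRUE;                    // addresses match
+//         }
+//
+//         if ((ExceptionPc >> 30) == 0x2) {
+//             return TRUE;                    // fetch was from kseg0/kseg1
+//         }
+//
+//         // reliable only if the page containing the exception PC is
+//         // present and valid in the TB; otherwise the faulting
+//         // instruction is simply restarted
+//
+//         return tlb_maps_valid_page(ExceptionPc);
+//     }
+//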
+
+ SBTTL("Exception Dispatch")
+//++
+//
+// Routine Description:
+//
+// Control is transferred to this routine to call the exception
+// dispatcher to resolve an exception.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to an exception record.
+//
+// a3 - Supplies the previous processor mode.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// There is no return from this routine.
+//
+//--
+
+ NESTED_ENTRY(KiExceptionDispatch, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+ PROLOGUE_END
+
+ move a1,sp // set exception frame address
+ move a2,s8 // set trap frame address
+ li t0,TRUE // set first chance TRUE
+ sw t0,ExArgs + (4 * 4)(sp) //
+ jal KiDispatchException // call exception dispatcher
+
+ SBTTL("Exception Exit")
+//++
+//
+// Routine Description:
+//
+// Control is transferred to this routine to exit from an exception.
+//
+// N.B. This transfer of control occurs from:
+//
+// 1. a fall through from the above code.
+// 2. an exit from the continue system service.
+// 3. an exit from the raise exception system service.
+// 4. an exit into user mode from thread startup.
+//
+// N.B. The alternate exit point is used by memory management which does
+// not generate an exception frame.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+// sp - Supplies a pointer to an exception frame.
+//
+// Return Value:
+//
+// There is no return from this routine.
+//
+//--
+
+ ALTERNATE_ENTRY(KiExceptionExit)
+
+ ldc1 f20,ExFltF20(sp) // restore floating registers f20 - f31
+ ldc1 f22,ExFltF22(sp) //
+ ldc1 f24,ExFltF24(sp) //
+ ldc1 f26,ExFltF26(sp) //
+ ldc1 f28,ExFltF28(sp) //
+ ldc1 f30,ExFltF30(sp) //
+
+ ALTERNATE_ENTRY(KiAlternateExit)
+
+ lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazzard
+ ctc1 t1,fsr // restore floating status
+ lw t0,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+ lw t2,KiPcr + PcCurrentThread(zero) // get current thread address
+ and t3,t0,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t3,10f // if eq, previous mode was kernel
+ sw t0,KiPcr + PcSavedT7(zero) // save processor status
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+
+ lbu t3,ThApcState + AsUserApcPending(t2) // get user APC pending
+ sb zero,ThAlerted(t2) // clear kernel mode alerted
+ mfc0 t4,cause // get exception cause register
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t4,t4,t3 // merge possible APC interrupt request
+ mtc0 t4,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB
+// miss is not possible, then restore the volatile register state.
+//
+
+10: sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ ldc1 f0,TrFltF0(s8) // restore floating register f0
+ ldc1 f2,TrFltF2(s8) // restore floating registers f2 - f19
+ ldc1 f4,TrFltF4(s8) //
+ ldc1 f6,TrFltF6(s8) //
+ ldc1 f8,TrFltF8(s8) //
+ ldc1 f10,TrFltF10(s8) //
+ ldc1 f12,TrFltF12(s8) //
+ ldc1 f14,TrFltF14(s8) //
+ ldc1 f16,TrFltF16(s8) //
+ j KiTrapExit //
+ ldc1 f18,TrFltF18(s8) //
+ .set at
+ .set reorder
+
+ .end KiExceptionDispatch
+
+ SBTTL("Disable Interrupts")
+//++
+//
+// BOOLEAN
+// KiDisableInterrupts (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function disables interrupts and returns whether interrupts
+// were previously enabled.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// A boolean value that determines whether interrupts were previously
+// enabled (TRUE) or disabled (FALSE).
+//
+//--
+
+ LEAF_ENTRY(KiDisableInterrupts)
+
+ .set noreorder
+ .set noat
+ mfc0 t0,psr // get current processor status
+ li t1,~(1 << PSR_IE) // set interrupt enable mask
+ and t2,t1,t0 // clear interrupt enable
+ mtc0 t2,psr // disable interrupts
+ and v0,t0,1 << PSR_IE // isolate current interrupt enable
+ srl v0,v0,PSR_IE //
+ .set at
+ .set reorder
+
+ j ra // return
+
+ .end KiDisableInterrupts
+
+ SBTTL("Restore Interrupts")
+//++
+//
+// VOID
+// KiRestoreInterrupts (
+// IN BOOLEAN Enable
+// )
+//
+// Routine Description:
+//
+// This function restores the interrupt enable that was returned by
+// the disable interrupts function.
+//
+// Arguments:
+//
+// Enable (a0) - Supplies the interrupt enable value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRestoreInterrupts)
+
+ .set noreorder
+ .set noat
+ mfc0 t0,psr // get current processor status
+ and a0,a0,0xff // isolate interrupt enable
+ sll t1,a0,PSR_IE // shift interrupt enable into position
+ or t1,t1,t0 // merge interrupt enable with PSR
+ mtc0 t1,psr // restore previous interrupt enable
+ nop //
+ .set at
+ .set reorder
+
+ j ra // return
+
+ .end KiRestoreInterrupts
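+
+//
+// Editorial note: a minimal usage sketch of the preceding pair of routines;
+// callers are expected to save the returned enable state and pass it back
+// unchanged.
+//
+//     BOOLEAN Enable;
+//
+//     Enable = KiDisableInterrupts();        // TRUE if interrupts were enabled
+//
+//     /* ... touch state that must not be interrupted ... */
+//
+//     KiRestoreInterrupts(Enable);           // restore the caller's state
+//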
+
+ SBTTL("Fill Translation Buffer Entry")
+//++
+//
+// VOID
+// KeFillEntryTb (
+// IN HARDWARE_PTE Pte[],
+// IN PVOID Virtual,
+// IN BOOLEAN Invalid
+// )
+//
+// Routine Description:
+//
+// This function fills a translation buffer entry. If the entry is already
+// in the translation buffer, then the entry is overwritten. Otherwise, a
+// random entry is overwritten.
+//
+// Arguments:
+//
+// Pte (a0) - Supplies a pointer to the page table entries that are to be
+// written into the TB.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be filled in the translation buffer.
+//
+// Invalid (a2) - Supplies a boolean value that determines whether the
+// TB entry should be invalidated.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeFillEntryTb)
+
+ and a0,a0,~0x7 // clear low bits of PTE address
+ lw t0,0(a0) // get first PTE value
+ lw t1,4(a0) // get second PTE value
+
+#if DBG
+
+ xor t2,t1,t0 // compare G-bits
+ and t2,t2,1 << ENTRYLO_G // isolate comparison
+ beq zero,t2,5f // if eq, G-bits match
+ break KERNEL_BREAKPOINT // break into kernel debugger
+5: //
+
+#endif
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,entryhi // get current PID and VPN2
+ srl a1,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll a1,a1,ENTRYHI_VPN2 //
+ and t3,t3,PID_MASK << ENTRYHI_PID // isolate current PID
+ or a1,t3,a1 // merge PID with VPN2 of virtual address
+ mtc0 a1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazzard
+ nop //
+ mfc0 t3,index // read result of probe
+ mtc0 t0,entrylo0 // set first PTE value
+ mtc0 t1,entrylo1 // set second PTE value
+ bltz t3,20f // if ltz, entry is not in TB
+ nop // fill
+
+#if DBG
+
+ sltu t4,t3,FIXED_ENTRIES // check if fixed entry within range
+ beq zero,t4,10f // if eq, index not in fixed region
+ nop //
+ break KERNEL_BREAKPOINT // break into debugger
+
+#endif
+
+10: tlbwi // overwrite indexed entry
+ nop // 3 cycle hazzard
+ nop //
+ b 30f //
+ nop //
+
+20: tlbwr // overwrite random TB entry
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+30: ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeFillEntryTb
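+
+//
+// Editorial note: a C sketch of the fill algorithm above. The read_*/write_*
+// and tlb_* intrinsics are hypothetical stand-ins for the coprocessor 0
+// instruction sequences and are not real kernel interfaces.
+//
+//     VOID
+//     SketchFillEntryTb (
+//         IN ULONG PteLo0,
+//         IN ULONG PteLo1,
+//         IN ULONG Virtual
+//         )
+//     {
+//         ULONG Vpn2AndPid;
+//         LONG Index;
+//
+//         Vpn2AndPid = (Virtual & ~((1 << ENTRYHI_VPN2) - 1)) |
+//                      (read_entryhi() & (PID_MASK << ENTRYHI_PID));
+//
+//         write_entryhi(Vpn2AndPid);         // VPN2 of target page + current PID
+//         Index = tlb_probe();               // negative => page not in the TB
+//         write_entrylo0(PteLo0);            // PTE pair to install
+//         write_entrylo1(PteLo1);
+//         if (Index >= 0) {
+//             tlb_write_indexed();           // overwrite the matching entry
+//         } else {
+//             tlb_write_random();            // otherwise evict a random entry
+//         }
+//     }
+//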
+
+ SBTTL("Fill Large Translation Buffer Entry")
+//++
+//
+// VOID
+// KeFillLargeEntryTb (
+// IN HARDWARE_PTE Pte[],
+// IN PVOID Virtual,
+// IN ULONG PageSize
+// )
+//
+// Routine Description:
+//
+// This function fills a large translation buffer entry.
+//
+// N.B. It is assumed that the large entry is not in the TB and therefore
+// the TB is not probed.
+//
+// Arguments:
+//
+// Pte (a0) - Supplies a pointer to the page table entries that are to be
+// written into the TB.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be filled in the translation buffer.
+//
+// PageSize (a2) - Supplies the size of the large page table entry.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeFillLargeEntryTb)
+
+ and a0,a0,~0x7 // clear low bits of PTE address
+ lw t0,0(a0) // get first PTE value
+ lw t1,4(a0) // get second PTE value
+ subu a2,a2,1 // compute the page mask value
+ srl a2,a2,PAGE_SHIFT //
+ sll a2,a2,PAGE_SHIFT + 1 //
+ nor a3,a2,zero // compute virtual address mask
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,entryhi // get current PID and VPN2
+ srl a1,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll a1,a1,ENTRYHI_VPN2 //
+ and a1,a3,a1 // isolate large entry virtual address
+ and t3,t3,PID_MASK << ENTRYHI_PID // isolate current PID
+ or a1,t3,a1 // merge PID with VPN2 of virtual address
+ li a3,LARGE_ENTRY // set large entry index
+ mtc0 a1,entryhi // set entry high value for large entry
+ mtc0 a2,pagemask // set page mask value
+ mtc0 a3,index //
+ mtc0 t0,entrylo0 // set first PTE value
+ mtc0 t1,entrylo1 // set second PTE value
+ nop // 1 cycle hazzard
+ tlbwi // overwrite large TB entry
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ mtc0 zero,pagemask // clear page mask value
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeFillLargeEntryTb
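+
+//
+// Editorial note: the page mask computation above, restated in C under the
+// assumption that PAGE_SHIFT is 12 (4KB base pages); for a 64KB large page
+// the computed mask is 0x1e000, the R4x00 PAGEMASK encoding for that size.
+// Pid stands for the current PID bits read from ENTRYHI.
+//
+//     PageMask    = ((PageSize - 1) >> PAGE_SHIFT) << (PAGE_SHIFT + 1);
+//     VirtualMask = ~PageMask;               // bits covered by the large page
+//     EntryHi     = (Virtual & ~((1 << ENTRYHI_VPN2) - 1) & VirtualMask) | Pid;
+//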
+
+ SBTTL("Fill Fixed Translation Buffer Entry")
+//++
+//
+// VOID
+// KeFillFixedEntryTb (
+// IN HARDWARE_PTE Pte[],
+// IN PVOID Virtual,
+// IN ULONG Index
+// )
+//
+// Routine Description:
+//
+// This function fills a fixed translation buffer entry.
+//
+// Arguments:
+//
+// Pte (a0) - Supplies a pointer to the page table entries that are to be
+// written into the TB.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be filled in the translation buffer.
+//
+// Index (a2) - Supplies the index where the TB entry is to be written.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeFillFixedEntryTb)
+
+ lw t0,0(a0) // get first PTE value
+ lw t1,4(a0) // get second PTE value
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,entryhi // get current PID and VPN2
+ srl a1,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll a1,a1,ENTRYHI_VPN2 //
+ and t3,t3,PID_MASK << ENTRYHI_PID // isolate current PID
+ or a1,t3,a1 // merge PID with VPN2 of virtual address
+ mtc0 a1,entryhi // set VPN2 and PID for probe
+ mtc0 t0,entrylo0 // set first PTE value
+ mtc0 t1,entrylo1 // set second PTE value
+ mtc0 a2,index // set TB entry index
+ nop // 1 cycle hazzard
+ tlbwi // overwrite indexed TB entry
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeFillFixedEntryTb
+
+ SBTTL("Flush Entire Translation Buffer")
+//++
+//
+// VOID
+// KeFlushCurrentTb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function flushes the random part of the translation buffer.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeFlushCurrentTb)
+
+ j KiFlushRandomTb // execute common code
+
+ .end KeFlushCurrentTb
+
+ SBTTL("Flush Fixed Translation Buffer Entries")
+//++
+//
+// VOID
+// KiFlushFixedTb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function is called to flush all the fixed entries from the
+// translation buffer.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushFixedTb)
+
+ .set noreorder
+ .set noat
+ move t0,zero // set base index of fixed TB entries
+ j KiFlushTb //
+ mfc0 t3,wired // set highest index number + 1
+ .set at
+ .set reorder
+
+ .end KiFlushFixedTb
+
+ SBTTL("Flush Random Translation Buffer Entries")
+//++
+//
+// VOID
+// KiFlushRandomTb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function is called to flush all the random entries from the TB.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushRandomTb)
+
+ .set noreorder
+ .set noat
+ mfc0 t0,wired // set base index of random TB entries
+ lw t3,KeNumberTbEntries // set number of entries
+ .set at
+ .set reorder
+
+ ALTERNATE_ENTRY(KiFlushTb)
+
+ li t4,KSEG0_BASE // set high part of TB entry
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ sll t0,t0,INDEX_INDEX // shift starting index into position
+ sll t3,t3,INDEX_INDEX // shift ending index into position
+ and t1,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+ li t4,KSEG0_BASE // set invalidate address
+ or t4,t4,t1 // merge PID with VPN2 of virtual address
+ mtc0 zero,entrylo0 // set low part of TB entry
+ mtc0 zero,entrylo1 //
+ mtc0 t4,entryhi //
+ mtc0 t0,index // set TB entry index
+10: addu t0,t0,1 << INDEX_INDEX //
+ tlbwi // write TB entry
+ bne t0,t3,10b // if ne, more entries to flush
+ mtc0 t0,index // set TB entry index
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KiFlushRandomTb
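+
+//
+// Editorial note: a C sketch of the flush loop above; every entry from the
+// first unwired index through KeNumberTbEntries - 1 is overwritten with an
+// invalid global KSEG0 mapping. The intrinsics are hypothetical stand-ins
+// for the coprocessor 0 sequences.
+//
+//     VOID
+//     SketchFlushRandomTb (
+//         VOID
+//         )
+//     {
+//         ULONG Index;
+//         ULONG Pid = read_entryhi() & (PID_MASK << ENTRYHI_PID);
+//
+//         write_entrylo0(0);                 // invalid PTE pair
+//         write_entrylo1(0);
+//         write_entryhi(KSEG0_BASE | Pid);   // harmless invalidate address
+//         for (Index = read_wired(); Index < KeNumberTbEntries; Index += 1) {
+//             write_index(Index);
+//             tlb_write_indexed();
+//         }
+//     }
+//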
+
+ SBTTL("Flush Multiple TB Entry")
+//++
+//
+// VOID
+// KiFlushMultipleTb (
+// IN BOOLEAN Invalid,
+// IN PVOID *Virtual,
+// IN ULONG Count
+// )
+//
+// Routine Description:
+//
+// This function flushes multiple entries from the translation buffer.
+//
+// Arguments:
+//
+// Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies a pointer to an array of virtual addresses of
+// the entries that are flushed from the translation buffer.
+//
+// Count (a2) - Supplies the number of TB entries to flush.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushMultipleTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ nop //
+ and a3,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+10: lw v0,0(a1) // get virtual address
+ addu a1,a1,4 // advance to next entry
+ subu a2,a2,1 // reduce number of entries
+ srl t2,v0,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll t2,t2,ENTRYHI_VPN2 //
+ or t2,t2,a3 // merge PID with VPN2 of virtual address
+ mtc0 t2,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ tlbp // probe TB for entry
+ nop // 2 cycle hazzard
+ nop //
+ mfc0 t3,index // read result of probe
+ nop //
+ bltz t3,30f // if ltz, entry is not in TB
+ lui t2,KSEG0_BASE >> 16 // set invalidate address
+
+#if DBG
+
+ sltu t4,t3,FIXED_ENTRIES // check if fixed entry region
+ beq zero,t4,20f // if eq, index not in fixed region
+ nop //
+ break KERNEL_BREAKPOINT // break into debugger
+
+#endif
+
+20: mtc0 zero,entrylo0 // set low part of TB entry
+ mtc0 zero,entrylo1 //
+ or t2,t2,a3 // merge PID with VPN2 of invalid address
+ mtc0 t2,entryhi // set VPN2 and PID for TB write
+ nop // 1 cycle hazzard
+ tlbwi // overwrite index TB entry
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+30: bgtz a2,10b // if gtz, more entires to flush
+ mtc0 zero,pagemask // restore page mask register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ j ra // return
+
+ .end KiFlushMultipleTb
+
+ SBTTL("Flush Single TB Entry")
+//++
+//
+// VOID
+// KiFlushSingleTb (
+// IN BOOLEAN Invalid,
+// IN PVOID Virtual
+// )
+//
+// Routine Description:
+//
+// This function flushes a single entry from the translation buffer.
+//
+// Arguments:
+//
+// Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be flushed from the translation buffer.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushSingleTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ srl t2,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll t2,t2,ENTRYHI_VPN2 //
+ and a2,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t2,t2,a2 // merge PID with VPN2 of virtual address
+ mtc0 t2,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ tlbp // probe TB for entry
+ nop // 2 cycle hazzard
+ nop //
+ mfc0 t3,index // read result of probe
+ nop //
+ bltz t3,20f // if ltz, entry is not in TB
+ lui t2,KSEG0_BASE >> 16 // set invalid address
+
+#if DBG
+
+ sltu t4,t3,FIXED_ENTRIES // check if fixed entry region
+ beq zero,t4,10f // if eq, index not in fixed region
+ nop //
+ break KERNEL_BREAKPOINT // break into debugger
+
+#endif
+
+10: mtc0 zero,entrylo0 // set low part of TB entry
+ mtc0 zero,entrylo1 //
+ or t2,t2,a2 // merge PID with VPN2 of invalid address
+ mtc0 t2,entryhi // set VPN2 and PID for TB write
+ nop // 1 cycle hazzard
+ tlbwi // overwrite index TB entry
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ mtc0 zero,pagemask // restore page mask register
+ .set at
+ .set reorder
+
+20: ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ j ra // return
+
+ .end KiFlushSingleTb
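+
+//
+// Editorial note: a C sketch of the single-entry flush above, using the same
+// hypothetical TB intrinsics as the earlier sketches.
+//
+//     VOID
+//     SketchFlushSingleTb (
+//         IN ULONG Virtual
+//         )
+//     {
+//         ULONG Pid = read_entryhi() & (PID_MASK << ENTRYHI_PID);
+//         LONG Index;
+//
+//         write_entryhi((Virtual & ~((1 << ENTRYHI_VPN2) - 1)) | Pid);
+//         Index = tlb_probe();               // negative => nothing to flush
+//         if (Index >= 0) {
+//             write_entrylo0(0);             // invalid PTE pair
+//             write_entrylo1(0);
+//             write_entryhi(KSEG0_BASE | Pid);
+//             tlb_write_indexed();           // overwrite the matching entry
+//         }
+//     }
+//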
+
+ SBTTL("Probe Tb Entry")
+//++
+//
+// ULONG
+// KiProbeEntryTb (
+// IN PVOID VirtualAddress
+// )
+//
+// Routine Description:
+//
+// This function is called to determine if a specified entry is valid
+// and within the fixed portion of the TB.
+//
+// Arguments:
+//
+// VirtualAddress - Supplies the virtual address to probe.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the specified entry is valid and within
+// the fixed part of the TB. Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(KiProbeEntryTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ srl t2,a0,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll t2,t2,ENTRYHI_VPN2 //
+ and t1,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t2,t2,t1 // merge PID with VPN2 of virtual address
+ mtc0 t2,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazzard
+ nop //
+ mfc0 t3,index // read result of probe
+ li v0,FALSE // set to return failure
+ bltz t3,20f // if ltz, entry is not in TB
+ sll a0,a0,0x1f - (ENTRYHI_VPN2 - 1) // shift VPN<12> into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ bltz a0,10f // if ltz, check second PTE
+ mfc0 t2,entrylo1 // get second PTE for probe
+ mfc0 t2,entrylo0 // get first PTE for probe
+10: mtc0 t1,entryhi // restore current PID
+ mtc0 zero,pagemask // restore page mask register
+ sll t2,t2,0x1f - ENTRYLO_V // shift valid bit into sign position
+ bgez t2,20f // if geq, entry is not valid
+ srl t3,INDEX_INDEX // isolate TB index
+ and t3,t3,0x3f //
+ mfc0 t4,wired // get number of wired entries
+ nop // fill
+ sltu v0,t3,t4 // check if entry in fixed part of TB
+ .set at
+ .set reorder
+
+20: ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ j ra // return
+
+ .end KiProbeEntryTb
+
+ SBTTL("Read Tb Entry")
+//++
+//
+// VOID
+// KiReadEntryTb (
+// IN ULONG Index,
+// OUT PTB_ENTRY TbEntry
+// )
+//
+// Routine Description:
+//
+// This function is called to read an entry from the TB.
+//
+// Arguments:
+//
+// Index - Supplies the index of the entry to read.
+//
+// TbEntry - Supplies a pointer to a TB entry structure that receives the
+// contents of the specified TB entry.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiReadEntryTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ sll a0,INDEX_INDEX // shift index into position
+ mfc0 t1,entryhi // save entry high register
+ mtc0 a0,index // set TB entry index
+ nop //
+ tlbr // read entry from TB
+ nop // 3 cycle hazzard
+ nop //
+ nop //
+ mfc0 t2,entrylo0 // save first PTE value
+ mfc0 t3,entrylo1 // save second PTE value
+ mfc0 t4,entryhi // save entry high value
+ mfc0 t5,pagemask // save page mask value
+ mtc0 t1,entryhi // restore entry high register
+ mtc0 zero,pagemask // restore page mask register
+ nop // 1 cycle hazzard
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ sw t2,TbEntrylo0(a1) // set first PTE value
+ sw t3,TbEntrylo1(a1) // set second PTE value
+ sw t4,TbEntryhi(a1) // set entry high value
+ sw t5,TbPagemask(a1) // set page mask value
+ j ra // return
+
+ .end KiReadEntryTb
+
+ SBTTL("Passive Release")
+//++
+//
+// VOID
+// KiPassiveRelease (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function is called when an interrupt has been passively released.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiPassiveRelease)
+
+ j ra // return
+
+ .end KiPassiveRelease
diff --git a/private/ntos/ke/mips/xxapcint.s b/private/ntos/ke/mips/xxapcint.s
new file mode 100644
index 000000000..0d7c5c8c8
--- /dev/null
+++ b/private/ntos/ke/mips/xxapcint.s
@@ -0,0 +1,123 @@
+// TITLE("Asynchronous Procedure Call (APC) Interrupt")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxapcint.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process the
+// Asynchronous Procedure Call (APC) interrupt.
+//
+// Author:
+//
+// David N. Cutler (davec) 3-Apr-1990
+//
+// Environment:
+//
+// Kernel mode only, IRQL APC_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Asynchronous Procedure Call Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a software interrupt generated
+// at APC_LEVEL. Its function is to allocate an exception frame and call
+// the kernel APC delivery routine to deliver kernel mode APCs and to check
+// if a user mode APC should be delivered. If a user mode APC should be
+// delivered, then the kernel APC delivery routine constructs a context
+// frame on the user stack and alters the exception and trap frames so that
+// control will be transferred to the user APC dispatcher on return from the
+// interrupt.
+//
+// N.B. On entry to this routine all integer registers and the volatile
+// floating registers have been saved. The remainder of the machine
+// state is saved if and only if the previous mode was user mode.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KiApcInterrupt, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Determine the previous mode.
+//
+
+ lw t0,TrPsr(s8) // get saved processor status
+ srl t0,t0,PSR_KSU + 1 // isolate previous mode
+ and a0,t0,0x1 //
+ beq zero,a0,20f // if eq, kernel mode
+
+//
+// The previous mode was user.
+//
+// Save the nonvolatile floating state so a context record can be
+// properly constructed to deliver an APC to user mode if required.
+// It is also necessary to save the volatile floating state for
+// suspend/resume operations.
+//
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Clear APC interrupt.
+//
+
+20: DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ li t2,~APC_INTERRUPT // clear APC interrupt pending
+ and t1,t1,t2 //
+ mtc0 t1,cause //
+ nop //
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+//
+// Attempt to deliver an APC.
+//
+
+ move a1,sp // set address of exception frame
+ move a2,s8 // set address of trap frame
+ jal KiDeliverApc // call APC delivery routine
+
+//
+// Deallocate stack frame and return.
+//
+
+ lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate exception frame
+ j ra // return
+
+ .end KiApcInterrupt
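+
+//
+// Editorial note: the previous-mode test and the delivery call above
+// correspond to the following C sketch; the bit position and the argument
+// order mirror the assembler and are assumptions, not a documented interface.
+//
+//     PreviousMode = (TrapFrame->Psr >> (PSR_KSU + 1)) & 0x1;
+//     if (PreviousMode == UserMode) {
+//         /* save f20 - f31 so a user mode context record can be built */
+//     }
+//     /* clear the APC bit in the cause register, then deliver */
+//     KiDeliverApc(PreviousMode, ExceptionFrame, TrapFrame);
+//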
diff --git a/private/ntos/ke/mips/xxclock.s b/private/ntos/ke/mips/xxclock.s
new file mode 100644
index 000000000..cdd8a7818
--- /dev/null
+++ b/private/ntos/ke/mips/xxclock.s
@@ -0,0 +1,592 @@
+// TITLE("Interval and Profile Clock Interrupts")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxclock.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process the
+// interval and profile clock interrupts.
+//
+// Author:
+//
+// David N. Cutler (davec) 27-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeMaximumIncrement 4
+ .extern KeTickCount 3 * 4
+ .extern KeTimeAdjustment 4
+ .extern KiAdjustDpcThreshold 4
+ .extern KiIdealDpcRate 4
+ .extern KiMaximumDpcQueueDepth 4
+ .extern KiProfileListHead 2 * 4
+ .extern KiProfileLock 4
+ .extern KiTickOffset 4
+
+ SBTTL("Update System Time")
+//++
+//
+// VOID
+// KeUpdateSystemTime (
+// IN PKTRAP_FRAME TrapFrame,
+// IN ULONG TimeIncrement
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the system time and check to
+// determine if a timer has expired.
+//
+// N.B. This routine is executed on a single processor in a multiprocessor
+// system. The remainder of the processors only execute the quantum end
+// and runtime update code.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// TimeIncrement (a1) - Supplies the time increment in 100ns units.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeUpdateSystemTime)
+
+//
+// Update the interrupt time.
+//
+
+ ld t8,KiPcr2 + Pc2InterruptTime // get interrupt time
+ daddu t8,t8,a1 // add time increment value
+ sd t8,KiPcr2 + Pc2InterruptTime // store interrupt time
+
+//
+// Update tick offset and check for "system clock" tick.
+//
+
+ lw a2,KiTickOffset // get tick offset value
+ sub a2,a2,a1 // subtract time increment
+ ld v0,KeTickCount // get low and high 1 tick count
+ la t0,KiTimerTableListHead // get base address of timer table
+ sw a2,KiTickOffset // store tick offset value
+ bgtz a2,10f // if gtz, tick not completed
+ lw a3,KeMaximumIncrement // get maximum increment value
+
+//
+// Update system time.
+//
+
+ lw t1,KeTimeAdjustment // get time adjustment value
+ ld t2,KiPcr2 + Pc2SystemTime // get low and high 1 system time
+ daddu t2,t2,t1 // add time increment value
+ sd t2,KiPcr2 + Pc2SystemTime // store low and high 1 system time
+
+//
+// Update the tick count.
+//
+// N.B. The tick count is updated in a very strict manner so that an
+// interlock does not have to be used in an MP system. This is
+// required for backward compatibility with old drivers and file
+// systems.
+//
+
+ daddu t2,v0,1 // increment tick count
+ dsrl t3,t2,32 // get high half of tick count
+ sw t2,KiPcr2 + Pc2TickCountLow(zero) // store low tick count
+
+ .set noreorder
+ .set noat
+ sw t3,KeTickCount + 8 // store high 2 tick count
+ sd t2,KeTickCount // store low and high 1 tick count
+ .set at
+ .set reorder
+
+//
+// Compute next tick offset value.
+//
+
+ addu a3,a3,a2 // add maximum increment to residue
+ sw a3,KiTickOffset // store tick offset value
+
+//
+// Check to determine if a timer has expired at the current hand value.
+//
+
+ and t1,v0,TIMER_TABLE_SIZE - 1 // reduce to timer table index
+ sll t2,t1,3 // compute timer table listhead address
+ addu t2,t2,t0 //
+ lw t3,LsFlink(t2) // get address of first timer in list
+ beq t2,t3,5f // if eq, no timer active
+
+//
+// Get the expiration time from the timer object.
+//
+// N.B. The offset to the timer list entry must be subtracted out of the
+// displacement calculation.
+//
+
+ ld t4,TiDueTime - TiTimerListEntry(t3) // get timer due time
+ sltu t9,t8,t4 // check if timer is due
+ beq zero,t9,20f // if eq, timer has expired
+
+//
+// Check to determine if a timer has expired at the next hand value.
+//
+
+5: addu v0,v0,1 // advance hand value to next entry
+10: and t1,v0,TIMER_TABLE_SIZE - 1 // reduce to timer table index
+ sll t2,t1,3 // compute timer table listhead address
+ addu t2,t2,t0 //
+ lw t3,LsFlink(t2) // get address of first timer in list
+ beq t2,t3,40f // if eq, no timer active
+
+//
+// Get the expiration time from the timer object.
+//
+// N.B. The offset to the timer list entry must be subtracted out of the
+// displacement calculation.
+//
+
+ ld t4,TiDueTime - TiTimerListEntry(t3) // get timer due time
+ sltu t9,t8,t4 // check if timer is due
+ bne zero,t9,40f // if ne, timer has not expired
+
+//
+// Put timer expiration DPC in the system DPC list and initiate a dispatch
+// interrupt on the current processor.
+//
+
+20: la t0,KiTimerExpireDpc // get expiration DPC address
+ lw a1,KiPcr + PcPrcb(zero) // get address of PRCB
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ addu t3,a1,PbDpcListHead // compute DPC listhead address
+ addu v1,a1,PbDpcLock // compute DPC lock address
+
+#if !defined(NT_UP)
+
+30: ll t4,0(v1) // get current lock value
+ move t5,t3 // set lock ownership value
+ bne zero,t4,30b // if ne, spin lock owned
+ sc t5,0(v1) // set spin lock owned
+ beq zero,t5,30b // if eq, store conditional failed
+
+#endif
+
+ lw t4,DpLock(t0) // get DPC inserted state
+ bne zero,t4,35f // if ne, DPC entry already inserted
+ lw t4,LsBlink(t3) // get address of last entry in list
+ sw v1,DpLock(t0) // set DPC inserted state
+ sw v0,DpSystemArgument1(t0) // set timer table hand value
+ addu t0,t0,DpDpcListEntry // compute address of DPC list entry
+ sw t0,LsBlink(t3) // set address of new last entry
+ sw t0,LsFlink(t4) // set next link in old last entry
+ sw t3,LsFlink(t0) // set address of next entry
+ sw t4,LsBlink(t0) // set address of previous entry
+ lw t5,PbDpcQueueDepth(a1) // increment DPC queue depth
+ addu t5,t5,1 //
+ sw t5,PbDpcQueueDepth(a1) //
+
+ .set noreorder
+ .set noat
+ mfc0 t3,cause // get exception cause register
+ or t3,t3,DISPATCH_INTERRUPT // merge dispatch interrupt request
+ mtc0 t3,cause // set exception cause register
+ .set at
+ .set reorder
+
+35: //
+
+#if !defined(NT_UP)
+
+ sw zero,0(v1) // set spin lock not owned
+
+#endif
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+40: blez a2,50f // if lez, full tick
+ j ra // return
+50: j KeUpdateRunTime
+
+ .end KeUpdateSystemTime
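+
+//
+// Editorial note: the tick accounting above, restated as a C sketch; the
+// names follow the assembler symbols and KiTickOffset is treated as a signed
+// count of 100ns units remaining in the current tick.
+//
+//     InterruptTime += TimeIncrement;        // always advances
+//     KiTickOffset  -= TimeIncrement;
+//     if (KiTickOffset <= 0) {
+//         SystemTime  += KeTimeAdjustment;   // a full tick has elapsed
+//         KeTickCount += 1;
+//         KiTickOffset += KeMaximumIncrement;
+//     }
+//
+// In either case the timer table listhead selected by the hand value is then
+// checked; if the first timer in the list has expired, KiTimerExpireDpc is
+// queued and a dispatch interrupt is requested on the current processor.
+//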
+
+ SBTTL("Update Thread and Process Runtime")
+//++
+//
+// VOID
+// KeUpdateRunTime (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the runtime of the current
+// thread, update the runtime of the current thread's process, and decrement
+// the current thread's quantum.
+//
+// N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeUpdateRunTime)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t2,ThApcState + AsProcess(t0) // get address of current process
+ lw t3,TrPsr(a0) // get saved processor status
+ lw t5,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t7,PbDpcRoutineActive(t5) // get DPC active flag
+ and t4,t3,0x1 << PSR_PMODE // isolate previous processor mode
+ bne zero,t4,30f // if ne, previous mode was user
+
+//
+// If a DPC is active, then increment the time spent executing DPC routines.
+// Otherwise, if the old IRQL is greater than DPC level, then increment the
+// time spent executing interrupt services routines. Otherwise, increment
+// the time spent in kernel mode for the current thread.
+//
+
+ lbu t6,TrOldIrql(a0) // get previous IRQL
+ subu t6,t6,DISPATCH_LEVEL // compare IRQL with DPC level
+ bltz t6,20f // if ltz, increment thread kernel time
+ addu t8,t5,PbInterruptTime // compute interrupt time address
+ bgtz t6,10f // if gtz, increment interrupt time
+ addu t8,t5,PbDpcTime // compute DPC time address
+ beq zero,t7,20f // if eq, increment thread kernel time
+
+//
+// Update the time spent in DPC/interrupt processing.
+//
+
+10: lw t6,0(t8) // get processor time
+ addu t6,t6,1 // increment processor time
+ sw t6,0(t8) // store processor time
+ addu t9,t5,PbKernelTime // compute processor kernel time address
+ b 50f //
+
+//
+// Update the time spent in kernel mode for the current thread.
+//
+
+20: lw t6,ThKernelTime(t0) // get kernel time
+ addu t6,t6,1 // increment kernel time
+ sw t6,ThKernelTime(t0) // store kernel time
+ addu t2,t2,PrKernelTime // compute process kernel time address
+ addu t9,t5,PbKernelTime // compute processor kernel time address
+ b 40f //
+
+//
+// Update the time spent in user mode for the current thread.
+//
+
+30: lw t6,ThUserTime(t0) // get user time
+ addu t6,t6,1 // increment user time
+ sw t6,ThUserTime(t0) // store user time
+ addu t2,t2,PrUserTime // compute process user time address
+ addu t9,t5,PbUserTime // compute processor user time address
+
+//
+// Update the time spent in kernel/user mode for the current thread's process.
+//
+// N.B. The update of the process time must be synchronized across processors.
+//
+
+40: ll t6,0(t2) // get process time
+ addu t6,t6,1 // increment process time
+ sc t6,0(t2) // store process time
+ beq zero,t6,40b // if eq, store conditional failed
+
+//
+// Update the time spent in kernel/user mode for the current processor.
+//
+
+50: lw t6,0(t9) // get processor time
+ addu t6,t6,1 // increment processor time
+ sw t6,0(t9) // store processor time
+
+//
+// Update the DPC request rate which is computed as the average between
+// the previous rate and the current rate.
+//
+
+ lw a0,PbDpcCount(t5) // get current DPC count
+ lw a1,PbDpcLastCount(t5) // get last DPC count
+ lw a2,PbDpcRequestRate(t5) // get last DPC request rate
+ lw a3,PbDpcQueueDepth(t5) // get current DPC queue depth
+ sw a0,PbDpcLastCount(t5) // set last DPC count
+ subu a0,a0,a1 // compute count during interval
+ addu a0,a0,a2 // compute sum of current and last
+ srl a0,a0,1 // average current and last
+ sw a0,PbDpcRequestRate(t5) // set new DPC request rate
+
+//
+// If the current DPC queue depth is not zero, a DPC routine is not active,
+// and a DPC interrupt has not been requested, then request a dispatch
+// interrupt, decrement the maximum DPC queue depth, and reset the threshold
+// counter if appropriate.
+//
+
+ lw v0,PbDpcInterruptRequested(t5) // get DPC interrupt requested
+ beq zero,a3,60f // if eq, DPC queue is empty
+ or v0,v0,t7 // merge DPC interrupt requested and active
+ bne zero,v0,60f // if ne, DPC active or interrupt requested
+
+ DISABLE_INTERRUPTS(a1) // disable interrupt
+
+ .set noreorder
+ .set noat
+ mfc0 a2,cause // get exception cause register
+ lw v0,PbMaximumDpcQueueDepth(t5) // get maximum queue depth
+ lw v1,KiIdealDpcRate // get ideal DPC rate
+ or a2,a2,DISPATCH_INTERRUPT // merge dispatch interrupt request
+ mtc0 a2,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(a1) // enable interrupts
+
+ sltu a0,a0,v1 // test if current rate less than ideal
+ lw a1,KiAdjustDpcThreshold // reset initial threshold counter
+ sw a1,PbAdjustDpcThreshold(t5) //
+ beq zero,a0,KiDecrementQuantum // if eq, rate greater or equal ideal
+ subu v0,v0,1 // decrement maximum DPC queue depth
+ beq zero,v0,KiDecrementQuantum // if eq, current value is one
+ sw v0,PbMaximumDpcQueueDepth(t5) // set new maximum DPC queue depth
+ b KiDecrementQuantum //
+
+//
+// The DPC queue is empty or a DPC routine is active or a DPC interrupt
+// has been requested. Count down the adjustment threshold and if the
+// count reaches zero, then increment the maximum DPC queue depth, but
+// not above the initial value and reset the adjustment threshold value.
+//
+
+60: lw a0,PbAdjustDpcThreshold(t5) // get adjustment threshold counter
+ lw a1,PbMaximumDpcQueueDepth(t5) // get current maximum queue depth
+ lw a2,KiMaximumDpcQueueDepth // get initial maximum queue depth
+ subu a0,a0,1 // decrement adjustment threshold counter
+ sw a0,PbAdjustDpcThreshold(t5) //
+ bne zero,a0,KiDecrementQuantum // if ne, adjustment counter not zero
+ lw a0,KiAdjustDpcThreshold // set new DPC threshold counter
+ sw a0,PbAdjustDpcThreshold(t5) //
+ beq a1,a2,KiDecrementQuantum // if eq, currently at maximum depth
+ addu a1,a1,1 // increment current maximum queue depth
+ sw a1,PbMaximumDpcQueueDepth(t5) // set new maximum DPC queue depth
+
+//
+// Decrement current thread quantum and check to determine if a quantum end
+// has occurred.
+//
+
+ ALTERNATE_ENTRY(KiDecrementQuantum)
+
+ lb t6,ThQuantum(t0) // get current thread quantum
+ sub t6,t6,CLOCK_QUANTUM_DECREMENT // decrement current quantum
+ sb t6,ThQuantum(t0) // store thread quantum
+ bgtz t6,60f // if gtz, quantum remaining
+
+//
+// Set quantum end flag and initiate a dispatch interrupt on the current
+// processor.
+//
+
+ lw t1,PbIdleThread(t5) // get address of idle thread
+ beq t0,t1,60f // if eq, idle thread
+ sw sp,KiPcr + PcQuantumEnd(zero) // set quantum end indicator
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ or t1,t1,DISPATCH_INTERRUPT // merge dispatch interrupt request
+ mtc0 t1,cause // set exception cause register
+ nop // 1 cycle hazzard
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+60: j ra // return
+
+ .end KeUpdateRunTime
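+
+//
+// Editorial note: the DPC tuning above, restated as a C sketch against the
+// processor block fields named in the assembler; the dispatch interrupt
+// request is abstracted as a hypothetical helper.
+//
+//     Rate = (Prcb->DpcRequestRate + (Prcb->DpcCount - Prcb->DpcLastCount)) >> 1;
+//     Prcb->DpcLastCount = Prcb->DpcCount;
+//     Prcb->DpcRequestRate = Rate;
+//
+//     if ((Prcb->DpcQueueDepth != 0) &&
+//         (Prcb->DpcRoutineActive == FALSE) &&
+//         (Prcb->DpcInterruptRequested == FALSE)) {
+//         request_dispatch_interrupt();      // hypothetical helper
+//         Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+//         if ((Rate < KiIdealDpcRate) && (Prcb->MaximumDpcQueueDepth > 1)) {
+//             Prcb->MaximumDpcQueueDepth -= 1;
+//         }
+//     } else if (--Prcb->AdjustDpcThreshold == 0) {
+//         Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+//         if (Prcb->MaximumDpcQueueDepth < KiMaximumDpcQueueDepth) {
+//             Prcb->MaximumDpcQueueDepth += 1;
+//         }
+//     }
+//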
+
+
+ SBTTL("Process Profile Interrupt")
+//++
+//
+// VOID
+// KeProfileInterruptWithSource (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE ProfileSource
+// )
+//
+// VOID
+// KeProfileInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// profile timer. Its function is to update the profile information for
+// the currently active profile objects.
+//
+// N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// ProfileSource (a1) - Supplies the source of the profile interrupt.
+// KeProfileInterrupt is an alternate entry for backwards
+// compatibility that sets the source to zero (ProfileTime).
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument save area
+ .space 3 * 4 //
+PfRa: .space 4 // return address
+ProfileFrameLength: // profile frame length
+
+ NESTED_ENTRY(KeProfileInterrupt, ProfileFrameLength, zero)
+
+ move a1, zero // set profile source to ProfileTime
+
+ ALTERNATE_ENTRY(KeProfileInterruptWithSource)
+
+ subu sp,sp,ProfileFrameLength // allocate stack frame
+ sw ra,PfRa(sp) // save return address
+
+ PROLOGUE_END
+
+#if !defined(NT_UP)
+
+10: ll t0,KiProfileLock // get current lock value
+ move t1,s0 // set ownership value
+ bne zero,t0,10b // if ne, spin lock owned
+ sc t1,KiProfileLock // set spin lock owned
+ beq zero,t1,10b // if eq, store conditional failed
+
+#endif
+
+ lw a2,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw a2,ThApcState + AsProcess(a2) // get address of current process
+ addu a2,a2,PrProfileListHead // compute profile listhead address
+ jal KiProcessProfileList // process the process profile list
+ la a2,KiProfileListHead // get profile listhead address
+ jal KiProcessProfileList // process the system profile list
+
+#if !defined(NT_UP)
+
+ sw zero,KiProfileLock // set spin lock not owned
+
+#endif
+
+ lw ra,PfRa(sp) // restore return address
+ addu sp,sp,ProfileFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KeProfileInterrupt
+
+ SBTTL("Process Profile List")
+//++
+//
+// VOID
+// KiProcessProfileList (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE Source,
+// IN PLIST_ENTRY ListHead
+// )
+//
+// Routine Description:
+//
+// This routine is called to process a profile list.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// Source (a1) - Supplies profile source to match
+//
+// ListHead (a2) - Supplies a pointer to a profile list.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiProcessProfileList)
+
+ lw t8,LsFlink(a2) // get address of next entry
+ li a3,0xfffffffc // set bucket mask value
+ beq a2,t8,30f // if eq, end of list
+ lw t0,TrFir(a0) // get interrupt PC address
+ lw t6,KiPcr + PcSetMember(zero) // get current processor member
+
+//
+// Scan profile list and increment profile buckets as appropriate.
+//
+
+10: lw t1,PfRangeBase - PfProfileListEntry(t8) // get base of range
+ lw t2,PfRangeLimit - PfProfileListEntry(t8) // get limit of range
+ lhu t3,PfSource - PfProfileListEntry(t8) // get source
+ lhu t4,PfAffinity - PfProfileListEntry(t8) // get affinity
+ bne t3,a1,20f // if ne, source mismatch
+ sltu v0,t0,t1 // check against range base
+ sltu v1,t0,t2 // check against range limit
+ and t5,t6,t4 // check against processor
+ bne zero,v0,20f // if ne, less than range base
+ beq zero,v1,20f // if eq, not less than range limit
+ beq zero,t5,20f // if eq, affinity mismatch
+ subu t1,t0,t1 // compute offset in range
+ lw t2,PfBucketShift - PfProfileListEntry(t8) // get shift count
+ lw v0,PfBuffer - PfProfileListEntry(t8) // get profile buffer address
+ srl v1,t1,t2 // compute bucket offset
+ and v1,v1,a3 // clear low order offset bits
+ addu v1,v1,v0 // compute bucket address
+ lw v0,0(v1) // increment profile bucket
+ addu v0,v0,1 //
+ sw v0,0(v1) //
+20: lw t8,LsFlink(t8) // get address of next entry
+ bne a2,t8,10b // if ne, more entries in profile list
+30: j ra // return
+
+ .end KiProcessProfileList
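+
+//
+// Editorial note: the bucket update above, restated in C; Profile points at
+// a profile object whose field names follow the assembler offsets and Pc is
+// the interrupted program counter from the trap frame.
+//
+//     if ((Profile->Source == Source) &&
+//         (Pc >= (ULONG)Profile->RangeBase) &&
+//         (Pc <  (ULONG)Profile->RangeLimit) &&
+//         ((Profile->Affinity & CurrentSetMember) != 0)) {
+//
+//         Offset = ((Pc - (ULONG)Profile->RangeBase) >> Profile->BucketShift) & ~0x3;
+//         *(PULONG)((PUCHAR)Profile->Buffer + Offset) += 1;
+//     }
+//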
diff --git a/private/ntos/ke/mips/xxflshtb.c b/private/ntos/ke/mips/xxflshtb.c
new file mode 100644
index 000000000..77b9a44be
--- /dev/null
+++ b/private/ntos/ke/mips/xxflshtb.c
@@ -0,0 +1,593 @@
+/*++
+
+Copyright (c) 1992-1994 Microsoft Corporation
+
+Module Name:
+
+ xxflshtb.c
+
+Abstract:
+
+ This module implements machine dependent functions to flush the
+ translation buffer.
+
+Author:
+
+ David N. Cutler (davec) 13-May-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiFlushEntireTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushMultipleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Number,
+ IN PVOID Virtual,
+ IN PVOID Pid
+ );
+
+VOID
+KiFlushSingleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Virtual,
+ IN PVOID Pid,
+ IN PVOID Parameter3
+ );
+
+VOID
+KeFlushEntireTb (
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the entire translation buffer (TB) on all
+ processors that are currently running threads which are children
+ of the current process or flushes the entire translation buffer
+ on all processors in the host configuration.
+
+Arguments:
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Compute the target set of processors, disable context switching,
+ // and send the flush entire parameters to the target processors,
+ // if any, for execution.
+ //
+
+#if defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#else
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ KiLockContextSwap(&OldIrql);
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushEntireTbTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush TB on current processor.
+ //
+
+ KeFlushCurrentTb();
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+#if defined(NT_UP)
+
+ KeLowerIrql(OldIrql);
+
+#else
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+
+ return;
+}
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushEntireTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing the entire TB.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the entire TB on the current processor.
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+ KeFlushCurrentTb();
+ return;
+}
+
+#endif
+
+
+VOID
+KeFlushMultipleTb (
+ IN ULONG Number,
+ IN PVOID *Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE *PtePointer OPTIONAL,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes multiple entries from the translation buffer
+ on all processors that are currently running threads which are
+ children of the current process or flushes multiple entries from
+ the translation buffer on all processors in the host configuration.
+
+Arguments:
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies an optional pointer to an array of pointers to
+ page table entries that receive the specified page table entry
+ value.
+
+ PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ ASSERT(Number <= FLUSH_MULTIPLE_MAXIMUM);
+
+ //
+ // Compute the target set of processors.
+ //
+
+#if defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#else
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ KiLockContextSwap(&OldIrql);
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= PCR->NotMember;
+
+#endif
+
+ //
+ // If a page table entry address array is specified, then set the
+ // specified page table entries to the specific value.
+ //
+
+ if (ARGUMENT_PRESENT(PtePointer)) {
+ for (Index = 0; Index < Number; Index += 1) {
+ *PtePointer[Index] = PteValue;
+ }
+ }
+
+ //
+ // If any target processors are specified, then send a flush multiple
+ // packet to the target set of processors.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushMultipleTbTarget,
+ (PVOID)Number,
+ (PVOID)Virtual,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush the specified entries from the TB on the current processor.
+ //
+
+ KiFlushMultipleTb(Invalid, &Virtual[0], Number);
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+#if defined(NT_UP)
+
+ KeLowerIrql(OldIrql);
+
+#else
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+
+ return;
+}
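+
+//
+// Editorial note: a hypothetical caller of KeFlushMultipleTb, shown only to
+// illustrate the parameter pairing; Va0, Va1, Pte0, Pte1 and ZeroPte are
+// placeholders owned by the caller and are not defined in this module.
+//
+//     PVOID VirtualAddress[2] = { Va0, Va1 };
+//     PHARDWARE_PTE PtePointer[2] = { Pte0, Pte1 };
+//
+//     KeFlushMultipleTb(2,                   // number of entries
+//                       &VirtualAddress[0],  // pages to flush
+//                       TRUE,                // entries are being invalidated
+//                       FALSE,               // only this process' processors
+//                       &PtePointer[0],      // PTEs to rewrite first
+//                       ZeroPte);            // new PTE value
+//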
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushMultipleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Number,
+ IN PVOID Virtual,
+ IN PVOID Pid
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing multiple TB entries.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Pid - Supplies the PID of the TB entries to flush.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+ PVOID Array[FLUSH_MULTIPLE_MAXIMUM];
+
+ ASSERT((ULONG)Number <= FLUSH_MULTIPLE_MAXIMUM);
+
+ //
+ // Capture the virtual addresses that are to be flushed from the TB
+ // on the current processor and clear the packet address.
+ //
+
+ for (Index = 0; Index < (ULONG)Number; Index += 1) {
+ Array[Index] = ((PVOID *)(Virtual))[Index];
+ }
+
+ KiIpiSignalPacketDone(SignalDone);
+
+ //
+ // Flush the specified virtual addresses from the TB on the current
+ // processor.
+ //
+
+ KiFlushMultipleTb(TRUE, &Array[0], (ULONG)Number);
+ return;
+}
+
+#endif
+
+
+HARDWARE_PTE
+KeFlushSingleTb (
+ IN PVOID Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE PtePointer,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes a single entry from the translation buffer
+ on all processors that are currently running threads which are
+ children of the current process or flushes a single entry from
+ the translation buffer on all processors in the host configuration.
+
+Arguments:
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies a pointer to the page table entry which
+ receives the specified value.
+
+ PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+ The previous contents of the specified page table entry are returned
+ as the function value.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ HARDWARE_PTE OldPte;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Compute the target set of processors.
+ //
+
+#if defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#else
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ KiLockContextSwap(&OldIrql);
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= PCR->NotMember;
+
+#endif
+
+ //
+ // Capture the previous contents of the page table entry and set the
+ // page table entry to the new value.
+ //
+
+ OldPte = *PtePointer;
+ *PtePointer = PteValue;
+
+ //
+ // If any target processors are specified, then send a flush single
+ // packet to the target set of processors.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushSingleTbTarget,
+ (PVOID)Virtual,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush the specified entry from the TB on the current processor.
+ //
+
+ KiFlushSingleTb(Invalid, Virtual);
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+#if defined(NT_UP)
+
+ KeLowerIrql(OldIrql);
+
+#else
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+
+ return OldPte;
+}
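A matching single-entry sketch with another hypothetical caller; the interesting difference from the multiple-entry routine is that the previous PTE contents come back as the return value.

    HARDWARE_PTE
    ExampleReplacePte (
        IN PVOID VirtualAddress,
        IN PHARDWARE_PTE PtePointer,
        IN HARDWARE_PTE NewPteValue
        )
    {
        HARDWARE_PTE OldPteValue;
        KIRQL OldIrql;

        //
        // Entered at DISPATCH_LEVEL; AllProcessors == FALSE limits the flush
        // to processors running threads of the current process.
        //

        OldIrql = KeRaiseIrqlToDpcLevel();
        OldPteValue = KeFlushSingleTb(VirtualAddress,
                                      FALSE,
                                      FALSE,
                                      PtePointer,
                                      NewPteValue);
        KeLowerIrql(OldIrql);
        return OldPteValue;
    }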
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushSingleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Virtual,
+ IN PVOID Pid,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing a single TB entry.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Pid - Not used.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush a single entry from the TB on the current processor.
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+ KiFlushSingleTb(TRUE, Virtual);
+ return;
+}
+
+#endif
diff --git a/private/ntos/ke/mips/xxintsup.s b/private/ntos/ke/mips/xxintsup.s
new file mode 100644
index 000000000..0370ec766
--- /dev/null
+++ b/private/ntos/ke/mips/xxintsup.s
@@ -0,0 +1,713 @@
+// TITLE("Interrupt Object Support Routines")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxintsup.s
+//
+// Abstract:
+//
+// This module implements the code necessary to support interrupt objects.
+// It contains the interrupt dispatch code and the code template that gets
+// copied into an interrupt object.
+//
+// Author:
+//
+// David N. Cutler (davec) 2-Apr-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Synchronize Execution")
+//++
+//
+// BOOLEAN
+// KeSynchronizeExecution (
+// IN PKINTERRUPT Interrupt,
+// IN PKSYNCHRONIZE_ROUTINE SynchronizeRoutine,
+// IN PVOID SynchronizeContext
+// )
+//
+// Routine Description:
+//
+// This function synchronizes the execution of the specified routine with the
+// execution of the service routine associated with the specified interrupt
+// object.
+//
+// Arguments:
+//
+// Interrupt (a0) - Supplies a pointer to a control object of type interrupt.
+//
+// SynchronizeRoutine (a1) - Supplies a pointer to a function whose execution
+// is to be synchronized with the execution of the service routine associated
+// with the specified interrupt object.
+//
+// SynchronizeContext (a2) - Supplies a pointer to an arbitrary data structure
+// which is to be passed to the function specified by the SynchronizeRoutine
+// parameter.
+//
+// Return Value:
+//
+// The value returned by the SynchronizeRoutine function is returned as the
+// function value.
+//
+//--
+
+ .struct 0
+SyArg: .space 4 * 4 // argument register save area
+SyS0: .space 4 // saved integer register s0
+SyIrql: .space 4 // saved IRQL value
+ .space 4 // fill for alignment
+SyRa: .space 4 // saved return address
+SyFrameLength: // length of stack frame
+SyA0: .space 4 // saved argument registers a0 - a2
+SyA1: .space 4 //
+SyA2: .space 4 //
+
+ NESTED_ENTRY(KeSynchronizeExecution, SyFrameLength, zero)
+
+ subu sp,sp,SyFrameLength // allocate stack frame
+ sw ra,SyRa(sp) // save return address
+ sw s0,SyS0(sp) // save integer register s0
+
+ PROLOGUE_END
+
+ sw a1,SyA1(sp) // save synchronization routine address
+ sw a2,SyA2(sp) // save synchronization routine context
+
+//
+// Raise IRQL to the synchronization level and acquire the associated
+// spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s0,InActualLock(a0) // get address of spin lock
+
+#endif
+
+ lbu a0,InSynchronizeIrql(a0) // get synchronization IRQL
+ addu a1,sp,SyIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise IRQL to synchronization IRQL
+
+#if defined(R4000) && !defined(NT_UP)
+
+10: ll t0,0(s0) // get current lock value
+ move t1,s0 // set lock ownership value
+ bne zero,t0,10b // if ne, spin lock owned
+ sc t1,0(s0) // set spin lock owned
+ beq zero,t1,10b // if eq, store conditional failed
+
+#endif
+
+//
+// Call specified routine passing the specified context parameter.
+//
+
+ lw t0,SyA1(sp) // get synchronize routine address
+ lw a0,SyA2(sp) // get synchronize routine context
+ jal t0 // call specified routine
+
+//
+// Release spin lock, lower IRQL to its previous level, and return the value
+// returned by the specified routine.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s0) // set spin lock not owned
+
+#endif
+
+ lbu a0,SyIrql(sp) // get saved IRQL
+ move s0,v0 // save return value
+ jal KeLowerIrql // lower IRQL to previous level
+ move v0,s0 // set return value
+ lw s0,SyS0(sp) // restore integer register s0
+ lw ra,SyRa(sp) // restore return address
+ addu sp,sp,SyFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KeSynchronizeExecution
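A hedged C restatement of what the assembly above does, using the KINTERRUPT fields named in the listing (SynchronizeIrql, ActualLock); the shipped routine is the hand-written assembly, and on uniprocessor builds the spin lock operations fall away.

    BOOLEAN
    KeSynchronizeExecutionSketch (
        IN PKINTERRUPT Interrupt,
        IN PKSYNCHRONIZE_ROUTINE SynchronizeRoutine,
        IN PVOID SynchronizeContext
        )
    {
        BOOLEAN Result;
        KIRQL OldIrql;

        //
        // Raise to the interrupt's synchronization IRQL and take the same
        // spin lock the connected service routine runs under.
        //

        KeRaiseIrql(Interrupt->SynchronizeIrql, &OldIrql);
        KiAcquireSpinLock(Interrupt->ActualLock);

        Result = SynchronizeRoutine(SynchronizeContext);

        KiReleaseSpinLock(Interrupt->ActualLock);
        KeLowerIrql(OldIrql);
        return Result;
    }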
+
+ SBTTL("Chained Dispatch")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to more than one interrupt object. Its
+// function is to walk the list of connected interrupt objects and call
+// each interrupt service routine. If the mode of the interrupt is latched,
+// then a complete traversal of the chain must be performed. If any of the
+// routines require saving the volatile floating point machine state, then
+// it is only saved once.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ChArg: .space 4 * 4 // argument register save area
+ChS0: .space 4 // saved integer registers s0 - s6
+ChS1: .space 4 //
+ChS2: .space 4 //
+ChS3: .space 4 //
+ChS4: .space 4 //
+ChS5: .space 4 //
+ChS6: .space 4 //
+ChRa: .space 4 // saved return address
+ChFrameLength: // length of stack frame
+ChIrql: .space 4 // saved IRQL value
+
+ NESTED_ENTRY(KiChainedDispatch, ChFrameLength, zero)
+
+ subu sp,sp,ChFrameLength // allocate stack frame
+ sw ra,ChRa(sp) // save return address
+ sw s0,ChS0(sp) // save integer registers s0 - s6
+ sw s1,ChS1(sp) //
+ sw s2,ChS2(sp) //
+ sw s3,ChS3(sp) //
+ sw s4,ChS4(sp) //
+ sw s5,ChS5(sp) //
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw s6,ChS6(sp) //
+
+#endif
+
+ PROLOGUE_END
+
+//
+// Initialize loop variables.
+//
+
+ addu s0,a0,InInterruptListEntry // set address of listhead
+ move s1,s0 // set address of first entry
+ move s2,zero // clear floating state saved flag
+ lbu s3,InMode(a0) // get mode of interrupt
+ lbu s4,InIrql(a0) // get interrupt source IRQL
+
+//
+// Walk the list of connected interrupt objects and call the respective
+// interrupt service routines.
+//
+
+10: subu a0,s1,InInterruptListEntry // compute interrupt object address
+ lbu t0,InFloatingSave(a0) // get floating save flag
+ bne zero,s2,20f // if ne, floating state already saved
+ beq zero,t0,20f // if eq, don't save floating state
+
+//
+// Save volatile floating registers f0 - f19 in trap frame.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+ li s2,1 // set floating state saved flag
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+20: lbu s5,InSynchronizeIrql(a0) // get synchronization IRQL
+ beq s4,s5,25f // if eq, IRQL levels are the same
+ move a0,s5 // set synchronization IRQL
+ addu a1,sp,ChIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise to synchronization IRQL
+ subu a0,s1,InInterruptListEntry // recompute interrupt object address
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+25: //
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s6,InActualLock(a0) // get address of spin lock
+30: ll t1,0(s6) // get current lock value
+ move t2,s6 // set lock ownership value
+ bne zero,t1,30b // if ne, spin lock owned
+ sc t2,0(s6) // set spin lock owned
+ beq zero,t2,30b // if eq, store conditional failed
+
+#endif
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s6) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the interrupt source level if synchronization level is not
+// the same as the interrupt source level.
+//
+
+ beq s4,s5,35f // if eq, IRQL levels are the same
+ move a0,s4 // set interrupt source IRQL
+ jal KeLowerIrql // lower to interrupt source IRQL
+
+//
+// Get next list entry and check for end of loop.
+//
+
+35: lw s1,LsFlink(s1) // get next interrupt object address
+ beq zero,v0,40f // if eq, interrupt not handled
+ beq zero,s3,50f // if eq, level sensitive interrupt
+40: bne s0,s1,10b // if ne, not end of list
+
+//
+// Either the interrupt is level sensitive and has been handled or the end of
+// the interrupt object chain has been reached. Check to determine if floating
+// machine state needs to be restored.
+//
+
+50: beq zero,s2,60f // if eq, floating state not saved
+
+//
+// Restore volatile floating registers f0 - f19 from trap frame.
+//
+
+ RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+//
+// Restore integer registers s0 - s6, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+60: lw s0,ChS0(sp) // restore integer registers s0 - s6
+ lw s1,ChS1(sp) //
+ lw s2,ChS2(sp) //
+ lw s3,ChS3(sp) //
+ lw s4,ChS4(sp) //
+ lw s5,ChS5(sp) //
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s6,ChS6(sp) //
+
+#endif
+
+ lw ra,ChRa(sp) // restore return address
+ addu sp,sp,ChFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiChainedDispatch
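A hedged C restatement of the dispatch loop, assuming the interrupt object fields the assembly reads (Mode, Irql, SynchronizeIrql, ActualLock, ServiceRoutine, ServiceContext, InterruptListEntry); the floating-state save into the trap frame is omitted, and the shipped code is the assembly above.

    VOID
    KiChainedDispatchSketch (
        IN PKINTERRUPT Interrupt
        )
    {
        PLIST_ENTRY ListHead;
        PLIST_ENTRY NextEntry;
        PKINTERRUPT Current;
        BOOLEAN Handled;
        KIRQL OldIrql;

        ListHead = &Interrupt->InterruptListEntry;
        NextEntry = ListHead;
        do {
            Current = CONTAINING_RECORD(NextEntry, KINTERRUPT, InterruptListEntry);

            //
            // Raise IRQL only when the synchronization level differs from
            // the interrupt source level, exactly as the assembly does.
            //

            if (Current->SynchronizeIrql != Interrupt->Irql) {
                KeRaiseIrql(Current->SynchronizeIrql, &OldIrql);
            }

            KiAcquireSpinLock(Current->ActualLock);
            Handled = Current->ServiceRoutine(Current, Current->ServiceContext);
            KiReleaseSpinLock(Current->ActualLock);

            if (Current->SynchronizeIrql != Interrupt->Irql) {
                KeLowerIrql(Interrupt->Irql);
            }

            //
            // A handled, level-sensitive interrupt ends the walk; a latched
            // interrupt requires traversing the entire chain.
            //

            if ((Handled != FALSE) && (Interrupt->Mode == LevelSensitive)) {
                break;
            }

            NextEntry = NextEntry->Flink;
        } while (NextEntry != ListHead);
    }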
+
+ SBTTL("Floating Dispatch")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to save the volatile floating machine state and then call the specified
+// interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+FlArg: .space 4 * 4 // argument register save area
+FlS0: .space 4 // saved integer registers s0 - s1
+FlS1: .space 4 //
+FlIrql: .space 4 // saved IRQL value
+FlRa: .space 4 // saved return address
+FlFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiFloatingDispatch, FlFrameLength, zero)
+
+ subu sp,sp,FlFrameLength // allocate stack frame
+ sw ra,FlRa(sp) // save return address
+ sw s0,FlS0(sp) // save integer registers s0 - s1
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw s1,FlS1(sp) //
+
+#endif
+
+ PROLOGUE_END
+
+//
+// Save volatile floating registers f0 - f19 in trap frame.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+ move s0,a0 // save address of interrupt object
+ lbu a0,InSynchronizeIrql(s0) // get synchronization IRQL
+ lbu t0,InIrql(s0) // get interrupt source IRQL
+ beq a0,t0,10f // if eq, IRQL levels are the same
+ addu a1,sp,FlIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise to synchronization IRQL
+10: move a0,s0 // restore address of interrupt object
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s1,InActualLock(a0) // get address of spin lock
+20: ll t1,0(s1) // get current lock value
+ move t2,s1 // set lock ownership value
+ bne zero,t1,20b // if ne, spin lock owned
+ sc t2,0(s1) // set spin lock owned
+ beq zero,t2,20b // if eq, store conditional failed
+
+#endif
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s1) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the interrupt source level if synchronization level is not
+// the same as the interrupt source level.
+//
+
+ lbu a0,InIrql(s0) // get interrupt source IRQL
+ lbu t0,InSynchronizeIrql(s0) // get synchronization IRQL
+ beq a0,t0,30f // if eq, IRQL levels are the same
+ jal KeLowerIrql // lower to interrupt source IRQL
+
+//
+// Restore volatile floating registers f0 - f19 from trap frame.
+//
+
+30: RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+//
+// Restore integer registers s0 - s1, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ lw s0,FlS0(sp) // restore integer registers s0 - s1
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s1,FlS1(sp) //
+
+#endif
+
+ lw ra,FlRa(sp) // restore return address
+ addu sp,sp,FlFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiFloatingDispatch
+
+ SBTTL("Interrupt Dispatch - Raise IRQL")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// N.B. This routine raises the interrupt level to the synchronization
+// level specified in the interrupt object.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+RdArg: .space 4 * 4 // argument register save area
+RdS0: .space 4 // saved integer register s0
+ .space 4 // fill
+RdIrql: .space 4 // saved IRQL value
+RdRa: .space 4 // saved return address
+RdFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiInterruptDispatchRaise, RdFrameLength, zero)
+
+ subu sp,sp,RdFrameLength // allocate stack frame
+ sw ra,RdRa(sp) // save return address
+ sw s0,RdS0(sp) // save integer register s0
+
+ PROLOGUE_END
+
+//
+// Raise IRQL to synchronization level.
+//
+
+ move s0,a0 // save address of interrupt object
+ lbu a0,InSynchronizeIrql(s0) // get synchronization IRQL
+ addu a1,sp,RdIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise to synchronization IRQL
+ move a0,s0 // restore address of interrupt object
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s0,InActualLock(a0) // get address of spin lock
+10: ll t1,0(s0) // get current lock value
+ move t2,s0 // set lock ownership value
+ bne zero,t1,10b // if ne, spin lock owned
+ sc t2,0(s0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failed
+
+#endif
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s0) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the previous level.
+//
+
+ lbu a0,RdIrql(sp) // get previous IRQL
+ jal KeLowerIrql // lower to previous IRQL
+
+//
+// Restore integer register s0, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ lw s0,RdS0(sp) // restore integer register s0
+ lw ra,RdRa(sp) // restore return address
+ addu sp,sp,RdFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiInterruptDispatchRaise
+
+ SBTTL("Interrupt Dispatch - Same IRQL")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#if defined(NT_UP)
+
+ LEAF_ENTRY(KiInterruptDispatchSame)
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ j t0 // jump to service routine
+
+#else
+
+ .struct 0
+SdArg: .space 4 * 4 // argument register save area
+SdS0: .space 4 // saved integer register s0
+ .space 4 * 2 // fill
+SdRa: .space 4 // saved return address
+SdFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiInterruptDispatchSame, SdFrameLength, zero)
+
+ subu sp,sp,SdFrameLength // allocate stack frame
+ sw ra,SdRa(sp) // save return address
+ sw s0,SdS0(sp) // save integer register s0
+
+ PROLOGUE_END
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+ lw s0,InActualLock(a0) // get address of spin lock
+10: ll t1,0(s0) // get current lock value
+ move t2,s0 // set lock ownership value
+ bne zero,t1,10b // if ne, spin lock owned
+ sc t2,0(s0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failed
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+ sw zero,0(s0) // set spin lock not owned
+
+//
+// Restore integer register s0, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ lw s0,SdS0(sp) // restore integer register s0
+ lw ra,SdRa(sp) // restore return address
+ addu sp,sp,SdFrameLength // deallocate stack frame
+ j ra // return
+
+#endif
+
+ .end KiInterruptDispatchSame
+
+ SBTTL("Interrupt Template")
+//++
+//
+// Routine Description:
+//
+// This routine is a template that is copied into each interrupt object. Its
+// function is to determine the address of the respective interrupt object
+// and then transfer control to the appropriate interrupt dispatcher.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt template within an interrupt
+// object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiInterruptTemplate)
+
+ .set noreorder
+ .set noat
+ lw t0,InDispatchAddress - InDispatchCode(a0) // get dispatcher address
+ subu a0,a0,InDispatchCode // compute address of interrupt object
+ j t0 // transfer control to dispatch routine
+ nop //
+ .set at
+ .set reorder
+
+ .end KiInterruptTemplate
+
+ SBTTL("Unexpected Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is not connected to an interrupt object. Its function
+// is to report the error and dismiss the interrupt.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiUnexpectedInterrupt)
+
+ j ra // ****** temp ******
+
+ .end KiUnexpectedInterrupt
diff --git a/private/ntos/ke/mips/xxirql.s b/private/ntos/ke/mips/xxirql.s
new file mode 100644
index 000000000..9ededf117
--- /dev/null
+++ b/private/ntos/ke/mips/xxirql.s
@@ -0,0 +1,218 @@
+// TITLE("Manipulate Interrupt Request Level")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxirql.s
+//
+// Abstract:
+//
+// This module implements the code necessary to lower and raise the current
+// Interrupt Request Level (IRQL).
+//
+// Author:
+//
+// David N. Cutler (davec) 12-Aug-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KiSynchIrql 4
+
+ SBTTL("Lower Interrupt Request Level")
+//++
+//
+// VOID
+// KeLowerIrql (
+// KIRQL NewIrql
+// )
+//
+// Routine Description:
+//
+// This function lowers the current IRQL to the specified value.
+//
+// Arguments:
+//
+// NewIrql (a0) - Supplies the new IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeLowerIrql)
+
+ and a0,a0,0xff // isolate new IRQL
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeLowerIrql
+
+ SBTTL("Raise Interrupt Request Level")
+//++
+//
+// VOID
+// KeRaiseIrql (
+// KIRQL NewIrql,
+// PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to the specified value and returns
+// the old IRQL value.
+//
+// Arguments:
+//
+// NewIrql (a0) - Supplies the new IRQL value.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the old
+// IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeRaiseIrql)
+
+ and a0,a0,0xff // isolate new IRQL
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu t2,KiPcr + PcCurrentIrql(zero) // get current IRQL
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ sb t2,0(a1) // store old IRQL
+ j ra // return
+
+ .end KeRaiseIrql
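A hedged C sketch of the translation both routines perform: the PCR holds an IRQL-indexed table of PSR interrupt-enable bytes plus the current IRQL, and raising or lowering swaps the enable field and the recorded IRQL. The PCR field names follow the assembly offsets, and the two PSR intrinsics are stand-ins for the DISABLE_INTERRUPTS/ENABLE_INTERRUPTS sequences.

    KIRQL
    KiSwapIrqlSketch (
        IN KIRQL NewIrql
        )
    {
        ULONG Psr;
        KIRQL OldIrql;

        OldIrql = PCR->CurrentIrql;                      // capture previous IRQL
        Psr = KiDisableInterruptsReadPsr();              // stand-in intrinsic
        Psr &= ~(0xff << PSR_INTMASK);                   // clear enable field
        Psr |= PCR->IrqlTable[NewIrql] << PSR_INTMASK;   // insert new enables
        PCR->CurrentIrql = NewIrql;                      // record new IRQL
        KiWritePsrEnableInterrupts(Psr);                 // stand-in intrinsic
        return OldIrql;
    }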
+
+ SBTTL("Raise Interrupt Request Level to DPC Level")
+//++
+//
+// KIRQL
+// KeRaiseIrqlToDpcLevel (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function swaps the current IRQL with dispatch level.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiRaiseIrqlToXxxLevel)
+
+ ALTERNATE_ENTRY(KeRaiseIrqlToDpcLevel)
+
+ li a0,DISPATCH_LEVEL // set new IRQL value
+ b KeSwapIrql // finish in common code
+
+ SBTTL("Swap Interrupt Request Level")
+//++
+//
+// KIRQL
+// KeRaiseIrqlToSynchLevel (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function swaps the current IRQL with synchronization level.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ ALTERNATE_ENTRY(KeRaiseIrqlToSynchLevel)
+
+ lbu a0,KiSynchIrql // set new IRQL level
+
+//++
+//
+// KIRQL
+// KeSwapIrql (
+// IN KIRQL NewIrql
+// )
+//
+// Routine Description:
+//
+// This function swaps the current IRQL with the specified IRQL.
+//
+// Arguments:
+//
+// NewIrql (a0) - supplies the new IRQL value.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ ALTERNATE_ENTRY(KeSwapIrql)
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu v0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KiRaiseIrqlToXxxLevel
diff --git a/private/ntos/ke/mips/xxmiscs.s b/private/ntos/ke/mips/xxmiscs.s
new file mode 100644
index 000000000..ccdf0ead7
--- /dev/null
+++ b/private/ntos/ke/mips/xxmiscs.s
@@ -0,0 +1,289 @@
+// TITLE("Miscellaneous Kernel Functions")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxmiscs.s
+//
+// Abstract:
+//
+// This module implements machine dependent miscellaneous kernel functions.
+// Functions are provided to request a software interrupt, continue thread
+// execution, flush the write buffer, and perform last chance exception
+// processing.
+//
+// Author:
+//
+// David N. Cutler (davec) 31-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Request Software Interrupt")
+//++
+//
+// VOID
+// KiRequestSoftwareInterrupt (
+// ULONG RequestIrql
+// )
+//
+// Routine Description:
+//
+// This function requests a software interrupt at the specified IRQL
+// level.
+//
+// Arguments:
+//
+// RequestIrql (a0) - Supplies the request IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRequestSoftwareInterrupt)
+
+ li t0,1 << (CAUSE_INTPEND - 1) // get partial request mask value
+
+ DISABLE_INTERRUPTS(t1) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t2,cause // get exception cause register
+ sll t0,t0,a0 // shift request mask into position
+ or t2,t2,t0 // merge interrupt request mask
+ mtc0 t2,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t1) // enable interrupts
+
+ j ra // return
+
+ .end KiRequestSoftwareInterrupt
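A hedged sketch of the request-mask arithmetic above, assuming CAUSE_INTPEND is the bit position of the interrupt-pending field in the cause register; an APC_LEVEL request then lands on the first software-interrupt bit and a DISPATCH_LEVEL request on the second.

    ULONG
    KiSoftwareInterruptMaskSketch (
        IN ULONG RequestIrql
        )
    {
        //
        // (1 << (CAUSE_INTPEND - 1)) << 1 selects cause bit CAUSE_INTPEND,
        // (1 << (CAUSE_INTPEND - 1)) << 2 selects cause bit CAUSE_INTPEND + 1.
        //

        return (1 << (CAUSE_INTPEND - 1)) << RequestIrql;
    }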
+
+ SBTTL("Continue Execution System Service")
+//++
+//
+// NTSTATUS
+// NtContinue (
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN TestAlert
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to continue execution after
+// an exception has occurred. Its function is to transfer information from
+// the specified context record into the trap frame that was built when the
+// system service was executed, and then exit the system as if an exception
+// had occurred.
+//
+// Arguments:
+//
+// ContextRecord (a0) - Supplies a pointer to a context record.
+//
+// TestAlert (a1) - Supplies a boolean value that specifies whether alert
+// should be tested for the previous processor mode.
+//
+// N.B. Register s8 is assumed to contain the address of a trap frame.
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record is misaligned or is not accessible, then the appropriate
+// status code is returned.
+//
+//--
+
+ NESTED_ENTRY(NtContinue, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the nonvolatile machine state so that it can be restored by exception
+// exit if it is not overwritten by the specified context record.
+//
+
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ li t0,TRUE // set saved s-registers flag
+ sb t0,TrSavedFlag(s8) //
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Transfer information from the context frame to the exception and trap
+// frames.
+//
+
+ sb a1,ExceptionFrameLength + 4(sp) // save test alert argument
+ move a1,sp // set address of exception frame
+ move a2,s8 // set address of trap frame
+ jal KiContinue // transfer context to kernel frames
+
+//
+// If the kernel continuation routine returns success, then exit via the
+// exception exit code. Otherwise return to the system service dispatcher.
+//
+
+ bne zero,v0,20f // if ne, transfer failed
+
+//
+// Check to determine if alert should be tested for the previous processor
+// mode and restore the previous mode in the thread object.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu t1,ExceptionFrameLength + 4(sp) // get test alert argument
+ lw t2,TrTrapFrame(s8) // get old trap frame address
+ lbu t3,TrPreviousMode(s8) // get old previous mode
+ lbu a0,ThPreviousMode(t0) // get current previous mode
+ sw t2,ThTrapFrame(t0) // restore old trap frame address
+ sb t3,ThPreviousMode(t0) // restore old previous mode
+ beq zero,t1,10f // if eq, don't test for alert
+ jal KeTestAlertThread // test alert for current thread
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+10: j KiExceptionExit // finish in exception exit
+
+//
+// Context record is misaligned or not accessible.
+//
+
+20: lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate stack frame
+ j ra // return
+
+ .end NtContinue
+
+ SBTTL("Raise Exception System Service")
+//++
+//
+// NTSTATUS
+// NtRaiseException (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN FirstChance
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to raise an exception.
+// The exception can be raised as a first or second chance exception.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// ContextRecord (a1) - Supplies a pointer to a context record.
+//
+// FirstChance (a2) - Supplies a boolean value that determines whether
+// this is the first (TRUE) or second (FALSE) chance for dispatching
+// the exception.
+//
+// N.B. Register s8 is assumed to contain the address of a trap frame.
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record or exception record is misaligned or is not accessible,
+// then the appropriate status code is returned.
+//
+//--
+
+ NESTED_ENTRY(NtRaiseException, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the nonvolatile machine state so that it can be restored by exception
+// exit if it is not overwritten by the specified context record.
+//
+
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ li t0,TRUE // set saved s-registers flag
+ sb t0,TrSavedFlag(s8) //
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Call the raise exception kernel routine which will marshall the arguments
+// and then call the exception dispatcher.
+//
+
+ sw a2,ExArgs + 16(sp) // set first chance argument
+ move a2,sp // set address of exception frame
+ move a3,s8 // set address of trap frame
+ jal KiRaiseException // call raise exception routine
+
+//
+// If the raise exception routine returns success, then exit via the exception
+// exit code. Otherwise return to the system service dispatcher.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,TrTrapFrame(s8) // get old trap frame address
+ bne zero,v0,10f // if ne, dispatch not successful
+ sw t1,ThTrapFrame(t0) // restore old trap frame address
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+ j KiExceptionExit // finish in exception exit
+
+//
+// The context or exception record is misaligned or not accessible, or the
+// exception was not handled.
+//
+
+10: lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate stack frame
+ j ra // return
+
+ .end NtRaiseException
diff --git a/private/ntos/ke/mips/xxmpipi.c b/private/ntos/ke/mips/xxmpipi.c
new file mode 100644
index 000000000..3d32c41c2
--- /dev/null
+++ b/private/ntos/ke/mips/xxmpipi.c
@@ -0,0 +1,209 @@
+/*++
+
+Copyright (c) 1993 Microsoft Corporation
+
+Module Name:
+
+ xxmpipi.c
+
+Abstract:
+
+ This module implements MIPS specific MP routines.
+
+Author:
+
+ David N. Cutler 24-Apr-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiRestoreProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the current
+ processor context structure in the processor block to the
+ specified trap and exception frames.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from the processor context structure
+ // to the specified trap and exception frames
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ KeContextToKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame,
+ CONTEXT_FULL,
+ KernelMode);
+
+ return;
+}
+
+VOID
+KiSaveProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the specified trap
+ and exception frames to the processor context structure in the current
+ processor block.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from specified trap and exception
+ // frames to the current processor context structure.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ Prcb->ProcessorState.ContextFrame.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame);
+
+ //
+ // Save the current processor control state.
+ //
+
+ KiSaveProcessorControlState(&Prcb->ProcessorState);
+ return;
+}
+
+VOID
+KiSaveProcessorControlState (
+ IN PKPROCESSOR_STATE ProcessorState
+ )
+
+/*++
+
+Routine Description:
+
+ This routine saves the processor's control state for the debugger.
+
+Arguments:
+
+ ProcessorState (a0) - Supplies a pointer to the processor state.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+
+ //
+ // Read Tb entries and store in the processor state structure.
+ //
+
+ for (Index = 0; Index < KeNumberTbEntries; Index += 1) {
+ KiReadEntryTb(Index, &ProcessorState->TbEntry[Index]);
+ }
+
+ return;
+}
+
+BOOLEAN
+KiIpiServiceRoutine (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+
+ This function is called at IPI_LEVEL to process any outstanding
+ interprocessor requests for the current processor.
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame
+
+Return Value:
+
+ A value of TRUE is returned if one or more requests were serviced.
+ Otherwise, FALSE is returned.
+
+--*/
+
+{
+
+ ULONG RequestSummary;
+
+ //
+ // Process any outstanding interprocessor requests.
+ //
+
+ RequestSummary = KiIpiProcessRequests();
+
+ //
+ // If freeze is requested, then freeze target execution.
+ //
+
+ if ((RequestSummary & IPI_FREEZE) != 0) {
+ KiFreezeTargetExecution(TrapFrame, ExceptionFrame);
+ }
+
+ //
+ // Return whether any requests were processed.
+ //
+
+ return (RequestSummary & ~IPI_FREEZE) != 0;
+}
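A hedged sketch of how the save and restore routines pair up, assuming the frames captured at a freeze point and a debugger-style edit in between; the edit shown (rewriting the continuation address through the MIPS Fir context field) is purely illustrative.

    VOID
    ExampleFreezeEditThaw (
        IN PKTRAP_FRAME TrapFrame,
        IN PKEXCEPTION_FRAME ExceptionFrame,
        IN ULONG NewProgramCounter
        )
    {
        PKPRCB Prcb = KeGetCurrentPrcb();

        //
        // Capture the interrupted register state (and the TB contents, via
        // KiSaveProcessorControlState) into the processor block.
        //

        KiSaveProcessorState(TrapFrame, ExceptionFrame);

        //
        // A debugger would edit the captured context here.
        //

        Prcb->ProcessorState.ContextFrame.Fir = NewProgramCounter;

        //
        // Write the possibly edited context back into the frames that will
        // be restored when the processor resumes.
        //

        KiRestoreProcessorState(TrapFrame, ExceptionFrame);
    }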
diff --git a/private/ntos/ke/mips/xxregsv.s b/private/ntos/ke/mips/xxregsv.s
new file mode 100644
index 000000000..8b593bfd5
--- /dev/null
+++ b/private/ntos/ke/mips/xxregsv.s
@@ -0,0 +1,151 @@
+// TITLE("Register Save and Restore")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxregsv.s
+//
+// Abstract:
+//
+// This module implements the code necessary to save and restore processor
+// registers during exception and interrupt processing.
+//
+// Author:
+//
+// David N. Cutler (davec) 12-Aug-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Save Volatile Floating Registers")
+//++
+//
+// Routine Description:
+//
+// This routine is called to save the volatile floating registers.
+//
+// N.B. This routine uses a special argument passing mechanism and destroys
+// no registers. It is assumed that floating register f0 is saved by the
+// caller.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiSaveVolatileFloatState)
+
+ .set noreorder
+ .set noat
+
+#if defined(_EXTENDED_FLOAT)
+
+ sdc1 f1,TrDblF1(s8) // save odd floating registers
+ sdc1 f3,TrDblF3(s8) //
+ sdc1 f5,TrDblF5(s8) //
+ sdc1 f7,TrDblF7(s8) //
+ sdc1 f9,TrDblF9(s8) //
+ sdc1 f11,TrDblF11(s8) //
+ sdc1 f13,TrDblF13(s8) //
+ sdc1 f15,TrDblF15(s8) //
+ sdc1 f17,TrDblF17(s8) //
+ sdc1 f19,TrDblF19(s8) //
+ sdc1 f21,TrDblF21(s8) //
+ sdc1 f23,TrDblF23(s8) //
+ sdc1 f25,TrDblF25(s8) //
+ sdc1 f27,TrDblF27(s8) //
+ sdc1 f29,TrDblF29(s8) //
+ sdc1 f31,TrDblF31(s8) //
+
+#endif
+
+ sdc1 f2,TrFltF2(s8) // save even floating registers
+ sdc1 f4,TrFltF4(s8) //
+ sdc1 f6,TrFltF6(s8) //
+ sdc1 f8,TrFltF8(s8) //
+ sdc1 f10,TrFltF10(s8) //
+ sdc1 f12,TrFltF12(s8) //
+ sdc1 f14,TrFltF14(s8) //
+ sdc1 f16,TrFltF16(s8) //
+ j ra // return
+ sdc1 f18,TrFltF18(s8) //
+ .set at
+ .set reorder
+
+ .end KiSaveVolatileFloatState
+
+ SBTTL("Restore Volatile Floating Registers")
+//++
+//
+// Routine Description:
+//
+// This routine is called to restore the volatile floating registers.
+//
+// N.B. This routine uses a special argument passing mechanism and destroys
+// no registers. It is assumed that floating register f0 is restored by
+// the caller.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRestoreVolatileFloatState)
+
+ .set noreorder
+ .set noat
+
+#if defined(_EXTENDED_FLOAT)
+
+ ldc1 f1,TrDblF1(s8) // restore odd floating registers
+ ldc1 f3,TrDblF3(s8) //
+ ldc1 f5,TrDblF5(s8) //
+ ldc1 f7,TrDblF7(s8) //
+ ldc1 f9,TrDblF9(s8) //
+ ldc1 f11,TrDblF11(s8) //
+ ldc1 f13,TrDblF13(s8) //
+ ldc1 f15,TrDblF15(s8) //
+ ldc1 f17,TrDblF17(s8) //
+ ldc1 f19,TrDblF19(s8) //
+ ldc1 f21,TrDblF21(s8) //
+ ldc1 f23,TrDblF23(s8) //
+ ldc1 f25,TrDblF25(s8) //
+ ldc1 f27,TrDblF27(s8) //
+ ldc1 f29,TrDblF29(s8) //
+ ldc1 f31,TrDblF31(s8) //
+
+#endif
+
+ ldc1 f2,TrFltF2(s8) // restore floating registers f2 - f19
+ ldc1 f4,TrFltF4(s8) //
+ ldc1 f6,TrFltF6(s8) //
+ ldc1 f8,TrFltF8(s8) //
+ ldc1 f10,TrFltF10(s8) //
+ ldc1 f12,TrFltF12(s8) //
+ ldc1 f14,TrFltF14(s8) //
+ ldc1 f16,TrFltF16(s8) //
+ j ra // return
+ ldc1 f18,TrFltF18(s8) //
+ .set at
+ .set reorder
+
+ .end KiRestoreVolatileFloatState
diff --git a/private/ntos/ke/mips/xxspinlk.s b/private/ntos/ke/mips/xxspinlk.s
new file mode 100644
index 000000000..fee420f28
--- /dev/null
+++ b/private/ntos/ke/mips/xxspinlk.s
@@ -0,0 +1,540 @@
+// TITLE("Spin Locks")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxspinlk.s
+//
+// Abstract:
+//
+// This module implements the routines for acquiring and releasing
+// spin locks.
+//
+// Author:
+//
+// David N. Cutler (davec) 23-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Initialize Executive Spin Lock")
+//++
+//
+// VOID
+// KeInitializeSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function initializes an executive spin lock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeInitializeSpinLock)
+
+ sw zero,0(a0) // clear spin lock value
+ j ra // return
+
+ .end KeInitializeSpinLock
+
+ SBTTL("Acquire Executive Spin Lock")
+//++
+//
+// VOID
+// KeAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and acquires
+// the specified executive spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the
+// previous IRQL value.
+//
+// N.B. The Old IRQL MUST be stored after the lock is acquired.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeAcquireSpinLock)
+
+//
+// Disable interrupts and attempt to acquire the specified spinlock.
+//
+
+ li a2,DISPATCH_LEVEL // set new IRQL level
+
+10: DISABLE_INTERRUPTS(t2) // disable interrupts
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+20: ll t1,0(a0) // get current lock value
+ move t3,t0 // set ownership value
+ bne zero,t1,30f // if ne, spin lock owned
+ sc t3,0(a0) // set spin lock owned
+ beq zero,t3,20b // if eq, store conditional failure
+
+#endif
+
+//
+// Raise IRQL to DISPATCH_LEVEL and acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ lbu t0,KiPcr + PcIrqlTable(a2) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu v0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a2,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ sb v0,0(a1) // store old IRQL
+ j ra // return
+
+#if !defined(NT_UP)
+
+30: ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ b 10b // try again
+#endif
+
+ .end KeAcquireSpinLock
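A hedged C equivalent of the acquire loop, with an interlocked compare-exchange standing in for the ll/sc pair. The real routine interlocks the acquire with interrupts disabled and raises IRQL inline; this sketch separates those steps for clarity, but keeps the current thread address as the lock ownership value, as in the assembly.

    VOID
    KeAcquireSpinLockSketch (
        IN PKSPIN_LOCK SpinLock,
        OUT PKIRQL OldIrql
        )
    {
        PKTHREAD Thread = KeGetCurrentThread();
        KIRQL PreviousIrql;

        for (;;) {
            KeRaiseIrql(DISPATCH_LEVEL, &PreviousIrql);

            //
            // The compare-exchange models the ll/sc sequence: the lock is
            // taken only if its value is still zero.
            //

            if (InterlockedCompareExchangePointer((PVOID *)SpinLock,
                                                  (PVOID)Thread,
                                                  NULL) == NULL) {

                //
                // The previous IRQL is stored only after the lock is held,
                // as the routine header requires.
                //

                *OldIrql = PreviousIrql;
                return;
            }

            KeLowerIrql(PreviousIrql);
        }
    }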
+
+ SBTTL("Acquire SpinLock and Raise to Dpc")
+//++
+//
+// KIRQL
+// KeAcquireSpinLockRaiseToDpc (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to dispatcher level and acquires
+// the specified spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to the spinlock that is to be
+// acquired.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiAcquireSpinLockRaiseIrql)
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockRaiseToDpc)
+
+ li a1,DISPATCH_LEVEL // set new IRQL level
+ b 10f // finish in common code
+
+
+ SBTTL("Acquire SpinLock and Raise to Synch")
+//++
+//
+// KIRQL
+// KeAcquireSpinLockRaiseToSynch (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to synchronization level and
+// acquires the specified spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to the spinlock that is to be
+// acquired.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockRaiseToSynch)
+
+//
+// Disable interrupts and attempt to acquire the specified spinlock.
+//
+
+ lbu a1,KiSynchIrql // set new IRQL level
+
+10: DISABLE_INTERRUPTS(t2) // disable interrupts
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+20: ll t1,0(a0) // get current lock value
+ move t3,t0 // set ownership value
+ bne zero,t1,30f // if ne, spin lock owned
+ sc t3,0(a0) // set spin lock owned
+ beq zero,t3,20b // if eq, store conditional failure
+
+#endif
+
+//
+// Raise IRQL to synchronization level and acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ lbu t0,KiPcr + PcIrqlTable(a1) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu v0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a1,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+#if !defined(NT_UP)
+
+30: ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ b 10b // try again
+
+#endif
+
+ .end KiAcquireSpinLockRaiseIrql
+
+ SBTTL("Release Executive Spin Lock")
+//++
+//
+// VOID
+// KeReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function releases an executive spin lock and lowers the IRQL
+// to its previous value.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spin lock.
+//
+// OldIrql (a1) - Supplies the previous IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ LEAF_ENTRY(KeReleaseSpinLock)
+
+//
+// Release the specified spinlock.
+//
+
+#if !defined(NT_UP)
+
+ sw zero,0(a0) // set spin lock not owned
+
+#endif
+
+//
+// Lower the IRQL to the specified level.
+//
+// N.B. The lower IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ and a1,a1,0xff // isolate old IRQL
+ lbu t0,KiPcr + PcIrqlTable(a1) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a1,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeReleaseSpinLock
+
+ SBTTL("Try To Acquire Executive Spin Lock")
+//++
+//
+// BOOLEAN
+// KeTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and attempts
+// to acquires the specified executive spinlock. If the spinlock can be
+// acquired, then TRUE is returned. Otherwise, the IRQL is restored to
+// its previous value and FALSE is returned.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the
+// previous IRQL value.
+//
+// N.B. The Old IRQL MUST be stored after the lock is acquired.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(KeTryToAcquireSpinLock)
+
+//
+// Raise IRQL to DISPATCH_LEVEL and try to acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ li a2,DISPATCH_LEVEL // set new IRQL level
+ lbu t0,KiPcr + PcIrqlTable(a2) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu t2,KiPcr + PcCurrentIrql(zero) // get current IRQL
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a2,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+//
+// Try to acquire the specified spinlock.
+//
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+10: ll t1,0(a0) // get current lock value
+ move v0,t0 // set ownership value
+ bne zero,t1,20f // if ne, spin lock owned
+ sc v0,0(a0) // set spin lock owned
+ beq zero,v0,10b // if eq, store conditional failure
+
+#else
+
+ li v0,TRUE // set return value
+
+#endif
+
+//
+// The attempt to acquire the specified spin lock succeeded.
+//
+
+ sb t2,0(a1) // store old IRQL
+ j ra // return
+
+//
+// The attempt to acquire the specified spin lock failed. Lower IRQL to its
+// previous value and return FALSE.
+//
+// N.B. The lower IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+#if !defined(NT_UP)
+
+20: lbu t0,KiPcr + PcIrqlTable(t2) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb t2,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ li v0,FALSE // set return value
+ j ra // return
+
+#endif
+
+ .end KeTryToAcquireSpinLock
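A minimal usage sketch for the try-acquire path, with a hypothetical caller that performs optional bookkeeping only when the lock is immediately available; if the attempt fails, the routine has already restored the previous IRQL, so no cleanup is required.

    VOID
    ExampleTryUpdateCounter (
        IN PKSPIN_LOCK StatisticsLock,
        IN OUT PULONG Counter
        )
    {
        KIRQL OldIrql;

        if (KeTryToAcquireSpinLock(StatisticsLock, &OldIrql) != FALSE) {
            *Counter += 1;
            KeReleaseSpinLock(StatisticsLock, OldIrql);
        }
    }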
+
+ SBTTL("Acquire Kernel Spin Lock")
+//++
+//
+// VOID
+// KiAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function acquires a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiAcquireSpinLock)
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockAtDpcLevel)
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+10: ll t1,0(a0) // get current lock value
+ move t2,t0 // set ownership value
+ bne zero,t1,10b // if ne, spin lock owned
+ sc t2,0(a0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failure
+
+#endif
+
+ j ra // return
+
+ .end KiAcquireSpinLock
+
+ SBTTL("Release Kernel Spin Lock")
+//++
+//
+// VOID
+// KiReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function releases a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiReleaseSpinLock)
+
+ ALTERNATE_ENTRY(KeReleaseSpinLockFromDpcLevel)
+
+#if !defined(NT_UP)
+
+
+ sw zero,0(a0) // set spin lock not owned
+
+#endif
+
+ j ra // return
+
+ .end KiReleaseSpinLock
+
+ SBTTL("Try To Acquire Kernel Spin Lock")
+//++
+//
+// BOOLEAN
+// KiTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function attempts to acquire the specified kernel spinlock. If
+// the spinlock can be acquired, then TRUE is returned. Otherwise, FALSE
+// is returned.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(KiTryToAcquireSpinLock)
+
+#if !defined(NT_UP)
+
+ li v0,FALSE // assume attempt to acquire will fail
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+10: ll t1,0(a0) // get current lock value
+ move t2,t0 // set ownership value
+ bne zero,t1,20f // if ne, spin lock owned
+ sc t2,0(a0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failure
+
+#endif
+
+ li v0,TRUE // set return value
+20: j ra // return
+
+ .end KiTryToAcquireSpinLock