author     Adam <you@example.com>    2020-05-17 05:51:50 +0200
committer  Adam <you@example.com>    2020-05-17 05:51:50 +0200
commit     e611b132f9b8abe35b362e5870b74bce94a1e58e (patch)
tree       a5781d2ec0e085eeca33cf350cf878f2efea6fe5 /private/ntos/ke
Diffstat (limited to 'private/ntos/ke')
-rw-r--r--  private/ntos/ke/alpha/alignem.c  393
-rw-r--r--  private/ntos/ke/alpha/allproc.c  376
-rw-r--r--  private/ntos/ke/alpha/apcint.s  112
-rw-r--r--  private/ntos/ke/alpha/apcuser.c  148
-rw-r--r--  private/ntos/ke/alpha/buserror.c  113
-rw-r--r--  private/ntos/ke/alpha/byteem.c  334
-rw-r--r--  private/ntos/ke/alpha/byteme.s  128
-rw-r--r--  private/ntos/ke/alpha/callback.c  242
-rw-r--r--  private/ntos/ke/alpha/callout.s  404
-rw-r--r--  private/ntos/ke/alpha/clock.s  637
-rw-r--r--  private/ntos/ke/alpha/ctxsw.s  1457
-rw-r--r--  private/ntos/ke/alpha/dmpstate.c  574
-rw-r--r--  private/ntos/ke/alpha/exceptn.c  1088
-rw-r--r--  private/ntos/ke/alpha/floatem.c  4183
-rw-r--r--  private/ntos/ke/alpha/flush.c  548
-rw-r--r--  private/ntos/ke/alpha/flushtb.c  566
-rw-r--r--  private/ntos/ke/alpha/genalpha.c  1850
-rw-r--r--  private/ntos/ke/alpha/getsetrg.c  1081
-rw-r--r--  private/ntos/ke/alpha/initkr.c  499
-rw-r--r--  private/ntos/ke/alpha/intobj.c  435
-rw-r--r--  private/ntos/ke/alpha/intsup.s  899
-rw-r--r--  private/ntos/ke/alpha/ipi.c  179
-rw-r--r--  private/ntos/ke/alpha/irql.s  101
-rw-r--r--  private/ntos/ke/alpha/miscs.s  363
-rw-r--r--  private/ntos/ke/alpha/mpipi.s  546
-rw-r--r--  private/ntos/ke/alpha/pcr.s  214
-rw-r--r--  private/ntos/ke/alpha/regsav.s  407
-rw-r--r--  private/ntos/ke/alpha/services.stb  66
-rw-r--r--  private/ntos/ke/alpha/sources  41
-rw-r--r--  private/ntos/ke/alpha/spinlock.s  571
-rw-r--r--  private/ntos/ke/alpha/start.s  785
-rw-r--r--  private/ntos/ke/alpha/table.stb  84
-rw-r--r--  private/ntos/ke/alpha/tb.s  281
-rw-r--r--  private/ntos/ke/alpha/threadbg.s  152
-rw-r--r--  private/ntos/ke/alpha/thredini.c  327
-rw-r--r--  private/ntos/ke/alpha/timindex.s  111
-rw-r--r--  private/ntos/ke/alpha/trap.s  1423
-rw-r--r--  private/ntos/ke/alpha/trigger.c  580
-rw-r--r--  private/ntos/ke/alpha/vdm.c  54
-rw-r--r--  private/ntos/ke/alpha/xxalign.s  405
-rw-r--r--  private/ntos/ke/apcobj.c  364
-rw-r--r--  private/ntos/ke/apcsup.c  342
-rw-r--r--  private/ntos/ke/balmgr.c  824
-rw-r--r--  private/ntos/ke/bugcheck.c  728
-rw-r--r--  private/ntos/ke/channel.c  1868
-rw-r--r--  private/ntos/ke/config.c  207
-rw-r--r--  private/ntos/ke/debug.c  531
-rw-r--r--  private/ntos/ke/devquobj.c  445
-rw-r--r--  private/ntos/ke/dirs  24
-rw-r--r--  private/ntos/ke/dpcobj.c  436
-rw-r--r--  private/ntos/ke/dpcsup.c  427
-rw-r--r--  private/ntos/ke/eventobj.c  532
-rw-r--r--  private/ntos/ke/genxx.inc  807
-rw-r--r--  private/ntos/ke/i386/abios.h  147
-rw-r--r--  private/ntos/ke/i386/abiosa.asm  615
-rw-r--r--  private/ntos/ke/i386/abiosc.c  767
-rw-r--r--  private/ntos/ke/i386/allproc.c  397
-rw-r--r--  private/ntos/ke/i386/alr.inc  87
-rw-r--r--  private/ntos/ke/i386/apcuser.c  169
-rw-r--r--  private/ntos/ke/i386/biosa.asm  273
-rw-r--r--  private/ntos/ke/i386/biosc.c  269
-rw-r--r--  private/ntos/ke/i386/callback.c  252
-rw-r--r--  private/ntos/ke/i386/callout.asm  432
-rw-r--r--  private/ntos/ke/i386/clockint.asm  881
-rw-r--r--  private/ntos/ke/i386/cpu.asm  1037
-rw-r--r--  private/ntos/ke/i386/cpu.inc  61
-rw-r--r--  private/ntos/ke/i386/ctxswap.asm  1923
-rw-r--r--  private/ntos/ke/i386/cyrix.c  350
-rw-r--r--  private/ntos/ke/i386/dmpstate.c  363
-rw-r--r--  private/ntos/ke/i386/emv86.asm  1973
-rw-r--r--  private/ntos/ke/i386/emxcptn.asm  657
-rw-r--r--  private/ntos/ke/i386/exceptn.c  1270
-rw-r--r--  private/ntos/ke/i386/flush.c  170
-rw-r--r--  private/ntos/ke/i386/flushtb.c  565
-rw-r--r--  private/ntos/ke/i386/gdtsup.c  174
-rw-r--r--  private/ntos/ke/i386/geni386.c  812
-rw-r--r--  private/ntos/ke/i386/i386init.c  223
-rw-r--r--  private/ntos/ke/i386/i386pcr.asm  200
-rw-r--r--  private/ntos/ke/i386/instemul.asm  2873
-rw-r--r--  private/ntos/ke/i386/int.asm  132
-rw-r--r--  private/ntos/ke/i386/intobj.c  767
-rw-r--r--  private/ntos/ke/i386/intsup.asm  774
-rw-r--r--  private/ntos/ke/i386/iopm.c  529
-rw-r--r--  private/ntos/ke/i386/kernlini.c  1581
-rw-r--r--  private/ntos/ke/i386/ki386.h  34
-rw-r--r--  private/ntos/ke/i386/kimacro.inc  1288
-rw-r--r--  private/ntos/ke/i386/largepag.c  179
-rw-r--r--  private/ntos/ke/i386/ldtsup.c  392
-rw-r--r--  private/ntos/ke/i386/ldtsup2.asm  164
-rw-r--r--  private/ntos/ke/i386/mi.inc  43
-rw-r--r--  private/ntos/ke/i386/misc.c  164
-rw-r--r--  private/ntos/ke/i386/mpipia.asm  435
-rw-r--r--  private/ntos/ke/i386/mtrr.c  1887
-rw-r--r--  private/ntos/ke/i386/mtrr.h  124
-rw-r--r--  private/ntos/ke/i386/newsysbg.asm  1150
-rw-r--r--  private/ntos/ke/i386/p2w.asm  69
-rw-r--r--  private/ntos/ke/i386/procstat.asm  323
-rw-r--r--  private/ntos/ke/i386/services.nap  123
-rw-r--r--  private/ntos/ke/i386/services.stb  131
-rw-r--r--  private/ntos/ke/i386/sources  47
-rw-r--r--  private/ntos/ke/i386/spindbg.asm  162
-rw-r--r--  private/ntos/ke/i386/spininst.asm  943
-rw-r--r--  private/ntos/ke/i386/spinlock.asm  466
-rw-r--r--  private/ntos/ke/i386/table.stb  102
-rw-r--r--  private/ntos/ke/i386/threadbg.asm  99
-rw-r--r--  private/ntos/ke/i386/thredini.c  634
-rw-r--r--  private/ntos/ke/i386/timindex.asm  171
-rw-r--r--  private/ntos/ke/i386/trap.asm  5486
-rw-r--r--  private/ntos/ke/i386/trapc.c  545
-rw-r--r--  private/ntos/ke/i386/vdm.c  1641
-rw-r--r--  private/ntos/ke/i386/vdmint21.c  228
-rw-r--r--  private/ntos/ke/i386/vdmp.h  73
-rw-r--r--  private/ntos/ke/kernldat.c  623
-rw-r--r--  private/ntos/ke/ki.h  1128
-rw-r--r--  private/ntos/ke/kiinit.c  301
-rw-r--r--  private/ntos/ke/mips/alignem.c  375
-rw-r--r--  private/ntos/ke/mips/alignx.s  312
-rw-r--r--  private/ntos/ke/mips/allproc.c  392
-rw-r--r--  private/ntos/ke/mips/apcuser.c  140
-rw-r--r--  private/ntos/ke/mips/branchem.c  311
-rw-r--r--  private/ntos/ke/mips/buserror.c  309
-rw-r--r--  private/ntos/ke/mips/callback.c  237
-rw-r--r--  private/ntos/ke/mips/callout.s  411
-rw-r--r--  private/ntos/ke/mips/dmpstate.c  713
-rw-r--r--  private/ntos/ke/mips/exceptn.c  896
-rw-r--r--  private/ntos/ke/mips/floatem.c  4599
-rw-r--r--  private/ntos/ke/mips/flush.c  820
-rw-r--r--  private/ntos/ke/mips/genmips.c  1015
-rw-r--r--  private/ntos/ke/mips/getsetrg.c  1179
-rw-r--r--  private/ntos/ke/mips/initkr.c  463
-rw-r--r--  private/ntos/ke/mips/intobj.c  434
-rw-r--r--  private/ntos/ke/mips/services.stb  64
-rw-r--r--  private/ntos/ke/mips/sources  41
-rw-r--r--  private/ntos/ke/mips/table.stb  61
-rw-r--r--  private/ntos/ke/mips/threadbg.s  128
-rw-r--r--  private/ntos/ke/mips/thredini.c  285
-rw-r--r--  private/ntos/ke/mips/timindex.s  111
-rw-r--r--  private/ntos/ke/mips/vdm.c  52
-rw-r--r--  private/ntos/ke/mips/x4ctxsw.s  1497
-rw-r--r--  private/ntos/ke/mips/x4mpipi.s  451
-rw-r--r--  private/ntos/ke/mips/x4sqrt.s  113
-rw-r--r--  private/ntos/ke/mips/x4start.s  968
-rw-r--r--  private/ntos/ke/mips/x4trap.s  4622
-rw-r--r--  private/ntos/ke/mips/xxapcint.s  123
-rw-r--r--  private/ntos/ke/mips/xxclock.s  592
-rw-r--r--  private/ntos/ke/mips/xxflshtb.c  593
-rw-r--r--  private/ntos/ke/mips/xxintsup.s  713
-rw-r--r--  private/ntos/ke/mips/xxirql.s  218
-rw-r--r--  private/ntos/ke/mips/xxmiscs.s  289
-rw-r--r--  private/ntos/ke/mips/xxmpipi.c  209
-rw-r--r--  private/ntos/ke/mips/xxregsv.s  151
-rw-r--r--  private/ntos/ke/mips/xxspinlk.s  540
-rw-r--r--  private/ntos/ke/miscc.c  679
-rw-r--r--  private/ntos/ke/mp/makefile  6
-rw-r--r--  private/ntos/ke/mp/makefile.inc  4
-rw-r--r--  private/ntos/ke/mp/sources  29
-rw-r--r--  private/ntos/ke/mutntobj.c  344
-rw-r--r--  private/ntos/ke/ppc/alignem.c  888
-rw-r--r--  private/ntos/ke/ppc/allproc.c  423
-rw-r--r--  private/ntos/ke/ppc/apcuser.c  194
-rw-r--r--  private/ntos/ke/ppc/callback.c  241
-rw-r--r--  private/ntos/ke/ppc/callout.s  352
-rw-r--r--  private/ntos/ke/ppc/clock.s  639
-rw-r--r--  private/ntos/ke/ppc/ctxswap.s  2942
-rw-r--r--  private/ntos/ke/ppc/dmpstate.c  797
-rw-r--r--  private/ntos/ke/ppc/exceptn.c  961
-rw-r--r--  private/ntos/ke/ppc/flush.c  940
-rw-r--r--  private/ntos/ke/ppc/flushtb.c  274
-rw-r--r--  private/ntos/ke/ppc/genppc.c  990
-rw-r--r--  private/ntos/ke/ppc/getsetrg.c  516
-rw-r--r--  private/ntos/ke/ppc/initkr.c  742
-rw-r--r--  private/ntos/ke/ppc/intobj.c  469
-rw-r--r--  private/ntos/ke/ppc/intsup.s  862
-rw-r--r--  private/ntos/ke/ppc/ipi.c  205
-rw-r--r--  private/ntos/ke/ppc/irql.s  98
-rw-r--r--  private/ntos/ke/ppc/miscasm.s  656
-rw-r--r--  private/ntos/ke/ppc/mpipi.s  657
-rw-r--r--  private/ntos/ke/ppc/pcr.s  80
-rw-r--r--  private/ntos/ke/ppc/procstat.s  309
-rw-r--r--  private/ntos/ke/ppc/real0.s  6289
-rw-r--r--  private/ntos/ke/ppc/services.stb  81
-rw-r--r--  private/ntos/ke/ppc/sources  32
-rw-r--r--  private/ntos/ke/ppc/spinlock.s  598
-rw-r--r--  private/ntos/ke/ppc/table.stb  72
-rw-r--r--  private/ntos/ke/ppc/threadbg.s  219
-rw-r--r--  private/ntos/ke/ppc/thredini.c  342
-rw-r--r--  private/ntos/ke/ppc/timindex.s  175
-rw-r--r--  private/ntos/ke/ppc/vdm.c  54
-rw-r--r--  private/ntos/ke/procobj.c  858
-rw-r--r--  private/ntos/ke/profobj.c  807
-rw-r--r--  private/ntos/ke/queueobj.c  814
-rw-r--r--  private/ntos/ke/raisexcp.c  264
-rw-r--r--  private/ntos/ke/semphobj.c  222
-rw-r--r--  private/ntos/ke/services.tab  212
-rw-r--r--  private/ntos/ke/sources.inc  76
-rw-r--r--  private/ntos/ke/suspend.c  312
-rw-r--r--  private/ntos/ke/tests/mipsflt/flpt.c  8333
-rw-r--r--  private/ntos/ke/tests/mipsflt/flpt.cmd  1
-rw-r--r--  private/ntos/ke/tests/mipsflt/flpt.h  631
-rw-r--r--  private/ntos/ke/tests/mipsflt/flpt.rsp  14
-rw-r--r--  private/ntos/ke/tests/mipsflt/flpt2.c  2167
-rw-r--r--  private/ntos/ke/tests/mipsflt/mips/flptx.s  1527
-rw-r--r--  private/ntos/ke/tests/x86div/i386/test.c  173
-rw-r--r--  private/ntos/ke/tests/x86div/i386/testa.asm  284
-rw-r--r--  private/ntos/ke/tests/x86div/makefile  6
-rw-r--r--  private/ntos/ke/tests/x86div/sources  23
-rw-r--r--  private/ntos/ke/tests/xcphnd/makefile  6
-rw-r--r--  private/ntos/ke/tests/xcphnd/sources  36
-rw-r--r--  private/ntos/ke/tests/xcphnd/xcpt4.c  2400
-rw-r--r--  private/ntos/ke/thredobj.c  2216
-rw-r--r--  private/ntos/ke/thredsup.c  884
-rw-r--r--  private/ntos/ke/timerobj.c  367
-rw-r--r--  private/ntos/ke/timersup.c  306
-rw-r--r--  private/ntos/ke/up/makefile  6
-rw-r--r--  private/ntos/ke/up/makefile.inc  8
-rw-r--r--  private/ntos/ke/up/sources  27
-rw-r--r--  private/ntos/ke/wait.c  1776
-rw-r--r--  private/ntos/ke/waitsup.c  378
-rw-r--r--  private/ntos/ke/xipi.c  244
-rw-r--r--  private/ntos/ke/yield.c  118
220 files changed, 143252 insertions, 0 deletions
diff --git a/private/ntos/ke/alpha/alignem.c b/private/ntos/ke/alpha/alignem.c
new file mode 100644
index 000000000..921e80235
--- /dev/null
+++ b/private/ntos/ke/alpha/alignem.c
@@ -0,0 +1,393 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+ alignem.c
+
+Abstract:
+
+ This module implements the code necessary to emulate unaligned data
+ references.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+ Joe Notarangelo 14-May-1992
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Function prototypes for emulation routines
+//
+ULONGLONG
+KiEmulateLoadLong(
+ IN PULONG UnalignedAddress
+ );
+
+ULONGLONG
+KiEmulateLoadQuad(
+ IN PUQUAD UnalignedAddress
+ );
+
+ULONGLONG
+KiEmulateLoadFloatIEEESingle(
+ IN PULONG UnalignedAddress
+ );
+
+ULONGLONG
+KiEmulateLoadFloatIEEEDouble(
+ IN PUQUAD UnalignedAddress
+ );
+
+VOID
+KiEmulateStoreLong(
+ IN PULONG UnalignedAddress,
+ IN ULONGLONG Data
+ );
+
+VOID
+KiEmulateStoreQuad(
+ IN PUQUAD UnalignedAddress,
+ IN ULONGLONG Data
+ );
+
+VOID
+KiEmulateStoreFloatIEEESingle(
+ IN PULONG UnalignedAddress,
+ IN ULONGLONG Data
+ );
+
+VOID
+KiEmulateStoreFloatIEEEDouble(
+ IN PUQUAD UnalignedAddress,
+ IN ULONGLONG Data
+ );
+
+
+
+
+BOOLEAN
+KiEmulateReference (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN BOOLEAN QuadwordOnly
+ )
+
+/*++
+
+Routine Description:
+
+ This routine emulates an unaligned data reference to an address in the
+ user part of the address space.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to the exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ QuadwordOnly - Supplies a boolean which controls whether both longword
+ and quadword references are to be emulated or quadword references
+ only.
+
+Return Value:
+
+ TRUE is returned if the reference is successfully emulated; otherwise,
+ FALSE is returned.
+
+--*/
+
+{
+
+ ULONGLONG Data;
+ PVOID EffectiveAddress;
+ PVOID ExceptionAddress;
+ ULONG Fa;
+ ULONG Opcode;
+ KPROCESSOR_MODE PreviousMode;
+ ULONG Ra;
+ KIRQL OldIrql;
+
+ //
+ // Call out to profile interrupt if alignment profiling is active
+ //
+ if (KiProfileAlignmentFixup) {
+
+ if (++KiProfileAlignmentFixupCount >= KiProfileAlignmentFixupInterval) {
+
+ KeRaiseIrql(PROFILE_LEVEL, &OldIrql);
+ KiProfileAlignmentFixupCount = 0;
+ KeProfileInterruptWithSource(TrapFrame, ProfileAlignmentFixup);
+ KeLowerIrql(OldIrql);
+
+ }
+ }
+
+ //
+ // Save original exception address in case another exception occurs
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // The ExceptionInformation in the ExceptionRecord has already
+ // recorded information we need to emulate the access.
+ //
+ // ExceptionInformation:
+ // [0] = opcode
+ // [1] = destination register
+ // [2] = effective address of access
+
+ Opcode = ExceptionRecord->ExceptionInformation[0];
+ Ra = ExceptionRecord->ExceptionInformation[1];
+ Fa = Ra + 32; // convert to floating register name for floating opcodes
+ EffectiveAddress = (PVOID)ExceptionRecord->ExceptionInformation[2];
+
+ //
+ // Capture previous mode from trap frame not current thread.
+ //
+
+ PreviousMode = (KPROCESSOR_MODE)(((PSR *)(&TrapFrame->Psr))->MODE);
+
+ //
+ // Any exception that occurs during the attempted emulation will cause
+ // the emulation to be aborted. The new exception code and information
+ // will be copied to the original exception record and FALSE will be
+ // returned. If the unaligned access was not from kernel mode then
+ // probe the effective address before performing the emulation.
+ //
+
+ try {
+
+ switch (Opcode) {
+
+ //
+ // load longword
+ //
+
+ case LDL_OP:
+ if (QuadwordOnly != FALSE) {
+ return FALSE;
+ }
+ if( PreviousMode != KernelMode ){
+ ProbeForRead( EffectiveAddress,
+ sizeof(LONG),
+ sizeof(UCHAR) );
+ }
+ Data = KiEmulateLoadLong( EffectiveAddress );
+ KiSetRegisterValue( Ra,
+ Data,
+ ExceptionFrame,
+ TrapFrame );
+
+ break;
+
+ //
+ // load quadword
+ //
+
+ case LDQ_OP:
+ if( PreviousMode != KernelMode ){
+ ProbeForRead( EffectiveAddress,
+ sizeof(LONGLONG),
+ sizeof(UCHAR) );
+ }
+ Data = KiEmulateLoadQuad( EffectiveAddress );
+ KiSetRegisterValue( Ra,
+ Data,
+ ExceptionFrame,
+ TrapFrame );
+
+ break;
+
+ //
+ // load IEEE single float
+ //
+
+ case LDS_OP:
+ if (QuadwordOnly != FALSE) {
+ return FALSE;
+ }
+ if( PreviousMode != KernelMode ){
+ ProbeForRead( EffectiveAddress,
+ sizeof(float),
+ sizeof(UCHAR) );
+ }
+ Data = KiEmulateLoadFloatIEEESingle( EffectiveAddress );
+ KiSetRegisterValue( Fa,
+ Data,
+ ExceptionFrame,
+ TrapFrame );
+
+ break;
+
+ //
+ // load IEEE double float
+ //
+
+ case LDT_OP:
+ if( PreviousMode != KernelMode ){
+ ProbeForRead( EffectiveAddress,
+ sizeof(DOUBLE),
+ sizeof(UCHAR) );
+ }
+ Data = KiEmulateLoadFloatIEEEDouble( EffectiveAddress );
+ KiSetRegisterValue( Fa,
+ Data,
+ ExceptionFrame,
+ TrapFrame );
+
+ break;
+
+ //
+ // Load word unsigned.
+ //
+
+ case LDWU_OP :
+ if (QuadwordOnly != FALSE) {
+ return FALSE;
+ }
+ if (PreviousMode != KernelMode) {
+ ProbeForRead(EffectiveAddress,
+ sizeof(SHORT),
+ sizeof(UCHAR));
+ }
+ Data = (ULONGLONG)*(UNALIGNED USHORT *)EffectiveAddress;
+ KiSetRegisterValue(Ra,
+ Data,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // store longword
+ //
+
+ case STL_OP:
+ if (QuadwordOnly != FALSE) {
+ return FALSE;
+ }
+ if( PreviousMode != KernelMode ){
+ ProbeForWrite( EffectiveAddress,
+ sizeof(LONG),
+ sizeof(UCHAR) );
+ }
+ Data = KiGetRegisterValue( Ra,
+ ExceptionFrame,
+ TrapFrame );
+ KiEmulateStoreLong( EffectiveAddress, (ULONG)Data );
+
+ break;
+
+ //
+ // store quadword
+ //
+
+ case STQ_OP:
+ if( PreviousMode != KernelMode ){
+ ProbeForWrite( EffectiveAddress,
+ sizeof(LONGLONG),
+ sizeof(UCHAR) );
+ }
+ Data = KiGetRegisterValue( Ra,
+ ExceptionFrame,
+ TrapFrame );
+ KiEmulateStoreQuad( EffectiveAddress, Data );
+
+ break;
+
+ //
+ // store IEEE float single
+ //
+
+ case STS_OP:
+ if (QuadwordOnly != FALSE) {
+ return FALSE;
+ }
+ if( PreviousMode != KernelMode ){
+ ProbeForWrite( EffectiveAddress,
+ sizeof(float),
+ sizeof(UCHAR) );
+ }
+ Data = KiGetRegisterValue( Fa,
+ ExceptionFrame,
+ TrapFrame );
+ KiEmulateStoreFloatIEEESingle( EffectiveAddress, Data );
+
+ break;
+
+ //
+ // store IEEE float double
+ //
+
+ case STT_OP:
+ if( PreviousMode != KernelMode ){
+ ProbeForWrite( EffectiveAddress,
+ sizeof(DOUBLE),
+ sizeof(UCHAR) );
+ }
+ Data = KiGetRegisterValue( Fa,
+ ExceptionFrame,
+ TrapFrame );
+ KiEmulateStoreFloatIEEEDouble( EffectiveAddress, Data );
+
+ break;
+
+ //
+ // Store word.
+ //
+
+ case STW_OP :
+ if (QuadwordOnly != FALSE) {
+ return FALSE;
+ }
+ if (PreviousMode != KernelMode) {
+ ProbeForWrite(EffectiveAddress,
+ sizeof(SHORT),
+ sizeof(UCHAR));
+ }
+ Data = KiGetRegisterValue(Ra,
+ ExceptionFrame,
+ TrapFrame);
+ *(UNALIGNED USHORT *)EffectiveAddress = (USHORT)Data;
+
+ break;
+
+ //
+ // all other instructions are not emulated
+ //
+
+ default:
+ return FALSE;
+ }
+
+ TrapFrame->Fir += 4;
+ return TRUE;
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+
+ return FALSE;
+ }
+}
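The emulation above leans on helpers such as KiEmulateLoadLong and KiEmulateStoreQuad, which are implemented in assembler elsewhere in this directory. As a minimal stand-alone sketch of what an unaligned longword load has to do when the hardware only performs aligned accesses, the following portable C assembles the value a byte at a time; it is an illustration added for this page, not code from the NT sources.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assemble a 32-bit little-endian value from an arbitrarily aligned
       address one byte at a time, so no aligned-load trap can occur. */
    static uint32_t emulate_load_long(const void *address)
    {
        const uint8_t *p = (const uint8_t *)address;
        return (uint32_t)p[0] |
               ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) |
               ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        uint8_t buffer[8] = { 0 };
        uint32_t value = 0x11223344;

        memcpy(&buffer[1], &value, sizeof(value));   /* deliberately misaligned */
        printf("0x%08x\n", (unsigned)emulate_load_long(&buffer[1]));
        return 0;
    }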
diff --git a/private/ntos/ke/alpha/allproc.c b/private/ntos/ke/alpha/allproc.c
new file mode 100644
index 000000000..e18826eee
--- /dev/null
+++ b/private/ntos/ke/alpha/allproc.c
@@ -0,0 +1,376 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 Digital Equipment Corporation
+
+Module Name:
+
+ allproc.c
+
+Abstract:
+
+ This module allocates and initializes kernel resources required
+ to start a new processor, and passes a complete processor state
+ structure to the HAL to obtain a new processor.
+
+Author:
+
+ David N. Cutler 29-Apr-1993
+ Joe Notarangelo 30-Nov-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+
+#pragma alloc_text(INIT, KeStartAllProcessors)
+
+#endif
+
+//
+// Define macro to round up to 64-byte boundary and define block sizes.
+//
+
+#define ROUND_UP(x) ((sizeof(x) + 64) & (~64))
+#define BLOCK1_SIZE ((3 * KERNEL_STACK_SIZE) + PAGE_SIZE)
+#define BLOCK2_SIZE (ROUND_UP(KPRCB) + ROUND_UP(ETHREAD) + 64)
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ );
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiStartProcessor (
+ IN PLOADER_PARAMETER_BLOCK Loaderblock
+ );
+
+
+VOID
+KeStartAllProcessors(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called during phase 1 initialize on the master boot
+ processor to start all of the other registered processors.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG MemoryBlock1;
+ ULONG MemoryBlock2;
+ ULONG Number;
+ ULONG PcrAddress;
+ ULONG PcrPage;
+ PKPRCB Prcb;
+ KPROCESSOR_STATE ProcessorState;
+ PRESTART_BLOCK RestartBlock;
+ BOOLEAN Started;
+
+#if !defined(NT_UP)
+
+ //
+ // If the registered number of processors is greater than the maximum
+ // number of processors supported, then only allow the maximum number
+ // of supported processors.
+ //
+
+ if (KeRegisteredProcessors > MAXIMUM_PROCESSORS) {
+ KeRegisteredProcessors = MAXIMUM_PROCESSORS;
+ }
+
+ //
+ // Initialize the processor state that will be used to start each of
+ // processors. Each processor starts in the system initialization code
+ // with address of the loader parameter block as an argument.
+ //
+
+ RtlZeroMemory(&ProcessorState, sizeof(KPROCESSOR_STATE));
+ ProcessorState.ContextFrame.IntA0 = (ULONGLONG)(LONG)KeLoaderBlock;
+ ProcessorState.ContextFrame.Fir = (ULONGLONG)(LONG)KiStartProcessor;
+
+ Number = 1;
+
+ while (Number < KeRegisteredProcessors) {
+
+ //
+ // Allocate a DPC stack, an idle thread kernel stack, a panic
+ // stack, a PCR page, a processor block, and an executive thread
+ // object. If the allocation fails or the allocation cannot be
+ // made from unmapped nonpaged pool, then stop starting processors.
+ //
+
+ MemoryBlock1 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK1_SIZE);
+ if (((PVOID)MemoryBlock1 == NULL) ||
+ ((MemoryBlock1 & 0xc0000000) != KSEG0_BASE)) {
+ if ((PVOID)MemoryBlock1 != NULL) {
+ ExFreePool((PVOID)MemoryBlock1);
+ }
+
+ break;
+ }
+
+ MemoryBlock2 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK2_SIZE);
+ if (((PVOID)MemoryBlock2 == NULL) ||
+ ((MemoryBlock2 & 0xc0000000) != KSEG0_BASE)) {
+ ExFreePool((PVOID)MemoryBlock1);
+ if ((PVOID)MemoryBlock2 != NULL) {
+ ExFreePool((PVOID)MemoryBlock2);
+ }
+
+ break;
+ }
+
+ //
+ // Zero both blocks of allocated memory.
+ //
+
+ RtlZeroMemory((PVOID)MemoryBlock1, BLOCK1_SIZE);
+ RtlZeroMemory((PVOID)MemoryBlock2, BLOCK2_SIZE);
+
+ //
+ // Set address of panic stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Alpha.PanicStack = MemoryBlock1 +
+ (1 * KERNEL_STACK_SIZE);
+
+ //
+ // Set address of idle thread kernel stack in loader parameter block.
+ //
+
+ KeLoaderBlock->KernelStack = MemoryBlock1 + (2 * KERNEL_STACK_SIZE);
+
+ ProcessorState.ContextFrame.IntSp =
+ (ULONGLONG)(LONG)KeLoaderBlock->KernelStack;
+
+ //
+ // Set address of panic stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Alpha.DpcStack = MemoryBlock1 +
+ (3 * KERNEL_STACK_SIZE);
+
+ //
+ // Set the page frame of the PCR page in the loader parameter block.
+ //
+
+ PcrAddress = MemoryBlock1 + (3 * KERNEL_STACK_SIZE);
+ PcrPage = (PcrAddress ^ KSEG0_BASE) >> PAGE_SHIFT;
+ KeLoaderBlock->u.Alpha.PcrPage = PcrPage;
+
+ //
+ // Set the address of the processor block and executive thread in the
+ // loader parameter block.
+ //
+
+ KeLoaderBlock->Prcb = (MemoryBlock2 + 63) & ~63;
+ KeLoaderBlock->Thread = KeLoaderBlock->Prcb + ROUND_UP(KPRCB);
+
+ //
+ // Attempt to start the next processor. If attempt is successful,
+ // then wait for the processor to get initialized. Otherwise,
+ // deallocate the processor resources and terminate the loop.
+ //
+
+ Started = HalStartNextProcessor(KeLoaderBlock, &ProcessorState);
+
+ if (Started == FALSE) {
+
+ ExFreePool((PVOID)MemoryBlock1);
+ ExFreePool((PVOID)MemoryBlock2);
+ break;
+
+ } else {
+
+ //
+ // Wait until boot is finished on the target processor before
+ // starting the next processor. Booting is considered to be
+ // finished when a processor completes its initialization and
+ // drops into the idle loop.
+ //
+
+ Prcb = (PKPRCB)(KeLoaderBlock->Prcb);
+ RestartBlock = Prcb->RestartBlock;
+ while (RestartBlock->BootStatus.BootFinished == 0) {
+ KiMb();
+ }
+ }
+
+ Number += 1;
+ }
+
+#endif
+
+ //
+ // Reset and synchronize the performance counters of all processors.
+ //
+
+ KiCalibratePerformanceCounter();
+ return;
+}
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function resets and synchronizes the performance counter on all
+ processors in the configuration.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG Count = 1;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQl to DISPATCH_LEVEL to avoid a possible context switch.
+ //
+
+#if !defined(NT_UP)
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+
+ //
+ // Initialize the reset performance counter packet, compute the target
+ // set of processors, and send the packet to the target processors, if
+ // any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ Count = (LONG)KeNumberProcessors;
+ KiIpiSendPacket(TargetProcessors,
+ KiCalibratePerformanceCounterTarget,
+ &Count,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Reset the performance counter on current processor.
+ //
+
+ HalCalibratePerformanceCounter((volatile PLONG)&Count);
+
+ //
+ // Wait until all target processors have reset and synchronized their
+ // performance counters.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to previous level.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for resetting the performance counter.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Count - Supplies a pointer to the number of processors in the host
+ configuration.
+
+ Parameter2 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Reset and synchronize the performance counter on the current processor
+ // and clear the reset performance counter address to signal the source to
+ // continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalCalibratePerformanceCounter((volatile PLONG)Count);
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
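A note on the ROUND_UP macro above: ((sizeof(x) + 64) & (~64)) clears only bit 6 of the sum rather than the low six bits, so by itself it does not round to a 64-byte boundary; the allocation still works because BLOCK2_SIZE carries an extra 64 bytes of slack and the PRCB address is re-aligned with (MemoryBlock2 + 63) & ~63. For comparison, the conventional power-of-two round-up idiom is sketched below in stand-alone portable C (an editorial example, not from the NT sources).

    #include <assert.h>
    #include <stddef.h>

    /* Round n up to the next multiple of the power-of-two value `align`. */
    static size_t round_up(size_t n, size_t align)
    {
        return (n + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        assert(round_up(100, 64) == 128);
        assert(round_up(128, 64) == 128);
        assert(round_up(129, 64) == 192);

        /* The macro as written: (100 + 64) & ~64 == 164, not a multiple of 64. */
        assert(((100 + 64) & ~64) == 164);
        return 0;
    }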
diff --git a/private/ntos/ke/alpha/apcint.s b/private/ntos/ke/alpha/apcint.s
new file mode 100644
index 000000000..3a4d5fc8a
--- /dev/null
+++ b/private/ntos/ke/alpha/apcint.s
@@ -0,0 +1,112 @@
+// TITLE("Asynchronous Procedure Call (APC) Interrupt")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// apcint.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process the
+// Asynchronous Procedure Call (APC) interrupt.
+//
+// Author:
+//
+// David N. Cutler (davec) 3-Apr-1990
+// Joe Notarangelo 15-Jul-1992 alpha version
+//
+// Environment:
+//
+// Kernel mode only, IRQL APC_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+ SBTTL("Asynchronous Procedure Call Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a software interrupt generated
+// at APC_LEVEL. Its function is to allocate an exception frame and call
+// the kernel APC delivery routine to deliver kernel mode APCs and to check
+// if a user mode APC should be delivered. If a user mode APC should be
+// delivered, then the kernel APC delivery routine constructs a context
+// frame on the user stack and alters the exception and trap frames so that
+// control will be transferred to the user APC dispatcher on return from the
+// interrupt.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved. The remainder of the machine state is saved if and only
+// if the previous mode was user mode. It is assumed that none of the
+// APC delivery code, nor any of the kernel mode APC routines themselves
+// use any floating point instructions.
+//
+// Arguments:
+//
+// s6/fp - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KiApcInterrupt, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate exception frame
+ stq ra, ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the volatile floating state and determine the previous mode.
+//
+
+ bsr ra, KiSaveVolatileFloatState // save volatile floats
+ ldl t0, TrPsr(fp) // get saved processor status
+ and t0, PSR_MODE_MASK, a0 // isolate previous mode
+ beq a0, 10f // if eq, kernel mode
+
+//
+// The previous mode was user.
+//
+// Save the nonvolatile machine state so a context record can be
+// properly constructed to deliver an APC to user mode if required.
+// It is also necessary to save the volatile floating state for
+// suspend/resume operations.
+//
+
+ stq s0, ExIntS0(sp) // save nonvolatile integer state
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+
+ bsr ra, KiSaveNonVolatileFloatState
+
+//
+// Attempt to deliver an APC.
+//
+
+10:
+ bis sp, zero, a1 // set address of exception frame
+ bis fp, zero, a2 // set address of trap frame
+ bsr ra, KiDeliverApc // call APC delivery routine
+
+//
+// Restore the volatile floating state and return from the interrupt.
+//
+ bsr ra, KiRestoreVolatileFloatState
+
+ ldq ra, ExIntRa(sp) // restore return address
+ lda sp, ExceptionFrameLength(sp) // deallocate exception frame
+ ret zero, (ra) // return
+
+ .end KiApcInterrupt
diff --git a/private/ntos/ke/alpha/apcuser.c b/private/ntos/ke/alpha/apcuser.c
new file mode 100644
index 000000000..fe34f467f
--- /dev/null
+++ b/private/ntos/ke/alpha/apcuser.c
@@ -0,0 +1,148 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ apcuser.c
+
+Abstract:
+
+ This module implements the machine dependent code necessary to initialize
+ a user mode APC.
+
+Author:
+
+ David N. Cutler (davec) 23-Apr-1990
+
+Environment:
+
+ Kernel mode only, IRQL APC_LEVEL.
+
+Revision History:
+
+ Thomas Van Baak (tvb) 13-May-1992
+
+ Adapted for Alpha AXP.
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiInitializeUserApc (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKNORMAL_ROUTINE NormalRoutine,
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize the context for a user mode APC.
+
+Arguments:
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ NormalRoutine - Supplies a pointer to the user mode APC routine.
+
+ NormalContext - Supplies a pointer to the user context for the APC
+ routine.
+
+ SystemArgument1 - Supplies the first system supplied value.
+
+ SystemArgument2 - Supplies the second system supplied value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextRecord;
+ EXCEPTION_RECORD ExceptionRecord;
+ LONG Length;
+ ULONG UserStack;
+
+ //
+ // Move the user mode state from the trap and exception frames to the
+ // context frame.
+ //
+
+ ContextRecord.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextRecord);
+
+ //
+ // Transfer the context information to the user stack, initialize the
+ // APC routine parameters, and modify the trap frame so execution will
+ // continue in user mode at the user mode APC dispatch routine.
+ //
+
+ try {
+
+ //
+ // Compute length of context record and new aligned user stack pointer.
+ //
+
+ Length = (sizeof(CONTEXT) + 15) & (~15);
+ UserStack = ((ULONG)ContextRecord.IntSp & (~15)) - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack, Length, sizeof(QUAD));
+ RtlMoveMemory((PULONG)UserStack, &ContextRecord, sizeof(CONTEXT));
+
+ //
+ // Set the address of the user APC routine, the APC parameters, the
+ // new frame pointer, and the new stack pointer in the current trap
+ // frame. Set the continuation address so control will be transferred
+ // to the user APC dispatcher.
+ //
+ // N.B. It is not possible to pass 64 bit arguments to the routine.
+ // N.B. ULONG becomes canonical longword with (ULONGLONG)(LONG) cast.
+ //
+ //
+
+ TrapFrame->IntSp = (ULONGLONG)(LONG)UserStack;
+ TrapFrame->IntFp = (ULONGLONG)(LONG)UserStack;
+ TrapFrame->IntA0 = (ULONGLONG)(LONG)NormalContext;
+ TrapFrame->IntA1 = (ULONGLONG)(LONG)SystemArgument1;
+ TrapFrame->IntA2 = (ULONGLONG)(LONG)SystemArgument2;
+ TrapFrame->IntA3 = (ULONGLONG)(LONG)NormalRoutine;
+ TrapFrame->Fir = (ULONGLONG)(LONG)KeUserApcDispatcher;
+
+ //
+ // If an exception occurs, then copy the exception information to an
+ // exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Set the address of the exception to the current program address
+ // and raise the exception by calling the exception dispatcher.
+ //
+
+ ExceptionRecord.ExceptionAddress = (PVOID)(TrapFrame->Fir);
+ KiDispatchException(&ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ UserMode,
+ TRUE);
+ }
+
+ return;
+}
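KiInitializeUserApc carves a 16-byte-aligned block for the context record out of the user stack and then points the trap frame at KeUserApcDispatcher. The stack arithmetic can be tried in isolation with the stand-alone sketch below, which substitutes a plain struct for CONTEXT and an ordinary buffer for the user stack; it is illustrative only and does not model the real structures.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct _FAKE_CONTEXT {       /* stand-in for the real CONTEXT */
        uint64_t IntSp;
        uint64_t Registers[32];
    } FAKE_CONTEXT;

    int main(void)
    {
        /* Simulated user stack; the "stack pointer" is somewhere inside it. */
        static uint8_t stack[4096];
        uintptr_t user_sp = (uintptr_t)&stack[4000];

        /* Round the record length up and the stack pointer down to 16 bytes. */
        size_t length = (sizeof(FAKE_CONTEXT) + 15) & ~(size_t)15;
        uintptr_t new_sp = (user_sp & ~(uintptr_t)15) - length;

        FAKE_CONTEXT context = { .IntSp = user_sp };
        memcpy((void *)new_sp, &context, sizeof(context));

        printf("old sp %#lx, new sp %#lx, length %zu\n",
               (unsigned long)user_sp, (unsigned long)new_sp, length);
        return 0;
    }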
diff --git a/private/ntos/ke/alpha/buserror.c b/private/ntos/ke/alpha/buserror.c
new file mode 100644
index 000000000..c3cd28b30
--- /dev/null
+++ b/private/ntos/ke/alpha/buserror.c
@@ -0,0 +1,113 @@
+/*++
+
+Copyright (c) 1993 Digital Equipment Corporation
+
+Module Name:
+
+ buserror.c
+
+Abstract:
+
+ This module implements the code necessary to process machine checks.
+
+Author:
+
+ Joe Notarangelo 11-Feb-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+
+
+VOID
+KiMachineCheck (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to process a machine check. If the vendor
+ has supplied a machine check handler with its HAL then the machine
+ check handler is called. If the routine returns TRUE, indicating
+ that the error has been handled, then execution resumes; otherwise,
+ a bugcheck is raised.
+
+ If no machine check handler is registered or it does not indicate
+ that the error has been handled, then this routine will attempt
+ default handling. Default handling consists of checking the
+ machine check status in the exception record. If the status indicates
+ that the machine check is correctable or retryable, then return and
+ resume execution; otherwise, a bugcheck is raised.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ if( ((ULONG)PCR->MachineCheckError != 0) &&
+ (PCR->MachineCheckError)(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame) ) {
+
+ //
+ // The HAL has handled the error.
+ //
+
+ return;
+
+ } else {
+
+ //
+ // Either there is no HAL handler, or it did not handle the
+ // error.
+ //
+
+ if( ExceptionRecord->ExceptionInformation[0] != 0 ){
+
+ //
+ // The error is either correctable or retryable, resume
+ // execution.
+ //
+
+#if DBG
+
+ DbgPrint( "MCHK: resuming correctable or retryable error\n" );
+
+#endif //DBG
+
+ return;
+
+ }
+ }
+
+
+ //
+ // The error was not handled and is not correctable or retryable.
+ //
+
+ KeBugCheck(DATA_BUS_ERROR);
+}
+
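KiMachineCheck gives an optional HAL handler, reached through the PCR->MachineCheckError function pointer, the first chance at the error, and falls back to the default correctable/retryable policy otherwise. That optional-handler-with-fallback shape is sketched below with invented names (DEMO_ERROR, registered_handler); it is an editorial illustration, not the HAL interface itself.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical error record: nonzero Retryable means it is safe to resume. */
    typedef struct _DEMO_ERROR {
        int Retryable;
    } DEMO_ERROR;

    typedef bool (*DEMO_ERROR_HANDLER)(const DEMO_ERROR *error);

    static DEMO_ERROR_HANDLER registered_handler;     /* NULL if none registered */

    static bool demo_handler(const DEMO_ERROR *error)
    {
        /* Pretend the platform handler can only correct retryable errors. */
        if (error->Retryable != 0) {
            printf("platform handler corrected the error\n");
            return true;
        }
        return false;
    }

    static void handle_machine_error(const DEMO_ERROR *error)
    {
        /* Give a registered handler the first chance at the error. */
        if (registered_handler != NULL && registered_handler(error)) {
            return;                                    /* handled, resume */
        }

        /* Default policy: resume retryable errors, otherwise treat as fatal. */
        if (error->Retryable != 0) {
            printf("resuming retryable error\n");
            return;
        }
        printf("fatal error: would bugcheck here\n");
    }

    int main(void)
    {
        DEMO_ERROR retryable = { 1 };
        DEMO_ERROR fatal = { 0 };

        registered_handler = demo_handler;
        handle_machine_error(&retryable);              /* corrected by handler */
        handle_machine_error(&fatal);                  /* falls through to fatal */
        return 0;
    }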
diff --git a/private/ntos/ke/alpha/byteem.c b/private/ntos/ke/alpha/byteem.c
new file mode 100644
index 000000000..dc35ef60a
--- /dev/null
+++ b/private/ntos/ke/alpha/byteem.c
@@ -0,0 +1,334 @@
+/*++
+
+Copyright (c) 1995 Digital Equipment Corporation
+
+Module Name:
+
+ byteem.c
+
+Abstract:
+
+ This module implements the code necessary to emulate the new set of Alpha
+ byte and word instructions defined by ECO 81.
+
+ N.B. This file must be compiled without the use of byte/word instructions
+ to avoid fatal recursive exceptions.
+
+Author:
+
+ Wim Colgate (colgate) 18-May-1995
+ Thomas Van Baak (tvb) 18-May-1995
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define function prototypes for emulation routines written in assembler.
+//
+
+VOID
+KiInterlockedStoreByte (
+ IN PUCHAR Address,
+ IN UCHAR Data
+ );
+
+VOID
+KiInterlockedStoreWord (
+ IN PUSHORT Address,
+ IN USHORT Data
+ );
+
+BOOLEAN
+KiEmulateByteWord (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine emulates Alpha instructions defined by ECO 81. This includes
+ the load byte unsigned, store byte, load word unsigned, store word, sign
+ extend byte, and sign extend word instructions.
+
+ If a misaligned word access is detected, the illegal instruction exception
+ record is converted into a data misalignment exception record, no emulation
+ is performed, and a value of FALSE is returned. It is expected that the
+ call to this function is followed by a check for a data misalignment
+ exception and a call to the data misalignment emulation function if
+ appropriate.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to the exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ A value of TRUE is returned if the instruction is successfully emulated,
+ otherwise a value of FALSE is returned.
+
+--*/
+
+{
+ ULONGLONG Data;
+ ULONGLONG EffectiveAddress;
+ PVOID ExceptionAddress;
+ ALPHA_INSTRUCTION Instruction;
+ KIRQL OldIrql;
+ KPROCESSOR_MODE PreviousMode;
+
+ //
+ // Save original exception address in case another exception occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation will cause
+ // the emulation to be aborted. The new exception code and information
+ // will be copied to the original exception record and FALSE will be
+ // returned. If the memory access was not from kernel mode then probe
+ // the effective address before performing the emulation.
+ //
+
+ try {
+
+ //
+ // Get faulting instruction and case on instruction type.
+ //
+
+ Instruction = *((PALPHA_INSTRUCTION)ExceptionAddress);
+ switch (Instruction.Memory.Opcode) {
+
+ //
+ // Load/store operations.
+ //
+
+ case LDBU_OP :
+ case LDWU_OP :
+ case STB_OP :
+ case STW_OP :
+
+ //
+ // Capture previous mode from trap frame not current thread.
+ //
+
+ PreviousMode = (KPROCESSOR_MODE)(((PSR *)(&TrapFrame->Psr))->MODE);
+
+ //
+ // Compute effective address and if the address is non-canonical
+ // then change the exception code to STATUS_ACCESS_VIOLATION and
+ // return FALSE.
+ //
+
+ EffectiveAddress = (ULONGLONG)Instruction.Memory.MemDisp +
+ KiGetRegisterValue(Instruction.Memory.Rb,
+ ExceptionFrame,
+ TrapFrame);
+
+ if (EffectiveAddress != (ULONGLONG)(PVOID)EffectiveAddress) {
+ ExceptionRecord->ExceptionCode = STATUS_ACCESS_VIOLATION;
+ ExceptionRecord->NumberParameters = 0;
+ return FALSE;
+ }
+
+ //
+ // Case on individual load/store instruction type.
+ //
+
+ switch (Instruction.Memory.Opcode) {
+
+ //
+ // Load byte unsigned.
+ //
+
+ case LDBU_OP :
+ if (PreviousMode != KernelMode) {
+ ProbeForRead(EffectiveAddress,
+ sizeof(UCHAR),
+ sizeof(UCHAR));
+ }
+ Data = (ULONGLONG)*(PUCHAR)EffectiveAddress;
+ KiSetRegisterValue(Instruction.Memory.Ra,
+ Data,
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // Load word unsigned.
+ //
+
+ case LDWU_OP :
+ if (EffectiveAddress & 0x1) {
+ goto AlignmentFault;
+ }
+ if (PreviousMode != KernelMode) {
+ ProbeForRead((PUSHORT)EffectiveAddress,
+ sizeof(USHORT),
+ sizeof(UCHAR));
+ }
+ Data = (ULONGLONG)*(PUSHORT)EffectiveAddress;
+ KiSetRegisterValue(Instruction.Memory.Ra,
+ Data,
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // Store byte.
+ //
+
+ case STB_OP :
+ if (PreviousMode != KernelMode) {
+ ProbeForWrite((PUCHAR)EffectiveAddress,
+ sizeof(UCHAR),
+ sizeof(UCHAR));
+ }
+ Data = KiGetRegisterValue(Instruction.Memory.Ra,
+ ExceptionFrame,
+ TrapFrame);
+ KiInterlockedStoreByte((PUCHAR)EffectiveAddress,
+ (UCHAR)Data);
+ break;
+
+ //
+ // Store word.
+ //
+
+ case STW_OP :
+ if (EffectiveAddress & 0x1) {
+ goto AlignmentFault;
+ }
+ if (PreviousMode != KernelMode) {
+ ProbeForWrite((PUSHORT)EffectiveAddress,
+ sizeof(USHORT),
+ sizeof(UCHAR));
+ }
+ Data = KiGetRegisterValue(Instruction.Memory.Ra,
+ ExceptionFrame,
+ TrapFrame);
+ KiInterlockedStoreWord((PUSHORT)EffectiveAddress,
+ (USHORT)Data);
+ break;
+ }
+
+ break;
+
+ //
+ // Sign extend operations.
+ //
+
+ case SEXT_OP :
+ switch (Instruction.OpReg.Function) {
+
+ //
+ // Sign extend byte.
+ //
+
+ case SEXTB_FUNC :
+ Data = KiGetRegisterValue(Instruction.OpReg.Rb,
+ ExceptionFrame,
+ TrapFrame);
+ KiSetRegisterValue(Instruction.OpReg.Rc,
+ (ULONGLONG)(CHAR)Data,
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // Sign extend word.
+ //
+
+ case SEXTW_FUNC :
+ Data = KiGetRegisterValue(Instruction.OpReg.Rb,
+ ExceptionFrame,
+ TrapFrame);
+ KiSetRegisterValue(Instruction.OpReg.Rc,
+ (ULONGLONG)(SHORT)Data,
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // All other functions are not emulated.
+ //
+
+ default :
+ return FALSE;
+ }
+
+ break;
+
+ //
+ // All other instructions are not emulated.
+ //
+
+ default :
+ return FALSE;
+ }
+
+#if 0
+ //
+ // Call out to profile interrupt if byte/word emulation profiling is
+ // active.
+ //
+
+ if (KiProfileByteWordEmulation != FALSE) {
+ if (++KiProfileByteWordEmulationCount >=
+ KiProfileByteWordEmulationInterval) {
+
+ KeRaiseIrql(PROFILE_LEVEL, &OldIrql);
+ KiProfileByteWordEmulationCount = 0;
+ KeProfileInterruptWithSource(TrapFrame,
+ ProfileByteWordEmulation);
+ KeLowerIrql(OldIrql);
+ }
+ }
+#endif
+
+ TrapFrame->Fir += 4;
+
+ return TRUE;
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+
+ return FALSE;
+ }
+
+AlignmentFault :
+
+ //
+ // A misaligned word access has been encountered. Change the illegal
+ // instruction exception record into a data misalignment exception record
+ // (the format is defined by PALcode) and return FALSE.
+ //
+
+ ExceptionRecord->ExceptionCode = STATUS_DATATYPE_MISALIGNMENT;
+ ExceptionRecord->NumberParameters = 3;
+ ExceptionRecord->ExceptionInformation[0] = Instruction.Memory.Opcode;
+ ExceptionRecord->ExceptionInformation[1] = Instruction.Memory.Ra;
+ ExceptionRecord->ExceptionInformation[2] = (ULONG)EffectiveAddress;
+
+ return FALSE;
+}
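The test EffectiveAddress != (ULONGLONG)(PVOID)EffectiveAddress above rejects 64-bit effective addresses that do not survive a round trip through the 32-bit PVOID type; assuming the compiler widens pointers by sign extension, the upper 32 bits must be the sign extension of bit 31 for the address to be accepted. The same round-trip check is sketched below with explicit integer types (a stand-alone illustration, not NT code).

    #include <stdint.h>
    #include <stdio.h>

    /* A 64-bit value is a canonical 32-bit address if its upper 32 bits are
       the sign extension of bit 31 of its lower 32 bits. */
    static int is_canonical_32(uint64_t address)
    {
        uint32_t low = (uint32_t)address;
        uint64_t extended = (low & 0x80000000u)
                                ? (0xFFFFFFFF00000000ull | low)
                                : low;
        return address == extended;
    }

    int main(void)
    {
        printf("%d\n", is_canonical_32(0x0000000012345678ull));  /* prints 1 */
        printf("%d\n", is_canonical_32(0xFFFFFFFF80000000ull));  /* prints 1 */
        printf("%d\n", is_canonical_32(0x0000000180000000ull));  /* prints 0 */
        return 0;
    }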
diff --git a/private/ntos/ke/alpha/byteme.s b/private/ntos/ke/alpha/byteme.s
new file mode 100644
index 000000000..67c934d4b
--- /dev/null
+++ b/private/ntos/ke/alpha/byteme.s
@@ -0,0 +1,128 @@
+// TITLE("Byte and Short Emulation")
+//++
+//
+//
+// Copyright (c) 1995 Digital Equipment Corporation
+//
+// Module Name:
+//
+// byteme.s
+//
+// Abstract:
+//
+// This module implements the code to perform atomic store operations
+// on bytes and shorts (words).
+//
+// Byte and word opcodes were added to the Alpha architecture as of the
+// Alpha 21164A (EV56). On the 21164 and earlier chips, these opcodes
+// raise an illegal instruction exception, and the resulting emulation
+// leads here.
+//
+// Author:
+//
+// Wim Colgate, May 18th, 1995
+// Tom Van Baak, May 18th, 1995
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+//++
+//
+// VOID
+// KiInterlockedStoreByte(
+// IN PUCHAR Address,
+// IN UCHAR Data
+// )
+//
+// Routine Description:
+//
+// This routine stores the data byte specified by Data to the location
+// specified by Address. Byte granularity is required, but these chips
+// can only store a full longword, so the read-modify-write of the
+// containing longword must be interlocked.
+//
+// Arguments:
+//
+// Address(a0) - Supplies a pointer to the byte data value.
+// Data(a1) - Supplies the byte data value to store.
+//
+// Return Value:
+//
+// None
+//
+//--
+
+ LEAF_ENTRY(KiInterlockedStoreByte)
+
+ bic a0, 3, t0 // clear low 2 bits to longword align
+ and a0, 3, t1 // isolate byte offset within longword
+10:
+ ldl_l t2, 0(t0) // load locked full longword
+ insbl a1, t1, t3 // insert byte low
+ mskbl t2, t1, t2 // mask byte low
+ bis t2, t3, t2 // merge data
+ stl_c t2, 0(t0) // store conditional
+ beq t2, 20f // failed to store, retry, forward
+ // branch for optimistic prediction
+
+ ret zero, (ra), 1 // return
+
+20:
+ br zero, 10b // try again
+
+
+ .end KiInterlockedStoreByte
+
+
+
+
+//++
+//
+// VOID
+// KiInterlockedStoreWord(
+// IN PUSHORT Address,
+// IN USHORT Data
+// )
+//
+// Routine Description:
+//
+// This routine stores the short data specified by Data to the aligned
+// location specified by Address. Word granularity is required, but these
+// chips can only store a full longword, so the read-modify-write of the
+// containing longword must be interlocked.
+//
+// Arguments:
+//
+// Address(a0) - Supplies a pointer to an aligned short data value.
+// Data(a1) - Supplies the short data value to store.
+//
+// Return Value:
+//
+// None
+//
+//--
+
+ LEAF_ENTRY(KiInterlockedStoreWord)
+
+ bic a0, 2, t0 // clear the word-offset bit to longword align
+ and a0, 2, t1 // isolate word offset within longword
+10:
+ ldl_l t2, 0(t0) // load locked full longword
+ inswl a1, t1, t3 // insert word low
+ mskwl t2, t1, t2 // mask word low
+ bis t2, t3, t2 // merge data
+ stl_c t2, 0(t0) // store conditional
+ beq t2, 20f // failed to store, retry, forward
+ // branch for optimistic prediction
+
+ ret zero, (ra), 1 // return
+
+20:
+ br zero, 10b // try again
+
+ .end KiInterlockedStoreWord
+
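KiInterlockedStoreByte and KiInterlockedStoreWord update a byte or word by performing a load-locked/store-conditional read-modify-write of the containing aligned longword, inserting the new field and preserving its neighbors. The same pattern can be written portably as a C11 compare-exchange loop, as in the sketch below; this is an editorial illustration of the technique, not the routines themselves.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Store one byte into a 32-bit word shared with other writers, without
       disturbing the other three bytes (little-endian byte numbering). */
    static void interlocked_store_byte(_Atomic uint32_t *word,
                                       unsigned byte_index, uint8_t data)
    {
        uint32_t shift = byte_index * 8;
        uint32_t mask = (uint32_t)0xFF << shift;
        uint32_t old_value = atomic_load(word);
        uint32_t new_value;

        do {
            /* Merge the new byte into the latest observed longword value. */
            new_value = (old_value & ~mask) | ((uint32_t)data << shift);
        } while (!atomic_compare_exchange_weak(word, &old_value, new_value));
    }

    int main(void)
    {
        _Atomic uint32_t word = 0x11223344;

        interlocked_store_byte(&word, 1, 0xAB);            /* replaces the 0x33 byte */
        printf("0x%08x\n", (unsigned)atomic_load(&word));  /* prints 0x1122ab44 */
        return 0;
    }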
diff --git a/private/ntos/ke/alpha/callback.c b/private/ntos/ke/alpha/callback.c
new file mode 100644
index 000000000..afaf12b3b
--- /dev/null
+++ b/private/ntos/ke/alpha/callback.c
@@ -0,0 +1,242 @@
+/*++
+
+Copyright (c) 1994 Microsoft Corporation
+
+Module Name:
+
+ callback.c
+
+Abstract:
+
+ This module implements stubs for the user mode call back services.
+
+Author:
+
+ David N. Cutler (davec) 29-Oct-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+KeUserModeCallback (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ IN PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls out from kernel mode to a user mode function.
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied
+ to the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ OutputBuffer - Supplies a pointer to a variable that receives
+ the address of the output buffer.
+
+ OutputLength - Supplies a pointer to a variable that receives
+ the length of the output buffer.
+
+Return Value:
+
+ If the callout cannot be executed, then an error status is
+ returned. Otherwise, the status returned by the callback function
+ is returned.
+
+--*/
+
+{
+ PUCALLOUT_FRAME CalloutFrame;
+ ULONG Length;
+ ULONGLONG OldStack;
+ NTSTATUS Status;
+ PKTRAP_FRAME TrapFrame;
+ PULONG UserStack;
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // Get the user mode stack pointer and attempt to copy input buffer
+ // to the user stack.
+ //
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+ OldStack = TrapFrame->IntSp;
+ try {
+
+ //
+ // Compute new user mode stack address, probe for writability,
+ // and copy the input buffer to the user stack.
+ //
+ // N.B. Alpha requires stacks to be 16-byte aligned, therefore
+ // the input length must be rounded up to a 16-byte boundary.
+ //
+
+ Length = (InputLength +
+ 16 - 1 + sizeof(UCALLOUT_FRAME)) & ~(16 - 1);
+
+ CalloutFrame = (PUCALLOUT_FRAME)(OldStack - Length);
+ ProbeForWrite(CalloutFrame, Length, sizeof(QUAD));
+ RtlCopyMemory(CalloutFrame + 1, InputBuffer, InputLength);
+
+ //
+ // Allocate stack frame and fill in callout arguments.
+ //
+
+ CalloutFrame->Buffer = (PVOID)(CalloutFrame + 1);
+ CalloutFrame->Length = InputLength;
+ CalloutFrame->ApiNumber = ApiNumber;
+ CalloutFrame->Sp = OldStack;
+ CalloutFrame->Ra = TrapFrame->IntRa;
+
+ //
+ // If an exception occurs during the probe of the user stack, then
+ // always handle the exception and return the exception code as the
+ // status value.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call user mode.
+ //
+
+ TrapFrame->IntSp = (ULONGLONG)(LONG)CalloutFrame;
+ Status = KiCallUserMode(OutputBuffer, OutputLength);
+ TrapFrame->IntSp = OldStack;
+
+ //
+ // When returning from user mode, any drawing done to the GDI TEB
+ // batch must be flushed.
+ //
+
+ if (((PTEB)KeGetCurrentThread()->Teb)->GdiBatchCount > 0) {
+
+ //
+ // call GDI batch flush routine
+ //
+
+ KeGdiFlushUserBatch();
+ }
+
+ return Status;
+}
+
+
+NTSTATUS
+NtW32Call (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ OUT PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls a W32 function.
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied to
+ the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ OutputBuffer - Supplies a pointer to a variable that receives the
+ output buffer address.
+
+ OutputLength - Supplies a pointer to a variable that receives the
+ output buffer length.
+
+Return Value:
+
+ The status returned by the callback function, or an error status if
+ the callout cannot be executed.
+
+--*/
+
+{
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+ NTSTATUS Status;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // If the current thread is not a GUI thread, then fail the service
+ // since the thread does not have a large stack.
+ //
+
+ if (KeGetCurrentThread()->Win32Thread == (PVOID)&KeServiceDescriptorTable[0]) {
+ return STATUS_NOT_IMPLEMENTED;
+ }
+
+ //
+ // Probe the output buffer address and length for writeability.
+ //
+
+ try {
+ ProbeForWriteUlong((PULONG)OutputBuffer);
+ ProbeForWriteUlong(OutputLength);
+
+ //
+ // If an exception occurs during the probe of the output buffer or
+ // length, then always handle the exception and return the exception
+ // code as the status value.
+ //
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call out to user mode specifying the input buffer and API number.
+ //
+
+ Status = KeUserModeCallback(ApiNumber,
+ InputBuffer,
+ InputLength,
+ &ValueBuffer,
+ &ValueLength);
+
+ //
+ // If the callout is successful, then return the output buffer address and
+ // length to the caller.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *OutputBuffer = ValueBuffer;
+ *OutputLength = ValueLength;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ }
+ }
+
+ return Status;
+}
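KeUserModeCallback places the callout frame at the new user stack pointer and copies the input buffer immediately after it, which is why the buffer address is simply CalloutFrame + 1. That header-plus-trailing-data layout, together with the 16-byte size round-up, is sketched below with a made-up DEMO_FRAME structure; the fields are illustrative and are not the real UCALLOUT_FRAME definition.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct _DEMO_FRAME {      /* hypothetical stand-in for UCALLOUT_FRAME */
        void    *Buffer;
        uint32_t Length;
        uint32_t ApiNumber;
    } DEMO_FRAME;

    int main(void)
    {
        const char input[] = "callback input";
        uint32_t input_length = (uint32_t)sizeof(input);

        /* Round the combined size up to 16 bytes, as the callout code does. */
        size_t total = (sizeof(DEMO_FRAME) + input_length + 15) & ~(size_t)15;

        DEMO_FRAME *frame = malloc(total);
        if (frame == NULL) {
            return 1;
        }

        /* The input data lives immediately after the frame header. */
        frame->Buffer = (void *)(frame + 1);
        frame->Length = input_length;
        frame->ApiNumber = 0;
        memcpy(frame + 1, input, input_length);

        printf("%s (%u bytes, %zu allocated)\n",
               (char *)frame->Buffer, (unsigned)frame->Length, total);
        free(frame);
        return 0;
    }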
diff --git a/private/ntos/ke/alpha/callout.s b/private/ntos/ke/alpha/callout.s
new file mode 100644
index 000000000..82069b07d
--- /dev/null
+++ b/private/ntos/ke/alpha/callout.s
@@ -0,0 +1,404 @@
+// TITLE("Call Out to User Mode")
+//++
+//
+// Copyright (c) 1994 Microsoft Corporation
+//
+// Module Name:
+//
+// callout.s
+//
+// Abstract:
+//
+// This module implements the code necessary to call out from kernel
+// mode to user mode.
+//
+// Author:
+//
+// John Vert (jvert) 2-Nov-1994
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeUserCallbackDispatcher
+
+
+ SBTTL("Call User Mode Function")
+//++
+//
+// NTSTATUS
+// KiCallUserMode (
+// IN PVOID *OutputBuffer,
+// IN PULONG OutputLength
+// )
+//
+// Routine Description:
+//
+// This function calls a user mode function.
+//
+// N.B. This function calls out to user mode and the NtCallbackReturn
+// function returns back to the caller of this function. Therefore,
+// the stack layout must be consistent between the two routines.
+//
+// Arguments:
+//
+// OutputBuffer (a0) - Supplies a pointer to the variable that receives
+// the address of the output buffer.
+//
+// OutputLength (a1) - Supplies a pointer to a variable that receives
+// the length of the output buffer.
+//
+// Return Value:
+//
+// The final status of the call out function is returned as the status
+// of the function.
+//
+// N.B. This function does not return to its caller. A return to the
+// caller is executed when a NtCallbackReturn system service is
+// executed.
+//
+// N.B. This function does return to its caller if a kernel stack
+// expansion is required and the attempted expansion fails.
+//
+//--
+// .struct 0
+//CuF2: .space 8 // saved floating registers f2 - f9
+//CuF3: .space 8
+//CuF4: .space 8
+//CuF5: .space 8
+//CuF6: .space 8
+//CuF7: .space 8
+//CuF8: .space 8
+//CuF9: .space 8
+//CuS0: .space 8 // saved integer registers s0 - s5
+//CuS1: .space 8
+//CuS2: .space 8
+//CuS3: .space 8
+//CuS4: .space 8
+//CuS5: .space 8
+//CuFP: .space 8
+//CuCbStk:.space 8 // saved callback stack address
+//CuInStk:.space 8 // saved initial stack address
+//CuTrFr: .space 8 // saved callback trap frame address
+//CuTrFir:.space 8
+//CuRa: .space 8 // saved return address
+//CuA0: .space 8 // saved argument registers a0-a2
+//CuA1: .space 8
+//CuFrameLength:
+
+ NESTED_ENTRY(KiCallUserMode, CuFrameLength, zero)
+
+ lda sp, -CuFrameLength(sp) // allocate stack frame
+ stq ra, CuRa(sp) // save return address
+
+//
+// Save nonvolatile integer registers.
+//
+ stq s0, CuS0(sp) // save integer registers s0 - s5
+ stq s1, CuS1(sp) //
+ stq s2, CuS2(sp) //
+ stq s3, CuS3(sp) //
+ stq s4, CuS4(sp) //
+ stq s5, CuS5(sp) //
+ stq fp, CuFP(sp) // save FP
+
+//
+// Save nonvolatile floating registers.
+//
+ stt f2, CuF2(sp) // save floating registers f2 - f9
+ stt f3, CuF3(sp) //
+ stt f4, CuF4(sp) //
+ stt f5, CuF5(sp) //
+ stt f6, CuF6(sp) //
+ stt f7, CuF7(sp) //
+ stt f8, CuF8(sp) //
+ stt f9, CuF9(sp) //
+
+ PROLOGUE_END
+
+//
+// Save argument registers
+//
+ stq a0, CuA0(sp) // save output buffer address
+ stq a1, CuA1(sp) // save output length address
+
+//
+// Check if sufficient room is available on the kernel stack for another
+// system call.
+//
+
+ GET_CURRENT_THREAD
+ bis v0, zero, t0 // get current thread in t0
+ ldl t1, ThInitialStack(t0) // get initial stack address
+ ldl t2, ThStackLimit(t0) // get current stack limit
+ subq sp, KERNEL_LARGE_STACK_COMMIT, t3 // compute bottom address
+ cmpult t2, t3, t4 // check if limit exceeded
+ bne t4, 10f // if ne, limit not exceeded
+ bis sp, zero, a0 // set current kernel stack address
+ bsr ra, MmGrowKernelStack // attempt to grow the kernel stack
+ bne v0, 20f // if ne, attempt to grow failed
+ GET_CURRENT_THREAD
+ bis v0, zero, t0 // get current thread in t0
+ ldl t1, ThInitialStack(t0) // get initial stack address
+
+10:
+ ldl fp, ThTrapFrame(t0) // get trap frame address
+ ldl t2, ThCallbackStack(t0) // get callback stack address
+ stl t1, CuInStk(sp) // save initial stack address
+ stl fp, CuTrFr(sp) // save trap frame address
+ stl t2, CuCbStk(sp) // save callback stack address
+ stl sp, ThCallbackStack(t0) // set callback stack address
+
+//
+// Restore state and callback to user mode.
+//
+
+ stl sp, ThInitialStack(t0) // reset initial stack address
+
+ ldq t3, TrFir(fp) // get old PC
+ stl t3, CuTrFir(sp) // save old PC
+ ldl t4, KeUserCallbackDispatcher // get continuation address
+ stq t4, TrFir(fp) // set continuation address
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+ ldq_u t1, ThApcState+AsUserApcPending(t0) // get user APC pending
+ extbl t1, (ThApcState+AsUserApcPending) % 8, t1
+ ZeroByte( ThAlerted(t0) ) // clear kernel mode alerted
+ cmovne t1, APC_INTERRUPT, a1 // if pending set APC interrupt
+
+//
+// Set initial kernel stack for this thread
+//
+ bis sp, zero, a0
+ SET_INITIAL_KERNEL_STACK
+
+ ldl a0, TrPsr(fp) // get previous processor status
+//
+// a0 = previous psr
+// a1 = sfw interrupt requests
+
+ RETURN_FROM_SYSTEM_CALL // return to user mode
+
+ ret zero, (ra)
+
+//
+// An attempt to grow the kernel stack failed.
+//
+
+20:
+ ldq ra, CuRa(sp) // restore return address
+ lda sp, CuFrameLength(sp) // deallocate stack frame
+ ret zero, (ra)
+ .end KiCallUserMode
+
+
+ SBTTL("Switch Kernel Stack")
+//++
+//
+// PVOID
+// KeSwitchKernelStack (
+// IN PVOID StackBase,
+// IN PVOID StackLimit
+// )
+//
+// Routine Description:
+//
+// This function switches to the specified large kernel stack.
+//
+// N.B. This function can ONLY be called when there are no variables
+// in the stack that refer to other variables in the stack, i.e.,
+// there are no pointers into the stack.
+//
+// Arguments:
+//
+// StackBase (a0) - Supplies a pointer to the base of the new kernel
+// stack.
+//
+// StackLimit (a1) - supplies a pointer to the limit of the new kernel
+// stack.
+//
+// Return Value:
+//
+// The old kernel stack is returned as the function value.
+//
+//--
+ .struct 0
+SsRa: .space 8 // saved return address
+SsSp: .space 8 // saved new stack pointer
+SsA0: .space 8 // saved argument registers a0-a1
+SsA1: .space 8
+SsFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KeSwitchKernelStack, SsFrameLength, zero)
+
+ lda sp, -SsFrameLength(sp) // allocate stack frame
+ stq ra, SsRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the address of the new stack and copy the old stack to the new
+// stack.
+//
+ GET_CURRENT_THREAD // get current thread in v0
+ stq a0, SsA0(sp) // save new kernel stack base address
+ stq a1, SsA1(sp) // save new kernel stack limit address
+ ldl a2, ThStackBase(v0) // get current stack base address
+ ldl a3, ThTrapFrame(v0) // get current trap frame address
+ addl a3, a0, a3 // relocate current trap frame address
+ subl a3, a2, a3 //
+ stl a3, ThTrapFrame(v0) //
+ bis sp, zero, a1 // set source address of copy
+ subl a2, sp, a2 // compute length of copy
+ subl a0, a2, a0 // set destination address of copy
+ stq a0, SsSp(sp) // save new stack pointer address
+ bsr ra, RtlMoveMemory // copy old stack to new stack
+
+//
+// Switch to new kernel stack and return the address of the old kernel stack
+//
+ GET_CURRENT_THREAD // get current thread in v0
+
+ DISABLE_INTERRUPTS
+
+ ldl t0, ThStackBase(v0) // get old kernel stack base address
+ ldq a0, SsA0(sp) // get new kernel stack base address
+ ldq a1, SsA1(sp) // get new kernel stack limit address
+ stl a0, ThInitialStack(v0) // set new initial stack address
+ stl a0, ThStackBase(v0) // set new stack base address
+ stl a1, ThStackLimit(v0) // set new stack limit address
+ ldil t1, TRUE // set large kernel stack TRUE
+ StoreByte(t1, ThLargeStack(v0))
+
+ ldq sp, SsSp(sp) // set initial stack address
+ SET_INITIAL_KERNEL_STACK // set PAL's version of initial kernel stack
+
+ ENABLE_INTERRUPTS
+
+ ldq ra, SsRa(sp) // restore return address
+ lda sp, SsFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+ .end KeSwitchKernelStack
+
+ SBTTL("Return from User Mode Callback")
+//++
+//
+// NTSTATUS
+// NtCallbackReturn (
+// IN PVOID OutputBuffer OPTIONAL,
+// IN ULONG OutputLength,
+// IN NTSTATUS Status
+// )
+//
+// Routine Description:
+//
+// This function returns from a user mode callout to the kernel
+// mode caller of the user mode callback function.
+//
+// N.B. This function returns to the function that called out to user
+// mode and the KiCallUserMode function calls out to user mode.
+// Therefore, the stack layout must be consistent between the
+// two routines.
+//
+// Arguments:
+//
+// OutputBuffer - Supplies an optional pointer to an output buffer.
+//
+// OutputLength - Supplies the length of the output buffer.
+//
+// Status - Supplies the status value returned to the caller of the
+// callback function.
+//
+// Return Value:
+//
+// If the callback return cannot be executed, then an error status is
+// returned. Otherwise, the specified callback status is returned to
+// the caller of the callback function.
+//
+// N.B. This function returns to the function that called out to user
+// mode if a callout is currently active.
+//
+//--
+
+ LEAF_ENTRY(NtCallbackReturn)
+
+ GET_CURRENT_THREAD // get current thread address
+ ldl t1, ThCallbackStack(v0) // get callback stack address
+ beq t1, 10f // if eq, no callback stack present
+
+//
+// Restore nonvolatile integer registers
+//
+ ldq s0, CuS0(t1) // restore integer registers s0 - s5
+ ldq s1, CuS1(t1) //
+ ldq s2, CuS2(t1) //
+ ldq s3, CuS3(t1) //
+ ldq s4, CuS4(t1) //
+ ldq s5, CuS5(t1) //
+ ldq fp, CuFP(t1) // restore FP
+
+//
+// Restore nonvolatile floating registers
+//
+ ldt f2, CuF2(t1) // restore floating registers f2 - f9
+ ldt f3, CuF3(t1) //
+ ldt f4, CuF4(t1) //
+ ldt f5, CuF5(t1) //
+ ldt f6, CuF6(t1) //
+ ldt f7, CuF7(t1) //
+ ldt f8, CuF8(t1) //
+ ldt f9, CuF9(t1) //
+
+//
+// Restore the trap frame and callback stack addresses, and store the output
+// buffer address and length.
+//
+ ldl t2, CuTrFr(t1) // get previous trap frame address
+ ldl t3, CuCbStk(t1) // get previous callback stack address
+ ldl t4, CuA0(t1) // get address to store output address
+ ldl t5, CuA1(t1) // get address to store output length
+ ldl t6, CuTrFir(t1) // get old trap frame PC
+ stl t2, ThTrapFrame(v0) // restore trap frame address
+ stl t3, ThCallbackStack(v0) // restore callback stack address
+ stl a0, 0(t4) // store output buffer address
+ stl a1, 0(t5) // store output buffer length
+ stq t6, TrFir(t2) // restore old trap frame PC
+
+//
+// **** this is the place where the current stack would be trimmed back.
+//
+
+//
+// Restore initial stack pointer, trim the stack back to the callback frame,
+// deallocate callback stack frame, and return to callback caller.
+//
+ ldl a0, CuInStk(t1) // get previous initial stack
+ stl a0, ThInitialStack(v0)
+ SET_INITIAL_KERNEL_STACK
+ bis t1, zero, sp // trim stack back to callback frame
+ bis a2, zero, v0 // set callback service status
+
+ ldq ra, CuRa(sp) // restore return address
+ lda sp, CuFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+//
+// No callback is currently active.
+//
+10: ldil v0, STATUS_NO_CALLBACK_ACTIVE // set service status
+ ret zero, (ra) // return
+
+ .end NtCallbackReturn
diff --git a/private/ntos/ke/alpha/clock.s b/private/ntos/ke/alpha/clock.s
new file mode 100644
index 000000000..b75c26ec6
--- /dev/null
+++ b/private/ntos/ke/alpha/clock.s
@@ -0,0 +1,637 @@
+// TITLE("Interval and Profile Clock Interrupts")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// clock.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process the
+// interval and profile clock interrupts.
+//
+// Author:
+//
+// David N. Cutler (davec) 27-Mar-1990
+// Joe Notarangelo 06-Apr-1992
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+
+//++
+//
+// VOID
+// KeUpdateSystemTime (
+// IN PKTRAP_FRAME TrapFrame,
+// IN ULONG TimeIncrement
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the system time and check to
+// determine if a timer has expired.
+//
+// N.B. This routine is executed on a single processor in a multiprocessor
+// system. The remainder of the processors only execute the quantum end
+// and runtime update code.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// TimeIncrement (a1) - Supplies the time increment in 100ns units.
+//
+// Return Value:
+//
+// None.
+//
+//--
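+//
+// Expressed as a C sketch, the per-tick accounting below is roughly the
+// following (locking, the tick count high part, and the DPC insertion are
+// omitted; the names follow the kernel symbols used in the body):
+//
+//      SharedUserData->InterruptTime += TimeIncrement;
+//      KiTickOffset -= TimeIncrement;
+//      if (KiTickOffset <= 0) {
+//          SharedUserData->SystemTime += KeTimeAdjustment;
+//          KeTickCount += 1;
+//          KiTickOffset += KeMaximumIncrement;
+//
+//          // Check the timer table entries at the current hand and the next
+//          // hand; if a timer has expired, queue KiTimerExpireDpc and request
+//          // a dispatch interrupt.
+//      }
+//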
+
+ LEAF_ENTRY(KeUpdateSystemTime)
+
+//
+// Update the interrupt time.
+//
+
+ zap a1, 0xf0, a1 // zero extend time increment
+ lda a2, KiTickOffset // get tick offset value
+ ldl a3, 0(a2) //
+ ldil t8, SharedUserData // get shared user data address
+ ldq t9, UsInterruptTime(t8) //
+ addq a1, t9, t9 // add time increment value
+ stq t9, UsInterruptTime(t8) // store interrupt time value
+ subq a3, a1, a3 // subtract time increment
+ lda v0, KeTickCount // get tick count value
+ ldq t6, 0(v0) //
+ lda t0, KiTimerTableListHead // get base address of timer table
+ stl a3, 0(a2) // store tick offset value
+ bgt a3, 10f // if gt, tick not completed
+ ldl a4, KeMaximumIncrement // get maximum increment value
+
+//
+// Update system time.
+//
+
+ lda t1, KeTimeAdjustment // get time adjustment value
+ ldl t1, 0(t1) //
+ ldq t3, UsSystemTime(t8) // get system time value
+ addq t1, t3, t3 // add time increment value
+ stq t3, UsSystemTime(t8) // store system time value
+
+//
+// Update the tick count.
+//
+
+ addq t6, 1, t1 // increment tick count value
+ stq t1, 0(v0) // store tick count value
+ stl t1, UsTickCountLow(t8)
+
+//
+// Compute next tick offset value.
+//
+
+ addq a3, a4, a4 // add maximum increment to residue
+ stl a4, 0(a2) // store tick offset value
+
+//
+// Check to determine if a timer has expired at the current hand value.
+//
+
+ and t6, TIMER_TABLE_SIZE - 1, v0 // reduce to timer table index
+ s8addl v0, t0, t2 // compute timer table listhead address
+ ldl t3, LsFlink(t2) // get address of first timer in list
+ cmpeq t2, t3, t4 // compare first with listhead address
+ bne t4, 5f // if ne, no timer active
+
+//
+// Get the expiration time from the timer object.
+//
+// N.B. The offset to the timer list entry must be subtracted out of the
+// displacement calculation.
+//
+
+ ldq t4,TiDueTime - TiTimerListEntry(t3) // get due time
+ ldq t4,TiDueTime - TiTimerListEntry(t3) // get due time
+ cmpule t4, t9, t5 // is expiration time <= interrupt time
+ bne t5, 20f // if ne, timer has expired
+
+//
+// Check to determine if a timer has expired at the next hand value.
+//
+
+5: addq t6, 1, t6 // advance hand value to next entry
+10: and t6, TIMER_TABLE_SIZE - 1, v0 // reduce to timer table index
+ s8addl v0, t0, t2 // compute timer table listhead address
+ ldl t3, LsFlink(t2) // get address of first timer in list
+ cmpeq t2, t3, t4 // compare first with listhead address
+ bne t4, 40f // if ne, no timer active
+
+//
+// Get the expiration time from the timer object.
+//
+
+ ldq t4, TiDueTime - TiTimerListEntry(t3) // get due time
+ cmpule t4, t9, t5 // is expiration time <= interrupt time
+ beq t5, 40f // if eq, timer has not expired
+
+//
+// Put timer expiration DPC in the system DPC list and initiate a dispatch
+// interrupt on the current processor.
+//
+
+20: lda t2, KiTimerExpireDpc // get expiration DPC address
+
+ DISABLE_INTERRUPTS // turn off interrupts
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = base address of PRCB
+
+ lda t3, PbDpcListHead(v0) // get DPC listhead address
+ lda t1, PbDpcLock(v0) // get address of spin lock
+
+#if !defined(NT_UP)
+
+30: ldl_l t4, 0(t1) // get current lock value
+ bis t1, zero, t5 // set ownership value
+ bne t4, 50f // if ne, spin lock owned
+ stl_c t5, 0(t1) // set spin lock owned
+ beq t5, 50f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+#endif
+
+ ldl t4, DpLock(t2) // get DPC inserted state
+ bne t4, 35f // if ne, DPC entry already inserted
+ ldl t4, LsBlink(t3) // get address of last entry in list
+ stl t1, DpLock(t2) // set DPC inserted state
+ stl t6, DpSystemArgument1(t2) // set timer table hand value
+ addl t2, DpDpcListEntry, t2 // compute address of DPC list entry
+ stl t2, LsBlink(t3) // set address of new last entry
+ stl t2, LsFlink(t4) // set next link in old last entry
+ stl t3, LsFlink(t2) // set address of next entry
+ stl t4, LsBlink(t2) // set address of previous entry
+ ldl t5, PbDpcQueueDepth(v0) // get current DPC queue depth
+ addl t5, 1, t7 // increment DPC queue depth
+ stl t7, PbDpcQueueDepth(v0) // set updated DPC queue depth
+
+//
+// N.B. Since an interrupt must be active, simply set the software interrupt
+// request bit in the PRCB to request a dispatch interrupt directly from
+// the interrupt exception handler.
+//
+
+ ldil t11, DISPATCH_INTERRUPT // t11 = level of interrupt to request
+ stl t11, PbSoftwareInterrupts(v0) // request a DISPATCH sfw interrupt
+35:
+
+#if !defined(NT_UP)
+
+ mb // insure all previous writes go
+ // before the lock is released
+ stl zero, 0(t1) // set spin lock not owned
+
+#endif
+
+ ENABLE_INTERRUPTS
+
+40:
+
+ ble a3, KeUpdateRunTime // if le, full tick
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+50: ldl t4, 0(t1) // get lock value
+ beq t4, 30b // retry spinlock if now available
+ br zero, 50b // retry in cache until lock ready
+
+#endif
+
+ .end KeUpdateSystemTime
+
+//++
+//
+// VOID
+// KeUpdateRunTime (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the runtime of the current
+// thread, update the runtime of the current thread's process, and decrement
+// the current thread's quantum.
+//
+// N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeUpdateRunTime)
+ GET_CURRENT_THREAD // v0 = current thread address
+ bis v0, zero, t0 // t0 = current thread address
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = processor block address
+ bis v0, zero, t5 // t5 = processor block address
+
+//
+// Update the current DPC rate.
+//
+// A running average of the DPC rate is used. The number of DPCs requested
+// in the previous tick is added to the current DPC rate and divided by two.
+// This becomes the new DPC rate.
+//
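+// As an illustrative C sketch (the field names follow the PRCB symbols used
+// below):
+//
+//      ULONG DpcsLastTick = Prcb->DpcCount - Prcb->LastDpcCount;
+//
+//      Prcb->DpcRequestRate = (DpcsLastTick + Prcb->DpcRequestRate) / 2;
+//      Prcb->LastDpcCount = Prcb->DpcCount;
+//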
+ ldl t1, PbDpcCount(t5) // get current DPC count
+ ldl t6, PbLastDpcCount(t5) // get last DPC count
+ subl t1, t6, t7 // compute difference
+ ldl t2, PbDpcRequestRate(t5) // get old DPC request rate
+ addl t7, t2, t3 // compute average
+ srl t3, 1, t4 //
+ stl t4, PbDpcRequestRate(t5) // store new DPC request rate
+ stl t1, PbLastDpcCount(t5) // update last DPC count
+
+ ldl t2, ThApcState + AsProcess(t0) // get address of current proc
+ ldl t3, TrPsr(a0) // get saved processor status
+ and t3, PSR_MODE_MASK, t6 // isolate previous mode
+ bne t6, 30f // if ne, previous mode was user
+
+//
+// If a DPC is active, then increment the time spent executing DPC routines.
+// Otherwise, if the old IRQL is greater than DPC level, then increment the
+// time spent executing interrupt service routines. Otherwise, increment
+// the time spent in kernel mode for the current thread.
+//
+
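+//
+// The accounting decision implemented below, as a C sketch (the previous mode
+// and IRQL come from the saved PSR in the trap frame; the process and
+// processor counters, which are also updated, are omitted here):
+//
+//      if (PreviousMode == UserMode) {
+//          Thread->UserTime += 1;
+//      } else if (PreviousIrql > DISPATCH_LEVEL) {
+//          Prcb->InterruptTime += 1;
+//      } else if ((PreviousIrql == DISPATCH_LEVEL) && Prcb->DpcRoutineActive) {
+//          Prcb->DpcTime += 1;
+//      } else {
+//          Thread->KernelTime += 1;
+//      }
+//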
+ srl t3, PSR_IRQL, t6 // t6 = previous Irql
+ ldl v0, PbDpcRoutineActive(t5) // v0 = DPC active flag
+ subl t6, DISPATCH_LEVEL, t6 // previous Irql - DPC level
+ blt t6, 20f // if lt then charge against thread
+
+ lda t8, PbInterruptTime(t5) // compute interrupt time address
+ bgt t6, 10f // if gt, increment interrupt time
+ lda t8, PbDpcTime(t5) // compute DPC time address
+ beq v0, 20f // if eq, not executing DPC
+
+//
+// Update the time spent executing DPC or interrupt level
+//
+// t8 = address of time to increment
+//
+
+10:
+ ldl t11, 0(t8) // get processor time
+ addl t11, 1, t11 // increment processor time
+ stl t11, 0(t8) // update processor time
+ lda t6, PbKernelTime(t5) // compute address of kernel time
+ br zero, 45f // update kernel time
+
+//
+// Update the time spent in kernel mode for the current thread and the current
+// thread's process.
+//
+
+20:
+ ldl t11, ThKernelTime(t0) // get kernel time
+ addl t11, 1, t11 // increment kernel time
+ stl t11, ThKernelTime(t0) // store updated kernel time
+ lda t2, PrKernelTime(t2) // compute process kernel time address
+ lda t6, PbKernelTime(t5) // compute processor kernel time addr
+ br zero, 40f // join common code
+
+//
+// Update the time spent in user mode for the current thread and the current
+// thread's process.
+//
+
+30:
+ ldl t11, ThUserTime(t0) // get user time
+ addl t11, 1, t11 // increment user time
+ stl t11, ThUserTime(t0) // store updated user time
+ lda t2, PrUserTime(t2) // compute process user time address
+ lda t6, PbUserTime(t5) // compute processor user time address
+
+//
+// Update the time spent in kernel/user mode for the current thread's process
+//
+
+40:
+#if !defined(NT_UP)
+
+ ldl_l t11, 0(t2) // get process time
+ addl t11, 1, t11 // increment process time
+ stl_c t11, 0(t2) // store updated process time
+ beq t11, 41f // if eq, store conditional failed
+ mb // synchronize subsequent reads
+
+#else
+ ldl t11,0(t2) // get process time
+ addl t11, 1, t11 // increment process time
+ stl t11,0(t2) // store updated process time
+#endif
+
+//
+// A DPC is not active. If there are DPCs in the DPC queue and a DPC
+// interrupt has not been requested, request a dispatch interrupt in
+// order to initiate the batch processing of the pending DPCs in the
+// DPC queue.
+//
+// N.B. Since an interrupt must be active, the software interrupt request
+// bit in the PRCB can be set to request a dispatch interrupt directly from
+// the interrupt exception handler.
+//
+// Pushing DPCs from the clock interrupt indicates that the current maximum
+// DPC queue depth is too high. If the DPC rate does not exceed the ideal
+// rate, decrement the maximum DPC queue depth and
+// reset the threshold to its original value.
+//
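+// The adjustment below, as a C sketch (a simplification; field names mirror
+// the PRCB symbols, and RequestSoftwareInterrupt stands for the direct store
+// to PbSoftwareInterrupts performed below):
+//
+//      if ((Prcb->DpcQueueDepth != 0) && (Prcb->DpcInterruptRequested == 0)) {
+//          RequestSoftwareInterrupt(DISPATCH_LEVEL);       // push the queued DPCs
+//          Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+//          if ((Prcb->MaximumDpcQueueDepth > 1) &&
+//              (Prcb->DpcRequestRate < KiIdealDpcRate)) {
+//              Prcb->MaximumDpcQueueDepth -= 1;
+//          }
+//      }
+//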
+ ldl t1, PbDpcQueueDepth(t5) // get current queue depth
+ beq t1, 45f // skip if queue is empty
+ ldl t2, PbDpcInterruptRequested(t5) // get dpc interrupt request flag
+ bne t2, 45f // skip if flag is set
+ ldil a0, DISPATCH_INTERRUPT // a0 = request software interrupt
+ stl a0, PbSoftwareInterrupts(t5)
+ ldl t3, PbMaximumDpcQueueDepth(t5) // get current DPC queue depth
+ subl t3, 1, t4 // decrement
+ ldl t2, PbDpcRequestRate(t5) // get old DPC request rate
+ ldl t1, KiIdealDpcRate // get ideal DPC rate
+ cmpult t2, t1, t2 // compare current with ideal
+ ldl t1, KiAdjustDpcThreshold // get system threshold default
+ stl t1, PbAdjustDpcThreshold(t5) // reset processor threshold default
+ beq t4, 50f // if queue depth==0, skip decrement
+ beq t2, 50f // if rate not lt ideal rate, skip decrement
+ stl t4, PbMaximumDpcQueueDepth(t5) // set current DPC queue depth
+ br zero, 50f // rejoin common code
+
+45:
+//
+// There is no need to push a DPC from the clock interrupt. This indicates that
+// the current maximum DPC queue depth may be too low. Decrement the threshold
+// indicator, and if the new threshold is zero, and the current maximum queue
+// depth is less than the maximum, increment the maximum DPC queue
+// depth.
+//
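+// As an illustrative C sketch (field names mirror the PRCB symbols used
+// below):
+//
+//      if (--Prcb->AdjustDpcThreshold == 0) {
+//          Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+//          if (Prcb->MaximumDpcQueueDepth < KiMaximumDpcQueueDepth) {
+//              Prcb->MaximumDpcQueueDepth += 1;
+//          }
+//      }
+//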
+ ldl t1, PbAdjustDpcThreshold(t5) // get current threshold
+ subl t1, 1, t2 // decrement threshold
+ stl t2, PbAdjustDpcThreshold(t5) // update current threshold
+ bne t2, 50f // if threshold nez, skip
+
+ ldl t1, KiAdjustDpcThreshold // get system threshold default
+ stl t1, PbAdjustDpcThreshold(t5) // reset processor threshold default
+
+ ldl t3, PbMaximumDpcQueueDepth(t5) // get current DPC queue depth
+ ldl t1, KiMaximumDpcQueueDepth // get maximum DPC queue depth
+ cmpult t3, t1, t2 // compare
+ beq t2, 50f // if current not lt maximum, skip
+ addl t3, 1, t4 // increment queue depth
+ stl t4, PbMaximumDpcQueueDepth(t5) // update current DPC queue depth
+
+50:
+//
+// Update the time spent in kernel/user mode for the current processor.
+//
+// t6 = pointer to processor time to increment
+//
+
+ ldl t11, 0(t6) // get processor time
+ addl t11, 1, t11 // increment processor time
+ stl t11, 0(t6) // store updated processor time
+
+//
+// If the current thread is not the idle thread, decrement its
+// quantum and check to determine if a quantum end has occurred.
+//
+ ldl t6, PbIdleThread(t5)
+ cmpeq t6, t0, t7
+ bne t7, 70f // if nez, current thread is idle thread
+
+ LoadByte(t7, ThQuantum(t0)) // get current thread quantum
+ sll t7, 56, t9 // sign extend quantum value
+ sra t9, 56, t7 //
+ subl t7, CLOCK_QUANTUM_DECREMENT, t7 // decrement quantum
+ StoreByte( t7, ThQuantum(t0) ) // store thread quantum
+ bgt t7, 60f // if gtz, quantum remaining
+
+//
+// Put processor specific quantum end DPC in the system DPC list and initiate
+// a dispatch interrupt on the current processor.
+//
+// N.B. Since an interrupt must be active, simply set the software interrupt
+// request bit in the PRCB to request a dispatch interrupt directly from
+// the interrupt exception handler.
+//
+
+ stl sp, PbQuantumEnd(t5) // set quantum end indicator
+
+ ldil a0, DISPATCH_INTERRUPT // a0 = request sfw interrupt
+ stl a0, PbSoftwareInterrupts(t5) // request a sfw interrupt
+
+60:
+ ret zero, (ra) // return
+
+70:
+ bis zero, zero, t7
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+41: br zero, 40b // retry store conditional
+
+#endif
+
+ .end KeUpdateRunTime
+
+
+//++
+//
+// VOID
+// KeProfileInterruptWithSource (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE ProfileSource
+// )
+//
+// VOID
+// KeProfileInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// profile timer. Its function is to update the profile information for
+// the currently active profile objects.
+//
+// N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// ProfileSource (a1) - Supplies the source of the profile interrupt
+// KeProfileInterrupt is an alternate entry for backwards
+// compatibility that sets the source to zero (ProfileTime)
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+PfS0: .space 8 // saved integer register s0
+PfRa: .space 8 // return address
+ .space 2 * 8 // profile frame length
+ProfileFrameLength:
+
+ NESTED_ENTRY(KeProfileInterrupt, ProfileFrameLength, zero)
+
+ bis zero, zero, a1 // set profile source to ProfileTime
+
+ ALTERNATE_ENTRY(KeProfileInterruptWithSource)
+
+ lda sp, -ProfileFrameLength(sp) // allocate stack frame
+ stq ra, PfRa(sp) // save return address
+
+#if !defined(NT_UP)
+
+ stq s0, PfS0(sp) // save integer register s0
+
+#endif
+
+ PROLOGUE_END
+
+
+#if !defined(NT_UP)
+
+ lda s0, KiProfileLock // get address of profile lock
+10: ldl_l t0, 0(s0) // get current lock value
+ bis s0, zero, t1 // set ownership value
+ bne t0, 15f // if ne, spin lock owned
+ stl_c t1, 0(s0) // set spin lock owned
+ beq t1, 15f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+
+#endif
+
+ GET_CURRENT_THREAD // v0 = current thread address
+ ldl a2, ThApcState + AsProcess(v0) // get address of current process
+ addl a2, PrProfileListHead, a2 // compute profile listhead addr
+ bsr ra, KiProcessProfileList // process profile list
+
+ lda a2, KiProfileListHead // get profile listhead address
+ bsr ra, KiProcessProfileList // process profile list
+
+#if !defined(NT_UP)
+
+ mb // insure all previous writes go
+ // before the lock is released
+ stl zero, 0(s0) // set spin lock not owned
+ ldq s0, PfS0(sp) // restore s0
+
+#endif
+
+ ldq ra, PfRa(sp) // restore return address
+ lda sp, ProfileFrameLength(sp) // deallocate stack frame
+
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+15: ldl t0, 0(s0) // get current lock value
+ beq t0, 10b // lock available. retry spinlock
+ br zero, 15b // spin in cache until lock ready
+
+#endif
+
+
+ .end KeProfileInterrupt
+
+
+//++
+//
+// VOID
+// KiProcessProfileList (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE Source,
+// IN PLIST_ENTRY ListHead
+// )
+//
+// Routine Description:
+//
+// This routine is called to process a profile list.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// Source (a1) - Supplies profile source to match
+//
+// ListHead (a2) - Supplies a pointer to a profile list.
+//
+// Return Value:
+//
+// None.
+//
+//--
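+//
+// The per-entry bucket update performed below can be sketched in C. This is
+// illustrative only (the list walk and byte-offset arithmetic are simplified;
+// Pc is the interrupt PC from the trap frame and SetMember is the current
+// processor's set member from the PCR):
+//
+//      if ((Profile->Source == Source) &&
+//          ((Profile->Affinity & SetMember) != 0) &&
+//          (Pc >= (ULONG_PTR)Profile->RangeBase) &&
+//          (Pc < (ULONG_PTR)Profile->RangeLimit)) {
+//
+//          ULONG Offset = (ULONG)(Pc - (ULONG_PTR)Profile->RangeBase);
+//          ULONG Index  = (Offset >> Profile->BucketShift) & ~0x3;
+//
+//          *(PULONG)((PUCHAR)Profile->Buffer + Index) += 1;
+//      }
+//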
+
+ LEAF_ENTRY(KiProcessProfileList)
+
+ ldl a3, LsFlink(a2) // get address of next entry
+ cmpeq a2, a3, t0 // end of list ?
+ bne t0, 30f // if ne[true], end of list
+ ldl t0, TrFir(a0) // get interrupt PC address
+
+ GET_PROCESSOR_CONTROL_REGION_BASE // get pcr base
+ ldl t6, PcSetMember(v0) // get processor member
+
+//
+// Scan profile list and increment profile buckets as appropriate.
+//
+
+10: ldl t1, PfRangeBase - PfProfileListEntry(a3) // get base of range
+ ldl t2, PfRangeLimit - PfProfileListEntry(a3) // get limit of range
+ ldl t4, PfSource - PfProfileListEntry(a3) // get source
+ ldl t7, PfAffinity - PfProfileListEntry(a3) // get affinity
+ zapnot t4, 3, t4 // source is a SHORT
+ cmpeq t4, a1, t5 // check against profile source
+ and t7, t6, v0 // check against processor
+ beq t5, 20f // if eq, profile source does not match
+ beq v0, 20f // if eq, processor does not match
+ cmpult t0, t1, v0 // check against range base
+ cmpult t0, t2, t3 // check against range limit
+ bne v0, 20f // if ne, less than range base
+ beq t3, 20f // if eq, not less than range limit
+ subl t0, t1, t1 // compute offset in range
+ ldl t2, PfBucketShift - PfProfileListEntry(a3) // get shift count
+ ldl v0, PfBuffer - PfProfileListEntry(a3) // prof buffer addr
+ zap t1, 0xf0, t1 // force bucket offset to 32bit unit
+ srl t1, t2, t3 // compute bucket offset
+ bic t3, 0x3, t3 // clear low order offset bits
+ addl v0, t3, t3 // compute bucket address
+ ldl v0, 0(t3) // increment profile bucket
+ addl v0, 1, v0 //
+ stl v0, 0(t3) //
+20: ldl a3, LsFlink(a3) // get address of next entry
+ cmpeq a2, a3, t1 // end of list?
+ beq t1, 10b // if eq[false], more entries
+
+30: ret zero, (ra) // return
+
+ .end KiProcessProfileList
diff --git a/private/ntos/ke/alpha/ctxsw.s b/private/ntos/ke/alpha/ctxsw.s
new file mode 100644
index 000000000..1dd9e0fa6
--- /dev/null
+++ b/private/ntos/ke/alpha/ctxsw.s
@@ -0,0 +1,1457 @@
+
+
+// TITLE("Context Swap")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// ctxsw.s
+//
+// Abstract:
+//
+// This module implements the ALPHA machine dependent code necessary to
+// field the dispatch interrupt and to perform kernel initiated context
+// switching.
+//
+// Author:
+//
+// David N. Cutler (davec) 1-Apr-1991
+// Joe Notarangelo 05-Jun-1992
+//
+// Environment:
+//
+// Kernel mode only, IRQL DISPATCH_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+// #define _COLLECT_SWITCH_DATA_ 1
+
+
+ SBTTL("Switch To Thread ")
+// NTSTATUS
+// KiSwitchToThread (
+// IN PKTHREAD NextThread,
+// IN ULONG WaitReason,
+// IN ULONG WaitMode,
+// IN PKEVENT WaitObject
+// )
+//
+// Routine Description:
+//
+// This function performs an optimal switch to the specified target thread
+// if possible. No timeout is associated with the wait, thus the issuing
+// thread will wait until the wait event is signaled or an APC is delivered.
+//
+// N.B. This routine is called with the dispatcher database locked.
+//
+// N.B. The wait IRQL is assumed to be set for the current thread and the
+// wait status is assumed to be set for the target thread.
+//
+// N.B. It is assumed that if a queue is associated with the target thread,
+// then the concurrency count has been incremented.
+//
+// N.B. Control is returned from this function with the dispatcher database
+// unlocked.
+//
+// Arguments:
+//
+// NextThread - Supplies a pointer to a dispatcher object of type thread.
+//
+// WaitReason - supplies the reason for the wait operation.
+//
+// WaitMode - Supplies the processor wait mode.
+//
+// WaitObject - Supplies a pointer to a dispatcher object of type event
+// or semaphore.
+//
+// Return Value:
+//
+// The wait completion status. A value of STATUS_SUCCESS is returned if
+// the specified object satisfied the wait. A value of STATUS_USER_APC is
+// returned if the wait was aborted to deliver a user APC to the current
+// thread.
+//--
+
+ NESTED_ENTRY(KiSwitchToThread, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate context frame
+ stq ra, ExIntRa(sp) // save return address
+
+ stq s0, ExIntS0(sp) // save non-volatile integer registers
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+ stq fp, ExIntFp(sp) //
+
+ PROLOGUE_END
+//
+// Save the wait reason, the wait mode, and the wait object address.
+//
+// N.B. Fill fields in the exception frame are used to save the wait
+// reason, the wait mode, and the wait object address.
+//
+ stl a1, ExPsr + 4(sp) // save wait reason
+ stl a2, ExPsr + 8(sp) // save wait mode
+ stl a3, ExPsr +12(sp) // save wait object address
+
+//
+// If the target thread's kernel stack is resident, the target thread's
+// process is in the balance set, the target thread can run on the
+// current processor, and another thread has not already been selected
+// to run on the current processor, then do a direct dispatch to the
+// target thread bypassing all the general wait logic, thread priorities
+// permitting.
+//
+
+ ldl s4, ThApcState + AsProcess(a0) // get target process address
+ ldq_u t0, ThKernelStackResident(a0) // get kernel stack resident
+ extbl t0, ThKernelStackResident % 8, t7 // extract byte field
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // get address of PRCB
+ bis v0, zero, s0 // save PRCB in s0
+ ldq_u t1, PrState(s4) // get target process state
+ extbl t1, PrState % 8, t8 // extract byte field
+ ldl s1, PbCurrentThread(v0) // get current thread address
+ beq t7, LongWay // if eq, kernel stack not resident
+ xor t8, ProcessInMemory, t6 // check if process in memory
+ bis a0, zero, s2 // set target thread address
+ bne t6, LongWay // if ne, process not in memory
+
+#if !defined(NT_UP)
+
+ ldl t0,PbNextThread(s0) // get address of next thread
+ ldl t1,PbSetMember(s0) // get processor set member
+ ldl t2,ThAffinity(s2) // get target thread affinity
+ bne t0, LongWay // if ne, next thread selected
+ and t1,t2,t3 // check for compatible affinity
+ beq t3, LongWay // if eq, affinity not compatible
+
+#endif
+
+//
+// Compute the new thread priority.
+//
+// N.B. This code takes advantage of the fact that ThPriorityDecrement and
+// ThBasePriority are contained in the same dword of the KTHREAD object.
+//
+
+#if ((ThBasePriority / 4) != (ThPriorityDecrement / 4))
+#error "ThBasePriority and ThPriorityDecrement have moved"
+#endif
+
+ ldq_u t12, ThPriority(s1) // get client thread priority
+ extbl t12, ThPriority % 8, t4 // extract byte field
+ ldq_u t11, ThPriority(s2) // get server thread priority
+ extbl t11, ThPriority % 8, t5 // extract byte field
+ cmpult t4, LOW_REALTIME_PRIORITY, v0 // check if realtime client
+ cmpult t5, LOW_REALTIME_PRIORITY, t10 // check if realtime server
+ beq v0, 60f // if eq, realtime client
+ ldq_u t9, ThPriorityDecrement(s2) // get priority decrement value
+ extbl t9, ThPriorityDecrement % 8, t6 // extract priority decrement byte
+ extbl t9, ThBasePriority % 8, t7 // extract base priority byte
+ beq t10, 65f // if eq, realtime server
+ addq t7, 1, t8 // compute boosted priority
+ bne t6, 30f // if ne, server boost active
+
+//
+// Both the client and the server are not realtime and a priority boost
+// is not currently active for the server. Under these conditions an
+// optimal switch to the server can be performed if the base priority
+// of the server is above a minimum threshold or the boosted priority
+// of the server is not less than the client priority.
+//
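+// The boost decision described above, as a C sketch (field names follow the
+// KTHREAD symbols; the byte insert/merge sequences below implement the
+// stores):
+//
+//      ULONG Boosted = Server->BasePriority + 1;
+//
+//      if (Boosted >= Client->Priority) {
+//          Server->Priority = (Boosted < LOW_REALTIME_PRIORITY) ?
+//                                  Boosted : (LOW_REALTIME_PRIORITY - 1);
+//
+//      } else if (Server->BasePriority >= BASE_PRIORITY_THRESHOLD) {
+//          Server->PriorityDecrement = Client->Priority - Server->BasePriority;
+//          Server->DecrementCount = ROUND_TRIP_DECREMENT_COUNT;
+//          Server->Priority = Client->Priority;
+//
+//      } else {
+//          // fall back to the general wait path (LongWay)
+//      }
+//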
+ cmpult t8, t4, v0 // check if high enough boost
+ cmpult t8, LOW_REALTIME_PRIORITY, t10 // check if less than realtime
+ lda t12, ThPriority(s2) // get address of thread priority byte
+ bne v0, 20f // if ne, boosted priority less
+ mskbl t11, t12, t11 // clear priority byte
+ cmoveq t10,LOW_REALTIME_PRIORITY-1,t8 // set maximum server priority
+ insbl t8, t12, t10 // get priority byte into position
+ bis t10, t11, t11 // merge
+ bic t12, 3, t12 // get longword address
+ extll t11, t12, t9 // extract stored longword
+ stl t9, 0(t12) // store new priority
+ br zero, 70f
+
+//
+// The boosted priority of the server is less than the current priority of
+// the client. If the server base priority is above the required threshold,
+// then a optimal switch to the server can be performed by temporarily
+// raising the priority of the server to that of the client.
+//
+//
+// N.B. This code takes advantage of the fact that ThPriorityDecrement,
+// ThBasePriority, ThDecrementCount, and ThQuantum are contained in
+// the same dword of the KTHREAD object.
+//
+
+#if ((ThBasePriority / 4) != (ThPriorityDecrement / 4))
+#error "ThBasePriority and ThPriorityDecrement have moved"
+#endif
+#if ((ThBasePriority / 4) != (ThDecrementCount / 4))
+#error "ThBasePriority and ThDecrementCount have moved"
+#endif
+#if ((ThBasePriority / 4) != (ThQuantum / 4))
+#error "ThBasePriority and ThQuantum have moved"
+#endif
+
+20:
+ cmpult t7, BASE_PRIORITY_THRESHOLD, v0 // check if below threshold
+ subq t4, t7, t11 // compute priority decrement value
+ bne v0, LongWay // if ne[TRUE], priority below threshold
+ lda t10, ROUND_TRIP_DECREMENT_COUNT(zero) // get system decrement
+ mskbl t9, ThPriorityDecrement % 8, t9 // zero ThPriorityDecrement in source
+ mskbl t9, ThDecrementCount % 8, t9 // zero ThDecrementCount in source
+ insbl t11, ThPriorityDecrement % 8, t11 // insert new priority decrement byte
+ insbl t10, ThDecrementCount % 8, t10 // insert new decrement count byte
+ bis t9, t11, t9 // merge previous and priority decrement
+ bis t9, t10, t9 // merge ThDecrementCount
+ lda t12, ThBasePriority(s2) // get address to store result
+ bic t12, 3, t12 // make longword address
+ extll t9, t12, t10 // extract stored longword
+ stl t10, 0(t12) // store updated values
+ StoreByte(t4, ThPriority(s2)) //
+ br zero, 70f //
+
+//
+// A server boost has previously been applied to the server thread. Count
+// down the decrement count to determine if another optimal server switch
+// is allowed.
+//
+//
+// N.B. This code takes advantage of the fact that ThPriorityDecrement and
+// ThDecrementCount are contained in the same dword of the KTHREAD object.
+//
+
+#if ((ThDecrementCount / 4) != (ThPriorityDecrement / 4))
+#error "ThDecrementCount and ThPriorityDecrement have moved"
+#endif
+
+30:
+ extbl t9, ThDecrementCount % 8, a5 // get original count
+ lda t12, ThDecrementCount(s2) //
+ mskbl t9, t12, t11 // clear count byte
+ subq a5, 1, a5 // decrement original count
+ insbl a5, t12, a5 // get new count into position
+ bic t12, 3, t12 // get the longword address
+ bis t11, a5, t11 // merge in new count
+ extll t11, t12, t11 // get the longword to store
+ stl t11, 0(t12) // store updated count
+ beq a5, 40f // optimal switches exhausted
+
+//
+// Another optimal switch to the server is allowed provided that the
+// server priority is not less than the client priority.
+//
+
+ cmpult t5, t4, v0 // check if server lower priority
+ beq v0, 70f // if eq[FALSE], server not lower
+ br zero, LongWay //
+
+//
+// The server has exhausted the number of times an optimal switch may
+// be performed without reducing it priority. Reduce the priority of
+// the server to its original unboosted value minus one.
+//
+
+40:
+ StoreByte( zero, ThPriorityDecrement(s2) ) // clear server priority decr
+ StoreByte( t7, ThPriority(s2) ) // set server priority to base
+ br zero, LongWay //
+
+//
+// The client is realtime. In order for an optimal switch to occur, the
+// server must also be realtime and run at a high or equal priority.
+//
+
+60:
+ cmpult t5, t4, v0 // check if server is lower priority
+ bne v0, LongWay // if ne, server is lower priority
+65:
+ ldq_u t12, PrThreadQuantum(s4)
+ extbl t12, PrThreadQuantum % 8, t11 // get process quantum value
+ StoreByte( t11, ThQuantum(s2) ) // set server thread quantum
+
+//
+// An optimal switch to the server can be executed.
+//
+
+//
+// Set the next processor for the server thread.
+//
+70:
+#if !defined(NT_UP)
+ ldl t1, PbNumber(s0) // set server next processor number
+ StoreByte(t1, ThNextProcessor(s2))
+#endif
+
+//
+// Set the address of the wait block list in the client thread, complete
+// the initialization of the builtin event wait block, and insert the wait
+// block in client event wait list.
+//
+
+ lda t3, EVENT_WAIT_BLOCK_OFFSET(s1) // compute wait block address
+ stl t3, ThWaitBlockList(s1) // set address of wait block list
+ stl zero, ThWaitStatus(s1) // set initial wait status
+ stl a3, WbObject(t3) // set address of wait object
+ stl t3, WbNextWaitBlock(t3) // set next wait block address
+ ldah t1, WaitAny(zero) // get wait type and wait key
+ stl t1, WbWaitKey(t3) // set wait type and wait key
+ lda t2, EvWaitListHead(a3) // compute event wait listhead address
+ ldl t5, LsBlink(t2) // get backward link of listhead
+ lda t6, WbWaitListEntry(t3) // compute wait block list entry address
+ stl t6, LsBlink(t2) // set backward link of listhead
+ stl t6, LsFlink(t5) // set forward link in last entry
+ stl t2, LsFlink(t6) // set forward link in wait entry
+ stl t5, LsBlink(t6) // set backward link in wait entry
+
+//
+// Set the client thread wait parameters, set the thread state to Waiting,
+// and insert the thread in the wait list.
+//
+ StoreByte( zero, ThAlertable(s1) ) // set alertable FALSE
+ StoreByte( a1, ThWaitReason(s1) )
+ StoreByte( a2, ThWaitMode(s1) ) // set the wait mode
+ ldq_u t7, ThEnableStackSwap(s1) // get kernel stack swap enable
+ extbl t7, ThEnableStackSwap % 8, a3
+ ldl t6, KeTickCount // get low part of tick count
+ stl t6, ThWaitTime(s1) // set thread wait time
+ ldil t3, Waiting // set thread state
+ StoreByte( t3, ThState(s1) ) //
+ lda t1, KiWaitInListHead // get address of wait in listhead
+ beq a2, 75f // if eq, wait mode is kernel
+ beq a3, 75f // if eq, kernel stack swap disabled
+ cmpult t4, LOW_REALTIME_PRIORITY + 9, v0 // check if priority in range
+ bne v0, 76f // if ne, thread priority in range
+75: lda t1, KiWaitOutListHead // get address of wait out listhead
+76: ldl t5, LsBlink(t1) // get backlink of wait listhead
+ lda t6, ThWaitListEntry(s1) // compute client wait list entry addr
+ stl t6, LsBlink(t1) // set backward link of listhead
+ stl t6, LsFlink(t5) // set forward link in last entry
+ stl t1, LsFlink(t6) // set forward link in wait entry
+ stl t5, LsBlink(t6) // set backward link in wait entry
+
+//
+// If the current thread is processing a queue entry, then attempt to
+// activate another thread that is blocked on the queue object.
+//
+// N.B. The next thread address can change if the routine to activate
+// a queue waiter is called.
+//
+
+77: ldl a0, ThQueue(s1) // get queue object address
+ beq a0, 78f // if eq, no queue object attached
+ stl s2, PbNextThread(s0)
+ bsr ra, KiActivateWaiterQueue // attempt to activate a blocked thread
+ ldl s2, PbNextThread(s0) // get next thread address
+ stl zero, PbNextThread(s0) // set next thread address to NULL
+78: stl s2, PbCurrentThread(s0) // set address of current thread object
+ bsr ra, SwapContext // swap context
+
+//
+// On return from SwapContext, s2 is pointer to thread object.
+//
+ ldq_u v0, ThWaitIrql(s2)
+ extbl v0, ThWaitIrql % 8, a0 // get original Irql
+ ldl t0, ThWaitStatus(s2) // get wait completion status
+
+//
+// Lower IRQL to its previous level.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+
+ SWAP_IRQL // v0 = previous Irql
+
+//
+// If the wait was not interrupted to deliver a kernel APC, then return the
+// completion status.
+//
+
+ bis t0, zero, v0 // v0 = wait completion status
+ xor t0, STATUS_KERNEL_APC, t1 // check if awakened for kernel APC
+ bne t1, 90f // if ne, normal wait completion
+
+//
+// Raise IRQL to synchronization level and acquire the dispatcher database lock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ ldl a0, KiSynchIrql // get new IRQL level
+ SWAP_IRQL // v0 = previous Irql
+
+ StoreByte( v0, ThWaitIrql(s2) ) // set client wait Irql
+
+//
+// Acquire the dispatcher database lock.
+//
+
+#if !defined(NT_UP)
+
+ lda t2, KiDispatcherLock // get current lock value address
+80:
+ ldl_l t3, 0(t2) // get current lock value
+ bis s2, zero, t4 // set ownership value
+ bne t3, 85f // if ne, spin lock owned
+ stl_c t4, 0(t2) // set spin lock owned
+ beq t4, 85f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the lock is acquired
+
+#endif
+
+ ldl t1, ExPsr + 4(sp) // restore wait reason
+ ldl t2, ExPsr + 8(sp) // restore wait mode
+ br zero, ContinueWait //
+
+//
+// Ready the target thread for execution and wait on the specified wait
+// object.
+//
+LongWay:
+ bsr ra, KiReadyThread // ready thread for execution
+
+//
+// Continue and return the wait completion status.
+//
+// N.B. The wait continuation routine is called with the dispatcher
+// database locked.
+//
+
+ContinueWait:
+ ldl a0, ExPsr+12(sp) // get wait object address
+ ldl a1, ExPsr+4(sp) // get wait reason
+ ldl a2, ExPsr+8(sp) // get wait mode
+ bsr ra, KiContinueClientWait // continue client wait
+90:
+ ldq s0, ExIntS0(sp) // restore registers s0 - fp
+ ldq s1, ExIntS1(sp) //
+ ldq s2, ExIntS2(sp) //
+ ldq s3, ExIntS3(sp) //
+ ldq s4, ExIntS4(sp) //
+ ldq s5, ExIntS5(sp) //
+ ldq fp, ExIntFp(sp) //
+ ldq ra, ExIntRa(sp) // restore return address
+ lda sp, ExceptionFrameLength(sp) // deallocate context frame
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+85:
+ bis v0, zero, a0 // lower back down to old IRQL
+ SWAP_IRQL
+86:
+ ldl t3, 0(t2) // read current lock value
+ bne t3, 86b // loop in cache until lock available
+ ldl a0, KiSynchIrql // raise back to sync level to retry acquire
+ SWAP_IRQL // restore old IRQL to v0
+ br zero, 80b // retry spinlock acquisition
+
+#endif //NT_UP
+
+
+ .end KiSwitchToThread
+
+ SBTTL("Unlock Dispatcher Database")
+//++
+//
+// VOID
+// KiUnlockDispatcherDatabase (
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+// database locked. Its function is to either unlock the dispatcher
+// database and return or initiate a context switch if another thread
+// has been selected for execution.
+//
+// N.B. A context switch CANNOT be initiated if the previous IRQL
+// is DISPATCH_LEVEL.
+//
+// N.B. This routine is carefully written to be a leaf function. If,
+// however, a context swap should be performed, the routine is
+// switched to a nested function.
+//
+// Arguments:
+//
+// OldIrql (a0) - Supplies the IRQL when the dispatcher database
+// lock was acquired.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ LEAF_ENTRY(KiUnlockDispatcherDatabase)
+
+//
+// Check if a thread has been scheduled to execute on the current processor
+//
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = PRCB
+ cmpult a0, DISPATCH_LEVEL, t1 // check if IRQL below dispatch level
+ ldl t2, PbNextThread(v0) // get next thread address
+ bne t2, 30f // if ne, next thread selected
+
+//
+// Release dispatcher database lock, restore IRQL to its previous level
+// and return
+//
+10:
+
+#if !defined(NT_UP)
+
+ mb
+ stl zero, KiDispatcherLock
+#endif
+ SWAP_IRQL
+ ret zero, (ra)
+
+//
+// A new thread has been selected to run on the current processor, but
+// the new IRQL is not below dispatch level. If the current processor is
+// not executing a DPC, then request a dispatch interrupt on the current
+// processor before releasing the dispatcher lock and restoring IRQL.
+//
+20:
+ ldl t2, PbDpcRoutineActive(v0)
+ bne t2, 10b // if ne, DPC active
+
+#if !defined(NT_UP)
+
+ mb
+ stl zero, KiDispatcherLock
+#endif
+ SWAP_IRQL
+
+ ldil a0, DISPATCH_LEVEL // set interrupt request level
+
+ REQUEST_SOFTWARE_INTERRUPT // request DPC interrupt
+
+ ret zero, (ra)
+
+//
+// A new thread has been selected to run on the current processor.
+//
+// If the new IRQL is less than dispatch level, then switch to the new
+// thread.
+//
+30: beq t1, 20b // if eq, not below dispatch level
+
+ .end KiUnlockDispatcherDatabase
+
+//
+// N.B. This routine is carefully written as a nested function.
+// Control only reaches this routine from above.
+//
+// v0 contains the address of PRCB
+// t2 contains the next thread
+//
+ NESTED_ENTRY(KxUnlockDispatcherDatabase, ExceptionFrameLength, zero)
+ lda sp, -ExceptionFrameLength(sp) // allocate context frame
+ stq ra, ExIntRa(sp) // save return address
+ stq s0, ExIntS0(sp) // save integer registers
+ stq s1, ExIntS1(sp)
+ stq s2, ExIntS2(sp)
+ stq s3, ExIntS3(sp)
+ stq s4, ExIntS4(sp)
+ stq s5, ExIntS5(sp)
+ stq fp, ExIntFp(sp)
+ PROLOGUE_END
+
+ bis v0, zero, s0 // set address of PRCB
+ GET_CURRENT_THREAD // get current thread address
+ bis v0, zero, s1
+ bis t2, zero, s2 // set next thread address
+ StoreByte(a0, ThWaitIrql(s1)) // save previous IRQL
+ stl zero, PbNextThread(s0) // clear next thread address
+
+//
+// Reready current thread for execution and swap context to the selected thread.
+//
+// N.B. The return from the call to swap context is directly to the swap
+// thread exit.
+//
+ bis s1, zero, a0 // set address of previous thread object
+ stl s2, PbCurrentThread(s0) // set address of current thread object
+ bsr ra, KiReadyThread // reready thread for execution
+ lda ra, KiSwapThreadExit // set return address
+ jmp SwapContext // swap context
+
+ .end KxUnlockDispatcherDatabase
+
+
+ SBTTL("Swap Thread")
+//++
+//
+// VOID
+// KiSwapThread (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This routine is called to select the next thread to run on the
+// current processor and to perform a context switch to the thread.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Wait completion status (v0).
+//
+//--
+ NESTED_ENTRY(KiSwapThread, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate context frame
+ stq ra, ExIntRa(sp) // save return address
+ stq s0, ExIntS0(sp) // save integer registers s0 - s5
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+ stq fp, ExIntFp(sp) // save fp
+
+ PROLOGUE_END
+
+ GET_PROCESSOR_CONTROL_REGION_BASE //
+ bis v0, zero, s3 // get PCR in s3
+ ldl s0, PcPrcb(s3) // get address of PRCB
+ ldl s5, KiReadySummary // get ready summary in s5
+ zapnot s5, 0x0f, t0 // clear high 32 bits.
+ GET_CURRENT_THREAD
+ bis v0, zero, s1 // get current thread address
+ ldl s2, PbNextThread(s0) // get next thread address
+
+#if !defined(NT_UP)
+
+ ldl fp, PcSetMember(s3) // get processor affinity mask
+
+#endif
+ stl zero, PbNextThread(s0) // zero next thread address
+ bne s2, 120f // if ne, next thread selected
+
+//
+// Find the highest nibble in the ready summary that contains a set bit
+// and left justify so the nibble is in bits <63:60>.
+//
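+// Logically, the scan below locates the highest priority ready queue that
+// contains a candidate thread; a loop-free cmpbge/cmov sequence is used
+// instead of the straightforward C loop sketched here:
+//
+//      for (Priority = 31; Priority >= 0; Priority -= 1) {
+//          if ((ReadySummary & (1 << Priority)) != 0) {
+//              // scan KiDispatcherReadyListHead[Priority] for a thread whose
+//              // affinity includes the current processor
+//              break;
+//          }
+//      }
+//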
+ cmpbge zero, t0, s4 // generate 4-bit mask with clear
+ // bits representing nonzero bytes.
+ ldil t2, 7 // initial bit number
+
+ srl s4, 1, t5 // check bits <15:8>
+ cmovlbc t5, 15, t2 // if bit clear, bit number = 15
+
+ srl s4, 2, t6 // check bits <23:16>
+ cmovlbc t6, 23, t2 // if bit clear, bit number = 23
+
+ srl s4, 3, t7 // check bits <31:24>
+ cmovlbc t7, 31, t2 // if bit clear, bit number = 31
+
+ bic t2, 7, t3 // get byte shift from priority
+ srl t0, t3, s4 // isolate highest nonzero byte
+ and s4, 0xf0, t4 // check if high nibble nonzero
+ subq t2, 4, t1 // compute bit number if high nibble zero
+ cmoveq t4, t1, t2 // if eq, high nibble zero
+
+10:
+ ornot zero, t2, t4 // compute left justify shift count
+ sll t0, t4, t0 // left justify ready summary to nibble
+
+//
+// If the next bit is set in the ready summary, then scan the corresponding
+// dispatcher ready queue.
+//
+
+30:
+ blt t0, 50f // if ltz, queue contains an entry
+31:
+ sll t0, 1, t0 // position next ready summary bit
+ subq t2, 1, t2 // decrement ready queue priority
+ bne t0, 30b // if ne, more queues to scan
+
+//
+// All ready queues were scanned without finding a runnable thread so
+// default to the idle thread and set the appropriate bit in idle summary.
+//
+#if defined(_COLLECT_SWITCH_DATA_)
+ lda t0, KeThreadSwitchCounters // get switch counters address
+ ldl v0, TwSwitchToIdle(t0) // increment switch to idle
+ addq v0, 1, v0 //
+ stl v0, TwSwitchToIdle(t0) //
+#endif
+
+#if defined(NT_UP)
+ ldil t0, 1 // get current idle summary
+#else
+ ldl t0, KiIdleSummary // get current idle summary
+ bis t0, fp, t0 // set member bit in idle summary
+#endif
+ stl t0, KiIdleSummary // set new idle summary
+ ldl s2, PbIdleThread(s0) // set address of idle thread
+
+ br zero, 120f // swap context
+
+50:
+ lda t1, KiDispatcherReadyListHead // get ready list head base address
+ s8addq t2, t1, s4 // compute ready queue address
+ ldl t4, LsFlink(s4) // get address of next queue entry
+55:
+ subq t4, ThWaitListEntry, s2 // compute address of thread object
+
+#if !defined(NT_UP)
+
+//
+// If the thread can execute on the current processor, then remove it from
+// the dispatcher ready queue.
+//
+ ldl t5, ThAffinity(s2) // get thread affinity
+ and t5, fp, t6 // check against current processor set
+ bne t6, 60f // if ne, thread affinity compatible
+ ldl t4, LsFlink(t4) // get address of next entry
+ cmpeq t4, s4, t1 // check for end of list
+ beq t1, 55b // if eq, not end of list
+ br zero, 31b //
+
+60:
+//
+// If the thread last ran on the current processor, the processor is the
+// ideal processor for the thread, the thread has been waiting for longer
+// than a quantum, or its priority is greater than low realtime plus 9,
+// then select the thread. Otherwise, an attempt is made to find a more
+// appropriate candidate.
+//
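+// The selection test, as a C sketch (field names follow the KTHREAD and PCR
+// symbols used below; CurrentProcessor is the PCR processor number):
+//
+//      BOOLEAN SelectThread =
+//          (Thread->NextProcessor == CurrentProcessor) ||
+//          (Thread->IdealProcessor == CurrentProcessor) ||
+//          ((KeTickCount - Thread->WaitTime) > READY_SKIP_QUANTUM) ||
+//          (Priority >= LOW_REALTIME_PRIORITY + 9);
+//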
+ ldq_u t1, PcNumber(s3) // get current processor number
+ extbl t1, PcNumber % 8, t12 //
+ ldq_u t11, ThNextProcessor(s2) // get thread's last processor number
+ extbl t11, ThNextProcessor % 8, t9 //
+ cmpeq t9, t12, t5 // check thread's last processor
+ bne t5, 110f // if eq, last processor match
+ ldq_u t6, ThIdealProcessor(s2) // get thread's ideal processor number
+ extbl t6, ThIdealProcessor % 8, a3 //
+ cmpeq a3, t12, t8 // check thread's ideal processor
+ bne t8, 100f // if eq, ideal processor match
+ ldl t6, KeTickCount // get low part of tick count
+ ldl t7, ThWaitTime(s2) // get time of thread ready
+ subq t6, t7, t8 // compute length of wait
+ cmpult t8, READY_SKIP_QUANTUM+1, t1 // check if wait time exceeded
+ cmpult t2, LOW_REALTIME_PRIORITY+9, t3 // check if priority in range
+ and t1, t3, v0 // check if priority and time match
+ beq v0, 100f // if eq, select this thread
+
+//
+// Search forward in the ready queue until the end of the list is reached
+// or a more appropriate thread is found.
+//
+
+ ldl t7, LsFlink(t4) // get address of next entry
+80: cmpeq t7, s4, t1 // if eq, end of list
+ bne t1, 100f // select original thread
+ subq t7, ThWaitListEntry, a0 // compute address of thread object
+ ldl a2, ThAffinity(a0) // get thread affinity
+ and a2, fp, t1 // check for compatible thread affinity
+ beq t1, 85f // if eq, thread affinity not compatible
+ ldq_u t5, ThNextProcessor(a0) // get last processor number
+ extbl t5, ThNextProcessor % 8, t9 //
+ cmpeq t9, t12, t10 // if eq, processor number match
+ bne t10, 90f //
+ ldq_u a1, ThIdealProcessor(a0) // get ideal processor number
+ extbl a1, ThIdealProcessor % 8, a3
+ cmpeq a3, t12, t10 // if eq, ideal processor match
+ bne t10, 90f
+85: ldl t8, ThWaitTime(a0) // get time of thread ready
+ ldl t7, LsFlink(t7) // get address of next entry
+ subq t6, t8, t8 // compute length of wait
+ cmpult t8, READY_SKIP_QUANTUM+1, t5 // check if wait time exceeded
+ bne t5, 80b // if ne, wait time not exceeded
+ br zero, 100f // select original thread
+
+90: bis a0, zero, s2 // set thread address
+ bis t7, zero, t4 // set list entry address
+ bis t5, zero, t11 // copy last processor data
+100: insbl t12, ThNextProcessor % 8, t8 // move next processor into position
+ mskbl t11, ThNextProcessor % 8, t5 // mask next processor position
+ bis t8, t5, t6 // merge
+ stq_u t6, ThNextProcessor(s2) // update next processor
+
+110:
+
+#if defined(_COLLECT_SWITCH_DATA_)
+
+ ldq_u t5, ThNextProcessor(s2) // get last processor number
+ extbl t5, ThNextProcessor % 8, t9 //
+ ldq_u a1, ThIdealProcessor(s2) // get ideal processor number
+ extbl a1, ThIdealProcessor % 8, a3
+ lda t0, KeThreadSwitchCounters + TwFindAny // compute address of Any counter
+ addq t0, TwFindIdeal-TwFindAny, t1 // compute address of Ideal counter
+ cmpeq t9, t12, t7 // if eq, last processor match
+ addq t0, TwFindLast-TwFindAny, t6 // compute address of Last counter
+ cmpeq a3, t12, t5 // if eq, ideal processor match
+ cmovne t7, t6, t0 // if last match, use last counter
+ cmovne t5, t1, t0 // if ideal match, use ideal counter
+ ldl v0, 0(t0) // increment counter
+ addq v0, 1, v0 //
+ stl v0, 0(t0) //
+
+#endif
+
+#endif
+
+ ldl t5, LsFlink(t4) // get list entry forward link
+ ldl t6, LsBlink(t4) // get list entry backward link
+ stl t5, LsFlink(t6) // set forward link in previous entry
+ stl t6, LsBlink(t5) // set backward link in next entry
+ cmpeq t6, t5, t7 // if eq, list is empty
+ beq t7, 120f //
+ ldil t1, 1 // compute ready summary set member
+ sll t1, t2, t1 //
+ xor t1, s5, t1 // clear member bit in ready summary
+ stl t1, KiReadySummary //
+//
+// Swap context to the next thread
+//
+
+120:
+ stl s2, PbCurrentThread(s0) // set address of current thread object
+ bsr ra, SwapContext // swap context
+
+ ALTERNATE_ENTRY(KiSwapThreadExit)
+
+//
+// Lower IRQL, deallocate context frame, and return wait completion status.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+// N.B. The register v0 contains the complement of the kernel APC pending state.
+//
+// N.B. The register s2 contains the address of the new thread.
+//
+ ldl s1, ThWaitStatus(s2) // get wait completion status
+ ldq_u t1, ThWaitIrql(s2) // get original IRQL
+ extbl t1, ThWaitIrql % 8, a0 //
+ bis v0, a0, t3 // check if APC pending and IRQL is zero
+ bne t3, 10f
+//
+// Lower IRQL to APC level and dispatch APC interrupt.
+//
+ ldil a0, APC_LEVEL
+ SWAP_IRQL
+ ldil a0, APC_LEVEL
+ DEASSERT_SOFTWARE_INTERRUPT
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // get PRCB in v0
+ ldl t1, PbApcBypassCount(v0) // increment the APC bypass count
+ addl t1, 1, t2
+ stl t2, PbApcBypassCount(v0) // store result
+ bis zero, zero, a0 // set previous mode to kernel
+ bis zero, zero, a1 // set exception frame address
+ bis zero, zero, a2 // set trap frame address
+ bsr ra, KiDeliverApc // deliver kernel mode APC
+ bis zero, zero, a0 // set original wait IRQL
+
+//
+// Lower IRQL to wait level, set return status, restore registers, and
+// return.
+//
+10:
+ SWAP_IRQL
+
+ bis s1, zero, v0
+
+ ldq ra, ExIntRa(sp) // restore return address
+ ldq s0, ExIntS0(sp) // restore int regs S0-S5
+ ldq s1, ExIntS1(sp) //
+ ldq s2, ExIntS2(sp) //
+ ldq s3, ExIntS3(sp) //
+ ldq s4, ExIntS4(sp) //
+ ldq s5, ExIntS5(sp) //
+ ldq fp, ExIntFp(sp) // restore fp
+
+ lda sp, ExceptionFrameLength(sp) // deallocate context frame
+ ret zero, (ra) // return
+
+
+98:
+ subq t2, 1, t2 // decrement ready queue priority
+ subq s4, 8, s4 // advance to next ready queue
+ sll t0, 1, t0 // position next ready summary bit
+ bne t0, 40b // if ne, more queues to scan
+
+
+ .end KiSwapThread
+
+
+
+ SBTTL("Dispatch Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a software interrupt generated
+// at DISPATCH_LEVEL. Its function is to process the Deferred Procedure Call
+// (DPC) list, and then perform a context switch if a new thread has been
+// selected for execution on the processor.
+//
+// This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+// database unlocked. When a return to the caller finally occurs, the
+// IRQL remains at DISPATCH_LEVEL, and the dispatcher database is still
+// unlocked.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved. The volatile floating point registers have not been saved.
+//
+// Arguments:
+//
+// fp - Supplies a pointer to the base of a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ .struct 0
+DpSp: .space 8 // saved stack pointer
+DpBs: .space 8 // base of previous stack
+DpcFrameLength: // DPC frame length
+
+ NESTED_ENTRY(KiDispatchInterrupt, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate context frame
+ stq ra, ExIntRa(sp) // save return address
+//
+// Save the saved registers in case we context switch to a new thread.
+//
+// N.B. - If we don't context switch then we need only restore those
+// registers that we use in this routine, currently those registers
+// are s0, s1
+//
+
+ stq s0, ExIntS0(sp) // save integer registers s0-s6
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+ stq fp, ExIntFp(sp) //
+
+ PROLOGUE_END
+
+//
+// Increment the dispatch interrupt count
+//
+ GET_PROCESSOR_CONTROL_BLOCK_BASE //
+ bis v0, zero, s0 // s0 = base address of PRCB
+ ldl t2, PbDispatchInterruptCount(s0) // get old dispatch interrupt count
+ addl t2, 1, t3 // increment dispatch interrupt count
+ stl t3, PbDispatchInterruptCount(s0) // set new dispatch interrupt count
+
+//
+// Process the DPC List with interrupts off.
+//
+ ldl t0, PbDpcQueueDepth(s0) // get current queue depth
+ beq t0, 20f // no DPCs, check quantum end
+
+PollDpcList:
+ DISABLE_INTERRUPTS
+
+//
+// Save current initial stack address and set new initial stack address.
+//
+ GET_PROCESSOR_CONTROL_REGION_BASE // v0 = PCR address
+
+ ldl a0, PcDpcStack(v0) // get address of DPC stack
+ lda t0, -DpcFrameLength(a0) // allocate DPC frame
+ stq sp, DpSp(t0) // save old stack pointer
+ bis t0, t0, sp // set new stack pointer
+ SET_INITIAL_KERNEL_STACK // a0 = new, v0 = previous
+ stq v0, DpBs(sp) // save current initial stack
+
+ bsr ra, KiRetireDpcList // process the DPC list
+
+//
+// Switch back to the previous stack and restore the previous initial stack.
+//
+ ldq a0, DpBs(sp) // get previous initial stack address
+ SET_INITIAL_KERNEL_STACK // set current initial stack
+ ldq sp, DpSp(sp) // restore stack pointer
+
+ ENABLE_INTERRUPTS
+
+//
+// Check to determine if quantum end has occurred.
+//
+20:
+ ldl t0, PbQuantumEnd(s0) // get quantum end indicator
+ beq t0, 25f // if eq, no quantum end request
+ stl zero, PbQuantumEnd(s0) // clear quantum end indicator
+ bsr ra, KiQuantumEnd // process quantum end request
+ beq v0, 50f // if eq, no next thread, return
+ bis v0, zero, s2 // set next thread
+ br zero, 40f // else switch to the new thread
+
+//
+// Determine if a new thread has been selected for execution on
+// this processor.
+//
+
+25: ldl v0, PbNextThread(s0) // get address of next thread object
+ beq v0, 50f // if eq, no new thread selected
+
+//
+// Lock dispatcher database and reread address of next thread object
+// since it is possible for it to change in an MP system.
+//
+
+#if !defined(NT_UP)
+
+ lda s1, KiDispatcherLock // get dispatcher base lock address
+#endif
+30:
+ ldl a0, KiSynchIrql
+ SWAP_IRQL
+
+#if !defined(NT_UP)
+ ldl_l t0, 0(s1) // get current lock value
+ bis s1, zero, t1 // t1 = lock ownership value
+ bne t0, 45f // ne => spin lock owned
+ stl_c t1, 0(s1) // set lock to owned
+ beq t1, 45f // zero => stl_c failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+#endif
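+
+//
+// N.B. The ldl_l/stl_c sequence above implements an atomic test-and-set:
+// the conditional store succeeds only if no other processor has written
+// the lock longword since the locked load, so a failed store (indicated
+// by t1 == 0) simply causes the acquisition to be retried.
+//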
+
+//
+// Reready current thread for execution and swap context to the selected thread.
+//
+ ldl s2, PbNextThread(s0) // get addr of next thread
+40:
+ GET_CURRENT_THREAD // v0 = address of current thread
+ bis v0, zero, s1 // s1 = address of current thread
+
+ stl zero, PbNextThread(s0) // clear address of next thread
+ bis s1, zero, a0 // parameter to KiReadyThread
+ stl s2, PbCurrentThread(s0) // set address of current thread
+ bsr ra, KiReadyThread // reready thread for execution
+ bsr ra, KiSaveVolatileFloatState
+ bsr ra, SwapContext // swap context
+
+//
+// Restore the saved integer registers that were changed for a context
+// switch only.
+//
+// N.B. - The frame pointer must be restored before the volatile floating
+// state because it is the pointer to the trap frame.
+//
+
+ ldq s2, ExIntS2(sp) // restore s2 - s5
+ ldq s3, ExIntS3(sp) //
+ ldq s4, ExIntS4(sp) //
+ ldq s5, ExIntS5(sp) //
+ ldq fp, ExIntFp(sp) // restore the frame pointer
+ bsr ra, KiRestoreVolatileFloatState
+
+//
+// Restore the remaining saved integer registers and return.
+//
+
+50:
+ ldq s0, ExIntS0(sp) // restore s0 - s1
+ ldq s1, ExIntS1(sp) //
+
+ ldq ra, ExIntRa(sp) // get return address
+ lda sp, ExceptionFrameLength(sp) // deallocate context frame
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+45:
+//
+// Dispatcher lock is owned, spin on both the dispatcher lock and
+// the DPC queue going not empty.
+//
+ bis v0, zero, a0 // lower back to original IRQL to wait for locks
+ SWAP_IRQL
+48:
+ ldl t0, 0(s1) // read current dispatcher lock value
+ beq t0, 30b // lock available. retry spinlock
+ ldl t1, PbDpcQueueDepth(s0) // get current DPC queue depth
+ bne t1, PollDpcList // if nez, list not empty
+
+ br zero, 48b // loop in cache until lock available
+
+
+#endif
+
+ .end KiDispatchInterrupt
+
+ SBTTL("Swap Context to Next Thread")
+//++
+//
+// Routine Description:
+//
+// This routine is called to swap context from one thread to the next.
+//
+// Arguments:
+//
+// s0 - Address of Processor Control Block (PRCB).
+// s1 - Address of previous thread object.
+// s2 - Address of next thread object.
+// sp - Pointer to an exception frame.
+//
+// Return value:
+//
+// v0 - complement of Kernel APC pending.
+// s2 - Address of current thread object.
+//
+//--
+
+ NESTED_ENTRY(SwapContext, 0, zero)
+
+ stq ra, ExSwapReturn(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Set new thread's state to running. Note this must be done
+// under the dispatcher lock so that KiSetPriorityThread sees
+// the correct state.
+//
+ ldil t0, Running // set state of new thread to running
+ StoreByte( t0, ThState(s2) ) //
+
+#if !defined(NT_UP)
+//
+// Acquire the context swap lock so the address space of the old thread
+// cannot be deleted and then release the dispatcher database lock.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
+ lda t0, KiContextSwapLock // get context swap lock value address
+10:
+ ldl_l t1, 0(t0) // get current lock value
+ bis t0, zero, t2 // set ownership value
+ bne t1, 11f // if ne, lock already owned
+ stl_c t2, 0(t0) // set lock ownership value
+ beq t2, 11f // if eq, store conditional failed
+ mb // synchronize reads and writes
+ stl zero, KiDispatcherLock // set lock not owned
+
+#endif
+
+#if defined(PERF_DATA)
+
+//
+// Accumulate the total time spent in a thread.
+//
+ bis zero,zero,a0 // optional frequency not required
+ bsr ra, KeQueryPerformanceCounter // 64-bit cycle count in v0
+ ldq t0, PbStartCount(s0) // get starting cycle count
+ stq v0, PbStartCount(s0) // set starting cycle count
+
+ ldl t1, EtPerformanceCountHigh(s1) // get accumulated cycle count high
+ sll t1, 32, t2
+ ldl t3, EtPerformanceCountLow(s1) // get accumulated cycle count low
+ zap t3, 0xf0, t4 // zero out high dword sign extension
+ bis t2, t4, t3
+
+ subq v0, t0, t5 // compute elapsed cycle count
+ addq t5, t3, t4 // compute new cycle count
+
+ stl t4, EtPerformanceCountLow(s1) // set new cycle count in thread
+ srl t4, 32, t2
+ stl t2, EtPerformanceCountHigh(s1)
+
+#endif
+
+
+ bsr ra, KiSaveNonVolatileFloatState // save nv floating state
+
+ ALTERNATE_ENTRY(SwapFromIdle)
+
+//
+// Get address of old and new process objects.
+//
+
+ ldl s5, ThApcState + AsProcess(s1) // get address of old process
+ ldl s4, ThApcState + AsProcess(s2) // get address of new process
+
+
+//
+// Save the current PSR in the context frame, store the kernel stack pointer
+// in the previous thread object, load the new kernel stack pointer from the
+// new thread object, load the PTEs for the new kernel stack into the DTB,
+// select a new process id and swap to the new process, and restore
+// the previous PSR from the context frame.
+//
+
+ DISABLE_INTERRUPTS // disable interrupts
+ // v0 = current psr
+
+ ldl a0, ThInitialStack(s2) // get initial kernel stack pointer
+ stl sp, ThKernelStack(s1) // save old kernel stack pointer
+
+ bis s2, zero, a1 // new thread address
+ ldl a2, ThTeb(s2) // get address of user TEB
+
+#ifdef NT_UP
+
+//
+// On uni-processor systems keep the global current thread address
+// up to date.
+//
+ stl a1, KiCurrentThread // save new current thread
+
+#endif //NT_UP
+
+
+//
+// If the old process is the same as the new process, then there is no need
+// to change the address space. The a3 parameter indicates that the address
+// space is not to be swapped if it is less than zero. Otherwise, a3 will
+// contain the pfn of the PDR for the new address space.
+//
+
+ ldil a3, -1 // assume no address space change
+ bis zero, zero, a4 // assume ASN = 0
+ bis zero, 1, a5 // assume ASN wrap
+ bis zero, zero, t3 // show MAX ASN=0
+ cmpeq s5, s4, t0 // old process = new process?
+ bne t0, 40f // if ne[true], no address space swap
+
+#if !defined(NT_UP)
+//
+// Update the processor set masks. Clear the processor set member
+// number in the old process and set the processor member number in the
+// new process.
+//
+ GET_PROCESSOR_CONTROL_REGION_BASE // get PCR pointer in v0
+ ldl t0, PcSetMember(v0) // get processor set mask
+ ldl t1, PrActiveProcessors(s5) // get old active processor set
+ ldl t2, PrActiveProcessors(s4) // get new active processor set
+ bic t1, t0, t3 // clear processor member in set
+ bis t2, t0, t4 // set processor member in set
+ stl t3, PrActiveProcessors(s5) // set old active processor set
+ stl t4, PrActiveProcessors(s4) // set new active processor set
+
+#endif
+ ldl a3, PrDirectoryTableBase(s4) // get page directory PDE
+ srl a3, PTE_PFN, a3 // pass pfn only
+
+//
+// If the maximum address space number is zero, then we know to assign
+// ASN of zero to this process, just do it.
+//
+ ldl t3, KiMaximumPid // get MAX ASN
+ beq t3, 40f // if eq, only ASN=0
+//
+// If the process sequence number matches the master sequence number then
+// use the process ASN. Otherwise, allocate a new ASN. When allocating
+// a new ASN check for ASN wrapping and handle it.
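+// For example (illustrative values): if KiMaximumPid is 63 and the current
+// processor PID is also 63, the incremented PID (64) exceeds the maximum,
+// so the new PID wraps back to zero and the tbiap indicator is set so that
+// the translation buffer is invalidated before ASNs are reused.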
+//
+ bis zero, zero, a5 // assume tbiap = FALSE
+ GET_PROCESSOR_CONTROL_REGION_BASE
+
+ ldl t4, PcCurrentPid(v0) // get current processor PID
+ addl t4, 1, a4 // increment PID
+ cmpule a4, t3, t6 // is new PID le max?
+ cmoveq t6, t3, a5 // if eq[false], set tbiap indicator
+ cmoveq t6, zero, a4 // if eq[false], new PID is zero
+
+ stl a4, PcCurrentPid(v0) // set current processor PID
+40:
+//
+// Release the context swap lock, swap context, and enable interrupts
+//
+
+#if !defined(NT_UP)
+ mb // synchronize all previous writes
+ // before releasing the spinlock
+ stl zero, KiContextSwapLock // set spin lock not owned
+
+#endif
+ // a0 = initial ksp of new thread
+ // a1 = new thread address
+ // a2 = new TEB
+ // a3 = PDR of new address space or -1
+ // a4 = new ASN
+ // a5 = ASN wrap indicator
+ SWAP_THREAD_CONTEXT // swap thread
+
+ ldl sp, ThKernelStack(s2) // get new kernel stack pointer
+
+
+ ENABLE_INTERRUPTS // turn on interrupts
+
+//
+// If the new thread has a kernel mode APC pending, then request an
+// APC interrupt.
+//
+
+ ldil v0, 1 // set no apc pending
+ LoadByte(t0, ThApcState + AsKernelApcPending(s2)) // get kernel APC pending
+ ldl t2, ExPsr(sp) // get previous processor status
+ beq t0, 50f // if eq no apc pending
+
+ ldil a0, APC_INTERRUPT // request an apc interrupt
+ REQUEST_SOFTWARE_INTERRUPT //
+ bis zero, zero, v0 // set APC pending
+50:
+
+//
+// Count number of context switches
+//
+ ldl t1, PbContextSwitches(s0) // increment number of switches
+ addl t1, 1, t1 //
+ stl t1, PbContextSwitches(s0) // store result
+ ldl t0, ThContextSwitches(s2) // increment number of context
+ addq t0, 1, t0 // switches for thread
+ stl t0, ThContextSwitches(s2) // store result
+
+//
+// Restore the nonvolatile floating state.
+//
+
+ bsr ra, KiRestoreNonVolatileFloatState
+
+//
+// load RA and return with address of current thread in s2
+//
+
+ ldq ra, ExSwapReturn(sp) // get return address
+ ret zero, (ra) // return
+
+11:
+ ldl t1, 0(t0) // spin in cache until lock looks free
+ beq t1, 10b
+ br zero, 11b // retry
+
+ .end SwapContext
+
+
+
+
+ SBTTL("Swap Process")
+//++
+//
+// BOOLEAN
+// KiSwapProcess (
+// IN PKPROCESS NewProcess,
+// IN PKPROCESS OldProcess
+// )
+//
+// Routine Description:
+//
+// This function swaps the address space from one process to another by
+// assigning a new ASN if necessary and calling the palcode to swap
+// the privileged portion of the process context (the page directory
+// base pointer and the ASN). This function also maintains the processor
+// set for both processes in the switch.
+//
+// Arguments:
+//
+// NewProcess (a0) - Supplies a pointer to a control object of type process
+// which represents the new process to switch to.
+//
+// OldProcess (a1) - Supplies a pointer to a control object of type process
+// which represents the old process to switch from.
+//
+// Return Value:
+//
+// The translation buffer invalidate (ASN wrap) indicator is returned as the
+// function value.
+//
+//--
+
+ LEAF_ENTRY(KiSwapProcess)
+
+//
+// Acquire the context swap lock, clear the processor set member in the old
+// process, set the processor member in the new process, and release the
+// context swap lock.
+//
+ GET_PROCESSOR_CONTROL_REGION_BASE // get PCR pointer in v0
+
+#if !defined(NT_UP)
+ lda t7, KiContextSwapLock // get context swap lock address
+10:
+ ldl_l t0, 0(t7) // get current lock value
+ bis t7, zero, t1 // set ownership value
+ bne t0, 15f // if ne, lock already owned
+ stl_c t1, 0(t7) // set lock ownership value
+ beq t1, 15f // if eq, store conditional failed
+ mb // synchronize subsequent reads
+
+ ldl t0, PcSetMember(v0) // get processor set mask
+ ldl t1, PrActiveProcessors(a1) // get old active processor set
+ ldl t2, PrActiveProcessors(a0) // get new active processor set
+ bic t1, t0, t1 // clear processor member in set
+ bis t2, t0, t2 // set processor member in set
+ stl t1, PrActiveProcessors(a1) // set old active processor set
+ stl t2, PrActiveProcessors(a0) // set new active processor set
+
+ mb // synchronize subsequent writes
+ stl zero, 0(t7) // clear lock value
+#endif
+
+//
+// If the maximum address space number is zero, then we know to assign
+// ASN of zero to this process, just do it.
+//
+ bis zero, zero, a1 // assume ASN = 0
+ ldil a2, TRUE // assume tbiap = TRUE
+
+ ldl t3, KiMaximumPid // get MAX ASN
+ beq t3, 30f // if eq, only ASN=0
+//
+// If the process sequence number matches the master sequence number then
+// use the process ASN. Otherwise, allocate a new ASN. When allocating
+// a new ASN check for ASN wrapping and handle it.
+//
+
+ ldl t4, PcCurrentPid(v0) // get current processor PID
+ addl t4, 1, a1 // increment PID
+ cmpule a1, t3, t6 // is new PID le max?
+ cmovne t6, zero, a2 // if ne[true], clear tbiap indicator
+ cmoveq t6, zero, a1 // if eq[false], new PID is zero
+
+ stl a1, PcCurrentPid(v0) // set current processor PID
+
+30:
+
+ ldl a0, PrDirectoryTableBase(a0) // get page directory PDE
+ srl a0, PTE_PFN, a0 // pass pfn only
+
+ bis a2, zero, v0 // set wrap indicator return value
+
+ // a0 = pfn of new page directory base
+ // a1 = new address space number
+ // a2 = tbiap indicator
+ SWAP_PROCESS_CONTEXT // swap address space
+
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+15:
+ ldl t0, 0(t7) // spin in cache until lock looks free
+ beq t0, 10b // lock is unowned, retry acquisition
+ br zero, 15b
+#endif
+
+ .end KiSwapProcess
+
diff --git a/private/ntos/ke/alpha/dmpstate.c b/private/ntos/ke/alpha/dmpstate.c
new file mode 100644
index 000000000..90da7ac61
--- /dev/null
+++ b/private/ntos/ke/alpha/dmpstate.c
@@ -0,0 +1,574 @@
+/*++
+
+Copyright (c) 1992 Microsoft Corporation
+
+Module Name:
+
+ dmpstate.c
+
+Abstract:
+
+ This module implements the architecture specific routine that dumps
+ the machine state when a bug check occurs and no debugger is hooked
+ to the system. It is assumed that it is called from bug check.
+
+Author:
+
+ David N. Cutler (davec) 17-Jan-1992
+
+Environment:
+
+ Kernel mode.
+
+Revision History:
+
+ Joe Notarangelo 04-Feb-1992 Alpha-adaptation
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ );
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ );
+
+VOID
+KiMachineCheck (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ );
+
+//
+// Define external data.
+//
+
+extern LIST_ENTRY PsLoadedModuleList;
+
+VOID
+KeDumpMachineState (
+ IN PKPROCESSOR_STATE ProcessorState,
+ IN PCHAR Buffer,
+ IN PULONG BugCheckParameters,
+ IN ULONG NumberOfParameters,
+ IN PKE_BUGCHECK_UNICODE_TO_ANSI UnicodeToAnsiRoutine
+ )
+
+/*++
+
+Routine Description:
+
+ This function formats and displays the machine state at the time of the
+ call to bug check.
+
+Arguments:
+
+ ProcessorState - Supplies a pointer to a processor state record.
+
+ Buffer - Supplies a pointer to a buffer to be used to output machine
+ state information.
+
+ BugCheckParameters - Supplies additional bugcheck information.
+
+ NumberOfParameters - Supplies the size of the BugCheckParameters array.
+
+ UnicodeToAnsiRoutine - Supplies a pointer to a routine to convert Unicode strings
+ to Ansi strings without touching paged translation tables.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PCONTEXT ContextRecord;
+ ULONG ControlPc;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ UNICODE_STRING DllName;
+ FRAME_POINTERS EstablisherFrame;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PVOID ImageBase;
+ ULONG Index;
+ BOOLEAN InFunction;
+ ULONG LastStack;
+ ULONG NextPc;
+ ULONG StackLimit;
+ UCHAR AnsiBuffer[ 32 ];
+
+ //
+ // Virtually unwind to the caller of bug check.
+ //
+
+ ContextRecord = &ProcessorState->ContextFrame;
+ LastStack = (ULONG)ContextRecord->IntSp;
+ ControlPc = (ULONG)ContextRecord->IntRa - 4;
+ NextPc = ControlPc;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if (FunctionEntry != NULL) {
+ NextPc = RtlVirtualUnwind(ControlPc,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL);
+ }
+
+ //
+ // At this point the context record contains the machine state at the
+ // call to bug check.
+ //
+ // Put out the system version and the title line with the PC and PSR.
+ //
+
+ sprintf(Buffer,
+ "\nMicrosoft Windows NT [0x%08x]\n", NtBuildNumber);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "Machine State at Call to Bug Check PC : %08lX PSR : %08lX\n\n",
+ ContextRecord->IntRa,
+ ContextRecord->Psr);
+
+ HalDisplayString(Buffer);
+
+#ifdef DUMP_INTEGER_STATE
+
+ //
+ // Format and output the integer registers.
+ //
+
+ sprintf(Buffer,
+ "V0 : 0x%016Lx T0 : 0x%016Lx T1 : 0x%016Lx\n",
+ ContextRecord->IntV0,
+ ContextRecord->IntT0,
+ ContextRecord->IntT1);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T2 : 0x%016Lx T3 : 0x%016Lx T4 : 0x%016Lx\n",
+ ContextRecord->IntT2,
+ ContextRecord->IntT3,
+ ContextRecord->IntT4);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T5 : 0x%016Lx T6 : 0x%016Lx T7 : 0x%016Lx\n",
+ ContextRecord->IntT5,
+ ContextRecord->IntT6,
+ ContextRecord->IntT7);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "S0 : 0x%016Lx S1 : 0x%016Lx S2 : 0x%016Lx\n",
+ ContextRecord->IntS0,
+ ContextRecord->IntS1,
+ ContextRecord->IntS2);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "S3 : 0x%016Lx S4 : 0x%016Lx S5 : 0x%016Lx\n",
+ ContextRecord->IntS3,
+ ContextRecord->IntS4,
+ ContextRecord->IntS5);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "Fp : 0x%016Lx A0 : 0x%016Lx A1 : 0x%016Lx\n",
+ ContextRecord->IntFp,
+ ContextRecord->IntA0,
+ ContextRecord->IntA1);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "A2 : 0x%016Lx A3 : 0x%016Lx A4 : 0x%016Lx\n",
+ ContextRecord->IntA2,
+ ContextRecord->IntA3,
+ ContextRecord->IntA4);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "A5 : 0x%016Lx T8 : 0x%016Lx T9 : 0x%016Lx\n",
+ ContextRecord->IntA5,
+ ContextRecord->IntT8,
+ ContextRecord->IntT9);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T10: 0x%016Lx T11: 0x%016Lx T12: 0x%016Lx\n",
+ ContextRecord->IntT10,
+ ContextRecord->IntT11,
+ ContextRecord->IntT12);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "At : 0x%016Lx Gp : 0x%016Lx Sp : 0x%016Lx\n",
+ ContextRecord->IntAt,
+ ContextRecord->IntGp,
+ ContextRecord->IntSp);
+
+ HalDisplayString(Buffer);
+
+#endif //DUMP_INTEGER_STATE
+
+#ifdef DUMP_FLOATING_STATE
+
+ //
+ // Format and output the floating registers.
+ //
+
+ sprintf(Buffer,
+ "F0 : 0x%016Lx F1 : 0x%016Lx F2 : 0x%016Lx\n",
+ ContextRecord->FltF0,
+ ContextRecord->FltF1,
+ ContextRecord->FltF2);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F3 : 0x%016Lx F4 : 0x%016Lx F5 : 0x%016Lx\n",
+ ContextRecord->FltF3,
+ ContextRecord->FltF4,
+ ContextRecord->FltF5);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F6 : 0x%016Lx F7 : 0x%016Lx F8 : 0x%016Lx\n",
+ ContextRecord->FltF6,
+ ContextRecord->FltF7,
+ ContextRecord->FltF8);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F9 : 0x%016Lx F10: 0x%016Lx F11: 0x%016Lx\n",
+ ContextRecord->FltF9,
+ ContextRecord->FltF10,
+ ContextRecord->FltF11);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F12: 0x%016Lx F13: 0x%016Lx F14: 0x%016Lx\n",
+ ContextRecord->FltF12,
+ ContextRecord->FltF13,
+ ContextRecord->FltF14);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F15: 0x%016Lx F16: 0x%016Lx F17: 0x%016Lx\n",
+ ContextRecord->FltF15,
+ ContextRecord->FltF16,
+ ContextRecord->FltF17);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F18: 0x%016Lx F19: 0x%016Lx F20: 0x%016Lx\n",
+ ContextRecord->FltF18,
+ ContextRecord->FltF19,
+ ContextRecord->FltF20);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F21: 0x%016Lx F22: 0x%016Lx F23: 0x%016Lx\n",
+ ContextRecord->FltF21,
+ ContextRecord->FltF22,
+ ContextRecord->FltF23);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F24: 0x%016Lx F25: 0x%016Lx F26: 0x%016Lx\n",
+ ContextRecord->FltF24,
+ ContextRecord->FltF25,
+ ContextRecord->FltF26);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F27: 0x%016Lx F28: 0x%016Lx F29: 0x%016Lx\n",
+ ContextRecord->FltF27,
+ ContextRecord->FltF28,
+ ContextRecord->FltF29);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F30: 0x%016Lx F31: 0x%016Lx\n",
+ ContextRecord->FltF30,
+ ContextRecord->FltF31);
+
+ HalDisplayString(Buffer);
+
+#endif //DUMP_FLOATING_STATE
+
+ //
+ // Output short stack back trace with base address.
+ //
+
+ DllName.Length = 0;
+ DllName.Buffer = L"";
+ if (FunctionEntry != NULL) {
+ StackLimit = (ULONG)KeGetCurrentThread()->KernelStack;
+ HalDisplayString("Callee-Sp Return-Ra Dll Base - Name\n");
+ for (Index = 0; Index < 8; Index += 1) {
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ sprintf(Buffer,
+ " %08lX %08lX : %08lX - %s\n",
+ ContextRecord->IntSp,
+ NextPc + 4,
+ ImageBase,
+ (*UnicodeToAnsiRoutine)( (ImageBase != NULL) ? &DataTableEntry->BaseDllName : &DllName,
+ AnsiBuffer, sizeof( AnsiBuffer )));
+
+
+
+ HalDisplayString(Buffer);
+ if ((NextPc != ControlPc) || (ContextRecord->IntSp != LastStack)) {
+ ControlPc = NextPc;
+ LastStack = (ULONG)ContextRecord->IntSp;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if ((FunctionEntry != NULL) && (LastStack < StackLimit)) {
+ NextPc = RtlVirtualUnwind(ControlPc,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL);
+ } else {
+ NextPc = (ULONG)ContextRecord->IntRa;
+ }
+
+ } else {
+ break;
+ }
+ }
+ }
+
+ //
+ // Output other useful information.
+ //
+
+ sprintf(Buffer,
+ "\nIRQL : %d, DPC Active : %s\n",
+ KeGetCurrentIrql(),
+ KeIsExecutingDpc() ? "TRUE" : "FALSE");
+
+ HalDisplayString(Buffer);
+ return;
+}
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ )
+
+/*++
+
+Routine Description:
+
+ This function searches the currently active function tables for an entry
+ that corresponds to the specified PC value.
+
+Arguments:
+
+ ControlPc - Supplies the address of an instruction within the specified
+ function.
+
+Return Value:
+
+ If there is no entry in the function table for the specified PC, then
+ NULL is returned. Otherwise, the address of the function table entry
+ that corresponds to the specified PC is returned.
+
+--*/
+
+{
+
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PRUNTIME_FUNCTION FunctionTable;
+ ULONG SizeOfExceptionTable;
+ LONG High;
+ PVOID ImageBase;
+ LONG Low;
+ LONG Middle;
+ USHORT i;
+
+ //
+ // Search for the image that includes the specified PC value.
+ //
+
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ //
+ // If an image is found that includes the specified PC, then locate the
+ // function table for the image.
+ //
+
+ if (ImageBase != NULL) {
+ FunctionTable = (PRUNTIME_FUNCTION)RtlImageDirectoryEntryToData(
+ ImageBase, TRUE, IMAGE_DIRECTORY_ENTRY_EXCEPTION,
+ &SizeOfExceptionTable);
+
+ //
+ // If a function table is located, then search the function table
+ // for a function table entry for the specified PC.
+ //
+
+ if (FunctionTable != NULL) {
+
+ //
+ // Initialize search indices.
+ //
+
+ Low = 0;
+ High = (SizeOfExceptionTable / sizeof(RUNTIME_FUNCTION)) - 1;
+
+ //
+ // Perform binary search on the function table for a function table
+ // entry that subsumes the specified PC.
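+ // (The function table is sorted by ascending BeginAddress, so at most
+ // one entry can contain the specified PC; the search either finds that
+ // entry or terminates with High < Low.)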
+ //
+
+ while (High >= Low) {
+
+ //
+ // Compute next probe index and test entry. If the specified PC
+ // is greater than or equal to the beginning address and less
+ // than the ending address of the function table entry, then
+ // return the address of the function table entry. Otherwise,
+ // continue the search.
+ //
+
+ Middle = (Low + High) >> 1;
+ FunctionEntry = &FunctionTable[Middle];
+ if (ControlPc < FunctionEntry->BeginAddress) {
+ High = Middle - 1;
+
+ } else if (ControlPc >= FunctionEntry->EndAddress) {
+ Low = Middle + 1;
+
+ } else {
+ return FunctionEntry;
+ }
+ }
+ }
+ }
+
+ //
+ // A function table entry for the specified PC was not found.
+ //
+
+ return NULL;
+}
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the base of an image that contains the
+ specified PcValue. An image contains the PcValue if the PcValue is
+ greater than or equal to the ImageBase and less than the ImageBase
+ plus the size of the virtual image.
+
+Arguments:
+
+ PcValue - Supplies a PcValue.
+
+ BaseOfImage - Returns the base address for the image containing the
+ PcValue. This value must be added to any relative addresses in
+ the headers to locate portions of the image.
+
+ DataTableEntry - Supplies a pointer to a variable that receives the
+ address of the data table entry that describes the image.
+
+Return Value:
+
+ NULL - No image was found that contains the PcValue.
+
+ NON-NULL - Returns the base address of the image that contains the
+ PcValue.
+
+--*/
+
+{
+
+ PLIST_ENTRY ModuleListHead;
+ PLDR_DATA_TABLE_ENTRY Entry;
+ PLIST_ENTRY Next;
+ ULONG Bounds;
+ PVOID ReturnBase, Base;
+
+ //
+ // If the module list has been initialized, then scan the list to
+ // locate the appropriate entry.
+ //
+
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ ReturnBase = NULL;
+ Next = ModuleListHead->Flink;
+ if (Next != NULL) {
+ while (Next != ModuleListHead) {
+ Entry = CONTAINING_RECORD(Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ Next = Next->Flink;
+ Base = Entry->DllBase;
+ Bounds = (ULONG)Base + Entry->SizeOfImage;
+ if ((ULONG)PcValue >= (ULONG)Base && (ULONG)PcValue < Bounds) {
+ *DataTableEntry = Entry;
+ ReturnBase = Base;
+ break;
+ }
+ }
+ }
+
+ *BaseOfImage = ReturnBase;
+ return ReturnBase;
+}
diff --git a/private/ntos/ke/alpha/exceptn.c b/private/ntos/ke/alpha/exceptn.c
new file mode 100644
index 000000000..79ff6dda9
--- /dev/null
+++ b/private/ntos/ke/alpha/exceptn.c
@@ -0,0 +1,1088 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993, 1994 Digital Equipment Corporation
+
+Module Name:
+
+ exceptn.c
+
+Abstract:
+
+ This module implements the code necessary to dispatch exceptions to the
+ proper mode and invoke the exception dispatcher.
+
+Author:
+
+ David N. Cutler (davec) 3-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ Thomas Van Baak (tvb) 12-May-1992
+
+ Adapted for Alpha AXP.
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiMachineCheck (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ );
+
+//
+// Alpha data misalignment exception (auto alignment fixup) control.
+//
+// If KiEnableAlignmentFaultExceptions is false, then no alignment
+// exceptions are raised and all misaligned user and kernel mode data
+// references are emulated. This is consistent with NT/Alpha version
+// 3.1 behavior.
+//
+// Otherwise if KiEnableAlignmentFaultExceptions is true, then the
+// current thread automatic alignment fixup enable determines whether
+// emulation is attempted in user mode. This is consistent with NT/Mips
+// behavior.
+//
+// N.B. This default value may be reset from the Registry during init.
+//
+
+ULONG KiEnableAlignmentFaultExceptions = FALSE;
+
+//
+// In addition, for NT 3.1 to 3.5 transition, the following controls enable
+// kernel and user mode auto alignment for quadword loads and stores and
+// are relevant only when KiEnableAlignmentFaultExceptions is true.
+//
+// N.B. These default values may be reset from the Registry during init.
+//
+
+ULONG KiForceQuadwordFixupsKernel = FALSE;
+ULONG KiForceQuadwordFixupsUser = FALSE;
+
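+//
+// In summary (as implemented by KiDispatchException below, on the first
+// chance): when KiEnableAlignmentFaultExceptions is FALSE every misaligned
+// reference is emulated; when it is TRUE, references are emulated if the
+// current thread or its process has automatic alignment fixup enabled;
+// otherwise only quadword references are emulated, and only when the
+// KiForceQuadwordFixups variable for the faulting mode is set.
+//
+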
+//
+// Alpha byte/word emulation exception control.
+//
+// If KiEnableByteWordInstructionEmulation is false, then an 'illegal
+// instruction' exception is raised when execution of a byte or word
+// instruction is attempted.
+//
+// If KiEnableByteWordInstructionEmulation is true, then the byte or
+// word instruction will be emulated.
+//
+// N.B. This default value may be reset from the Registry during init.
+//
+
+ULONG KiEnableByteWordInstructionEmulation = FALSE;
+
+#if DBG
+
+//
+// Alignment fixups are counted by mode and reported at intervals.
+//
+// N.B. Set masks to 0 to see every exception (set to 0x7 to see every
+// 8th, etc.).
+//
+
+ULONG KiKernelFixupCount = 0;
+ULONG KiKernelFixupMask = 0x7f;
+
+ULONG KiUserFixupCount = 0;
+ULONG KiUserFixupMask = 0x3ff;
+
+#endif
+
+VOID
+KeContextFromKframes (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PCONTEXT ContextFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine moves the selected contents of the specified trap and exception
+ frames into the specified context frame according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame from which context
+ should be copied into the context record.
+
+ ContextFrame - Supplies a pointer to the context frame that receives the
+ context copied from the trap and exception frames.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set integer register gp, ra, sp, FIR, and PSR from trap frame.
+ //
+
+ ContextFrame->IntGp = TrapFrame->IntGp;
+ ContextFrame->IntSp = TrapFrame->IntSp;
+ ContextFrame->IntRa = TrapFrame->IntRa;
+ ContextFrame->Fir = TrapFrame->Fir;
+ ContextFrame->Psr = TrapFrame->Psr;
+ }
+
+ //
+ // Set integer register contents if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Set volatile integer registers v0 and t0 - t7 from trap frame.
+ //
+
+ ContextFrame->IntV0 = TrapFrame->IntV0;
+ ContextFrame->IntT0 = TrapFrame->IntT0;
+ ContextFrame->IntT1 = TrapFrame->IntT1;
+ ContextFrame->IntT2 = TrapFrame->IntT2;
+ ContextFrame->IntT3 = TrapFrame->IntT3;
+ ContextFrame->IntT4 = TrapFrame->IntT4;
+ ContextFrame->IntT5 = TrapFrame->IntT5;
+ ContextFrame->IntT6 = TrapFrame->IntT6;
+ ContextFrame->IntT7 = TrapFrame->IntT7;
+
+ //
+ // Set nonvolatile integer registers s0 - s5 from exception frame.
+ //
+
+ ContextFrame->IntS0 = ExceptionFrame->IntS0;
+ ContextFrame->IntS1 = ExceptionFrame->IntS1;
+ ContextFrame->IntS2 = ExceptionFrame->IntS2;
+ ContextFrame->IntS3 = ExceptionFrame->IntS3;
+ ContextFrame->IntS4 = ExceptionFrame->IntS4;
+ ContextFrame->IntS5 = ExceptionFrame->IntS5;
+
+ //
+ // Set volatile integer registers a0 - a5, and t8 - t11 from trap
+ // frame.
+ //
+
+ ContextFrame->IntA0 = TrapFrame->IntA0;
+ ContextFrame->IntA1 = TrapFrame->IntA1;
+ ContextFrame->IntA2 = TrapFrame->IntA2;
+ ContextFrame->IntA3 = TrapFrame->IntA3;
+ ContextFrame->IntA4 = TrapFrame->IntA4;
+ ContextFrame->IntA5 = TrapFrame->IntA5;
+
+ ContextFrame->IntT8 = TrapFrame->IntT8;
+ ContextFrame->IntT9 = TrapFrame->IntT9;
+ ContextFrame->IntT10 = TrapFrame->IntT10;
+ ContextFrame->IntT11 = TrapFrame->IntT11;
+
+ //
+ // Set volatile integer registers fp, t12 and at from trap frame.
+ // Set integer register zero.
+ //
+
+ ContextFrame->IntFp = TrapFrame->IntFp;
+ ContextFrame->IntT12 = TrapFrame->IntT12;
+ ContextFrame->IntAt = TrapFrame->IntAt;
+ ContextFrame->IntZero = 0;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Set volatile floating registers f0 - f1 from trap frame.
+ // Set volatile floating registers f10 - f30 from trap frame.
+ // Set floating zero register f31 to 0.
+ //
+
+ ContextFrame->FltF0 = TrapFrame->FltF0;
+ ContextFrame->FltF1 = TrapFrame->FltF1;
+ RtlMoveMemory(&ContextFrame->FltF10, &TrapFrame->FltF10,
+ sizeof(ULONGLONG) * 21);
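+ // (This block copy assumes that f10 through f30 are 21 consecutive
+ // quadword fields in both the trap frame and the context frame.)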
+ ContextFrame->FltF31 = 0;
+
+ //
+ // Set nonvolatile floating registers f2 - f9 from exception frame.
+ //
+
+ ContextFrame->FltF2 = ExceptionFrame->FltF2;
+ ContextFrame->FltF3 = ExceptionFrame->FltF3;
+ ContextFrame->FltF4 = ExceptionFrame->FltF4;
+ ContextFrame->FltF5 = ExceptionFrame->FltF5;
+ ContextFrame->FltF6 = ExceptionFrame->FltF6;
+ ContextFrame->FltF7 = ExceptionFrame->FltF7;
+ ContextFrame->FltF8 = ExceptionFrame->FltF8;
+ ContextFrame->FltF9 = ExceptionFrame->FltF9;
+
+ //
+ // Set floating point control register from trap frame.
+ // Clear software floating point control register in context frame
+ // (if necessary, it can be set to the proper value by the caller).
+ //
+
+ ContextFrame->Fpcr = TrapFrame->Fpcr;
+ ContextFrame->SoftFpcr = 0;
+ }
+
+ return;
+}
+
+VOID
+KeContextToKframes (
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN PCONTEXT ContextFrame,
+ IN ULONG ContextFlags,
+ IN KPROCESSOR_MODE PreviousMode
+ )
+
+/*++
+
+Routine Description:
+
+ This routine moves the selected contents of the specified context frame into
+ the specified trap and exception frames according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame that receives the volatile
+ context from the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame that receives
+ the nonvolatile context from the context record.
+
+ ContextFrame - Supplies a pointer to a context frame that contains the
+ context that is to be copied into the trap and exception frames.
+
+ ContextFlags - Supplies the set of flags that specify which parts of the
+ context frame are to be copied into the trap and exception frames.
+
+ PreviousMode - Supplies the processor mode for which the trap and exception
+ frames are being built.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set integer register gp, sp, ra, FIR, and PSR in trap frame.
+ //
+
+ TrapFrame->IntGp = ContextFrame->IntGp;
+ TrapFrame->IntSp = ContextFrame->IntSp;
+ TrapFrame->IntRa = ContextFrame->IntRa;
+ TrapFrame->Fir = ContextFrame->Fir;
+ TrapFrame->Psr = SANITIZE_PSR(ContextFrame->Psr, PreviousMode);
+ }
+
+ //
+ // Set integer register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Set volatile integer registers v0 and t0 - t7 in trap frame.
+ //
+
+ TrapFrame->IntV0 = ContextFrame->IntV0;
+ TrapFrame->IntT0 = ContextFrame->IntT0;
+ TrapFrame->IntT1 = ContextFrame->IntT1;
+ TrapFrame->IntT2 = ContextFrame->IntT2;
+ TrapFrame->IntT3 = ContextFrame->IntT3;
+ TrapFrame->IntT4 = ContextFrame->IntT4;
+ TrapFrame->IntT5 = ContextFrame->IntT5;
+ TrapFrame->IntT6 = ContextFrame->IntT6;
+ TrapFrame->IntT7 = ContextFrame->IntT7;
+
+ //
+ // Set nonvolatile integer registers s0 - s5 in exception frame.
+ //
+
+ ExceptionFrame->IntS0 = ContextFrame->IntS0;
+ ExceptionFrame->IntS1 = ContextFrame->IntS1;
+ ExceptionFrame->IntS2 = ContextFrame->IntS2;
+ ExceptionFrame->IntS3 = ContextFrame->IntS3;
+ ExceptionFrame->IntS4 = ContextFrame->IntS4;
+ ExceptionFrame->IntS5 = ContextFrame->IntS5;
+
+ //
+ // Set volatile integer registers a0 - a5, and t8 - t11 in trap frame.
+ //
+
+ TrapFrame->IntA0 = ContextFrame->IntA0;
+ TrapFrame->IntA1 = ContextFrame->IntA1;
+ TrapFrame->IntA2 = ContextFrame->IntA2;
+ TrapFrame->IntA3 = ContextFrame->IntA3;
+ TrapFrame->IntA4 = ContextFrame->IntA4;
+ TrapFrame->IntA5 = ContextFrame->IntA5;
+
+ TrapFrame->IntT8 = ContextFrame->IntT8;
+ TrapFrame->IntT9 = ContextFrame->IntT9;
+ TrapFrame->IntT10 = ContextFrame->IntT10;
+ TrapFrame->IntT11 = ContextFrame->IntT11;
+
+ //
+ // Set volatile integer registers fp, t12 and at in trap frame.
+ //
+
+ TrapFrame->IntFp = ContextFrame->IntFp;
+ TrapFrame->IntT12 = ContextFrame->IntT12;
+ TrapFrame->IntAt = ContextFrame->IntAt;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Set volatile floating registers f0 - f1 in trap frame.
+ // Set volatile floating registers f10 - f30 in trap frame.
+ //
+
+ TrapFrame->FltF0 = ContextFrame->FltF0;
+ TrapFrame->FltF1 = ContextFrame->FltF1;
+ RtlMoveMemory(&TrapFrame->FltF10, &ContextFrame->FltF10,
+ sizeof(ULONGLONG) * 21);
+
+ //
+ // Set nonvolatile floating registers f2 - f9 in exception frame.
+ //
+
+ ExceptionFrame->FltF2 = ContextFrame->FltF2;
+ ExceptionFrame->FltF3 = ContextFrame->FltF3;
+ ExceptionFrame->FltF4 = ContextFrame->FltF4;
+ ExceptionFrame->FltF5 = ContextFrame->FltF5;
+ ExceptionFrame->FltF6 = ContextFrame->FltF6;
+ ExceptionFrame->FltF7 = ContextFrame->FltF7;
+ ExceptionFrame->FltF8 = ContextFrame->FltF8;
+ ExceptionFrame->FltF9 = ContextFrame->FltF9;
+
+ //
+ // Set floating point control register in trap frame.
+ //
+
+ TrapFrame->Fpcr = ContextFrame->Fpcr;
+ }
+
+ return;
+}
+
+VOID
+KiDispatchException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN BOOLEAN FirstChance
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to dispatch an exception to the proper mode and
+ to cause the exception dispatcher to be called.
+
+ If the exception is a data misalignment, the previous mode is user, this
+ is the first chance for handling the exception, and the current thread
+ has enabled automatic alignment fixup, then an attempt is made to emulate
+ the unaligned reference. Data misalignment exceptions are never emulated
+ for kernel mode.
+
+ If the exception is a floating not implemented exception, then an attempt
+ is made to emulate the floating operation. If the exception is an
+ arithmetic exception, then an attempt is made to convert the imprecise
+ exception into a precise exception, and then emulate the floating
+ operation in order to obtain the proper IEEE results and exceptions.
+ Floating exceptions are never emulated for kernel mode.
+
+ If the exception is neither a data misalignment nor a floating point
+ exception and the previous mode is kernel, then the exception
+ dispatcher is called directly to process the exception. Otherwise the
+ exception record, exception frame, and trap frame contents are copied
+ to the user mode stack. The contents of the exception frame and trap
+ are then modified such that when control is returned, execution will
+ commence in user mode in a routine which will call the exception
+ dispatcher.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ PreviousMode - Supplies the previous processor mode.
+
+ FirstChance - Supplies a boolean variable that specifies whether this
+ is the first (TRUE) or second (FALSE) time that this exception has
+ been processed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextFrame;
+ EXCEPTION_RECORD ExceptionRecord1;
+ PEXC_SUM ExceptionSummary;
+ LONG Length;
+ ULONG SoftFpcr;
+ ULONGLONG UserStack1;
+ ULONGLONG UserStack2;
+
+ //
+ // If the exception is an illegal instruction exception, then check for
+ // a byte/word instruction that should be emulated.
+ //
+ // N.B. The exception code STATUS_ILLEGAL_INSTRUCTION may be converted
+ // into STATUS_DATATYPE_MISALIGNMENT in the case of unaligned word
+ // access.
+ //
+
+ if (ExceptionRecord->ExceptionCode == STATUS_ILLEGAL_INSTRUCTION) {
+ if (KiEnableByteWordInstructionEmulation == TRUE) {
+ if (KiEmulateByteWord(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame) != FALSE) {
+ KeGetCurrentPrcb()->KeByteWordEmulationCount += 1;
+ goto Handled2;
+ }
+ }
+ }
+
+ //
+ // If the exception is a data misalignment, the previous mode was user,
+ // this is the first chance for handling the exception, and the current
+ // thread has enabled automatic alignment fixup, then attempt to emulate
+ // the unaligned reference.
+ //
+
+ if ((ExceptionRecord->ExceptionCode == STATUS_DATATYPE_MISALIGNMENT) &&
+ (FirstChance != FALSE)) {
+
+#if DBG
+
+ //
+ // Count alignment faults by mode and display them at intervals.
+ //
+
+ if (PreviousMode == KernelMode) {
+ KiKernelFixupCount += 1;
+ if ((KiKernelFixupCount & KiKernelFixupMask) == 0) {
+ DbgPrint("KI: Kernel Fixup: Pid=0x%.3lx, Pc=%.8lx, Address=%.8lx ... Total=%ld\n",
+ PsGetCurrentProcess()->UniqueProcessId,
+ ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[2],
+ KiKernelFixupCount);
+ }
+
+ } else {
+ KiUserFixupCount += 1;
+ if ((KiUserFixupCount & KiUserFixupMask) == 0) {
+ DbgPrint("KI: User Fixup: Pid=0x%.3lx, Pc=%.8lx, Address=%.8lx ... Total=%ld\n",
+ PsGetCurrentProcess()->UniqueProcessId,
+ ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[2],
+ KiUserFixupCount);
+ }
+ }
+
+#endif
+
+ //
+ // If alignment fault exceptions are not enabled, then no exception
+ // should be raised and the data reference should be emulated.
+ //
+
+ if (KiEnableAlignmentFaultExceptions == FALSE) {
+ if (KiEmulateReference(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ FALSE) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+
+ } else if ((KeGetCurrentThread()->AutoAlignment != FALSE) ||
+ (KeGetCurrentThread()->ApcState.Process->AutoAlignment != FALSE)) {
+ //
+ // The current thread has enabled automatic alignment fixup. Attempt to
+ // emulate both user and kernel references.
+ //
+ if (KiEmulateReference(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ FALSE) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+ } else if (PreviousMode == KernelMode) {
+
+ //
+ // Kernel mode.
+ //
+ // If kernel quadword fixups are enabled, then only quadword data
+ // references should be emulated. Otherwise, all kernel mode
+ // alignment faults raise an exception.
+ //
+ if (KiForceQuadwordFixupsKernel != FALSE) {
+ if (KiEmulateReference(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ TRUE) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+ }
+
+ } else {
+
+ //
+ // User mode.
+ //
+ // If user quadword fixups are enabled, then only quadword data
+ // references should be emulated. Otherwise, all user mode
+ // alignment faults raise an exception.
+ //
+
+ if (KiForceQuadwordFixupsUser != FALSE) {
+ if (KiEmulateReference(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ TRUE) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+ }
+ }
+ }
+
+ //
+ // If the exception is a data bus error then a machine check has
+ // been trapped by the PALcode. The error will be forwarded to the
+ // HAL eventually for logging or handling. If the handler returns
+ // it is assumed that the HAL successfully handled the error and
+ // execution may resume.
+ //
+ // N.B. A special exception code is used to signal a data bus error.
+ // This code is equivalent to the bug check code merged with a
+ // reserved facility code and the reserved bit set.
+ //
+
+ if (ExceptionRecord->ExceptionCode == (DATA_BUS_ERROR | 0xdfff0000)) {
+ KiMachineCheck(ExceptionRecord, ExceptionFrame, TrapFrame);
+ goto Handled2;
+ }
+
+ //
+ // Initialize the copy of the software FPCR. The proper value is set
+ // if a floating emulation operation is performed. Case on arithmetic
+ // exception codes that require special handling by the kernel.
+ //
+
+ SoftFpcr = 0;
+ switch (ExceptionRecord->ExceptionCode) {
+
+ //
+ // If the exception is a gentrap, then attempt to translate the
+ // Alpha specific gentrap value to a status code value. This
+ // exception is a precise trap.
+ //
+ // N.B. STATUS_ALPHA_GENTRAP is a pseudo status code generated by
+ // PALcode when a callpal gentrap is executed. The status is
+ // visible in user mode only when the gentrap code value is
+ // unrecognized.
+ //
+
+ case STATUS_ALPHA_GENTRAP :
+ switch (ExceptionRecord->ExceptionInformation[0]) {
+ case GENTRAP_INTEGER_OVERFLOW :
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_OVERFLOW;
+ break;
+
+ case GENTRAP_INTEGER_DIVIDE_BY_ZERO :
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_DIVIDE_BY_ZERO;
+ break;
+
+ case GENTRAP_FLOATING_OVERFLOW :
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+ break;
+
+ case GENTRAP_FLOATING_DIVIDE_BY_ZERO :
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ break;
+
+ case GENTRAP_FLOATING_UNDERFLOW :
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+ break;
+
+ case GENTRAP_FLOATING_INVALID_OPERAND :
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ break;
+
+ case GENTRAP_FLOATING_INEXACT_RESULT :
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ break;
+ }
+ break;
+
+ //
+ // If the exception is an unimplemented floating operation, then
+ // PALcode has detected a subsetted floating point operation. These
+ // include attempts to use round to plus or minus infinity rounding
+ // modes on EV4. This exception is a fault.
+ //
+ // If the previous mode was user, an attempt is made to emulate the
+ // operation. If the emulation is successful, the continuation
+ // address is incremented to the next instruction.
+ //
+ // N.B. STATUS_ALPHA_FLOATING_NOT_IMPLEMENTED is a pseudo status code
+ // generated by PALcode. The status is never visible outside of
+ // this handler because the floating emulation routine converts
+ // the status code to the proper floating status value.
+ //
+
+ case STATUS_ALPHA_FLOATING_NOT_IMPLEMENTED :
+ if (PreviousMode != KernelMode) {
+ if (KiFloatingException(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ FALSE,
+ &SoftFpcr) != FALSE) {
+ TrapFrame->Fir += 4;
+ goto Handled2;
+ }
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_ILLEGAL_INSTRUCTION;
+ }
+
+ break;
+
+ //
+ // If the exception is an arithmetic exception, then one or more
+ // integer overflow or floating point traps have occurred. This
+ // exception is an imprecise (asynchronous) trap.
+ //
+ // If the previous mode was user, an attempt is made to locate the
+ // original trapping instruction and emulate the instruction.
+ //
+ // N.B. STATUS_ALPHA_ARITHMETIC_EXCEPTION is a pseudo status code
+ // generated by PALcode. The status is never visible outside of
+ // this handler because the floating emulation routine converts
+ // the status code to the proper floating status value.
+ //
+
+ case STATUS_ALPHA_ARITHMETIC_EXCEPTION :
+ if (PreviousMode != KernelMode) {
+ if (KiFloatingException(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ TRUE,
+ &SoftFpcr) != FALSE) {
+ goto Handled2;
+ }
+
+ } else {
+ KiSetFloatingStatus(ExceptionRecord);
+ }
+ break;
+ }
+
+ //
+ // Move machine state from trap and exception frames to a context frame,
+ // and increment the number of exceptions dispatched.
+ //
+ // Explicitly set the value of the software FPCR in the context frame
+ // (because it is not a hardware register and thus not present in the
+ // trap or exception frames).
+ //
+
+ ContextFrame.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextFrame);
+ KeGetCurrentPrcb()->KeExceptionDispatchCount += 1;
+ ContextFrame.SoftFpcr = (ULONGLONG)SoftFpcr;
+
+ //
+ // Select the method of handling the exception based on the previous mode.
+ //
+
+ if (PreviousMode == KernelMode) {
+
+ //
+ // If the kernel debugger is active, the exception is a breakpoint,
+ // the breakpoint is handled by the kernel debugger, and this is the
+ // first chance, then give the kernel debugger a chance to handle
+ // the exception.
+ //
+
+ if ((FirstChance != FALSE) && (KiDebugRoutine != NULL) &&
+ (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ KernelMode) != FALSE) &&
+
+ (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ KernelMode,
+ FALSE)) != FALSE)) {
+
+ goto Handled1;
+ }
+
+
+ //
+ // Previous mode was kernel.
+ //
+ // If this is the first chance, then attempt to dispatch the exception
+ // to a frame based handler. If the exception is handled, then continue
+ // execution.
+ //
+ // If this is the second chance or the exception is not handled,
+ // then if the kernel debugger is active, then give the kernel
+ // debugger a second chance to handle the exception. If the kernel
+ // debugger handles the exception, then continue execution. Otherwise
+ // bug check.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (RtlDispatchException(ExceptionRecord, &ContextFrame) != FALSE) {
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ TRUE)) != FALSE)) {
+
+ goto Handled1;
+ }
+
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+
+ } else {
+
+ //
+ // If the kernel debugger is active, the exception is a breakpoint,
+ // the breakpoint is handled by the kernel debugger, and this is the
+ // first chance, then give the kernel debugger a chance to handle
+ // the exception.
+ //
+
+ if ((FirstChance != FALSE) &&
+ (KiDebugRoutine != NULL) &&
+ (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ UserMode) != FALSE) &&
+
+ ((PsGetCurrentProcess()->DebugPort == NULL) ||
+ ((PsGetCurrentProcess()->DebugPort != NULL) &&
+ (ExceptionRecord->ExceptionInformation[0] !=
+ DEBUG_STOP_BREAKPOINT)))) {
+
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ UserMode,
+ FALSE)) != FALSE) {
+
+ goto Handled1;
+ }
+ }
+
+ //
+ // Previous mode was user.
+ //
+ // If this is the first chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Otherwise
+ // transfer the exception information to the user stack, transition to
+ // user mode, and attempt to dispatch the exception to a frame based
+ // handler. If a frame based handler handles the exception, then continue
+ // execution. Otherwise, execute the raise exception system service
+ // which will call this routine a second time to process the exception.
+ //
+ // If this is the second chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Otherwise
+ // if the current process has a subsystem port, then send a message to
+ // the subsystem port and wait for a reply. If the subsystem handles the
+ // exception, then continue execution. Otherwise terminate the thread.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (DbgkForwardException(ExceptionRecord, TRUE, FALSE)) {
+ goto Handled2;
+ }
+
+ //
+ // Transfer exception information to the user stack, transition
+ // to user mode, and attempt to dispatch the exception to a frame
+ // based handler.
+ //
+
+ repeat:
+ try {
+
+ //
+ // Compute length of exception record and new aligned stack
+ // address.
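+ // (For example, with an illustrative record size of 84 bytes,
+ // (84 + 15) & ~15 == 96, so the length is rounded up to a multiple of
+ // 16 and the new stack address stays 16-byte aligned.)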
+ //
+
+ Length = (sizeof(EXCEPTION_RECORD) + 15) & (~15);
+ UserStack1 = (ContextFrame.IntSp & (~15)) - Length;
+
+ //
+ // Probe user stack area for writability and then transfer the
+ // exception record to the user stack area.
+ //
+
+ ProbeForWrite((PCHAR)UserStack1, Length, sizeof(QUAD));
+ RtlMoveMemory((PVOID)UserStack1, ExceptionRecord, Length);
+
+ //
+ // Compute length of context record and new aligned user stack
+ // pointer.
+ //
+
+ Length = (sizeof(CONTEXT) + 15) & (~15);
+ UserStack2 = UserStack1 - Length;
+
+ //
+ // Probe user stack area for writability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack2, Length, sizeof(QUAD));
+ RtlMoveMemory((PVOID)UserStack2, &ContextFrame, sizeof(CONTEXT));
+
+ //
+ // Set the address of the exception record, the context record, and
+ // the new stack pointer in the current trap frame.
+ //
+
+ TrapFrame->IntSp = UserStack2;
+ TrapFrame->IntFp = UserStack2;
+ ExceptionFrame->IntS0 = UserStack1;
+ ExceptionFrame->IntS1 = UserStack2;
+
+ //
+ // Set the address of the exception routine that will call the
+ // exception dispatcher and then return to the trap handler.
+ // The trap handler will restore the exception and trap frame
+ // context and continue execution in the routine that will
+ // call the exception dispatcher.
+ //
+
+ TrapFrame->Fir = (ULONGLONG)(LONG)KeUserExceptionDispatcher;
+ return;
+
+ //
+ // If an exception occurs, then copy the new exception information
+ // to an exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord1,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // If the exception is a stack overflow, then attempt
+ // to raise the stack overflow exception. Otherwise,
+ // the user's stack is not accessible, or is misaligned,
+ // and second chance processing is performed.
+ //
+
+ if (ExceptionRecord1.ExceptionCode == STATUS_STACK_OVERFLOW) {
+ ExceptionRecord1.ExceptionAddress = ExceptionRecord->ExceptionAddress;
+ RtlMoveMemory((PVOID)ExceptionRecord,
+ &ExceptionRecord1, sizeof(EXCEPTION_RECORD));
+ goto repeat;
+ }
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ if (DbgkForwardException(ExceptionRecord, TRUE, TRUE)) {
+ goto Handled2;
+
+ } else if (DbgkForwardException(ExceptionRecord, FALSE, TRUE)) {
+ goto Handled2;
+
+ } else {
+ ZwTerminateProcess(NtCurrentProcess(), ExceptionRecord->ExceptionCode);
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+
+ }
+ }
+
+ //
+ // Move machine state from context frame to trap and exception frames and
+ // then return to continue execution with the restored state.
+ //
+
+Handled1:
+ KeContextToKframes(TrapFrame, ExceptionFrame, &ContextFrame,
+ ContextFrame.ContextFlags, PreviousMode);
+
+ //
+ // Exception was handled by the debugger or the associated subsystem
+ // and state was modified, if necessary, using the get state and set
+ // state capabilities. Therefore the context frame does not need to
+ // be transferred to the trap and exception frames.
+ //
+
+Handled2:
+ return;
+}
+
+ULONG
+KiCopyInformation (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord1,
+ IN PEXCEPTION_RECORD ExceptionRecord2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called from an exception filter to copy the exception
+ information from one exception record to another when an exception occurs.
+
+Arguments:
+
+ ExceptionRecord1 - Supplies a pointer to the destination exception record.
+
+ ExceptionRecord2 - Supplies a pointer to the source exception record.
+
+Return Value:
+
+ A value of EXCEPTION_EXECUTE_HANDLER is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Copy one exception record to another and return value that causes
+ // an exception handler to be executed.
+ //
+
+ RtlMoveMemory((PVOID)ExceptionRecord1,
+ (PVOID)ExceptionRecord2,
+ sizeof(EXCEPTION_RECORD));
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+
+NTSTATUS
+KeRaiseUserException(
+ IN NTSTATUS ExceptionCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function causes an exception to be raised in the calling thread's user-mode
+ context. It does this by editing the trap frame the kernel was entered with to
+ point to trampoline code that raises the requested exception.
+
+Arguments:
+
+ ExceptionCode - Supplies the status value to be used as the exception
+ code for the exception that is to be raised.
+
+Return Value:
+
+ The status value that should be returned by the caller.
+
+--*/
+
+{
+ PKTRAP_FRAME TrapFrame;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+
+ TrapFrame->Fir = (ULONGLONG)(LONG)KeRaiseUserExceptionDispatcher;
+ return(ExceptionCode);
+}
diff --git a/private/ntos/ke/alpha/floatem.c b/private/ntos/ke/alpha/floatem.c
new file mode 100644
index 000000000..685966db7
--- /dev/null
+++ b/private/ntos/ke/alpha/floatem.c
@@ -0,0 +1,4183 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+Copyright (c) 1993 Digital Equipment Corporation
+
+Module Name:
+
+ floatem.c
+
+Abstract:
+
+ This module implements a software emulation of the IEEE single and
+ double floating operations. It is required on Alpha processors since
+ the hardware does not fully support all of the operations required
+ by the IEEE standard. In particular, infinities and NaNs are not
+ handled by the hardware, but rather cause an exception. On receipt
+ of the exception, a software emulation of the floating operation
+ is performed to determine the real result of the operation and if
+ an exception will actually be raised.
+
+ This code is also used to perform all floating operations on EV4
+ processors when plus or minus infinity rounding is used.
+
+ Since floating exceptions are rather rare events, this routine is
+    written in C. Should a higher performance implementation be required,
+    the algorithms contained herein can be used to guide a higher
+    performance assembly language implementation.
+
+ N.B. This routine does not emulate floating loads, floating stores,
+ control to/from floating, or move to/from floating instructions.
+ These instructions never require emulation.
+
+ Floating point operations are carried out by unpacking the operands,
+ normalizing denormalized numbers, checking for NaNs, interpreting
+ infinities, and computing results.
+
+ Floating operands are converted to a format that has a value with the
+ appropriate number of leading zeros, an overflow bit, the mantissa, a
+ guard bit, a round bit, and a set of sticky bits. The unpacked mantissa
+ includes the hidden bit.
+
+ The overflow bit is needed for addition and is also used for multiply.
+ The mantissa is 24-bits for single operations and 53-bits for double
+ operations. The guard bit and round bit are used to hold precise values
+ for normalization and rounding.
+
+ If the result of an operation is normalized, then the guard bit becomes
+ the round bit and the round bit is accumulated with the sticky bits. If
+ the result of an operation needs to be shifted left one bit for purposes
+ of normalization, then the guard bit becomes part of the mantissa and the
+ round bit is used for rounding.
+
+ The round bit plus the sticky bits are used to determine how rounding is
+ performed.
+
+Author:
+
+ David N. Cutler (davec) 16-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ Thomas Van Baak (tvb) 12-Sep-1992
+
+ Adapted for Alpha AXP.
+
+ Nigel Haslock (haslock) 20-Apr-1995
+
+ Adjustments for additional EV4.5 and EV5 functionality
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#include "alphaops.h"
+
+#if DBG
+
+extern ULONG RtlDebugFlags;
+#define DBGPRINT ((RtlDebugFlags & 0x4) != 0) && DbgPrint
+#define DBGPRINT2 ((RtlDebugFlags & 0x8) != 0) && DbgPrint
+
+#else
+
+#define DBGPRINT 0 && DbgPrint
+#define DBGPRINT2 0 && DbgPrint
+
+#endif
+
+#define LOW_PART(Quad) ((ULONG)(Quad))
+#define HIGH_PART(Quad) ((ULONG)((Quad) >> 32))
+#define MAKE_QUAD(Low, High) (((ULONGLONG)(High)) << 32 | ((ULONGLONG)(Low)))
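+
+//
+// For example, MAKE_QUAD(0x89abcdef, 0x01234567) forms the quadword
+// 0x0123456789abcdef, and LOW_PART and HIGH_PART recover 0x89abcdef
+// and 0x01234567, respectively, from that quadword.
+//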
+
+//
+// The hardware recognizes the new CVTST instruction by the kludged
+// opcode function 16.2ac instead of the proper 16.00e (per ECO #46).
+//
+
+#define CVTST_FUNC_PROPER 0x00E
+
+//
+// Define unpacked format NaN mask values and boolean macros.
+//
+// N.B. The NaN bit is set for a quiet NaN and reset for a signaling NaN.
+//      This is the same as Intel, Sun, and IBM, and the opposite of
+//      Mips and HP.
+//
+
+#define DOUBLE_NAN_BIT_HIGH (1 << (53 - 32))
+#define SINGLE_NAN_BIT (1 << 24)
+
+#define DoubleSignalNan(DoubleOperand) \
+ (((DoubleOperand)->Nan != FALSE) && \
+ (((DoubleOperand)->MantissaHigh & DOUBLE_NAN_BIT_HIGH) == 0))
+
+#define DoubleQuietNan(DoubleOperand) \
+ (((DoubleOperand)->Nan != FALSE) && \
+ (((DoubleOperand)->MantissaHigh & DOUBLE_NAN_BIT_HIGH) != 0))
+
+#define SingleSignalNan(SingleOperand) \
+ (((SingleOperand)->Nan != FALSE) && \
+ (((SingleOperand)->Mantissa & SINGLE_NAN_BIT) == 0))
+
+#define SingleQuietNan(SingleOperand) \
+ (((SingleOperand)->Nan != FALSE) && \
+ (((SingleOperand)->Mantissa & SINGLE_NAN_BIT) != 0))
+
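+//
+// For example, an unpacked single operand whose Nan flag is set is a
+// quiet NaN when bit 24 of its mantissa is set and a signaling NaN when
+// that bit is clear; for a double operand the distinguishing bit is bit
+// 53 of the mantissa (bit 21 of MantissaHigh).
+//
+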
+//
+// Define context block structure.
+//
+
+typedef struct _FP_CONTEXT_BLOCK {
+ ULONG Fc;
+ PEXCEPTION_RECORD ExceptionRecord;
+ PKEXCEPTION_FRAME ExceptionFrame;
+ PKTRAP_FRAME TrapFrame;
+ PSW_FPCR SoftwareFpcr;
+ ULONG Round;
+ BOOLEAN IeeeMode;
+ BOOLEAN UnderflowEnable;
+} FP_CONTEXT_BLOCK, *PFP_CONTEXT_BLOCK;
+
+//
+// Define single and double operand value structures.
+//
+
+typedef struct _FP_DOUBLE_OPERAND {
+ LONG MantissaHigh;
+ ULONG MantissaLow;
+ LONGLONG Mantissa; // ## Not fully used yet
+ LONG Exponent;
+ LONG Sign;
+ BOOLEAN Infinity;
+ BOOLEAN Nan;
+ BOOLEAN Normal;
+} FP_DOUBLE_OPERAND, *PFP_DOUBLE_OPERAND;
+
+typedef struct _FP_SINGLE_OPERAND {
+ LONG Mantissa;
+ LONG Exponent;
+ LONG Sign;
+ BOOLEAN Infinity;
+ BOOLEAN Nan;
+ BOOLEAN Normal;
+} FP_SINGLE_OPERAND, *PFP_SINGLE_OPERAND;
+
+//
+// Define single and double IEEE floating point memory formats.
+//
+
+typedef struct _DOUBLE_FORMAT {
+ ULONGLONG Mantissa : 52;
+ ULONGLONG Exponent : 11;
+ ULONGLONG Sign : 1;
+} DOUBLE_FORMAT, *PDOUBLE_FORMAT;
+
+typedef struct _SINGLE_FORMAT {
+ ULONG Mantissa : 23;
+ ULONG Exponent : 8;
+ ULONG Sign : 1;
+} SINGLE_FORMAT, *PSINGLE_FORMAT;
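+
+//
+// For example, the single value 1.0f is stored in memory as 0x3f800000
+// (Sign = 0, Exponent = 127, Mantissa = 0), and the double value 1.0 is
+// stored as 0x3ff0000000000000 (Sign = 0, Exponent = 1023, Mantissa = 0).
+//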
+
+//
+// Define forward referenced function prototypes.
+//
+
+ULONGLONG
+KiConvertSingleOperandToRegister (
+ IN ULONG SingleValue
+ );
+
+ULONG
+KiConvertRegisterToSingleOperand (
+ IN ULONGLONG DoubleValue
+ );
+
+BOOLEAN
+KiConvertQuadwordToLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN LONGLONG Quadword
+ );
+
+BOOLEAN
+KiDivideByZeroDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiDivideByZeroSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+PFP_IEEE_VALUE
+KiInitializeIeeeValue (
+ IN PEXCEPTION_RECORD ExceptionRecord
+ );
+
+BOOLEAN
+KiInvalidCompareDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForSignalNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiInvalidOperationDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForSignalNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiInvalidOperationQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN ULONGLONG ResultValue
+ );
+
+BOOLEAN
+KiInvalidOperationSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForSignalNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+BOOLEAN
+KiNormalizeDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand,
+ IN ULONGLONG StickyBits
+ );
+
+BOOLEAN
+KiNormalizeQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ );
+
+BOOLEAN
+KiNormalizeSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ );
+
+VOID
+KiUnpackDouble (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_DOUBLE_OPERAND DoubleOperand
+ );
+
+VOID
+KiUnpackSingle (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_SINGLE_OPERAND SingleOperand
+ );
+
+BOOLEAN
+KiEmulateFloating (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN OUT PSW_FPCR SoftwareFpcr
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate a floating operation and convert the
+ exception status to the proper value. If the exception is an unimplemented
+ operation, then the operation is emulated. Otherwise, the status code is
+ just converted to its proper value.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ SoftwareFpcr - Supplies a pointer to a variable that contains a copy of
+ the software FPCR.
+
+Return Value:
+
+ A value of TRUE is returned if the floating exception is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULARGE_INTEGER AhighBhigh;
+ ULARGE_INTEGER AhighBlow;
+ ULARGE_INTEGER AlowBhigh;
+ ULARGE_INTEGER AlowBlow;
+ ULONG Carry1;
+ ULONG Carry2;
+ BOOLEAN CompareEqual;
+ BOOLEAN CompareLess;
+ BOOLEAN CompareResult;
+ FP_CONTEXT_BLOCK ContextBlock;
+ LARGE_INTEGER DoubleDividend;
+ LARGE_INTEGER DoubleDivisor;
+ ULONG DoubleMantissaLow;
+ LONG DoubleMantissaHigh;
+ FP_DOUBLE_OPERAND DoubleOperand1;
+ FP_DOUBLE_OPERAND DoubleOperand2;
+ FP_DOUBLE_OPERAND DoubleOperand3;
+ LARGE_INTEGER DoubleQuotient;
+ PVOID ExceptionAddress;
+ ULONG ExponentDifference;
+ ULONG Fa;
+ ULONG Fb;
+ ULONG Function;
+ ULONG Index;
+ ALPHA_INSTRUCTION Instruction;
+ ULARGE_INTEGER LargeResult;
+ LONG Negation;
+ LONGLONG Quadword;
+ LONG SingleMantissa;
+ FP_SINGLE_OPERAND SingleOperand1;
+ FP_SINGLE_OPERAND SingleOperand2;
+ FP_SINGLE_OPERAND SingleOperand3;
+ ULONG StickyBits;
+ BOOLEAN ValidOperation;
+
+ //
+ // Save the original exception address in case another exception
+ // occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation of the
+ // floating operation causes the emulation to be aborted. The new
+ // exception code and information is copied to the original exception
+ // record and a value of FALSE is returned.
+ //
+
+ try {
+
+ //
+ // Fetch the faulting or trapping instruction. Check the opcode and
+ // function code (including the trap enable bits) for IEEE floating
+ // point operations that are expected to be emulated.
+ //
+ // N.B. Only a subset of the 2048 possible combinations of 11 bits
+ // in the function field are valid. A total of 88 functions
+ // are affected by missing plus and minus infinity rounding
+ // mode support in the EV4 chip.
+ //
+
+ Instruction = *((PALPHA_INSTRUCTION)ExceptionRecord->ExceptionAddress);
+ DBGPRINT("KiEmulateFloating: Instruction = %.8lx, Fpcr = %.16Lx\n",
+ Instruction.Long, TrapFrame->Fpcr);
+ Function = Instruction.FpOp.Function;
+
+ ValidOperation = FALSE;
+ if (Instruction.FpOp.Opcode == IEEEFP_OP) {
+
+ //
+ // Adjust the function code if the instruction is CVTST.
+ //
+
+ if (Function == CVTST_FUNC) {
+ Function = CVTST_FUNC_PROPER;
+
+ } else if (Function == CVTST_S_FUNC) {
+ Function = CVTST_FUNC_PROPER | FP_TRAP_ENABLE_S;
+ }
+
+ switch (Function & FP_FUNCTION_MASK) {
+ case ADDS_FUNC :
+ case SUBS_FUNC :
+ case MULS_FUNC :
+ case DIVS_FUNC :
+ case ADDT_FUNC :
+ case SUBT_FUNC :
+ case MULT_FUNC :
+ case DIVT_FUNC :
+ case CVTTQ_FUNC :
+ case CVTTS_FUNC :
+
+ switch (Function & FP_TRAP_ENABLE_MASK) {
+ case FP_TRAP_ENABLE_NONE :
+ case FP_TRAP_ENABLE_U :
+ case FP_TRAP_ENABLE_SU :
+ case FP_TRAP_ENABLE_SUI :
+
+ ValidOperation = TRUE;
+ break;
+ }
+ break;
+
+ case CVTQS_FUNC :
+ case CVTQT_FUNC :
+
+ switch (Function & FP_TRAP_ENABLE_MASK) {
+ case FP_TRAP_ENABLE_NONE :
+ case FP_TRAP_ENABLE_SUI :
+
+ ValidOperation = TRUE;
+ break;
+ }
+ break;
+
+ case CVTST_FUNC_PROPER :
+
+ switch (Function & FP_TRAP_ENABLE_MASK) {
+ case FP_TRAP_ENABLE_NONE :
+ case FP_TRAP_ENABLE_S :
+
+ ValidOperation = TRUE;
+ break;
+ }
+ break;
+
+ case CMPTEQ_FUNC :
+ case CMPTLE_FUNC :
+ case CMPTLT_FUNC :
+ case CMPTUN_FUNC :
+
+ ValidOperation = TRUE;
+ break;
+ }
+
+ } else if (Instruction.FpOp.Opcode == FPOP_OP) {
+ switch (Function) {
+ case CVTLQ_FUNC :
+ case CVTQL_FUNC :
+ case CVTQLV_FUNC :
+ case CVTQLSV_FUNC :
+
+ ValidOperation = TRUE;
+ break;
+ }
+ }
+
+ if (ValidOperation == FALSE) {
+
+ //
+ // An illegal instruction, function code, format value, or trap
+ // enable value was encountered. Generate an illegal instruction
+ // exception.
+ //
+
+ ExceptionRecord->ExceptionCode = STATUS_ILLEGAL_INSTRUCTION;
+ DBGPRINT("KiEmulateFloating: Invalid Function or Format\n");
+ return FALSE;
+ }
+
+ //
+ // Increment the floating emulation count.
+ //
+
+ KeGetCurrentPrcb()->KeFloatingEmulationCount += 1;
+
+ //
+ // Initialize the address of the exception record, exception frame,
+ // and trap frame in the context block used during the emulation of
+ // the floating point operation.
+ //
+ // N.B. The SoftwareFpcr and IEEE exception records are only used
+ // with IEEE mode instructions.
+ //
+
+ ContextBlock.ExceptionRecord = ExceptionRecord;
+ ContextBlock.ExceptionFrame = ExceptionFrame;
+ ContextBlock.TrapFrame = TrapFrame;
+ ContextBlock.SoftwareFpcr = SoftwareFpcr;
+
+ //
+ // Check if the /S bit is set in the instruction. This bit is always
+ // set in the case of a trigger instruction of an asynchronous trap
+ // (assuming valid trap shadow) but not necessarily always set in the
+ // case of an unimplemented floating instruction fault.
+ //
+
+ if ((Function & FP_TRAP_ENABLE_S) != 0) {
+ ContextBlock.IeeeMode = TRUE;
+
+ } else {
+ ContextBlock.IeeeMode = FALSE;
+ }
+
+ if ((Function & FP_TRAP_ENABLE_U) != 0) {
+ ContextBlock.UnderflowEnable = TRUE;
+
+ } else {
+ ContextBlock.UnderflowEnable = FALSE;
+ }
+
+ //
+ // Set the current rounding mode from the rounding mode specified in
+ // the instruction, or if dynamic rounding is specified, from the
+ // rounding mode specified in the FPCR.
+ // Set the emulation flag and emulate the floating point operation.
+ // The return value is dependent on the results of the emulation.
+ //
+
+ ContextBlock.Fc = Instruction.FpOp.Fc;
+ Fa = Instruction.FpOp.Fa;
+ Fb = Instruction.FpOp.Fb;
+
+ if ((Function & FP_ROUND_MASK) == FP_ROUND_D) {
+ ContextBlock.Round = ((PFPCR)&TrapFrame->Fpcr)->DynamicRoundingMode;
+
+ } else {
+ ContextBlock.Round = (Function & FP_ROUND_MASK) >> FP_ROUND_SHIFT;
+ }
+
+ SoftwareFpcr->EmulationOccurred = 1;
+
+ //
+ // Unpack operands and dispense with NaNs.
+ //
+
+ switch (Function & FP_FUNCTION_MASK) {
+ case ADDS_FUNC :
+ case SUBS_FUNC :
+ case MULS_FUNC :
+ case DIVS_FUNC :
+
+ //
+ // The function has two single operand values.
+ //
+
+ KiUnpackSingle(Fa, &ContextBlock, &SingleOperand1);
+ KiUnpackSingle(Fb, &ContextBlock, &SingleOperand2);
+
+ //
+ // Non-IEEE mode operate instructions trap on NaN, infinity, or
+ // denormal operands.
+ //
+
+ if ((ContextBlock.IeeeMode == FALSE) &&
+ ((SingleOperand1.Normal == FALSE) ||
+ (SingleOperand2.Normal == FALSE))) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+
+ if ((SingleOperand1.Nan != FALSE) || (SingleOperand2.Nan != FALSE)) {
+
+ //
+ // Store a quiet NaN if the invalid operation trap
+ // is disabled, or raise an exception if the invalid
+ // operation trap is enabled and either of the NaNs
+ // is a signaling NaN.
+ //
+
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand2);
+ }
+ break;
+
+ case ADDT_FUNC :
+ case SUBT_FUNC :
+ case MULT_FUNC :
+ case DIVT_FUNC :
+
+ //
+ // The function has two double operand values.
+ //
+
+ KiUnpackDouble(Fa, &ContextBlock, &DoubleOperand1);
+ KiUnpackDouble(Fb, &ContextBlock, &DoubleOperand2);
+
+ //
+ // Non-IEEE mode operate instructions trap on NaN, infinity, or
+ // denormal operands.
+ //
+
+ if ((ContextBlock.IeeeMode == FALSE) &&
+ ((DoubleOperand1.Normal == FALSE) ||
+ (DoubleOperand2.Normal == FALSE))) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ if ((DoubleOperand1.Nan != FALSE) || (DoubleOperand2.Nan != FALSE)) {
+
+ //
+ // Store a quiet NaN if the invalid operation trap
+ // is disabled, or raise an exception if the invalid
+ // operation trap is enabled and either of the NaNs
+ // is a signaling NaN.
+ //
+
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+ }
+ break;
+
+ case CMPTEQ_FUNC :
+ case CMPTLE_FUNC :
+ case CMPTLT_FUNC :
+ case CMPTUN_FUNC :
+
+ //
+ // The function has two double operand values.
+ //
+
+ KiUnpackDouble(Fa, &ContextBlock, &DoubleOperand1);
+ KiUnpackDouble(Fb, &ContextBlock, &DoubleOperand2);
+
+ //
+ // Non-IEEE mode compare instructions trap on NaN or denormal
+ // operands.
+ //
+
+ if ((ContextBlock.IeeeMode == FALSE) &&
+ (((DoubleOperand1.Normal == FALSE) &&
+ (DoubleOperand1.Infinity == FALSE)) ||
+ ((DoubleOperand2.Normal == FALSE) &&
+ (DoubleOperand2.Infinity == FALSE)))) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+
+ //
+ // Compare operation.
+ //
+ // If either operand is a NaN, then check the type of compare
+ // operation to determine the result value and if an exception
+ // should be raised. Otherwise, if the operation is a compare
+ // unordered operation, store a false result.
+ //
+
+ if ((DoubleOperand1.Nan != FALSE) || (DoubleOperand2.Nan != FALSE)) {
+
+ //
+ // If the compare is an unordered compare, then store a true
+ // result (a NaN compares unordered with everything, including
+ // itself). Raise an exception if the invalid operation trap
+ // is enabled and either of the NaNs is a signaling NaN.
+ //
+ // Otherwise, if the operation is compare equal, then store a
+ // false result. Raise an exception if the invalid operation
+ // trap is enabled and either of the NaNs is a signaling NaN.
+ //
+ // Otherwise store a false result and raise an exception if
+ // the invalid operation trap is enabled.
+ //
+
+ if ((Function & FP_FUNCTION_MASK) == CMPTUN_FUNC) {
+ KiSetRegisterValue(ContextBlock.Fc + 32,
+ FP_COMPARE_TRUE,
+ ExceptionFrame,
+ TrapFrame);
+
+ return KiInvalidCompareDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else if ((Function & FP_FUNCTION_MASK) == CMPTEQ_FUNC) {
+ KiSetRegisterValue(ContextBlock.Fc + 32,
+ FP_COMPARE_FALSE,
+ ExceptionFrame,
+ TrapFrame);
+
+ return KiInvalidCompareDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else {
+ KiSetRegisterValue(ContextBlock.Fc + 32,
+ FP_COMPARE_FALSE,
+ ExceptionFrame,
+ TrapFrame);
+
+ return KiInvalidCompareDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+ }
+
+ } else {
+ if ((Function & FP_FUNCTION_MASK) == CMPTUN_FUNC) {
+ KiSetRegisterValue(ContextBlock.Fc + 32,
+ FP_COMPARE_FALSE,
+ ExceptionFrame,
+ TrapFrame);
+
+ return TRUE;
+ }
+ }
+ break;
+
+ case CVTST_FUNC_PROPER :
+
+ //
+ // The function has one single operand value which is found in
+ // the second operand.
+ //
+
+ KiUnpackSingle(Fb, &ContextBlock, &SingleOperand1);
+
+ //
+ // Non-IEEE mode convert instructions trap on NaN, infinity, or
+ // denormal operands.
+ //
+
+ if ((ContextBlock.IeeeMode == FALSE) &&
+ (SingleOperand1.Normal == FALSE)) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ break;
+
+ case CVTTQ_FUNC :
+ case CVTTS_FUNC :
+
+ //
+ // The function has one double operand value which is found in
+ // the second operand.
+ //
+
+ KiUnpackDouble(Fb, &ContextBlock, &DoubleOperand1);
+
+ //
+ // Non-IEEE mode convert instructions trap on NaN, infinity, or
+ // denormal operands.
+ //
+
+ if ((ContextBlock.IeeeMode == FALSE) &&
+ (DoubleOperand1.Normal == FALSE)) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ break;
+
+ case CVTLQ_FUNC :
+ case CVTQL_FUNC :
+ case CVTQS_FUNC :
+ case CVTQT_FUNC :
+
+ //
+ // The function has one quadword operand value which is found in
+ // the second operand.
+ //
+
+ Quadword = KiGetRegisterValue(Fb + 32,
+ ContextBlock.ExceptionFrame,
+ ContextBlock.TrapFrame);
+ break;
+ }
+
+ //
+ // Case to the proper function routine to emulate the operation.
+ //
+
+ Negation = 0;
+ switch (Function & FP_FUNCTION_MASK) {
+
+ //
+ // Floating subtract operation.
+ //
+ // Floating subtract is accomplished by complementing the sign
+ // of the second operand and then performing an add operation.
+ //
+
+ case SUBS_FUNC :
+ DBGPRINT2("subs\n");
+ Negation = 0x1;
+
+ //
+ // Floating add operation.
+ //
+ // Floating add is accomplished using signed magnitude addition.
+ //
+ // The exponent difference is calculated and the smaller number
+ // is right shifted by the specified amount, but no more than
+ // the width of the operand values (i.e., 26 for single and 55
+ // for double). The shifted out value is saved for rounding.
+ //
+ // If the signs of the two operands are the same, then they
+ // are added together after having performed the alignment
+ // shift.
+ //
+ // If the signs of the two operands are different, then the
+ // sign of the result is the sign of the larger operand and
+ // the smaller operand is subtracted from the larger operand.
+        // Because a double level test is avoided (i.e., one test on the
+        // exponents, and one on the mantissas if the exponents are
+        // equal), the result of the subtract could be negative when the
+        // exponents are equal. If this occurs, then the result sign and
+        // mantissa are complemented to obtain the correct result.
+ //
+
+ case ADDS_FUNC :
+ DBGPRINT2("adds\n");
+
+ //
+ // Complement the sign of the second operand if the operation
+ // is subtraction.
+ //
+
+ SingleOperand2.Sign ^= Negation;
+
+ //
+ // Reorder the operands according to their exponent value
+ // so that Operand1 exponent will be >= Operand2 exponent.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ SingleOperand3 = SingleOperand2;
+ SingleOperand2 = SingleOperand1;
+ SingleOperand1 = SingleOperand3;
+ }
+
+ //
+ // Compute the exponent difference and shift the smaller
+            // mantissa right by the difference value or 26, whichever
+ // is smaller. The bits shifted out are termed the sticky
+ // bits and are used later in the rounding operation.
+ //
+
+ ExponentDifference =
+ SingleOperand1.Exponent - SingleOperand2.Exponent;
+
+ if (ExponentDifference > 26) {
+ ExponentDifference = 26;
+ }
+
+ StickyBits =
+ SingleOperand2.Mantissa & ((1 << ExponentDifference) - 1);
+ SingleMantissa = SingleOperand2.Mantissa >> ExponentDifference;
+
+ //
+ // If the operands both have the same sign, then perform the
+ // operation by adding the values together. Otherwise, if the
+ // operands are not infinity, perform the operation by
+ // subtracting the second operand from the first operand.
+ //
+
+ if ((SingleOperand1.Sign ^ SingleOperand2.Sign) == 0) {
+ SingleOperand1.Mantissa += SingleMantissa;
+
+ } else {
+ if ((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity != FALSE)) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ } else if (SingleOperand1.Infinity == FALSE) {
+ if (StickyBits != 0) {
+ SingleOperand1.Mantissa -= 1;
+ }
+
+ SingleOperand1.Mantissa -= SingleMantissa;
+ if (SingleOperand1.Mantissa < 0) {
+ SingleOperand1.Mantissa = -SingleOperand1.Mantissa;
+ SingleOperand1.Sign ^= 0x1;
+ }
+
+ //
+ // If the result is exactly zero and the signs of the
+ // operands differ, then the result is plus zero except
+ // when the rounding mode is minus infinity.
+ //
+
+ if ((SingleOperand1.Mantissa == 0) && (StickyBits == 0)) {
+ if (ContextBlock.Round == ROUND_TO_MINUS_INFINITY) {
+ SingleOperand1.Sign = 0x1;
+
+ } else {
+ SingleOperand1.Sign = 0x0;
+ }
+ }
+ }
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ case SUBT_FUNC :
+ DBGPRINT2("subt\n");
+ Negation = 0x1;
+
+ case ADDT_FUNC :
+ DBGPRINT2("addt\n");
+
+ //
+ // Complement the sign of the second operand if the operation
+ // is subtraction.
+ //
+
+ DoubleOperand2.Sign ^= Negation;
+
+ //
+ // Reorder the operands according to their exponent value
+ // so that Operand1 exponent will be >= Operand2 exponent.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ DoubleOperand3 = DoubleOperand2;
+ DoubleOperand2 = DoubleOperand1;
+ DoubleOperand1 = DoubleOperand3;
+ }
+
+ //
+ // Compute the exponent difference and shift the smaller
+            // mantissa right by the difference value or 55, whichever
+ // is smaller. The bits shifted out are termed the sticky
+ // bits and are used later in the rounding operation.
+ //
+
+ ExponentDifference =
+ DoubleOperand1.Exponent - DoubleOperand2.Exponent;
+
+ if (ExponentDifference > 55) {
+ ExponentDifference = 55;
+ }
+
+ if (ExponentDifference >= 32) {
+ ExponentDifference -= 32;
+ StickyBits = (DoubleOperand2.MantissaLow) |
+ (DoubleOperand2.MantissaHigh & ((1 << ExponentDifference) - 1));
+
+ DoubleMantissaLow =
+ DoubleOperand2.MantissaHigh >> ExponentDifference;
+
+ DoubleMantissaHigh = 0;
+
+ } else if (ExponentDifference > 0) {
+ StickyBits =
+ DoubleOperand2.MantissaLow & ((1 << ExponentDifference) - 1);
+
+ DoubleMantissaLow =
+ (DoubleOperand2.MantissaLow >> ExponentDifference) |
+ (DoubleOperand2.MantissaHigh << (32 - ExponentDifference));
+
+ DoubleMantissaHigh =
+ DoubleOperand2.MantissaHigh >> ExponentDifference;
+
+ } else {
+ StickyBits = 0;
+ DoubleMantissaLow = DoubleOperand2.MantissaLow;
+ DoubleMantissaHigh = DoubleOperand2.MantissaHigh;
+ }
+
+ //
+ // If the operands both have the same sign, then perform the
+ // operation by adding the values together. Otherwise, if the
+ // operands are not infinity, perform the operation by
+ // subtracting the second operand from the first operand.
+ //
+
+ if ((DoubleOperand1.Sign ^ DoubleOperand2.Sign) == 0) {
+ DoubleOperand1.MantissaLow += DoubleMantissaLow;
+ DoubleOperand1.MantissaHigh += DoubleMantissaHigh;
+ if (DoubleOperand1.MantissaLow < DoubleMantissaLow) {
+ DoubleOperand1.MantissaHigh += 1;
+ }
+
+ } else {
+ if ((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity != FALSE)) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else if (DoubleOperand1.Infinity == FALSE) {
+ if (StickyBits != 0) {
+ if (DoubleOperand1.MantissaLow < 1) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.MantissaLow -= 1;
+ }
+
+ if (DoubleOperand1.MantissaLow < DoubleMantissaLow) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.MantissaLow -= DoubleMantissaLow;
+ DoubleOperand1.MantissaHigh -= DoubleMantissaHigh;
+ if (DoubleOperand1.MantissaHigh < 0) {
+ DoubleOperand1.MantissaLow = -(LONG)DoubleOperand1.MantissaLow;
+ DoubleOperand1.MantissaHigh = -DoubleOperand1.MantissaHigh;
+ if (DoubleOperand1.MantissaLow != 0) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+ DoubleOperand1.Sign ^= 0x1;
+ }
+
+ //
+ // If the result is exactly zero and the signs of the
+ // operands differ, then the result is plus zero except
+ // when the rounding mode is minus infinity.
+ //
+
+ if ((DoubleOperand1.MantissaHigh == 0) &&
+ (DoubleOperand1.MantissaLow == 0) &&
+ (StickyBits == 0)) {
+ if (ContextBlock.Round == ROUND_TO_MINUS_INFINITY) {
+ DoubleOperand1.Sign = 0x1;
+
+ } else {
+ DoubleOperand1.Sign = 0x0;
+ }
+ }
+ }
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ //
+ // Floating multiply operation.
+ //
+ // Floating multiply is accomplished using unsigned multiplies
+ // of the mantissa values, and adding the partial results together
+ // to form the total product.
+ //
+ // The two mantissa values are preshifted such that the final
+ // result is properly aligned.
+ //
+
+ case MULS_FUNC :
+ DBGPRINT2("muls\n");
+
+ //
+ // Reorder the operands according to their exponent value
+ // so that Operand1 exponent will be >= Operand2 exponent.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ SingleOperand3 = SingleOperand2;
+ SingleOperand2 = SingleOperand1;
+ SingleOperand1 = SingleOperand3;
+ }
+
+ //
+ // If the first operand is infinite and the second operand is
+ // zero, then an invalid operation is specified.
+ //
+
+ if ((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+ }
+
+ //
+ // Preshift the operand mantissas so the result will be a
+ // properly aligned 64-bit value and then unsigned multiply
+ // the two mantissa values. The single result is the high part
+ // of the 64-bit product and the sticky bits are the low part
+ // of the 64-bit product.
+ //
+ // The size of the product will be (1+23+2)+(1+23+2) = 52 bits
+ // of which the high (1+1+23+2) = 27 bits are result and the
+ // remaining 25 bits are sticky. By preshifting the operands
+ // left 7 bits, the number of sticky bits is 32. This alignment
+ // is convenient.
+ //
+ // The 7 bit preshift amount must be applied in part to both
+ // operands because 26 of 32 bits of the mantissa are used and
+ // so neither operand can be safely shifted left by more than 6
+ // bits. Thus one operand is shifted the maximum of 6 bits and
+ // the other the remaining 1 bit.
+ //
+
+ LargeResult.QuadPart = ((ULONGLONG)((ULONG)(SingleOperand1.Mantissa << (32 - 26)))) *
+ ((ULONGLONG)((ULONG)(SingleOperand2.Mantissa << 1)));
+
+ SingleOperand1.Mantissa = LargeResult.HighPart;
+ StickyBits = LargeResult.LowPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ SingleOperand1.Exponent +=
+ SingleOperand2.Exponent - SINGLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ case MULT_FUNC :
+ DBGPRINT2("mult\n");
+
+ //
+ // Reorder the operands according to their exponent value
+ // so that Operand1 exponent will be >= Operand2 exponent.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ DoubleOperand3 = DoubleOperand2;
+ DoubleOperand2 = DoubleOperand1;
+ DoubleOperand1 = DoubleOperand3;
+ }
+
+ //
+ // If the first operand is infinite and the second operand is
+ // zero, then an invalid operation is specified.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+ }
+
+ //
+ // Preshift the operand mantissas so the result will be a
+ // properly aligned 128-bit value and then unsigned multiply
+ // the two mantissa values. The double result is the high part
+ // of the 128-bit product and the sticky bits are the low part
+ // of the 128-bit product.
+ //
+ // The size of the product will be (1+52+2)+(1+52+2) = 110 bits
+ // of which the high (1+1+52+2) = 56 bits are result and the
+ // remaining 54 bits are sticky. By preshifting the operands
+ // left 10 bits, the number of sticky bits is 64. This alignment
+ // is convenient.
+ //
+ // The 10 bit preshift amount must be applied in part to both
+ // operands because 55 of 64 bits of the mantissa are used and
+ // so neither operand can be safely shifted left by more than 9
+ // bits. Thus one operand is shifted the maximum of 9 bits and
+ // the other the remaining 1 bit.
+ //
+
+ DoubleOperand1.MantissaHigh =
+ (DoubleOperand1.MantissaHigh << 1) |
+ (DoubleOperand1.MantissaLow >> 31);
+
+ DoubleOperand1.MantissaLow <<= 1;
+ DoubleOperand2.MantissaHigh =
+ (DoubleOperand2.MantissaHigh << (64 - 55)) |
+ (DoubleOperand2.MantissaLow >> (32 - (64 - 55)));
+
+ DoubleOperand2.MantissaLow <<= (64 - 55);
+
+ //
+ // The 128-bit product is formed by multiplying and adding
+ // all the cross product values.
+ //
+ // Consider the operands (A and B) as being composed of two
+ // parts Ahigh, Alow, Bhigh, and Blow. The cross product sum
+ // is then:
+ //
+ // Ahigh * Bhigh * 2^64 +
+ // Ahigh * Blow * 2^32 +
+ // Alow * Bhigh * 2^32 +
+ // Alow * Blow
+ //
+
+ AhighBhigh.QuadPart = (ULONGLONG)(ULONG)DoubleOperand1.MantissaHigh *
+ (ULONGLONG)(ULONG)DoubleOperand2.MantissaHigh;
+
+ AhighBlow.QuadPart = (ULONGLONG)(ULONG)DoubleOperand1.MantissaHigh *
+ (ULONGLONG)DoubleOperand2.MantissaLow;
+
+ AlowBhigh.QuadPart = (ULONGLONG)DoubleOperand1.MantissaLow *
+ (ULONGLONG)(ULONG)DoubleOperand2.MantissaHigh;
+
+ AlowBlow.QuadPart = (ULONGLONG)DoubleOperand1.MantissaLow *
+ (ULONGLONG)DoubleOperand2.MantissaLow;
+
+ AlowBlow.HighPart += AhighBlow.LowPart;
+ if (AlowBlow.HighPart < AhighBlow.LowPart) {
+ Carry1 = 1;
+
+ } else {
+ Carry1 = 0;
+ }
+
+ AlowBlow.HighPart += AlowBhigh.LowPart;
+ if (AlowBlow.HighPart < AlowBhigh.LowPart) {
+ Carry1 += 1;
+ }
+
+ DoubleOperand1.MantissaLow = AhighBlow.HighPart + Carry1;
+ if (DoubleOperand1.MantissaLow < Carry1) {
+ Carry2 = 1;
+
+ } else {
+ Carry2 = 0;
+ }
+
+ DoubleOperand1.MantissaLow += AlowBhigh.HighPart;
+ if (DoubleOperand1.MantissaLow < AlowBhigh.HighPart) {
+ Carry2 += 1;
+ }
+
+ DoubleOperand1.MantissaLow += AhighBhigh.LowPart;
+ if (DoubleOperand1.MantissaLow < AhighBhigh.LowPart) {
+ Carry2 += 1;
+ }
+
+ DoubleOperand1.MantissaHigh = AhighBhigh.HighPart + Carry2;
+ StickyBits = AlowBlow.HighPart | AlowBlow.LowPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ DoubleOperand1.Exponent +=
+ DoubleOperand2.Exponent - DOUBLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ //
+ // Floating divide operation.
+ //
+ // Floating division is accomplished by repeated subtract using
+ // a single one-bit-at-a-time algorithm. The number of division
+ // steps performed is equal to the mantissa size plus one guard
+ // bit.
+ //
+ // The sticky bits are the remainder after the specified number
+ // of division steps.
+ //
+
+ case DIVS_FUNC :
+ DBGPRINT2("divs\n");
+
+ //
+ // If the first operand is infinite and the second operand
+ // is infinite, or both operands are zero, then an invalid
+ // operation is specified.
+ //
+
+ if (((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity != FALSE)) ||
+ ((SingleOperand1.Infinity == FALSE) &&
+ (SingleOperand1.Mantissa == 0) &&
+ (SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0))) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+ }
+
+ //
+ // If the second operand is zero, then a divide by zero
+ // operation is specified.
+ //
+
+ if ((SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ return KiDivideByZeroSingle(&ContextBlock,
+ &SingleOperand1,
+ &SingleOperand2);
+ }
+
+ //
+ // If the first operand is infinite, then the result is
+ // infinite. Otherwise, if the second operand is infinite,
+ // then the result is zero (note that both operands cannot
+ // be infinite).
+ //
+
+ if (SingleOperand1.Infinity != FALSE) {
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (SingleOperand2.Infinity != FALSE) {
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ SingleOperand1.Exponent = 0;
+ SingleOperand1.Mantissa = 0;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+ }
+
+ //
+ // Perform divide operation by repeating a single bit
+ // divide step 26 iterations.
+ //
+
+ SingleOperand3.Mantissa = 0;
+ for (Index = 0; Index < 26; Index += 1) {
+ SingleOperand3.Mantissa <<= 1;
+ if (SingleOperand1.Mantissa >= SingleOperand2.Mantissa) {
+ SingleOperand1.Mantissa -= SingleOperand2.Mantissa;
+ SingleOperand3.Mantissa |= 1;
+ }
+
+ SingleOperand1.Mantissa <<= 1;
+ }
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ SingleOperand3.Sign = SingleOperand1.Sign ^ SingleOperand2.Sign;
+ SingleOperand3.Exponent = SingleOperand1.Exponent -
+ SingleOperand2.Exponent + SINGLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ SingleOperand3.Infinity = FALSE;
+ SingleOperand3.Nan = FALSE;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand3,
+ SingleOperand1.Mantissa);
+
+ case DIVT_FUNC :
+ DBGPRINT2("divt\n");
+
+ //
+ // If the first operand is infinite and the second operand
+ // is infinite, or both operands are zero, then an invalid
+ // operation is specified.
+ //
+
+ if (((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity != FALSE)) ||
+ ((DoubleOperand1.Infinity == FALSE) &&
+ (DoubleOperand1.MantissaHigh == 0) &&
+ (DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0))) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+ }
+
+ //
+ // If the second operand is zero, then a divide by zero
+ // operation is specified.
+ //
+
+ if ((DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ return KiDivideByZeroDouble(&ContextBlock,
+ &DoubleOperand1,
+ &DoubleOperand2);
+ }
+
+ //
+ // If the first operand is infinite, then the result is
+ // infinite. Otherwise, if the second operand is infinite,
+ // then the result is zero (note that both operands cannot
+ // be infinite).
+ //
+
+ if (DoubleOperand1.Infinity != FALSE) {
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else if (DoubleOperand2.Infinity != FALSE) {
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ DoubleOperand1.Exponent = 0;
+ DoubleOperand1.MantissaHigh = 0;
+ DoubleOperand1.MantissaLow = 0;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+ }
+
+ //
+ // Perform divide operation by repeating a single bit
+ // divide step 55 iterations.
+ //
+
+ DoubleDividend.LowPart = DoubleOperand1.MantissaLow;
+ DoubleDividend.HighPart = DoubleOperand1.MantissaHigh;
+ DoubleDivisor.LowPart = DoubleOperand2.MantissaLow;
+ DoubleDivisor.HighPart = DoubleOperand2.MantissaHigh;
+ DoubleQuotient.LowPart = 0;
+ DoubleQuotient.HighPart = 0;
+ for (Index = 0; Index < 55; Index += 1) {
+ DoubleQuotient.HighPart =
+ (DoubleQuotient.HighPart << 1) |
+ DoubleQuotient.LowPart >> 31;
+
+ DoubleQuotient.LowPart <<= 1;
+ if (DoubleDividend.QuadPart >= DoubleDivisor.QuadPart) {
+ DoubleDividend.QuadPart = DoubleDividend.QuadPart - DoubleDivisor.QuadPart;
+ DoubleQuotient.LowPart |= 1;
+ }
+
+ DoubleDividend.HighPart =
+ (DoubleDividend.HighPart << 1) |
+ DoubleDividend.LowPart >> 31;
+
+ DoubleDividend.LowPart <<= 1;
+ }
+
+ DoubleOperand3.MantissaLow = DoubleQuotient.LowPart;
+ DoubleOperand3.MantissaHigh = DoubleQuotient.HighPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ DoubleOperand3.Sign = DoubleOperand1.Sign ^ DoubleOperand2.Sign;
+ DoubleOperand3.Exponent = DoubleOperand1.Exponent -
+ DoubleOperand2.Exponent + DOUBLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ DoubleOperand3.Infinity = FALSE;
+ DoubleOperand3.Nan = FALSE;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand3,
+ DoubleDividend.LowPart | DoubleDividend.HighPart);
+
+ //
+ // Floating compare double.
+ //
+ // This operation is performed after having separated out NaNs,
+ // and therefore the only comparison predicates left are equal
+ // and less.
+ //
+ // Floating compare double is accomplished by comparing signs,
+ // then exponents, and finally the mantissa if necessary.
+ //
+ // N.B. The sign of zero is ignored.
+ //
+
+ case CMPTEQ_FUNC :
+ case CMPTLE_FUNC :
+ case CMPTLT_FUNC :
+
+ //
+ // If either operand is zero, then set the sign of the operand
+ // positive and the exponent to a value less than the minimum
+ // denormal number.
+ //
+
+ if ((DoubleOperand1.Infinity == FALSE) &&
+ (DoubleOperand1.MantissaHigh == 0)) {
+ DoubleOperand1.Sign = 0;
+ DoubleOperand1.Exponent = -52;
+ }
+
+ if ((DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ DoubleOperand2.Sign = 0;
+ DoubleOperand2.Exponent = -52;
+ }
+
+ //
+ // Compare signs first.
+ //
+
+ if (DoubleOperand1.Sign < DoubleOperand2.Sign) {
+
+ //
+ // The first operand is greater than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.Sign > DoubleOperand2.Sign) {
+
+ //
+ // The first operand is less than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+
+ //
+ // The operand signs are equal.
+ //
+ // If the sign of the operand is negative, then the sense of
+ // the comparison is reversed.
+ //
+
+ if (DoubleOperand1.Sign == 0) {
+
+ //
+ // Compare positive operand with positive operand.
+ //
+
+ if (DoubleOperand1.Exponent > DoubleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.Exponent < DoubleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand1.MantissaHigh >
+ DoubleOperand2.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.MantissaHigh <
+ DoubleOperand2.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand1.MantissaLow >
+ DoubleOperand2.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.MantissaLow <
+ DoubleOperand2.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+
+ } else {
+
+ //
+ // Compare negative operand with negative operand.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.Exponent < DoubleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand2.MantissaHigh >
+ DoubleOperand1.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.MantissaHigh <
+ DoubleOperand1.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand2.MantissaLow >
+ DoubleOperand1.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.MantissaLow <
+ DoubleOperand1.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // Form the condition code result value using the comparison
+ // information and the compare function codes.
+ //
+
+ switch (Function & FP_FUNCTION_MASK) {
+ case CMPTEQ_FUNC :
+ CompareResult = CompareEqual;
+ DBGPRINT2("cmpteq\n");
+ break;
+
+ case CMPTLE_FUNC :
+ CompareResult = (CompareLess | CompareEqual);
+ DBGPRINT2("cmptle\n");
+ break;
+
+ case CMPTLT_FUNC :
+ CompareResult = CompareLess;
+ DBGPRINT2("cmptlt\n");
+ break;
+ }
+
+ //
+ // Set the result operand to 2.0 if the comparison is true,
+ // otherwise store 0.0.
+ //
+
+ if (CompareResult != FALSE) {
+ KiSetRegisterValue(ContextBlock.Fc + 32,
+ FP_COMPARE_TRUE,
+ ExceptionFrame,
+ TrapFrame);
+
+ } else {
+ KiSetRegisterValue(ContextBlock.Fc + 32,
+ FP_COMPARE_FALSE,
+ ExceptionFrame,
+ TrapFrame);
+ }
+ return TRUE;
+
+ //
+ // Floating convert single to double.
+ //
+ // Floating conversion to double is accomplished by forming a
+ // double floating operand and then normalizing and storing
+ // the result value.
+ //
+
+ case CVTST_FUNC_PROPER :
+ DBGPRINT2("cvtst\n");
+
+ //
+ // If the operand is a NaN, then store a quiet NaN if the
+ // invalid operation trap is disabled, or raise an exception
+ // if the invalid operation trap is enabled and the operand
+ // is a signaling NaN.
+ //
+
+ if (SingleOperand1.Nan != FALSE) {
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow =
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = DOUBLE_MAXIMUM_EXPONENT;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = TRUE;
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand1);
+ }
+
+ //
+ // Transform the single operand to double format.
+ //
+
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow =
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = SingleOperand1.Exponent +
+ DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = SingleOperand1.Infinity;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ //
+ // Floating convert double to single.
+ //
+ // Floating conversion to single is accomplished by forming a
+ // single floating operand and then normalizing and storing the
+ // result value.
+ //
+
+ case CVTTS_FUNC :
+ DBGPRINT2("cvtts\n");
+
+ //
+ // If the operand is a NaN, then store a quiet NaN if the
+ // invalid operation trap is disabled, or raise an exception
+ // if the invalid operation trap is enabled and the operand
+ // is a signaling NaN.
+ //
+
+ if (DoubleOperand1.Nan != FALSE) {
+ SingleOperand1.Mantissa =
+ (DoubleOperand1.MantissaHigh << (26 - (55 - 32))) |
+ (DoubleOperand1.MantissaLow >> (32 - (26 - (55 - 32))));
+ SingleOperand1.Exponent = SINGLE_MAXIMUM_EXPONENT;
+ SingleOperand1.Sign = DoubleOperand1.Sign;
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = TRUE;
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand1);
+ }
+
+ //
+ // Transform the double operand to single format.
+ //
+
+ SingleOperand1.Mantissa =
+ (DoubleOperand1.MantissaHigh << (26 - (55 - 32))) |
+ (DoubleOperand1.MantissaLow >> (32 - (26 - (55 - 32))));
+ StickyBits = DoubleOperand1.MantissaLow << (26 - (55 - 32));
+ SingleOperand1.Exponent = DoubleOperand1.Exponent +
+ SINGLE_EXPONENT_BIAS - DOUBLE_EXPONENT_BIAS;
+ SingleOperand1.Sign = DoubleOperand1.Sign;
+ SingleOperand1.Infinity = DoubleOperand1.Infinity;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ //
+ // Floating convert longword to quadword.
+ //
+ // Floating conversion from longword to quadword is accomplished by
+ // a repositioning of 32 bits of the operand, with sign extension.
+ //
+
+ case CVTLQ_FUNC :
+ DBGPRINT2("cvtlq\n");
+
+ //
+ // Pack floating register longword format into upper 32-bits
+ // by keeping bits 63..62 and 58..29, eliminating unused bits
+ // 61..59. Then right justify and sign extend the 32 bits into
+ // 64 bits.
+ //
+
+ Quadword = ((Quadword >> 62) << 62) | ((ULONGLONG)(Quadword << 5) >> 2);
+ KiSetRegisterValue(ContextBlock.Fc + 32,
+ Quadword >> 32,
+ ExceptionFrame,
+ TrapFrame);
+
+ return TRUE;
+
+ //
+ // Floating convert quadword to longword.
+ //
+ // Floating conversion from quadword to longword is accomplished by
+ // truncating the high order 32 bits of the quadword after checking
+ // for overflow.
+ //
+
+ case CVTQL_FUNC :
+ DBGPRINT2("cvtql\n");
+
+ return KiConvertQuadwordToLongword(&ContextBlock, Quadword);
+
+ //
+ // Floating convert quadword to single.
+ //
+ // Floating conversion to single is accomplished by forming a
+ // single floating operand and then normalizing and storing the
+ // result value.
+ //
+
+ case CVTQS_FUNC :
+ DBGPRINT2("cvtqs\n");
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (Quadword < 0) {
+ SingleOperand1.Sign = 0x1;
+ Quadword = -Quadword;
+
+ } else {
+ SingleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the quadword
+ // value.
+ //
+
+ if (Quadword != 0) {
+ SingleOperand1.Exponent = SINGLE_EXPONENT_BIAS + 63;
+ while (Quadword > 0) {
+ Quadword <<= 1;
+ SingleOperand1.Exponent -= 1;
+ }
+
+ SingleOperand1.Mantissa = (LONG)((ULONGLONG)Quadword >> (64 - 26));
+ if (Quadword & (((ULONGLONG)1 << (64 - 26)) - 1)) {
+ StickyBits = 1;
+
+ } else {
+ StickyBits = 0;
+ }
+
+ } else {
+ SingleOperand1.Exponent = 0;
+ SingleOperand1.Mantissa = 0;
+ StickyBits = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ //
+ // Floating convert quadword to double.
+ //
+ // Floating conversion to double is accomplished by forming a
+ // double floating operand and then normalizing and storing the
+ // result value.
+ //
+
+ case CVTQT_FUNC :
+ DBGPRINT2("cvtqt\n");
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (Quadword < 0) {
+ DoubleOperand1.Sign = 0x1;
+ Quadword = -Quadword;
+
+ } else {
+ DoubleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the quadword
+ // value.
+ //
+
+ if (Quadword != 0) {
+ DoubleOperand1.Exponent = DOUBLE_EXPONENT_BIAS + 63;
+ while (Quadword > 0) {
+ Quadword <<= 1;
+ DoubleOperand1.Exponent -= 1;
+ }
+
+ DoubleOperand1.MantissaHigh = (LONG)((ULONGLONG)Quadword >> ((64 - 55) + 32));
+ DoubleOperand1.MantissaLow = (LONG)((ULONGLONG)Quadword >> (64 - 55));
+ if (Quadword & (((ULONGLONG)1 << (64 - 55)) - 1)) {
+ StickyBits = 1;
+
+ } else {
+ StickyBits = 0;
+ }
+
+ } else {
+ DoubleOperand1.MantissaHigh = 0;
+ DoubleOperand1.MantissaLow = 0;
+ DoubleOperand1.Exponent = 0;
+ StickyBits = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ //
+ // Floating convert double to quadword.
+ //
+ // Floating conversion to quadword is accomplished by forming
+ // a quadword value from a double floating value.
+ //
+
+ case CVTTQ_FUNC :
+ DBGPRINT2("cvttq\n");
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) ||
+ (DoubleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationQuadword(&ContextBlock, 0);
+ }
+
+ //
+ // Convert double to quadword and store the result value.
+ //
+
+ return KiNormalizeQuadword(&ContextBlock, &DoubleOperand1);
+ }
+
+ //
+ // If an exception occurs, then copy the new exception information to the
+ // original exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+ DBGPRINT("KiEmulateFloating: Exception\n");
+ return FALSE;
+ }
+
+ DBGPRINT("KiEmulateFloating: Invalid Instruction\n");
+ return FALSE;
+}
+
+ULONGLONG
+KiConvertSingleOperandToRegister (
+ IN ULONG SingleValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function converts a 32-bit single format floating point value to
+ the 64-bit, double format used within floating point registers. Alpha
+ floating point registers are 64-bits wide and single format values are
+ transformed to 64-bits when stored or loaded from memory.
+
+Arguments:
+
+ SingleValue - Supplies the 32-bit single operand value as an integer.
+
+Return Value:
+
+ The 64-bit register format operand value is returned as the function
+ value.
+
+--*/
+
+{
+ PDOUBLE_FORMAT DoubleFormat;
+ ULONGLONG Result;
+ PSINGLE_FORMAT SingleFormat;
+
+ SingleFormat = (PSINGLE_FORMAT)&SingleValue;
+ DoubleFormat = (PDOUBLE_FORMAT)&Result;
+
+ DoubleFormat->Sign = SingleFormat->Sign;
+ DoubleFormat->Mantissa = ((ULONGLONG)SingleFormat->Mantissa) << (52 - 23);
+ if (SingleFormat->Exponent == SINGLE_MAXIMUM_EXPONENT) {
+ DoubleFormat->Exponent = DOUBLE_MAXIMUM_EXPONENT;
+
+ } else if (SingleFormat->Exponent == SINGLE_MINIMUM_EXPONENT) {
+ DoubleFormat->Exponent = DOUBLE_MINIMUM_EXPONENT;
+
+ } else {
+ DoubleFormat->Exponent = SingleFormat->Exponent - SINGLE_EXPONENT_BIAS +
+ DOUBLE_EXPONENT_BIAS;
+ }
+ return Result;
+}
+
+ULONG
+KiConvertRegisterToSingleOperand (
+ IN ULONGLONG DoubleValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function converts the 64-bit, double format floating point value
+ used within the floating point registers to a 32-bit, single format
+ floating point value.
+
+Arguments:
+
+ DoubleValue - Supplies the 64-bit double operand value as an integer.
+
+Return Value:
+
+ The 32-bit register format operand value is returned as the function
+ value.
+
+--*/
+
+{
+ PDOUBLE_FORMAT DoubleFormat;
+ ULONG Result;
+ PSINGLE_FORMAT SingleFormat;
+
+ SingleFormat = (PSINGLE_FORMAT)&Result;
+ DoubleFormat = (PDOUBLE_FORMAT)&DoubleValue;
+
+ SingleFormat->Sign = (ULONG)DoubleFormat->Sign;
+ SingleFormat->Mantissa = (ULONG)(DoubleFormat->Mantissa >> (52 - 23));
+ if (DoubleFormat->Exponent == DOUBLE_MAXIMUM_EXPONENT) {
+ SingleFormat->Exponent = SINGLE_MAXIMUM_EXPONENT;
+
+ } else if (DoubleFormat->Exponent == DOUBLE_MINIMUM_EXPONENT) {
+ SingleFormat->Exponent = SINGLE_MINIMUM_EXPONENT;
+
+ } else {
+ SingleFormat->Exponent = (ULONG)(DoubleFormat->Exponent - DOUBLE_EXPONENT_BIAS +
+ SINGLE_EXPONENT_BIAS);
+ }
+ return Result;
+}
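+
+//
+// As an illustration of the two conversions above, the single value -2.0f,
+// 0xc0000000 (Sign = 1, Exponent = 128, Mantissa = 0), corresponds to the
+// register format value 0xc000000000000000 (Sign = 1, Exponent = 1024,
+// Mantissa = 0); the mantissa field is shifted by (52 - 23) bits and the
+// exponent bias changes from 127 to 1023.
+//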
+
+BOOLEAN
+KiConvertQuadwordToLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN LONGLONG Quadword
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to convert a quadword operand to a longword
+ result.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+    Quadword - Supplies the quadword operand value.
+
+Return Value:
+
+ If the quadword value would overflow the longword result and the invalid
+    trap is enabled, then a value of FALSE is returned. Otherwise, the quadword
+ is truncated to a longword and a value of TRUE is returned.
+
+--*/
+
+{
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONGLONG ResultValue;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // Truncate the quadword to a longword and convert the longword integer
+ // to floating register longword integer format.
+ //
+
+ ResultValue = ((Quadword & (ULONGLONG)0xc0000000) << 32) |
+ ((Quadword & (ULONGLONG)0x3fffffff) << 29);
+
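+    //
+    // For example, a longword value of 0x80000001 is repacked as the
+    // register format value 0x8000000020000000: longword bits 31..30
+    // move to bits 63..62 and longword bits 29..0 move to bits 58..29.
+    //
+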
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if ((Quadword < (LONG)0x80000000) || (Quadword > (LONG)0x7fffffff)) {
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->InvalidOperation = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusInvalid = 1;
+ if (SoftwareFpcr->EnableInvalid != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.U64Value.LowPart = LOW_PART(ResultValue);
+ IeeeValue->Value.U64Value.HighPart = HIGH_PART(ResultValue);
+ return FALSE;
+ }
+
+ Fpcr->DisableInvalid = 1;
+ }
+
+ //
+ // Set the destination register value and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+ return TRUE;
+}
+
+BOOLEAN
+KiDivideByZeroDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN or properly signed infinity for a divide by zero double
+ floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+    DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the divide by zero trap is enabled and the dividend is not infinite,
+ then a value of FALSE is returned. Otherwise, a quiet NaN or a properly
+ signed infinity is stored as the destination result and a value of TRUE
+ is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultSign;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // The result value is a properly signed infinity.
+ //
+
+ ResultSign = DoubleOperand1->Sign ^ DoubleOperand2->Sign;
+ ResultValueHigh = DOUBLE_INFINITY_VALUE_HIGH | (ResultSign << 31);
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+
+ //
+ // If the first operand is not infinite and the divide by zero trap is
+ // enabled, then store the proper exception code and exception flags
+ // and return a value of FALSE. Otherwise, store the appropriately signed
+ // infinity and return a value of TRUE.
+ //
+
+ if (DoubleOperand1->Infinity == FALSE) {
+
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->DivisionByZero = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ return FALSE;
+ }
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusDivisionByZero = 1;
+ if (SoftwareFpcr->EnableDivisionByZero != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+
+ Fpcr->DisableDivisionByZero = 1;
+ }
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ MAKE_QUAD(ResultValueLow, ResultValueHigh),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+BOOLEAN
+KiDivideByZeroSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN or properly signed infinity for a divide by zero single
+ floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+    SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the divide by zero trap is enabled and the dividend is not infinite,
+ then a value of FALSE is returned. Otherwise, a quiet NaN or a properly
+ signed infinity is stored as the destination result and a value of TRUE
+ is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultSign;
+ ULONG ResultValue;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // The result value is a properly signed infinity.
+ //
+
+ ResultSign = SingleOperand1->Sign ^ SingleOperand2->Sign;
+ ResultValue = SINGLE_INFINITY_VALUE | (ResultSign << 31);
+
+ //
+ // If the first operand is not infinite and the divide by zero trap is
+ // enabled, then store the proper exception code and exception flags
+ // and return a value of FALSE. Otherwise, store the appropriately signed
+ // infinity and return a value of TRUE.
+ //
+
+ if (SingleOperand1->Infinity == FALSE) {
+
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->DivisionByZero = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ return FALSE;
+ }
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusDivisionByZero = 1;
+ if (SoftwareFpcr->EnableDivisionByZero != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+
+ Fpcr->DisableDivisionByZero = 1;
+ }
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ KiConvertSingleOperandToRegister(ResultValue),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+PFP_IEEE_VALUE
+KiInitializeIeeeValue (
+ IN PEXCEPTION_RECORD ExceptionRecord
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize an IEEE exception record.
+
+ N.B. The original hardware exception record should be overwritten with an
+ IEEE exception record only when it is known for certain that an IEEE
+ exception must be generated.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to the exception record.
+
+Return Value:
+
+ The address of the IEEE value portion of the exception record is returned
+ as the function value.
+
+--*/
+
+{
+
+ //
+ // Initialize the number of exception information parameters, zero
+ // the first parameter to indicate a hardware initiated exception,
+ // set the continuation address, and clear the IEEE exception value.
+ //
+
+ ExceptionRecord->NumberParameters = 6;
+ ExceptionRecord->ExceptionInformation[0] = 0;
+ ExceptionRecord->ExceptionInformation[1] =
+ ((ULONG)(ExceptionRecord)->ExceptionAddress) + 4;
+ ExceptionRecord->ExceptionInformation[2] = 0;
+ ExceptionRecord->ExceptionInformation[3] = 0;
+ ExceptionRecord->ExceptionInformation[4] = 0;
+ ExceptionRecord->ExceptionInformation[5] = 0;
+
+ //
+ // Return address of IEEE exception value.
+ //
+
+ return (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+}
+
+BOOLEAN
+KiInvalidCompareDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForSignalNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to determine whether an invalid operation
+ exception should be raised for a double compare operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForSignalNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+    DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+    invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, no operation is performed and a value
+ of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, perform no operation and return a
+ // value of TRUE.
+ //
+
+ if ((CheckForSignalNan == FALSE) ||
+ (DoubleSignalNan(DoubleOperand1) != FALSE) ||
+ (DoubleSignalNan(DoubleOperand2) != FALSE)) {
+
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->InvalidOperation = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusInvalid = 1;
+ if (SoftwareFpcr->EnableInvalid != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.CompareValue = FpCompareUnordered;
+ return FALSE;
+ }
+
+ Fpcr->DisableInvalid = 1;
+ }
+
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidOperationDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForSignalNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid double floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForSignalNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+    DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+    invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, a quiet NaN is stored as the destination
+ result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // If the second operand is a NaN, then compute a quiet NaN from its
+ // value. Otherwise, if the first operand is a NaN, then compute a
+ // quiet NaN from its value. Otherwise, the result value is a quiet
+ // (real indefinite) NaN.
+ //
+
+ DBGPRINT("Operand1: Inf=%d NaN=%d Sign=%d Exponent=%d Mantissa=%.8x%.8x\n",
+ DoubleOperand1->Infinity, DoubleOperand1->Nan,
+ DoubleOperand1->Sign,
+ DoubleOperand1->Exponent,
+ DoubleOperand1->MantissaHigh, DoubleOperand1->MantissaLow);
+ DBGPRINT("Operand2: Inf=%d NaN=%d Sign=%d Exponent=%d Mantissa=%.8x%.8x\n",
+ DoubleOperand2->Infinity, DoubleOperand2->Nan,
+ DoubleOperand2->Sign,
+ DoubleOperand2->Exponent,
+ DoubleOperand2->MantissaHigh, DoubleOperand2->MantissaLow);
+
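+    //
+    // N.B. The two bit right shift below removes the guard and round bits
+    //      that KiUnpackDouble inserted, and the quiet NaN prefix bits
+    //      force the result to be a quiet NaN.
+    //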
+ if (DoubleOperand2->Nan != FALSE) {
+ ResultValueLow = DoubleOperand2->MantissaLow >> 2;
+ ResultValueLow |= DoubleOperand2->MantissaHigh << 30;
+ ResultValueHigh = DoubleOperand2->MantissaHigh >> 2;
+ ResultValueHigh |= DOUBLE_QUIET_NAN_PREFIX_HIGH;
+ ResultValueHigh |= DoubleOperand2->Sign << 31;
+
+    } else if (DoubleOperand1->Nan != FALSE) {
+ ResultValueLow = DoubleOperand1->MantissaLow >> 2;
+ ResultValueLow |= DoubleOperand1->MantissaHigh << 30;
+ ResultValueHigh = DoubleOperand1->MantissaHigh >> 2;
+ ResultValueHigh |= DOUBLE_QUIET_NAN_PREFIX_HIGH;
+ ResultValueHigh |= DoubleOperand1->Sign << 31;
+
+ } else {
+ ResultValueLow = DOUBLE_QUIET_NAN_VALUE_LOW;
+ ResultValueHigh = DOUBLE_QUIET_NAN_VALUE_HIGH;
+ }
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, store a quiet NaN as the destination
+ // result and return a value of TRUE.
+ //
+
+ if ((CheckForSignalNan == FALSE) ||
+ (DoubleSignalNan(DoubleOperand1) != FALSE) ||
+ (DoubleSignalNan(DoubleOperand2) != FALSE)) {
+
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->InvalidOperation = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusInvalid = 1;
+ if (SoftwareFpcr->EnableInvalid != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+
+ Fpcr->DisableInvalid = 1;
+ }
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ MAKE_QUAD(ResultValueLow, ResultValueHigh),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidOperationQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN ULONGLONG ResultValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid conversion to quadword.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+    ResultValue - Supplies a quadword result value to be stored.
+
+Return Value:
+
+ If the invalid operation trap is enabled, then a value of FALSE is
+ returned. Otherwise, an appropriate quadword value is stored as the
+ destination result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // If the invalid operation trap is enabled then store the proper
+ // exception code and exception flags and return a value of FALSE.
+ // Otherwise, store a quiet NaN as the destination result and return
+ // a value of TRUE.
+ //
+
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->InvalidOperation = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusInvalid = 1;
+ if (SoftwareFpcr->EnableInvalid != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.U64Value.LowPart = LOW_PART(ResultValue);
+ IeeeValue->Value.U64Value.HighPart = HIGH_PART(ResultValue);
+ return FALSE;
+ }
+
+ Fpcr->DisableInvalid = 1;
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidOperationSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForSignalNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid single floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForSignalNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+    SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+    invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, a quiet NaN is stored as the destination
+ result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultValue;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // If the second operand is a NaN, then compute a quiet NaN from its
+ // value. Otherwise, if the first operand is a NaN, then compute a
+ // quiet NaN from its value. Otherwise, the result value is a quiet
+ // (real indefinite) NaN.
+ //
+
+ if (SingleOperand2->Nan != FALSE) {
+ ResultValue = SingleOperand2->Mantissa >> 2;
+ ResultValue |= SINGLE_QUIET_NAN_PREFIX;
+ ResultValue |= SingleOperand2->Sign << 31;
+
+ } else if (SingleOperand1->Nan != FALSE) {
+ ResultValue = SingleOperand1->Mantissa >> 2;
+ ResultValue |= SINGLE_QUIET_NAN_PREFIX;
+ ResultValue |= SingleOperand1->Sign << 31;
+
+ } else {
+ ResultValue = SINGLE_QUIET_NAN_VALUE;
+ }
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, store a quiet NaN as the destination
+ // result and return a value of TRUE.
+ //
+
+ if ((CheckForSignalNan == FALSE) ||
+ (SingleSignalNan(SingleOperand1) != FALSE) ||
+ (SingleSignalNan(SingleOperand2) != FALSE)) {
+
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->InvalidOperation = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ return FALSE;
+ }
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusInvalid = 1;
+ if (SoftwareFpcr->EnableInvalid != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+
+ Fpcr->DisableInvalid = 1;
+ }
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ KiConvertSingleOperandToRegister(ResultValue),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand,
+ IN ULONGLONG StickyBits
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to normalize a double floating result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and a possible overflow bit.
+ The result format is:
+
+ <63:56> - zero
+ <55> - overflow bit
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ The sticky bits specify bits that were lost during the computation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+ StickyBits - Supplies the value of the sticky bits.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONGLONG DenormalizeShift;
+ PEXCEPTION_RECORD ExceptionRecord;
+ ULONGLONG ExceptionResult;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ ULONGLONG Mantissa;
+ BOOLEAN Overflow;
+ ULONGLONG ResultValue;
+ ULONG RoundBit;
+ PSW_FPCR SoftwareFpcr;
+ BOOLEAN Underflow;
+
+ //
+ // If the result is infinite, then store a properly signed infinity
+ // in the destination register and return a value of TRUE. Otherwise,
+ // round and normalize the result and check for overflow and underflow.
+ //
+
+ DBGPRINT("KiNormalizeDouble: Inf=%d NaN=%d Sign=%d Exponent=%d Mantissa=%.8x%.8x\n",
+ ResultOperand->Infinity, ResultOperand->Nan, ResultOperand->Sign,
+ ResultOperand->Exponent,
+ ResultOperand->MantissaHigh, ResultOperand->MantissaLow);
+ DBGPRINT("KiNormalizeDouble: StickyBits=%.16Lx\n", StickyBits);
+
+ if (ResultOperand->Infinity != FALSE) {
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ MAKE_QUAD(DOUBLE_INFINITY_VALUE_LOW,
+ DOUBLE_INFINITY_VALUE_HIGH |
+ (ResultOperand->Sign << 31)),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+ }
+
+ Mantissa = MAKE_QUAD(ResultOperand->MantissaLow,
+ ResultOperand->MantissaHigh);
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+
+ //
+ // If the overflow bit is set, then right shift the mantissa one bit,
+ // accumulate the lost bit with the sticky bits, and adjust the exponent
+ // value.
+ //
+
+ if ((Mantissa & ((ULONGLONG)1 << 55)) != 0) {
+ StickyBits |= (Mantissa & 0x1);
+ Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the mantissa is nonzero, then normalize the mantissa by left
+ // shifting one bit at a time until there is a one bit in bit 54.
+ //
+
+ if (Mantissa != 0) {
+ while ((Mantissa & ((ULONGLONG)1 << 54)) == 0) {
+ Mantissa <<= 1;
+ ResultOperand->Exponent -= 1;
+ }
+ }
+
+ //
+ // Right shift the mantissa two bits, set the round bit, and accumulate
+ // the other lost bit with the sticky bits. Round the result value using
+ // the mantissa, the round bit, and the sticky bits.
+ //
+
+ StickyBits |= (Mantissa & 0x1);
+ RoundBit = (ULONG)(Mantissa & 0x2);
+ Mantissa >>= 2;
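+
+    //
+    // N.B. With round to nearest, the mantissa is incremented only when the
+    //      round bit is set and either a sticky bit or the low bit of the
+    //      mantissa is set, which rounds ties to the nearest even value.
+    //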
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((Mantissa & 0x1) != 0)) {
+ Mantissa += 1;
+ }
+ }
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ Mantissa += 1;
+ }
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ Mantissa += 1;
+ }
+ break;
+ }
+
+ //
+ // If rounding resulted in a carry into bit 53, then right shift the
+ // mantissa one bit and adjust the exponent.
+ //
+
+ if ((Mantissa & ((ULONGLONG)1 << 53)) != 0) {
+ Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the exponent value is greater than or equal to the maximum
+ // exponent value, then overflow has occurred. This results in both
+ // the inexact and overflow sticky bits being set in the FPCR.
+ //
+ // If the exponent value is less than or equal to the minimum exponent
+ // value, the mantissa is nonzero, and the result is inexact or the
+ // denormalized result causes loss of accuracy, then underflow has occurred.
+ // This results in both the inexact and underflow sticky bits being set
+ // in the FPCR.
+ //
+ // Otherwise, a normal result can be delivered, but it may be inexact.
+ // If the result is inexact, then the inexact sticky bit is set in the
+ // FPCR.
+ //
+
+ if (ResultOperand->Exponent >= DOUBLE_MAXIMUM_EXPONENT) {
+ Inexact = TRUE;
+ Overflow = TRUE;
+ Underflow = FALSE;
+
+ //
+ // The overflow value is dependent on the rounding mode.
+ //
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+ // The result value is infinity with the sign of the result.
+ //
+
+ case ROUND_TO_NEAREST:
+ ResultValue = MAKE_QUAD(DOUBLE_INFINITY_VALUE_LOW,
+ DOUBLE_INFINITY_VALUE_HIGH |
+ (ResultOperand->Sign << 31));
+ break;
+
+ //
+ // Round toward zero.
+ //
+ // The result is the maximum number with the sign of the result.
+ //
+
+ case ROUND_TO_ZERO:
+ ResultValue = MAKE_QUAD(DOUBLE_MAXIMUM_VALUE_LOW,
+ DOUBLE_MAXIMUM_VALUE_HIGH |
+ (ResultOperand->Sign << 31));
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+ // If the sign of the result is positive, then the result is
+ // plus infinity. Otherwise, the result is the maximum negative
+ // number.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if (ResultOperand->Sign == 0) {
+ ResultValue = MAKE_QUAD(DOUBLE_INFINITY_VALUE_LOW,
+ DOUBLE_INFINITY_VALUE_HIGH);
+
+ } else {
+ ResultValue = MAKE_QUAD(DOUBLE_MAXIMUM_VALUE_LOW,
+ DOUBLE_MAXIMUM_VALUE_HIGH |
+ (1 << 31));
+ }
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+ // If the sign of the result is negative, then the result is
+ // negative infinity. Otherwise, the result is the maximum
+ // positive number.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if (ResultOperand->Sign != 0) {
+ ResultValue = MAKE_QUAD(DOUBLE_INFINITY_VALUE_LOW,
+ DOUBLE_INFINITY_VALUE_HIGH |
+ (1 << 31));
+
+ } else {
+ ResultValue = MAKE_QUAD(DOUBLE_MAXIMUM_VALUE_LOW,
+ DOUBLE_MAXIMUM_VALUE_HIGH);
+ }
+ break;
+ }
+
+ //
+ // Compute the overflow exception result value by subtracting 1536
+ // from the exponent.
+ //
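+        // The adjustment of 1536 is three quarters of the double format
+        // exponent range (2048), the exponent wrap defined by IEEE 754 for
+        // a trapped overflow result.
+        //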
+
+ ExceptionResult = Mantissa & (((ULONGLONG)1 << 52) - 1);
+ ExceptionResult |= (((ULONGLONG)ResultOperand->Exponent - 1536) << 52);
+ ExceptionResult |= ((ULONGLONG)ResultOperand->Sign << 63);
+
+ } else {
+
+ //
+ // After rounding if the exponent value is less than or equal to
+ // the minimum exponent value and the mantissa is nonzero, then
+ // underflow has occurred.
+ //
+
+ if ((ResultOperand->Exponent <= DOUBLE_MINIMUM_EXPONENT) &&
+ (Mantissa != 0)) {
+
+ //
+ // If the FPCR underflow to zero (denormal enable) control bit
+ // is set, then flush the denormalized result to zero and do
+ // not set an underflow status or generate an exception.
+ //
+
+ if ((ContextBlock->IeeeMode == FALSE) ||
+ (SoftwareFpcr->DenormalResultEnable == 0)) {
+ DBGPRINT("SoftwareFpcr->DenormalResultEnable == 0\n");
+ ResultValue = 0;
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+
+ } else {
+ DenormalizeShift = 1 - ResultOperand->Exponent;
+ if (DenormalizeShift > 52) {
+ StickyBits = Mantissa;
+
+ //
+ // The denormalized result value will be zero. If the
+ // rounding mode is round toward plus infinity and the
+ // sign of the result is positive, or if the rounding
+ // mode is round toward minus infinity and the sign of
+ // the result is negative, then keep the denormalized
+ // result nonzero.
+ //
+
+ if (((ContextBlock->Round == ROUND_TO_PLUS_INFINITY) &&
+ (ResultOperand->Sign == 0x0)) ||
+ ((ContextBlock->Round == ROUND_TO_MINUS_INFINITY) &&
+ (ResultOperand->Sign != 0x0))) {
+ ResultValue = 1;
+
+ } else {
+ ResultValue = 0;
+ }
+
+ } else {
+ StickyBits |= Mantissa << (64 - DenormalizeShift);
+ ResultValue = Mantissa >> DenormalizeShift;
+ }
+ ResultValue |= (ULONGLONG)ResultOperand->Sign << 63;
+
+ //
+ // Compute the underflow exception result value by adding
+ // 1536 to the exponent.
+ //
+
+ ExceptionResult = Mantissa & (((ULONGLONG)1 << 52) - 1);
+ ExceptionResult |= (((ULONGLONG)ResultOperand->Exponent + 1536) << 52);
+ ExceptionResult |= ((ULONGLONG)ResultOperand->Sign << 63);
+
+ //
+ // If the denormalized result is inexact, then set underflow.
+ // Otherwise, for exact denormals do not set the underflow
+ // sticky bit, but generate an underflow exception if that
+ // exception is enabled.
+ //
+
+ Overflow = FALSE;
+ Underflow = TRUE;
+ if ((StickyBits != 0) || (RoundBit != 0)) {
+ Inexact = TRUE;
+
+ } else {
+ Inexact = FALSE;
+ }
+ }
+
+ } else {
+
+ //
+ // If the result is zero, then set the proper sign for zero.
+ //
+
+ if (Mantissa == 0) {
+ ResultOperand->Exponent = 0;
+ }
+
+ ResultValue = Mantissa & (((ULONGLONG)1 << 52) - 1);
+ ResultValue |= (ULONGLONG)ResultOperand->Exponent << 52;
+ ResultValue |= (ULONGLONG)ResultOperand->Sign << 63;
+ if ((StickyBits != 0) || (RoundBit != 0)) {
+ Inexact = TRUE;
+
+ } else {
+ Inexact = FALSE;
+ }
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+
+ if (Overflow != FALSE) {
+ Fpcr->Overflow = 1;
+ Fpcr->InexactResult = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+ return FALSE;
+ }
+ SoftwareFpcr->StatusOverflow = 1;
+ SoftwareFpcr->StatusInexact = 1;
+ if (SoftwareFpcr->EnableOverflow != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+ IeeeValue->Value.Fp64Value.W[0] = LOW_PART(ExceptionResult);
+ IeeeValue->Value.Fp64Value.W[1] = HIGH_PART(ExceptionResult);
+ return FALSE;
+
+ } else if (SoftwareFpcr->EnableInexact != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ IeeeValue->Value.Fp64Value.W[0] = LOW_PART(ExceptionResult);
+ IeeeValue->Value.Fp64Value.W[1] = HIGH_PART(ExceptionResult);
+ return FALSE;
+ }
+
+ Fpcr->DisableOverflow = 1;
+ Fpcr->DisableInexact = 1;
+
+ } else if (Underflow != FALSE) {
+ if (Inexact != FALSE) {
+ Fpcr->Underflow = 1;
+ Fpcr->InexactResult = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode != FALSE) {
+ SoftwareFpcr->StatusUnderflow = 1;
+ SoftwareFpcr->StatusInexact = 1;
+ }
+ }
+ if (ContextBlock->IeeeMode == FALSE) {
+ if (ContextBlock->UnderflowEnable != FALSE) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+ return FALSE;
+ }
+
+ } else if (SoftwareFpcr->EnableUnderflow != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+ IeeeValue->Value.Fp64Value.W[0] = LOW_PART(ExceptionResult);
+ IeeeValue->Value.Fp64Value.W[1] = HIGH_PART(ExceptionResult);
+ return FALSE;
+
+ } else if (SoftwareFpcr->EnableInexact != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ IeeeValue->Value.Fp64Value.W[0] = LOW_PART(ExceptionResult);
+ IeeeValue->Value.Fp64Value.W[1] = HIGH_PART(ExceptionResult);
+ return FALSE;
+ }
+
+ if (Inexact != FALSE) {
+ Fpcr->DisableUnderflow = 1;
+ Fpcr->DisableInexact = 1;
+ }
+
+ } else if (Inexact != FALSE) {
+ Fpcr->InexactResult = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode != FALSE) {
+ SoftwareFpcr->StatusInexact = 1;
+ if (SoftwareFpcr->EnableInexact != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ IeeeValue->Value.Fp64Value.W[0] = LOW_PART(ResultValue);
+ IeeeValue->Value.Fp64Value.W[1] = HIGH_PART(ResultValue);
+ return FALSE;
+ }
+
+ Fpcr->DisableInexact = 1;
+ }
+ }
+
+ //
+ // Set the destination register value and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to convert a result value to a quadword result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and an overflow bit of zero.
+ As called above, the guard bit and the round bit are also zero.
+ The result format is:
+
+ <63:55> - zero
+         <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ There are no sticky bits.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ LONGLONG ExponentShift;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONGLONG Mantissa;
+ BOOLEAN Overflow;
+ ULONGLONG ResultValue;
+ ULONG RoundBit;
+ ULONGLONG StickyBits;
+ PSW_FPCR SoftwareFpcr;
+
+ //
+ // Subtract out the exponent bias and divide the cases into right
+ // and left shifts.
+ //
+
+ ExponentShift = ResultOperand->Exponent - DOUBLE_EXPONENT_BIAS;
+ DBGPRINT("KiNormalizeQuadword: Inf=%d NaN=%d Sign=%d Exponent=%d Mantissa=%.8x%.8x\n",
+ ResultOperand->Infinity, ResultOperand->Nan, ResultOperand->Sign,
+ ResultOperand->Exponent,
+ ResultOperand->MantissaHigh, ResultOperand->MantissaLow);
+ DBGPRINT(".. ExponentShift = %d\n", ExponentShift);
+ Mantissa = MAKE_QUAD(ResultOperand->MantissaLow,
+ ResultOperand->MantissaHigh);
+
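+    //
+    // For example, converting the double value 3.0 (biased exponent 1024,
+    // mantissa bits <54:53> set) leaves the exponent one greater than the
+    // bias, so the mantissa is shifted right by 53 bits, yielding the
+    // quadword result 3 with no sticky or round bits.
+    //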
+ if (ExponentShift < 54) {
+
+ //
+ // The integer result value is less than 2**54 and so a right shift
+ // must be performed.
+ //
+
+ ExponentShift = 54 - ExponentShift;
+ if (ExponentShift < 64) {
+ StickyBits = Mantissa << (64 - ExponentShift);
+ ResultValue = Mantissa >> ExponentShift;
+
+ } else {
+ StickyBits = Mantissa;
+ ResultValue = 0;
+ }
+ Overflow = FALSE;
+
+ } else if (ExponentShift > 54) {
+ ExponentShift -= 54;
+
+ //
+ // The integer result value is 2**54 or greater and so a left shift
+ // must be performed. If the unsigned integer result value is 2**64
+ // or greater, then overflow has occurred and store the low order 64
+ // bits of the true result.
+ //
+
+ if (ExponentShift < (64 - 54)) {
+ StickyBits = Mantissa >> (64 - ExponentShift);
+ ResultValue = Mantissa << ExponentShift;
+ Overflow = FALSE;
+
+ } else {
+ StickyBits = 0;
+ if (ExponentShift < 64) {
+ ResultValue = Mantissa << ExponentShift;
+
+ } else {
+ ResultValue = 0;
+ }
+ Overflow = TRUE;
+ }
+
+ } else {
+ StickyBits = 0;
+ ResultValue = Mantissa;
+ Overflow = FALSE;
+ }
+ DBGPRINT(".. ResultValue = %.16Lx, StickyBits = %.16Lx\n",
+ ResultValue, StickyBits);
+
+ //
+ // Round the result value using the mantissa, the round bit, and the
+ // sticky bits.
+ //
+
+ RoundBit = (ULONG)(StickyBits >> 63);
+ StickyBits <<= 1;
+ DBGPRINT(".. ResultValue = %.16Lx, StickyBits = %.16Lx, RoundBit = %lx\n",
+ ResultValue, StickyBits, RoundBit);
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((ResultValue & 0x1) != 0)) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+ }
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+ break;
+ }
+
+ //
+ // If the result value is positive and the result is negative, then
+ // overflow has occurred. Otherwise, negate the result value and
+ // check if the result is negative. If the result is positive, then
+ // overflow has occurred.
+ //
+
+ if (ResultOperand->Sign == 0) {
+ if ((LONGLONG)ResultValue < 0) {
+ Overflow = TRUE;
+ }
+
+ } else {
+ ResultValue = -(LONGLONG)ResultValue;
+ if ((LONGLONG)ResultValue > 0) {
+ Overflow = TRUE;
+ }
+ }
+ DBGPRINT(".. ResultValue = %.16Lx, StickyBits = %.16Lx\n",
+ ResultValue, StickyBits);
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ return KiInvalidOperationQuadword(ContextBlock, ResultValue);
+
+ } else if ((StickyBits | RoundBit) != 0) {
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ Fpcr->InexactResult = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode != FALSE) {
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+ SoftwareFpcr->StatusInexact = 1;
+ if (SoftwareFpcr->EnableInexact != 0) {
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+ IeeeValue->Value.U64Value.LowPart = LOW_PART(ResultValue);
+ IeeeValue->Value.U64Value.HighPart = HIGH_PART(ResultValue);
+ return FALSE;
+ }
+
+ Fpcr->DisableInexact = 1;
+ }
+ }
+
+ //
+ // Set the destination register value and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to normalize a single floating result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and a possible overflow bit.
+ The result format is:
+
+ <31:27> - zero
+ <26> - overflow bit
+ <25> - hidden bit
+ <24:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ The sticky bits specify bits that were lost during the computation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+ StickyBits - Supplies the value of the sticky bits.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG DenormalizeShift;
+ PEXCEPTION_RECORD ExceptionRecord;
+ ULONG ExceptionResult;
+ PFPCR Fpcr;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ ULONG Mantissa;
+ BOOLEAN Overflow;
+ ULONG ResultValue;
+ ULONG RoundBit;
+ PSW_FPCR SoftwareFpcr;
+ BOOLEAN Underflow;
+
+ //
+ // If the result is infinite, then store a properly signed infinity
+ // in the destination register and return a value of TRUE. Otherwise,
+ // round and normalize the result and check for overflow and underflow.
+ //
+
+ DBGPRINT("KiNormalizeSingle: Inf=%d NaN=%d Sign=%d Exponent=%d Mantissa=%.8x\n",
+ ResultOperand->Infinity, ResultOperand->Nan, ResultOperand->Sign,
+ ResultOperand->Exponent, ResultOperand->Mantissa);
+ DBGPRINT("KiNormalizeSingle: StickyBits=%.8lx\n", StickyBits);
+
+ if (ResultOperand->Infinity != FALSE) {
+ ResultValue = SINGLE_INFINITY_VALUE | (ResultOperand->Sign << 31);
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ KiConvertSingleOperandToRegister(ResultValue),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+ }
+
+ Mantissa = ResultOperand->Mantissa;
+ Fpcr = (PFPCR)&ContextBlock->TrapFrame->Fpcr;
+ SoftwareFpcr = ContextBlock->SoftwareFpcr;
+
+ //
+ // If the overflow bit is set, then right shift the mantissa one bit,
+ // accumulate the lost bit with the sticky bits, and adjust the exponent
+ // value.
+ //
+
+ if ((Mantissa & (1 << 26)) != 0) {
+ StickyBits |= (Mantissa & 0x1);
+ Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the mantissa is nonzero, then normalize the mantissa by left
+ // shifting one bit at a time until there is a one bit in bit 25.
+ //
+
+ if (Mantissa != 0) {
+ while ((Mantissa & ((ULONGLONG)1 << 25)) == 0) {
+ Mantissa <<= 1;
+ ResultOperand->Exponent -= 1;
+ }
+ }
+
+ //
+ // Right shift the mantissa two bits, set the round bit, and accumulate
+ // the other lost bit with the sticky bits. Round the result value using
+ // the mantissa, the round bit, and the sticky bits.
+ //
+
+ StickyBits |= (Mantissa & 0x1);
+ RoundBit = (Mantissa & 0x2);
+ Mantissa >>= 2;
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((Mantissa & 0x1) != 0)) {
+ Mantissa += 1;
+ }
+ }
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ Mantissa += 1;
+ }
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ Mantissa += 1;
+ }
+ break;
+ }
+
+ //
+ // If rounding resulted in a carry into bit 24, then right shift the
+ // mantissa one bit and adjust the exponent.
+ //
+
+ if ((Mantissa & (1 << 24)) != 0) {
+ Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the exponent value is greater than or equal to the maximum
+ // exponent value, then overflow has occurred. This results in both
+ // the inexact and overflow sticky bits being set in the FPCR.
+ //
+ // If the exponent value is less than or equal to the minimum exponent
+ // value, the mantissa is nonzero, and the result is inexact or the
+ // denormalized result causes loss of accuracy, then underflow has occurred.
+ // This results in both the inexact and underflow sticky bits being set
+ // in the FPCR.
+ //
+ // Otherwise, a normal result can be delivered, but it may be inexact.
+ // If the result is inexact, then the inexact sticky bit is set in the
+ // FPCR.
+ //
+
+ if (ResultOperand->Exponent >= SINGLE_MAXIMUM_EXPONENT) {
+ Inexact = TRUE;
+ Overflow = TRUE;
+ Underflow = FALSE;
+
+ //
+ // The overflow value is dependent on the rounding mode.
+ //
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+ // The result value is infinity with the sign of the result.
+ //
+
+ case ROUND_TO_NEAREST:
+ ResultValue = SINGLE_INFINITY_VALUE | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward zero.
+ //
+ // The result is the maximum number with the sign of the result.
+ //
+
+ case ROUND_TO_ZERO:
+ ResultValue = SINGLE_MAXIMUM_VALUE | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+ // If the sign of the result is positive, then the result is
+ // plus infinity. Otherwise, the result is the maximum negative
+ // number.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if (ResultOperand->Sign == 0) {
+ ResultValue = SINGLE_INFINITY_VALUE;
+
+ } else {
+ ResultValue = (ULONG)(SINGLE_MAXIMUM_VALUE | (1 << 31));
+ }
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+ // If the sign of the result is negative, then the result is
+ // negative infinity. Otherwise, the result is the maximum
+ // positive number.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if (ResultOperand->Sign != 0) {
+ ResultValue = (ULONG)(SINGLE_INFINITY_VALUE | (1 << 31));
+
+ } else {
+ ResultValue = SINGLE_MAXIMUM_VALUE;
+ }
+ break;
+ }
+
+ //
+ // Compute the overflow exception result value by subtracting 192
+ // from the exponent.
+ //
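+        // The adjustment of 192 is three quarters of the single format
+        // exponent range (256), the exponent wrap defined by IEEE 754 for
+        // a trapped overflow result.
+        //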
+
+ ExceptionResult = Mantissa & ((1 << 23) - 1);
+ ExceptionResult |= ((ResultOperand->Exponent - 192) << 23);
+ ExceptionResult |= (ResultOperand->Sign << 31);
+
+ } else {
+
+ //
+ // After rounding if the exponent value is less than or equal to
+ // the minimum exponent value and the mantissa is nonzero, then
+ // underflow has occurred.
+ //
+
+ if ((ResultOperand->Exponent <= SINGLE_MINIMUM_EXPONENT) &&
+ (Mantissa != 0)) {
+
+ //
+ // If the FPCR underflow to zero (denormal enable) control bit
+ // is set, then flush the denormalized result to zero and do
+ // not set an underflow status or generate an exception.
+ //
+
+ if ((ContextBlock->IeeeMode == FALSE) ||
+ (SoftwareFpcr->DenormalResultEnable == 0)) {
+ DBGPRINT("SoftwareFpcr->DenormalResultEnable == 0\n");
+ ResultValue = 0;
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+
+ } else {
+ DenormalizeShift = 1 - ResultOperand->Exponent;
+ if (DenormalizeShift > 23) {
+ StickyBits = Mantissa;
+
+ //
+ // The denormalized result value will be zero. If the
+ // rounding mode is round toward plus infinity and the
+ // sign of the result is positive, or if the rounding
+ // mode is round toward minus infinity and the sign of
+ // the result is negative, then keep the denormalized
+ // result nonzero.
+ //
+
+ if (((ContextBlock->Round == ROUND_TO_PLUS_INFINITY) &&
+ (ResultOperand->Sign == 0x0)) ||
+ ((ContextBlock->Round == ROUND_TO_MINUS_INFINITY) &&
+ (ResultOperand->Sign != 0x0))) {
+ ResultValue = 1;
+
+ } else {
+ ResultValue = 0;
+ }
+
+ } else {
+ StickyBits |= Mantissa << (32 - DenormalizeShift);
+ ResultValue = Mantissa >> DenormalizeShift;
+ }
+ ResultValue |= ResultOperand->Sign << 31;
+
+ //
+ // Compute the underflow exception result value by adding
+ // 192 to the exponent.
+ //
+
+ ExceptionResult = Mantissa & ((1 << 23) - 1);
+ ExceptionResult |= ((ResultOperand->Exponent + 192) << 23);
+ ExceptionResult |= (ResultOperand->Sign << 31);
+
+ //
+ // If the denormalized result is inexact, then set underflow.
+ // Otherwise, for exact denormals do not set the underflow
+ // sticky bit, but generate an underflow exception if that
+ // exception is enabled.
+ //
+
+ Overflow = FALSE;
+ Underflow = TRUE;
+ if ((StickyBits != 0) || (RoundBit != 0)) {
+ Inexact = TRUE;
+
+ } else {
+ Inexact = FALSE;
+ }
+ }
+
+ } else {
+
+ //
+ // If the result is zero, then set the proper sign for zero.
+ //
+
+ if (Mantissa == 0) {
+ ResultOperand->Exponent = 0;
+ }
+
+ ResultValue = Mantissa & ((1 << 23) - 1);
+ ResultValue |= (ResultOperand->Exponent << 23);
+ ResultValue |= (ResultOperand->Sign << 31);
+ if ((StickyBits != 0) || (RoundBit != 0)) {
+ Inexact = TRUE;
+
+ } else {
+ Inexact = FALSE;
+ }
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ IeeeValue = KiInitializeIeeeValue(ExceptionRecord);
+
+ if (Overflow != FALSE) {
+ Fpcr->Overflow = 1;
+ Fpcr->InexactResult = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode == FALSE) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+ return FALSE;
+ }
+ SoftwareFpcr->StatusOverflow = 1;
+ SoftwareFpcr->StatusInexact = 1;
+ if (SoftwareFpcr->EnableOverflow != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ return FALSE;
+
+ } else if (SoftwareFpcr->EnableInexact != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ return FALSE;
+ }
+
+ Fpcr->DisableOverflow = 1;
+ Fpcr->DisableInexact = 1;
+
+ } else if (Underflow != FALSE) {
+ if (Inexact != FALSE) {
+ Fpcr->Underflow = 1;
+ Fpcr->InexactResult = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode != FALSE) {
+ SoftwareFpcr->StatusUnderflow = 1;
+ SoftwareFpcr->StatusInexact = 1;
+ }
+ }
+ if (ContextBlock->IeeeMode == FALSE) {
+ if (ContextBlock->UnderflowEnable != FALSE) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+ return FALSE;
+ }
+
+ } else if (SoftwareFpcr->EnableUnderflow != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ return FALSE;
+
+ } else if (SoftwareFpcr->EnableInexact != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ return FALSE;
+ }
+
+ if (Inexact != FALSE) {
+ Fpcr->DisableUnderflow = 1;
+ Fpcr->DisableInexact = 1;
+ }
+
+ } else if (Inexact != FALSE) {
+ Fpcr->InexactResult = 1;
+ Fpcr->SummaryBit = 1;
+ if (ContextBlock->IeeeMode != FALSE) {
+ SoftwareFpcr->StatusInexact = 1;
+ if (SoftwareFpcr->EnableInexact != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+
+ Fpcr->DisableInexact = 1;
+ }
+ }
+
+ //
+ // Set the destination register value and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fc + 32,
+ KiConvertSingleOperandToRegister(ResultValue),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ return TRUE;
+}
+
+VOID
+KiUnpackDouble (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_DOUBLE_OPERAND DoubleOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to unpack a double floating value from the
+ specified source register.
+
+ N.B. The unpacked mantissa value is returned with a guard bit and a
+ round bit on the right and the hidden bit inserted if appropriate.
+ The format of the returned value is:
+
+ <63:55> - zero
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
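+
+         For example, the double value 1.0 (0x3FF0000000000000) unpacks to
+         Sign = 0, Exponent = 1023, and a mantissa with only the hidden bit
+         <54> set.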
+
+Arguments:
+
+ Source - Supplies the number of the register that contains the operand.
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ DoubleOperand - Supplies a pointer to a structure that is to receive the
+ operand value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONGLONG Value;
+ ULONG Value1;
+ ULONG Value2;
+
+ //
+ // Get the source register value and unpack the sign, exponent, and
+ // mantissa value.
+ //
+
+ Value = KiGetRegisterValue(Source + 32,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+ Value1 = (ULONG)Value;
+ Value2 = (ULONG)(Value >> 32);
+
+ DoubleOperand->Sign = Value2 >> 31;
+ DoubleOperand->Exponent = (Value2 >> (52 - 32)) & 0x7ff;
+ DoubleOperand->MantissaHigh = Value2 & 0xfffff;
+ DoubleOperand->MantissaLow = Value1;
+
+ //
+ // If the exponent is the largest possible value, then the number is
+ // either a NaN or an infinity. Otherwise if the exponent is the smallest
+ // possible value and the mantissa is nonzero, then the number is
+ // denormalized. Otherwise the number is finite and normal.
+ //
+
+ if (DoubleOperand->Exponent == DOUBLE_MAXIMUM_EXPONENT) {
+ DoubleOperand->Normal = FALSE;
+ if ((DoubleOperand->MantissaLow | DoubleOperand->MantissaHigh) != 0) {
+ DoubleOperand->Infinity = FALSE;
+ DoubleOperand->Nan = TRUE;
+
+ } else {
+ DoubleOperand->Infinity = TRUE;
+ DoubleOperand->Nan = FALSE;
+ }
+
+ } else {
+ DoubleOperand->Infinity = FALSE;
+ DoubleOperand->Nan = FALSE;
+ DoubleOperand->Normal = TRUE;
+ if (DoubleOperand->Exponent == DOUBLE_MINIMUM_EXPONENT) {
+ if ((DoubleOperand->MantissaHigh | DoubleOperand->MantissaLow) != 0) {
+ DoubleOperand->Normal = FALSE;
+ DoubleOperand->Exponent += 1;
+ while ((DoubleOperand->MantissaHigh & (1 << 20)) == 0) {
+ DoubleOperand->MantissaHigh =
+ (DoubleOperand->MantissaHigh << 1) |
+ (DoubleOperand->MantissaLow >> 31);
+ DoubleOperand->MantissaLow <<= 1;
+ DoubleOperand->Exponent -= 1;
+ }
+ }
+
+ } else {
+ DoubleOperand->MantissaHigh |= (1 << 20);
+ }
+ }
+
+ //
+ // Left shift the mantissa 2-bits to provide for a guard bit and a round
+ // bit.
+ //
+
+ DoubleOperand->MantissaHigh =
+ (DoubleOperand->MantissaHigh << 2) | (DoubleOperand->MantissaLow >> 30);
+ DoubleOperand->MantissaLow <<= 2;
+ DBGPRINT("KiUnpackDouble: Inf=%d NaN=%d Sign=%d Exponent=%d Mantissa=%.8x%.8x\n",
+ DoubleOperand->Infinity, DoubleOperand->Nan, DoubleOperand->Sign,
+ DoubleOperand->Exponent,
+ DoubleOperand->MantissaHigh, DoubleOperand->MantissaLow);
+
+ return;
+}
+
+VOID
+KiUnpackSingle (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_SINGLE_OPERAND SingleOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to unpack a single floating value from the
+ specified source register.
+
+ N.B. The unpacked mantissa value is returned with a guard bit and a
+ round bit on the right and the hidden bit inserted if appropriate.
+ The format of the returned value is:
+
+ <31:26> - zero
+ <25> - hidden bit
+ <24:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
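+
+         For example, the single value 1.0 (0x3F800000) unpacks to Sign = 0,
+         Exponent = 127, and a mantissa with only the hidden bit <25> set.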
+
+Arguments:
+
+ Source - Supplies the number of the register that contains the operand.
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ SingleOperand - Supplies a pointer to a structure that is to receive the
+ operand value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Value;
+
+ //
+ // Get the source register value and unpack the sign, exponent, and
+ // mantissa value.
+ //
+
+ Value = KiConvertRegisterToSingleOperand(
+ KiGetRegisterValue(Source + 32,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame));
+
+ SingleOperand->Sign = Value >> 31;
+ SingleOperand->Exponent = (Value >> 23) & 0xff;
+ SingleOperand->Mantissa = Value & 0x7fffff;
+
+ //
+ // If the exponent is the largest possible value, then the number is
+ // either a NaN or an infinity. Otherwise if the exponent is the smallest
+ // possible value and the mantissa is nonzero, then the number is
+ // denormalized. Otherwise the number is finite and normal.
+ //
+
+ if (SingleOperand->Exponent == SINGLE_MAXIMUM_EXPONENT) {
+ SingleOperand->Normal = FALSE;
+ if (SingleOperand->Mantissa != 0) {
+ SingleOperand->Infinity = FALSE;
+ SingleOperand->Nan = TRUE;
+
+ } else {
+ SingleOperand->Infinity = TRUE;
+ SingleOperand->Nan = FALSE;
+ }
+
+ } else {
+ SingleOperand->Infinity = FALSE;
+ SingleOperand->Nan = FALSE;
+ SingleOperand->Normal = TRUE;
+ if (SingleOperand->Exponent == SINGLE_MINIMUM_EXPONENT) {
+ if (SingleOperand->Mantissa != 0) {
+ SingleOperand->Normal = FALSE;
+ SingleOperand->Exponent += 1;
+ while ((SingleOperand->Mantissa & (1 << 23)) == 0) {
+ SingleOperand->Mantissa <<= 1;
+ SingleOperand->Exponent -= 1;
+ }
+ }
+
+ } else {
+ SingleOperand->Mantissa |= (1 << 23);
+ }
+ }
+
+ //
+ // Left shift the mantissa 2-bits to provide for a guard bit and a round
+ // bit.
+ //
+
+ SingleOperand->Mantissa <<= 2;
+ DBGPRINT("KiUnpackSingle: Inf=%d NaN=%d Sign=%d Exponent=%d Mantissa=%.8x\n",
+ SingleOperand->Infinity, SingleOperand->Nan, SingleOperand->Sign,
+ SingleOperand->Exponent, SingleOperand->Mantissa);
+ return;
+}
diff --git a/private/ntos/ke/alpha/flush.c b/private/ntos/ke/alpha/flush.c
new file mode 100644
index 000000000..44dc7fea5
--- /dev/null
+++ b/private/ntos/ke/alpha/flush.c
@@ -0,0 +1,548 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1993 Digital Equipment Corporation
+
+Module Name:
+
+ flush.c
+
+Abstract:
+
+ This module implements Alpha AXP machine dependent kernel functions to flush
+ the data and instruction caches and to flush I/O buffers.
+
+Author:
+
+ David N. Cutler (davec) 26-Apr-1990
+ Joe Notarangelo 29-Nov-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ );
+
+
+VOID
+KeSweepDcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the data cache on all processors that are currently
+ running threads which are children of the current process or flushes the
+ data cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which data
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepDcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, SweepDcache);
+
+#endif
+
+ //
+ // Sweep the data cache on the current processor.
+ //
+
+ HalSweepDcache();
+
+ //
+    // Wait until all target processors have finished sweeping their
+ // data cache.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the data cache on target
+ processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+        requested operation has been performed.
+
+ Parameter1 - Parameter3 - not used
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the data cache on the current processor and clear the sweep
+ // data cache packet address to signal the source to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalSweepDcache();
+ KiIpiSignalPacketDone(SignalDone);
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, SweepDcache);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the instruction cache on all processors that are
+ currently running threads which are children of the current process or
+ flushes the instruction cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepIcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, SweepIcache);
+
+#endif
+
+ //
+ // Sweep the instruction cache on the current processor.
+ //
+
+ KiImb();
+
+ //
+    // Wait until all target processors have finished sweeping their
+ // instruction cache.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the instruction cache on
+ target processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+        requested operation has been performed.
+
+ Parameter1 - Parameter3 - not used
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the instruction cache on the current processor and clear
+ // the sweep instruction cache packet address to signal the source
+ // to continue.
+ //
+
+#if !defined(NT_UP)
+
+ KiImb();
+ KiIpiSignalPacketDone(SignalDone);
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, SweepIcache);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcacheRange (
+ IN BOOLEAN AllProcessors,
+ IN PVOID BaseAddress,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+    This function flushes a range of virtual addresses from the primary
+ instruction cache on all processors that are currently running threads
+ which are children of the current process or flushes the range of virtual
+ addresses from the primary instruction cache on all processors in the host
+ configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
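+    //
+    // N.B. The base address and length arguments are not used; the entire
+    //      instruction cache is swept.
+    //
+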
+ KeSweepIcache( AllProcessors );
+
+ return;
+}
+
+VOID
+KeFlushIoBuffers (
+ IN PMDL Mdl,
+ IN BOOLEAN ReadOperation,
+ IN BOOLEAN DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the I/O buffer specified by the memory descriptor
+ list from the data cache on all processors.
+
+ Alpha requires that caches be coherent with respect to I/O. All that
+ this routine needs to do is execute a memory barrier on the current
+ processor. However, in order to maintain i-stream coherency, all
+ processors must execute the IMB PAL call in the case of page reads.
+ Thus, all processors are IPI'd to perform the IMB for any flush
+ that is a DmaOperation, a ReadOperation, and an MDL_IO_PAGE_READ.
+
+
+Arguments:
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ KiMb();
+
+ //
+ // If the operation is a DMA operation, then check if the flush
+ // can be avoided because the host system supports the right set
+ // of cache coherency attributes. Otherwise, the flush can also
+ // be avoided if the operation is a programmed I/O and not a page
+ // read.
+ //
+
+ if (DmaOperation != FALSE) {
+ if (ReadOperation != FALSE) {
+ if ((KiDmaIoCoherency & DMA_READ_ICACHE_INVALIDATE) != 0) {
+
+ ASSERT((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0);
+
+ return;
+
+ } else if (((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) &&
+ ((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0)) {
+ return;
+ }
+
+ } else if ((KiDmaIoCoherency & DMA_WRITE_DCACHE_SNOOP) != 0) {
+ return;
+ }
+
+ } else if ((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) {
+ return;
+ }
+
+ //
+ // Either the operation is a DMA operation and the right coherency
+ // attributes are not supported by the host system, or the operation
+ // is programmed I/O and a page read.
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors, and send the flush I/O
+ // parameters to the target processors, if any, for execution.
+ //
+
+#if !defined(NT_UP)
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushIoBuffersTarget,
+ (PVOID)Mdl,
+ (PVOID)((ULONG)ReadOperation),
+ (PVOID)((ULONG)DmaOperation));
+ }
+
+#endif
+
+ //
+ // Flush I/O buffer on current processor.
+ //
+
+ HalFlushIoBuffers(Mdl, ReadOperation, DmaOperation);
+
+ //
+ // Wait until all target processors have finished flushing the
+ // specified I/O buffer.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+ return;
+}
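+
+//
+// Usage sketch (illustrative only; Mdl denotes a hypothetical MDL built for
+// a paging read): when a DMA transfer that read code pages into memory
+// completes, the buffer is flushed with
+//
+//     KeFlushIoBuffers(Mdl, TRUE, TRUE);   // read operation, DMA operation
+//
+// which, per the checks above, IPIs all processors to execute IMB when the
+// MDL is marked MDL_IO_PAGE_READ and the platform does not report
+// DMA_READ_ICACHE_INVALIDATE coherency.
+//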
+
+#if !defined(NT_UP)
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing an I/O buffer on target
+ processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the specified I/O buffer on the current processor.
+ //
+
+ HalFlushIoBuffers((PMDL)Mdl,
+ (BOOLEAN)((ULONG)ReadOperation),
+ (BOOLEAN)((ULONG)DmaOperation));
+
+ KiIpiSignalPacketDone(SignalDone);
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushIoBuffers);
+
+ return;
+}
+#endif
diff --git a/private/ntos/ke/alpha/flushtb.c b/private/ntos/ke/alpha/flushtb.c
new file mode 100644
index 000000000..cee2f081c
--- /dev/null
+++ b/private/ntos/ke/alpha/flushtb.c
@@ -0,0 +1,566 @@
+/*++
+
+Copyright (c) 1992-1993 Microsoft Corporation
+Copyright (c) 1993 Digital Equipment Corporation
+
+Module Name:
+
+ flushtb.c
+
+Abstract:
+
+ This module implements machine dependent functions to flush the
+ translation buffers and synchronize PIDs in an Alpha AXP MP system.
+
+ N.B. This module contains only MP versions of the TB flush routines.
+ The UP versions are macros in ke.h.
+ KeFlushEntireTb remains a routine for the UP system since it is
+ exported from the kernel for backwards compatibility.
+
+Author:
+
+ David N. Cutler (davec) 13-May-1989
+ Joe Notarangelo 29-Nov-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiFlushEntireTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushMultipleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Number,
+ IN PVOID Virtual,
+ IN PVOID Pid
+ );
+
+VOID
+KiFlushSingleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Virtual,
+ IN PVOID Pid,
+ IN PVOID Parameter3
+ );
+
+#if defined(NT_UP)
+#undef KeFlushEntireTb
+#endif
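+
+//
+// N.B. On UP builds ke.h defines KeFlushEntireTb as a macro; the #undef
+//      above removes that definition so the routine below can still be
+//      compiled and exported for backwards compatibility.
+//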
+
+
+VOID
+KeFlushEntireTb (
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the entire translation buffer (TB) on all
+ processors that are currently running threads which are children
+ of the current process or flushes the entire translation buffer
+ on all processors in the host configuration.
+
+Arguments:
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+#if defined(NT_UP)
+ __tbia();
+#else
+
+ if (AllProcessors != FALSE) {
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ //
+ // Acquire context swap lock to prevent any threads from
+ // changing processors.
+ //
+ KiLockContextSwap(&OldIrql);
+
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushEntireTbTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushEntireTb);
+
+ //
+ // Flush TB on current processor.
+ //
+
+ // KeFlushCurrentTb();
+ __tbia();
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+ return;
+}
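+
+//
+// Usage sketch (illustrative only): code that has changed mappings visible
+// to every process invalidates the TB on every processor with
+//
+//     KeFlushEntireTb(TRUE, TRUE);     // Invalid, AllProcessors
+//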
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushEntireTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing the entire TB.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - not used
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the entire TB on the current processor
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+
+ // KeFlushCurrentTb();
+ __tbia();
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushEntireTb);
+
+ return;
+}
+
+
+
+VOID
+KeFlushMultipleTb (
+ IN ULONG Number,
+ IN PVOID *Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE *PtePointer OPTIONAL,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes multiple entries from the translation buffer
+ on all processors that are currently running threads which are
+ children of the current process or flushes multiple entries from
+ the translation buffer on all processors in the host configuration.
+
+Arguments:
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies an optional pointer to an array of pointers to
+ page table entries that receive the specified page table entry
+ value.
+
+ PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ ULONG Index;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Compute the target set of processors and send the flush multiple
+ // parameters to the target processors, if any, for execution.
+ //
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ //
+ // Acquire context swap lock to prevent any threads from
+ // changing processors.
+ //
+ KiLockContextSwap(&OldIrql);
+
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ //
+ // If a page table entry address is specified, then set the
+ // specified page table entries to the specified value.
+ //
+
+ if (ARGUMENT_PRESENT(PtePointer)) {
+ for (Index = 0; Index < Number; Index += 1) {
+ *PtePointer[Index] = PteValue;
+ }
+ }
+
+ TargetProcessors &= PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushMultipleTbTarget,
+ (PVOID)Number,
+ (PVOID)Virtual,
+ NULL);
+ }
+
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushMultipleTb);
+
+ //
+ // Flush the specified entries from the TB on the current processor.
+ //
+
+ KiFlushMultipleTb(Invalid, &Virtual[0], Number);
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // If the context swap lock was acquired, release it.
+ //
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+ return;
+}
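+
+//
+// Usage sketch (illustrative only; Va1, Va2, Pte1, Pte2, and NewPte are
+// hypothetical values supplied by the caller): invalidate two mappings on
+// the processors running threads of the current process and write the new
+// PTE value as part of the flush:
+//
+//     PVOID Virtual[2] = { Va1, Va2 };
+//     PHARDWARE_PTE PtePointer[2] = { Pte1, Pte2 };
+//
+//     KeFlushMultipleTb(2, &Virtual[0], TRUE, FALSE, &PtePointer[0], NewPte);
+//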
+
+VOID
+KiFlushMultipleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Number,
+ IN PVOID Virtual,
+ IN PVOID Pid
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing multiple TB entries.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Pid - Supplies the PID of the TB entries to flush.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+ PVOID Array[FLUSH_MULTIPLE_MAXIMUM];
+
+ //
+ // Flush multiple entries from the TB on the current processor
+ //
+
+ //
+ // Capture the virtual addresses that are to be flushed from the TB
+ // on the current processor and clear the packet address.
+ //
+
+ for (Index = 0; Index < (ULONG)Number; Index += 1) {
+ Array[Index] = ((PVOID *)(Virtual))[Index];
+ }
+
+ KiIpiSignalPacketDone(SignalDone);
+
+ //
+ // Flush the specified virtual addresses from the TB on the current
+ // processor.
+ //
+
+ KiFlushMultipleTb(TRUE, &Array[0], (ULONG)Number);
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushMultipleTb);
+
+ return;
+}
+
+
+HARDWARE_PTE
+KeFlushSingleTb (
+ IN PVOID Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE PtePointer,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes a single entry from the translation buffer
+ on all processors that are currently running threads which are
+ children of the current process or flushes a single entry from
+ the translation buffer on all processors in the host configuration.
+
+Arguments:
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies a pointer to the page table entry which
+ receives the specified value.
+
+ PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+ The previous contents of the specified page table entry is returned
+ as the function value.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ HARDWARE_PTE OldPte;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Compute the target set of processors and send the flush single
+ // parameters to the target processors, if any, for execution.
+ //
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ //
+ // Acquire context swap lock to prevent any threads from
+ // changing processors.
+ //
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ KiLockContextSwap(&OldIrql);
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ //
+ // Capture the previous contents of the page table entry and set the
+ // page table entry to the new value.
+ //
+
+ OldPte = *PtePointer;
+ *PtePointer = PteValue;
+
+ TargetProcessors &= PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushSingleTbTarget,
+ (PVOID)Virtual,
+ NULL,
+ NULL);
+ }
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushSingleTb);
+
+ //
+ // Flush the specified entry from the TB on the current processor.
+ //
+
+// KiFlushSingleTb(Invalid, Virtual);
+ __tbis(Virtual);
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+ //
+ // return the previous page table entry value.
+ //
+
+ return OldPte;
+}
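+
+//
+// Usage sketch (illustrative only; Va, PtePointer, and NewPte are
+// hypothetical values supplied by the caller): set a single PTE and flush
+// its TB entry on all processors, capturing the previous contents:
+//
+//     HARDWARE_PTE OldPte;
+//
+//     OldPte = KeFlushSingleTb(Va, TRUE, TRUE, PtePointer, NewPte);
+//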
+
+VOID
+KiFlushSingleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Virtual,
+ IN PVOID Pid,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing a single TB entry.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Pid - Supplies the PID of the TB entries to flush.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush a single entry from the TB on the current processor.
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+// KiFlushSingleTb(TRUE, Virtual);
+ __tbis(Virtual);
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushSingleTb);
+
+ return;
+}
+#endif // !defined(NT_UP)
diff --git a/private/ntos/ke/alpha/genalpha.c b/private/ntos/ke/alpha/genalpha.c
new file mode 100644
index 000000000..fd0c346ad
--- /dev/null
+++ b/private/ntos/ke/alpha/genalpha.c
@@ -0,0 +1,1850 @@
+/*++
+ Copyright (c) 1990 Microsoft Corporation
+ Copyright (c) 1992, 1993 Digital Equipment Corporation
+
+Module Name:
+
+ genalpha.c
+
+Abstract:
+
+ This module implements a program which generates ALPHA machine dependent
+ structure offset definitions for kernel structures that are accessed in
+ assembly code.
+
+Author:
+
+ David N. Cutler (davec) 27-Mar-1990
+ Joe Notarangelo 26-Mar-1992
+
+Revision History:
+
+ Thomas Van Baak (tvb) 10-Jul-1992
+
+ Modified CONTEXT, TRAP, and EXCEPTION frames according to the new
+ Alpha calling standard.
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#define HEADER_FILE
+#include "excpt.h"
+#include "ntdef.h"
+#include "ntkeapi.h"
+#include "ntalpha.h"
+#include "ntimage.h"
+#include "ntseapi.h"
+#include "ntobapi.h"
+#include "ntlpcapi.h"
+#include "ntioapi.h"
+#include "ntmmapi.h"
+#include "ntldr.h"
+#include "ntpsapi.h"
+#include "ntexapi.h"
+#include "ntnls.h"
+#include "nturtl.h"
+#include "ntcsrmsg.h"
+#include "ntcsrsrv.h"
+#include "ntxcapi.h"
+#include "arc.h"
+#include "ntstatus.h"
+#include "kxalpha.h"
+#include "stdarg.h"
+#include "setjmp.h"
+
+//
+// Define architecture specific generation macros.
+//
+
+#define genAlt(Name, Type, Member) \
+ dumpf("#define " #Name " 0x%lx\n", OFFSET(Type, Member))
+
+#define genCom(Comment) \
+ dumpf("\n"); \
+ dumpf("//\n"); \
+ dumpf("// " Comment "\n"); \
+ dumpf("//\n"); \
+ dumpf("\n")
+
+#define genDef(Prefix, Type, Member) \
+ dumpf("#define " #Prefix #Member " 0x%lx\n", OFFSET(Type, Member))
+
+#define genVal(Name, Value) \
+ dumpf("#define " #Name " 0x%lx\n", Value)
+
+#define genSpc() dumpf("\n");
+
+//
+// Define member offset computation macro.
+//
+
+#define OFFSET(type, field) ((LONG)(&((type *)0)->field))
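+
+//
+// OFFSET is the classic offsetof idiom: the byte offset of a member is the
+// value of its address computed from a null structure pointer. For example
+// (illustrative only),
+//
+//     genDef(Pb, KPRCB, Number);
+//
+// emits a line of the form "#define PbNumber 0x<offset of Number in KPRCB>"
+// to each include file that is currently enabled for output.
+//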
+
+FILE *KsAlpha;
+FILE *HalAlpha;
+
+//
+// EnableInc(a) - Enable output to go to specified include file
+//
+
+#define EnableInc(a) OutputEnabled |= a;
+
+//
+// DisableInc(a) - Disable out from going to specified include file
+//
+
+#define DisableInc(a) OutputEnabled &= ~a;
+
+ULONG OutputEnabled;
+
+#define KSALPHA 0x1
+#define HALALPHA 0x2
+
+#define KERNEL KSALPHA
+#define HAL HALALPHA
+
+VOID
+GenerateCallPalNames( VOID );
+
+VOID dumpf( const char *format, ... );
+
+//
+// This routine returns the bit position, counting from the least
+// significant bit, of the first set bit in a field.
+//
+
+LONG
+t (
+ IN ULONG z
+ )
+
+{
+ LONG i;
+
+ for (i = 0; i < 32; i += 1) {
+ if ((z >> i) & 1) {
+ break;
+ }
+ }
+ return i;
+}
+
+//
+// This routine returns the index of the first set bit of a longword
+// (assumes at least one bit is set).
+
+LONG
+v (
+ IN ULONG m
+ )
+{
+ LONG i;
+
+ for( i=0; i < 32; i++ ){
+ if( (m & (1 << i)) != 0 ){
+ goto done; /* break was not working */
+ }
+ }
+
+ done:
+ return i;
+}
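+
+//
+// Both t() and v() return the zero-based position of the least significant
+// set bit of their argument; for example, t(0x10) and v(0x10) both return
+// 4, and both return 32 if no bit is set.
+//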
+
+
+
+//
+// This program generates the ALPHA machine dependent assembler offset
+// definitions.
+//
+
+VOID
+main (argc, argv)
+ int argc;
+ char *argv[];
+{
+
+ char *outName;
+ LONG Bo;
+ union {
+ ULONG foo;
+ } x;
+ union {
+ ULONG mask;
+ HARDWARE_PTE p;
+ } pte;
+ union {
+ ULONG mask;
+ PSR p;
+ } psr;
+ union {
+ ULONG mask;
+ IE i;
+ } ie;
+ union {
+ ULONG mask;
+ MCHK_STATUS m;
+ } mchk;
+ union {
+ ULONG mask;
+ MCES m;
+ } mces;
+ union {
+ ULONG mask;
+ EXC_SUM e;
+ } excsum;
+
+
+ //
+ // Create files for output.
+ //
+
+ outName = (argc >= 2) ? argv[1] : "\\nt\\public\\sdk\\inc\\ksalpha.h";
+ KsAlpha = fopen( outName, "w" );
+ if( KsAlpha == NULL ){
+ fprintf( stderr, "GENALPHA: Cannot open %s for writing.\n", outName );
+ perror( "GENALPHA" );
+ exit(1);
+ }
+ fprintf( stderr, "GENALPHA: Writing %s header file.\n", outName );
+
+ outName = (argc >= 3) ? argv[2] : "\\nt\\private\\ntos\\inc\\halalpha.h";
+ HalAlpha = fopen( outName, "w" );
+ if( HalAlpha == NULL ){
+ fprintf( stderr, "GENALPHA: Cannot open %s for writing.\n", outName );
+ perror( "GENALPHA" );
+ exit(1);
+ }
+ fprintf( stderr, "GENALPHA: Writing %s header file.\n", outName );
+
+ //
+ // Include statement for ALPHA architecture static definitions.
+ //
+
+ EnableInc( KSALPHA | HALALPHA );
+ dumpf("#include \"kxalpha.h\"\n");
+ DisableInc( HALALPHA );
+
+ //
+ // Include architecture independent definitions.
+ //
+
+#include "..\genxx.inc"
+
+ //
+ // Generate architecture dependent definitions.
+ //
+ // Processor control register structure definitions.
+ //
+
+ EnableInc(HAL);
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Processor Control Registers Structure Offset Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define PCR_MINOR_VERSION 0x%lx\n",
+ PCR_MINOR_VERSION);
+
+ dumpf("#define PCR_MAJOR_VERSION 0x%lx\n",
+ PCR_MAJOR_VERSION);
+
+ dumpf("#define PcMinorVersion 0x%lx\n",
+ OFFSET(KPCR, MinorVersion));
+
+ dumpf("#define PcMajorVersion 0x%lx\n",
+ OFFSET(KPCR, MajorVersion));
+
+ dumpf("#define PcPalBaseAddress 0x%lx\n",
+ OFFSET(KPCR, PalBaseAddress));
+
+ dumpf("#define PcPalMajorVersion 0x%lx\n",
+ OFFSET(KPCR, PalMajorVersion));
+
+ dumpf("#define PcPalMinorVersion 0x%lx\n",
+ OFFSET(KPCR, PalMinorVersion));
+
+ dumpf("#define PcPalSequenceVersion 0x%lx\n",
+ OFFSET(KPCR, PalSequenceVersion));
+
+ dumpf("#define PcPalMajorSpecification 0x%lx\n",
+ OFFSET(KPCR, PalMajorSpecification));
+
+ dumpf("#define PcPalMinorSpecification 0x%lx\n",
+ OFFSET(KPCR, PalMinorSpecification));
+
+ dumpf("#define PcFirmwareRestartAddress 0x%lx\n",
+ OFFSET(KPCR, FirmwareRestartAddress));
+
+ dumpf("#define PcRestartBlock 0x%lx\n",
+ OFFSET(KPCR, RestartBlock));
+
+ dumpf("#define PcPalReserved 0x%lx\n",
+ OFFSET(KPCR, PalReserved));
+
+ dumpf("#define PcPanicStack 0x%lx\n",
+ OFFSET(KPCR, PanicStack));
+
+ dumpf("#define PcProcessorType 0x%lx\n",
+ OFFSET(KPCR, ProcessorType));
+
+ dumpf("#define PcProcessorRevision 0x%lx\n",
+ OFFSET(KPCR, ProcessorRevision));
+
+ dumpf("#define PcPhysicalAddressBits 0x%lx\n",
+ OFFSET(KPCR, PhysicalAddressBits));
+
+ dumpf("#define PcMaximumAddressSpaceNumber 0x%lx\n",
+ OFFSET(KPCR, MaximumAddressSpaceNumber));
+
+ dumpf("#define PcPageSize 0x%lx\n",
+ OFFSET(KPCR, PageSize));
+
+ dumpf("#define PcFirstLevelDcacheSize 0x%lx\n",
+ OFFSET(KPCR, FirstLevelDcacheSize));
+
+ dumpf("#define PcFirstLevelDcacheFillSize 0x%lx\n",
+ OFFSET(KPCR, FirstLevelDcacheFillSize));
+
+ dumpf("#define PcFirstLevelIcacheSize 0x%lx\n",
+ OFFSET(KPCR, FirstLevelIcacheSize));
+
+ dumpf("#define PcFirstLevelIcacheFillSize 0x%lx\n",
+ OFFSET(KPCR, FirstLevelIcacheFillSize));
+
+ dumpf("#define PcFirmwareRevisionId 0x%lx\n",
+ OFFSET(KPCR, FirmwareRevisionId));
+
+ dumpf("#define PcSystemType 0x%lx\n",
+ OFFSET(KPCR, SystemType[0]));
+
+ dumpf("#define PcSystemVariant 0x%lx\n",
+ OFFSET(KPCR, SystemVariant));
+
+ dumpf("#define PcSystemRevision 0x%lx\n",
+ OFFSET(KPCR, SystemRevision));
+
+ dumpf("#define PcSystemSerialNumber 0x%lx\n",
+ OFFSET(KPCR, SystemSerialNumber[0]));
+
+ dumpf("#define PcCycleClockPeriod 0x%lx\n",
+ OFFSET(KPCR, CycleClockPeriod));
+
+ dumpf("#define PcSecondLevelCacheSize 0x%lx\n",
+ OFFSET(KPCR, SecondLevelCacheSize));
+
+ dumpf("#define PcSecondLevelCacheFillSize 0x%lx\n",
+ OFFSET(KPCR, SecondLevelCacheFillSize));
+
+ dumpf("#define PcThirdLevelCacheSize 0x%lx\n",
+ OFFSET(KPCR, ThirdLevelCacheSize));
+
+ dumpf("#define PcThirdLevelCacheFillSize 0x%lx\n",
+ OFFSET(KPCR, ThirdLevelCacheFillSize));
+
+ dumpf("#define PcFourthLevelCacheSize 0x%lx\n",
+ OFFSET(KPCR, FourthLevelCacheSize));
+
+ dumpf("#define PcFourthLevelCacheFillSize 0x%lx\n",
+ OFFSET(KPCR, FourthLevelCacheFillSize));
+
+ dumpf("#define PcPrcb 0x%lx\n",
+ OFFSET(KPCR, Prcb));
+
+ dumpf("#define PcNumber 0x%lx\n",
+ OFFSET(KPCR, Number));
+
+ dumpf("#define PcSetMember 0x%lx\n",
+ OFFSET(KPCR, SetMember));
+
+ dumpf("#define PcHalReserved 0x%lx\n",
+ OFFSET(KPCR, HalReserved[0]));
+
+ dumpf("#define PcIrqlTable 0x%lx\n",
+ OFFSET(KPCR, IrqlTable[0]));
+
+ dumpf("#define PcIrqlMask 0x%lx\n",
+ OFFSET(KPCR, IrqlMask[0]));
+
+ dumpf("#define PcInterruptRoutine 0x%lx\n",
+ OFFSET(KPCR, InterruptRoutine));
+
+ dumpf("#define PcReservedVectors 0x%lx\n",
+ OFFSET(KPCR, ReservedVectors));
+
+ dumpf("#define PcMachineCheckError 0x%lx\n",
+ OFFSET(KPCR, MachineCheckError));
+
+ dumpf("#define PcDpcStack 0x%lx\n",
+ OFFSET(KPCR, DpcStack));
+
+ dumpf("#define PcNotMember 0x%lx\n",
+ OFFSET(KPCR, NotMember));
+
+ dumpf("#define PcCurrentPid 0x%lx\n",
+ OFFSET(KPCR, CurrentPid));
+
+ dumpf("#define PcSystemServiceDispatchStart 0x%lx\n",
+ OFFSET(KPCR, SystemServiceDispatchStart));
+
+ dumpf("#define PcSystemServiceDispatchEnd 0x%lx\n",
+ OFFSET(KPCR, SystemServiceDispatchEnd));
+
+ dumpf("#define PcIdleThread 0x%lx\n",
+ OFFSET(KPCR, IdleThread));
+
+ dumpf("#define ProcessorControlRegisterLength 0x%lx\n",
+ ((sizeof(KPCR) + 15) & ~15));
+
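+ //
+ // N.B. The (x + 15) & ~15 idiom above rounds the structure size up to
+ //      the next multiple of 16; for example, a size of 0x2a2 is emitted
+ //      as 0x2b0.
+ //
+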
+ dumpf("#define SharedUserData 0x%lx\n", SharedUserData);
+ dumpf("#define UsTickCountLow 0x%lx\n", OFFSET(KUSER_SHARED_DATA, TickCountLow));
+ dumpf("#define UsTickCountMultiplier 0x%lx\n", OFFSET(KUSER_SHARED_DATA, TickCountMultiplier));
+ dumpf("#define UsInterruptTime 0x%lx\n",
+ OFFSET(KUSER_SHARED_DATA, InterruptTime));
+
+ dumpf("#define UsSystemTime 0x%lx\n",
+ OFFSET(KUSER_SHARED_DATA, SystemTime));
+
+ //
+ // Processor block structure definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Processor Block Structure Offset Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define PRCB_MINOR_VERSION 0x%lx\n",
+ PRCB_MINOR_VERSION);
+
+ dumpf("#define PRCB_MAJOR_VERSION 0x%lx\n",
+ PRCB_MAJOR_VERSION);
+
+ dumpf("#define PbMinorVersion 0x%lx\n",
+ OFFSET(KPRCB, MinorVersion));
+
+ dumpf("#define PbMajorVersion 0x%lx\n",
+ OFFSET(KPRCB, MajorVersion));
+
+ dumpf("#define PbCurrentThread 0x%lx\n",
+ OFFSET(KPRCB, CurrentThread));
+
+ dumpf("#define PbNextThread 0x%lx\n",
+ OFFSET(KPRCB, NextThread));
+
+ dumpf("#define PbIdleThread 0x%lx\n",
+ OFFSET(KPRCB, IdleThread));
+
+ dumpf("#define PbNumber 0x%lx\n",
+ OFFSET(KPRCB, Number));
+
+ dumpf("#define PbBuildType 0x%lx\n",
+ OFFSET(KPRCB, BuildType));
+
+ dumpf("#define PbSetMember 0x%lx\n",
+ OFFSET(KPRCB, SetMember));
+
+ dumpf("#define PbRestartBlock 0x%lx\n",
+ OFFSET(KPRCB, RestartBlock));
+
+ DisableInc( HALALPHA );
+
+ dumpf("#define PbInterruptCount 0x%lx\n",
+ OFFSET(KPRCB, InterruptCount));
+
+ dumpf("#define PbDpcTime 0x%lx\n",
+ OFFSET(KPRCB, DpcTime));
+
+ dumpf("#define PbInterruptTime 0x%lx\n",
+ OFFSET(KPRCB, InterruptTime));
+
+ dumpf("#define PbKernelTime 0x%lx\n",
+ OFFSET(KPRCB, KernelTime));
+
+ dumpf("#define PbUserTime 0x%lx\n",
+ OFFSET(KPRCB, UserTime));
+
+ dumpf("#define PbQuantumEndDpc 0x%lx\n",
+ OFFSET(KPRCB, QuantumEndDpc));
+
+ dumpf("#define PbIpiFrozen 0x%lx\n",
+ OFFSET(KPRCB, IpiFrozen));
+
+ dumpf("#define PbIpiCounts 0x%lx\n",
+ OFFSET(KPRCB, IpiCounts));
+
+ dumpf("#define PbProcessorState 0x%lx\n",
+ OFFSET(KPRCB, ProcessorState));
+
+ dumpf("#define PbAlignmentFixupCount 0x%lx\n",
+ OFFSET(KPRCB, KeAlignmentFixupCount));
+
+ dumpf("#define PbContextSwitches 0x%lx\n",
+ OFFSET(KPRCB, KeContextSwitches));
+
+ dumpf("#define PbDcacheFlushCount 0x%lx\n",
+ OFFSET(KPRCB, KeDcacheFlushCount));
+
+ dumpf("#define PbExceptionDispatchcount 0x%lx\n",
+ OFFSET(KPRCB, KeExceptionDispatchCount));
+
+ dumpf("#define PbFirstLevelTbFills 0x%lx\n",
+ OFFSET(KPRCB, KeFirstLevelTbFills));
+
+ dumpf("#define PbFloatingEmulationCount 0x%lx\n",
+ OFFSET(KPRCB, KeFloatingEmulationCount));
+
+ dumpf("#define PbIcacheFlushCount 0x%lx\n",
+ OFFSET(KPRCB, KeIcacheFlushCount));
+
+ dumpf("#define PbSecondLevelTbFills 0x%lx\n",
+ OFFSET(KPRCB, KeSecondLevelTbFills));
+
+ dumpf("#define PbSystemCalls 0x%lx\n",
+ OFFSET(KPRCB, KeSystemCalls));
+
+ genDef(Pb, KPRCB, CurrentPacket);
+ genDef(Pb, KPRCB, TargetSet);
+ genDef(Pb, KPRCB, WorkerRoutine);
+ genDef(Pb, KPRCB, RequestSummary);
+ genDef(Pb, KPRCB, SignalDone);
+
+ dumpf("#define PbDpcListHead 0x%lx\n",
+ OFFSET(KPRCB, DpcListHead));
+
+ dumpf("#define PbDpcLock 0x%lx\n",
+ OFFSET(KPRCB, DpcLock));
+
+ dumpf("#define PbDpcCount 0x%lx\n",
+ OFFSET(KPRCB, DpcCount));
+
+ dumpf("#define PbLastDpcCount 0x%lx\n",
+ OFFSET(KPRCB, LastDpcCount));
+
+ dumpf("#define PbQuantumEnd 0x%lx\n",
+ OFFSET(KPRCB, QuantumEnd));
+
+ dumpf("#define PbStartCount 0x%lx\n",
+ OFFSET(KPRCB, StartCount));
+
+ dumpf("#define PbSoftwareInterrupts 0x%lx\n",
+ OFFSET(KPRCB, SoftwareInterrupts));
+
+ dumpf("#define PbInterruptActive 0x%lx\n",
+ OFFSET(KPRCB, InterruptActive));
+
+ dumpf("#define PbDpcRoutineActive 0x%lx\n",
+ OFFSET(KPRCB, DpcRoutineActive));
+
+ dumpf("#define PbDpcQueueDepth 0x%lx\n",
+ OFFSET(KPRCB, DpcQueueDepth));
+
+ dumpf("#define PbDpcRequestRate 0x%lx\n",
+ OFFSET(KPRCB, DpcRequestRate));
+
+ dumpf("#define PbDpcBypassCount 0x%lx\n",
+ OFFSET(KPRCB, DpcBypassCount));
+
+ dumpf("#define PbApcBypassCount 0x%lx\n",
+ OFFSET(KPRCB, ApcBypassCount));
+
+ dumpf("#define PbDispatchInterruptCount 0x%lx\n",
+ OFFSET(KPRCB, DispatchInterruptCount));
+
+ dumpf("#define PbDpcInterruptRequested 0x%lx\n",
+ OFFSET(KPRCB, DpcInterruptRequested));
+
+ dumpf("#define PbMaximumDpcQueueDepth 0x%lx\n",
+ OFFSET(KPRCB, MaximumDpcQueueDepth));
+
+ dumpf("#define PbMinimumDpcRate 0x%lx\n",
+ OFFSET(KPRCB, MinimumDpcRate));
+
+ dumpf("#define PbAdjustDpcThreshold 0x%lx\n",
+ OFFSET(KPRCB, AdjustDpcThreshold));
+
+ dumpf("#define ProcessorBlockLength 0x%lx\n",
+ ((sizeof(KPRCB) + 15) & ~15));
+
+ //
+ // Immediate interprocessor command definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Immediate Interprocessor Command Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define IPI_APC 0x%lx\n", IPI_APC );
+ dumpf("#define IPI_DPC 0x%lx\n", IPI_DPC );
+ dumpf("#define IPI_FREEZE 0x%lx\n", IPI_FREEZE );
+ dumpf("#define IPI_PACKET_READY 0x%lx\n", IPI_PACKET_READY );
+
+ //
+ // Interprocessor interrupt count structure offset definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Interprocessor Interrupt Count Structure Offset Definitions\n");
+ dumpf("//\n" );
+ dumpf("\n" );
+
+ dumpf("#define IcFreeze 0x%lx\n",
+ OFFSET(KIPI_COUNTS, Freeze) );
+
+ dumpf("#define IcPacket 0x%lx\n",
+ OFFSET(KIPI_COUNTS, Packet) );
+
+ dumpf("#define IcDPC 0x%lx\n",
+ OFFSET(KIPI_COUNTS, DPC) );
+
+ dumpf("#define IcAPC 0x%lx\n",
+ OFFSET(KIPI_COUNTS, APC) );
+
+ dumpf("#define IcFlushSingleTb 0x%lx\n",
+ OFFSET(KIPI_COUNTS, FlushSingleTb) );
+
+ dumpf("#define IcFlushEntireTb 0x%lx\n",
+ OFFSET(KIPI_COUNTS, FlushEntireTb) );
+
+ dumpf("#define IcChangeColor 0x%lx\n",
+ OFFSET(KIPI_COUNTS, ChangeColor) );
+
+ dumpf("#define IcSweepDcache 0x%lx\n",
+ OFFSET(KIPI_COUNTS, SweepDcache) );
+
+ dumpf("#define IcSweepIcache 0x%lx\n",
+ OFFSET(KIPI_COUNTS, SweepIcache) );
+
+ dumpf("#define IcSweepIcacheRange 0x%lx\n",
+ OFFSET(KIPI_COUNTS, SweepIcacheRange) );
+
+ dumpf("#define IcFlushIoBuffers 0x%lx\n",
+ OFFSET(KIPI_COUNTS, FlushIoBuffers) );
+
+ //
+ // Context frame offset definitions and flag definitions.
+ //
+
+ EnableInc( HALALPHA );
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Context Frame Offset and Flag Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define CONTEXT_FULL 0x%lx\n", CONTEXT_FULL);
+ dumpf("#define CONTEXT_CONTROL 0x%lx\n", CONTEXT_CONTROL);
+ dumpf("#define CONTEXT_FLOATING_POINT 0x%lx\n", CONTEXT_FLOATING_POINT);
+ dumpf("#define CONTEXT_INTEGER 0x%lx\n", CONTEXT_INTEGER);
+ dumpf("\n");
+
+ dumpf("#define CxFltF0 0x%lx\n", OFFSET(CONTEXT, FltF0));
+ dumpf("#define CxFltF1 0x%lx\n", OFFSET(CONTEXT, FltF1));
+ dumpf("#define CxFltF2 0x%lx\n", OFFSET(CONTEXT, FltF2));
+ dumpf("#define CxFltF3 0x%lx\n", OFFSET(CONTEXT, FltF3));
+ dumpf("#define CxFltF4 0x%lx\n", OFFSET(CONTEXT, FltF4));
+ dumpf("#define CxFltF5 0x%lx\n", OFFSET(CONTEXT, FltF5));
+ dumpf("#define CxFltF6 0x%lx\n", OFFSET(CONTEXT, FltF6));
+ dumpf("#define CxFltF7 0x%lx\n", OFFSET(CONTEXT, FltF7));
+ dumpf("#define CxFltF8 0x%lx\n", OFFSET(CONTEXT, FltF8));
+ dumpf("#define CxFltF9 0x%lx\n", OFFSET(CONTEXT, FltF9));
+ dumpf("#define CxFltF10 0x%lx\n", OFFSET(CONTEXT, FltF10));
+ dumpf("#define CxFltF11 0x%lx\n", OFFSET(CONTEXT, FltF11));
+ dumpf("#define CxFltF12 0x%lx\n", OFFSET(CONTEXT, FltF12));
+ dumpf("#define CxFltF13 0x%lx\n", OFFSET(CONTEXT, FltF13));
+ dumpf("#define CxFltF14 0x%lx\n", OFFSET(CONTEXT, FltF14));
+ dumpf("#define CxFltF15 0x%lx\n", OFFSET(CONTEXT, FltF15));
+ dumpf("#define CxFltF16 0x%lx\n", OFFSET(CONTEXT, FltF16));
+ dumpf("#define CxFltF17 0x%lx\n", OFFSET(CONTEXT, FltF17));
+ dumpf("#define CxFltF18 0x%lx\n", OFFSET(CONTEXT, FltF18));
+ dumpf("#define CxFltF19 0x%lx\n", OFFSET(CONTEXT, FltF19));
+ dumpf("#define CxFltF20 0x%lx\n", OFFSET(CONTEXT, FltF20));
+ dumpf("#define CxFltF21 0x%lx\n", OFFSET(CONTEXT, FltF21));
+ dumpf("#define CxFltF22 0x%lx\n", OFFSET(CONTEXT, FltF22));
+ dumpf("#define CxFltF23 0x%lx\n", OFFSET(CONTEXT, FltF23));
+ dumpf("#define CxFltF24 0x%lx\n", OFFSET(CONTEXT, FltF24));
+ dumpf("#define CxFltF25 0x%lx\n", OFFSET(CONTEXT, FltF25));
+ dumpf("#define CxFltF26 0x%lx\n", OFFSET(CONTEXT, FltF26));
+ dumpf("#define CxFltF27 0x%lx\n", OFFSET(CONTEXT, FltF27));
+ dumpf("#define CxFltF28 0x%lx\n", OFFSET(CONTEXT, FltF28));
+ dumpf("#define CxFltF29 0x%lx\n", OFFSET(CONTEXT, FltF29));
+ dumpf("#define CxFltF30 0x%lx\n", OFFSET(CONTEXT, FltF30));
+ dumpf("#define CxFltF31 0x%lx\n", OFFSET(CONTEXT, FltF31));
+
+ dumpf("#define CxIntV0 0x%lx\n", OFFSET(CONTEXT, IntV0));
+ dumpf("#define CxIntT0 0x%lx\n", OFFSET(CONTEXT, IntT0));
+ dumpf("#define CxIntT1 0x%lx\n", OFFSET(CONTEXT, IntT1));
+ dumpf("#define CxIntT2 0x%lx\n", OFFSET(CONTEXT, IntT2));
+
+ dumpf("#define CxIntT3 0x%lx\n", OFFSET(CONTEXT, IntT3));
+ dumpf("#define CxIntT4 0x%lx\n", OFFSET(CONTEXT, IntT4));
+ dumpf("#define CxIntT5 0x%lx\n", OFFSET(CONTEXT, IntT5));
+ dumpf("#define CxIntT6 0x%lx\n", OFFSET(CONTEXT, IntT6));
+
+ dumpf("#define CxIntT7 0x%lx\n", OFFSET(CONTEXT, IntT7));
+ dumpf("#define CxIntS0 0x%lx\n", OFFSET(CONTEXT, IntS0));
+ dumpf("#define CxIntS1 0x%lx\n", OFFSET(CONTEXT, IntS1));
+ dumpf("#define CxIntS2 0x%lx\n", OFFSET(CONTEXT, IntS2));
+
+ dumpf("#define CxIntS3 0x%lx\n", OFFSET(CONTEXT, IntS3));
+ dumpf("#define CxIntS4 0x%lx\n", OFFSET(CONTEXT, IntS4));
+ dumpf("#define CxIntS5 0x%lx\n", OFFSET(CONTEXT, IntS5));
+ dumpf("#define CxIntFp 0x%lx\n", OFFSET(CONTEXT, IntFp));
+
+ dumpf("#define CxIntA0 0x%lx\n", OFFSET(CONTEXT, IntA0));
+ dumpf("#define CxIntA1 0x%lx\n", OFFSET(CONTEXT, IntA1));
+ dumpf("#define CxIntA2 0x%lx\n", OFFSET(CONTEXT, IntA2));
+ dumpf("#define CxIntA3 0x%lx\n", OFFSET(CONTEXT, IntA3));
+
+ dumpf("#define CxIntA4 0x%lx\n", OFFSET(CONTEXT, IntA4));
+ dumpf("#define CxIntA5 0x%lx\n", OFFSET(CONTEXT, IntA5));
+ dumpf("#define CxIntT8 0x%lx\n", OFFSET(CONTEXT, IntT8));
+ dumpf("#define CxIntT9 0x%lx\n", OFFSET(CONTEXT, IntT9));
+
+ dumpf("#define CxIntT10 0x%lx\n", OFFSET(CONTEXT, IntT10));
+ dumpf("#define CxIntT11 0x%lx\n", OFFSET(CONTEXT, IntT11));
+ dumpf("#define CxIntRa 0x%lx\n", OFFSET(CONTEXT, IntRa));
+ dumpf("#define CxIntT12 0x%lx\n", OFFSET(CONTEXT, IntT12));
+
+ dumpf("#define CxIntAt 0x%lx\n", OFFSET(CONTEXT, IntAt));
+ dumpf("#define CxIntGp 0x%lx\n", OFFSET(CONTEXT, IntGp));
+ dumpf("#define CxIntSp 0x%lx\n", OFFSET(CONTEXT, IntSp));
+ dumpf("#define CxIntZero 0x%lx\n", OFFSET(CONTEXT, IntZero));
+
+ dumpf("#define CxFpcr 0x%lx\n", OFFSET(CONTEXT, Fpcr));
+ dumpf("#define CxSoftFpcr 0x%lx\n", OFFSET(CONTEXT, SoftFpcr));
+ dumpf("#define CxFir 0x%lx\n", OFFSET(CONTEXT, Fir));
+ dumpf("#define CxPsr 0x%lx\n", OFFSET(CONTEXT, Psr));
+ dumpf("#define CxContextFlags 0x%lx\n", OFFSET(CONTEXT, ContextFlags));
+ dumpf("#define ContextFrameLength 0x%lx\n", (sizeof(CONTEXT) + 15) & (~15));
+
+
+ //
+ // Exception frame offset definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Exception Frame Offset Definitions and Length\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define ExFltF2 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF2));
+ dumpf("#define ExFltF3 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF3));
+ dumpf("#define ExFltF4 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF4));
+ dumpf("#define ExFltF5 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF5));
+ dumpf("#define ExFltF6 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF6));
+ dumpf("#define ExFltF7 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF7));
+ dumpf("#define ExFltF8 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF8));
+ dumpf("#define ExFltF9 0x%lx\n", OFFSET(KEXCEPTION_FRAME, FltF9));
+
+ dumpf("#define ExIntS0 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntS0));
+ dumpf("#define ExIntS1 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntS1));
+ dumpf("#define ExIntS2 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntS2));
+ dumpf("#define ExIntS3 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntS3));
+ dumpf("#define ExIntS4 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntS4));
+ dumpf("#define ExIntS5 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntS5));
+ dumpf("#define ExIntFp 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntFp));
+
+ dumpf("#define ExPsr 0x%lx\n", OFFSET(KEXCEPTION_FRAME, Psr));
+ dumpf("#define ExSwapReturn 0x%lx\n", OFFSET(KEXCEPTION_FRAME, SwapReturn));
+ dumpf("#define ExIntRa 0x%lx\n", OFFSET(KEXCEPTION_FRAME, IntRa));
+ dumpf("#define ExceptionFrameLength 0x%lx\n",
+ (sizeof(KEXCEPTION_FRAME) + 15) & (~15));
+
+ //
+ // Jump buffer offset definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Jump Offset Definitions and Length\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define JbFp 0x%lx\n", OFFSET(_JUMP_BUFFER, Fp));
+ dumpf("#define JbPc 0x%lx\n", OFFSET(_JUMP_BUFFER, Pc));
+ dumpf("#define JbSeb 0x%lx\n", OFFSET(_JUMP_BUFFER, Seb));
+ dumpf("#define JbType 0x%lx\n", OFFSET(_JUMP_BUFFER, Type));
+ dumpf("#define JbFltF2 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF2));
+ dumpf("#define JbFltF3 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF3));
+ dumpf("#define JbFltF4 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF4));
+ dumpf("#define JbFltF5 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF5));
+ dumpf("#define JbFltF6 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF6));
+ dumpf("#define JbFltF7 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF7));
+ dumpf("#define JbFltF8 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF8));
+ dumpf("#define JbFltF9 0x%lx\n", OFFSET(_JUMP_BUFFER, FltF9));
+ dumpf("#define JbIntS0 0x%lx\n", OFFSET(_JUMP_BUFFER, IntS0));
+ dumpf("#define JbIntS1 0x%lx\n", OFFSET(_JUMP_BUFFER, IntS1));
+ dumpf("#define JbIntS2 0x%lx\n", OFFSET(_JUMP_BUFFER, IntS2));
+ dumpf("#define JbIntS3 0x%lx\n", OFFSET(_JUMP_BUFFER, IntS3));
+ dumpf("#define JbIntS4 0x%lx\n", OFFSET(_JUMP_BUFFER, IntS4));
+ dumpf("#define JbIntS5 0x%lx\n", OFFSET(_JUMP_BUFFER, IntS5));
+ dumpf("#define JbIntS6 0x%lx\n", OFFSET(_JUMP_BUFFER, IntS6));
+ dumpf("#define JbIntSp 0x%lx\n", OFFSET(_JUMP_BUFFER, IntSp));
+ dumpf("#define JbFir 0x%lx\n", OFFSET(_JUMP_BUFFER, Fir));
+
+ //
+ // Trap frame offset definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Trap Frame Offset Definitions and Length\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define TrFltF0 0x%lx\n", OFFSET(KTRAP_FRAME, FltF0));
+ dumpf("#define TrFltF1 0x%lx\n", OFFSET(KTRAP_FRAME, FltF1));
+
+ dumpf("#define TrFltF10 0x%lx\n", OFFSET(KTRAP_FRAME, FltF10));
+ dumpf("#define TrFltF11 0x%lx\n", OFFSET(KTRAP_FRAME, FltF11));
+ dumpf("#define TrFltF12 0x%lx\n", OFFSET(KTRAP_FRAME, FltF12));
+ dumpf("#define TrFltF13 0x%lx\n", OFFSET(KTRAP_FRAME, FltF13));
+ dumpf("#define TrFltF14 0x%lx\n", OFFSET(KTRAP_FRAME, FltF14));
+ dumpf("#define TrFltF15 0x%lx\n", OFFSET(KTRAP_FRAME, FltF15));
+ dumpf("#define TrFltF16 0x%lx\n", OFFSET(KTRAP_FRAME, FltF16));
+ dumpf("#define TrFltF17 0x%lx\n", OFFSET(KTRAP_FRAME, FltF17));
+ dumpf("#define TrFltF18 0x%lx\n", OFFSET(KTRAP_FRAME, FltF18));
+ dumpf("#define TrFltF19 0x%lx\n", OFFSET(KTRAP_FRAME, FltF19));
+ dumpf("#define TrFltF20 0x%lx\n", OFFSET(KTRAP_FRAME, FltF20));
+ dumpf("#define TrFltF21 0x%lx\n", OFFSET(KTRAP_FRAME, FltF21));
+ dumpf("#define TrFltF22 0x%lx\n", OFFSET(KTRAP_FRAME, FltF22));
+ dumpf("#define TrFltF23 0x%lx\n", OFFSET(KTRAP_FRAME, FltF23));
+ dumpf("#define TrFltF24 0x%lx\n", OFFSET(KTRAP_FRAME, FltF24));
+ dumpf("#define TrFltF25 0x%lx\n", OFFSET(KTRAP_FRAME, FltF25));
+ dumpf("#define TrFltF26 0x%lx\n", OFFSET(KTRAP_FRAME, FltF26));
+ dumpf("#define TrFltF27 0x%lx\n", OFFSET(KTRAP_FRAME, FltF27));
+ dumpf("#define TrFltF28 0x%lx\n", OFFSET(KTRAP_FRAME, FltF28));
+ dumpf("#define TrFltF29 0x%lx\n", OFFSET(KTRAP_FRAME, FltF29));
+ dumpf("#define TrFltF30 0x%lx\n", OFFSET(KTRAP_FRAME, FltF30));
+
+ dumpf("#define TrIntV0 0x%lx\n", OFFSET(KTRAP_FRAME, IntV0));
+
+ dumpf("#define TrIntT0 0x%lx\n", OFFSET(KTRAP_FRAME, IntT0));
+ dumpf("#define TrIntT1 0x%lx\n", OFFSET(KTRAP_FRAME, IntT1));
+ dumpf("#define TrIntT2 0x%lx\n", OFFSET(KTRAP_FRAME, IntT2));
+ dumpf("#define TrIntT3 0x%lx\n", OFFSET(KTRAP_FRAME, IntT3));
+ dumpf("#define TrIntT4 0x%lx\n", OFFSET(KTRAP_FRAME, IntT4));
+ dumpf("#define TrIntT5 0x%lx\n", OFFSET(KTRAP_FRAME, IntT5));
+ dumpf("#define TrIntT6 0x%lx\n", OFFSET(KTRAP_FRAME, IntT6));
+ dumpf("#define TrIntT7 0x%lx\n", OFFSET(KTRAP_FRAME, IntT7));
+
+ dumpf("#define TrIntFp 0x%lx\n", OFFSET(KTRAP_FRAME, IntFp));
+
+ dumpf("#define TrIntA0 0x%lx\n", OFFSET(KTRAP_FRAME, IntA0));
+ dumpf("#define TrIntA1 0x%lx\n", OFFSET(KTRAP_FRAME, IntA1));
+ dumpf("#define TrIntA2 0x%lx\n", OFFSET(KTRAP_FRAME, IntA2));
+ dumpf("#define TrIntA3 0x%lx\n", OFFSET(KTRAP_FRAME, IntA3));
+ dumpf("#define TrIntA4 0x%lx\n", OFFSET(KTRAP_FRAME, IntA4));
+ dumpf("#define TrIntA5 0x%lx\n", OFFSET(KTRAP_FRAME, IntA5));
+
+ dumpf("#define TrIntT8 0x%lx\n", OFFSET(KTRAP_FRAME, IntT8));
+ dumpf("#define TrIntT9 0x%lx\n", OFFSET(KTRAP_FRAME, IntT9));
+ dumpf("#define TrIntT10 0x%lx\n", OFFSET(KTRAP_FRAME, IntT10));
+ dumpf("#define TrIntT11 0x%lx\n", OFFSET(KTRAP_FRAME, IntT11));
+
+ dumpf("#define TrIntT12 0x%lx\n", OFFSET(KTRAP_FRAME, IntT12));
+ dumpf("#define TrIntAt 0x%lx\n", OFFSET(KTRAP_FRAME, IntAt));
+ dumpf("#define TrIntGp 0x%lx\n", OFFSET(KTRAP_FRAME, IntGp));
+ dumpf("#define TrIntSp 0x%lx\n", OFFSET(KTRAP_FRAME, IntSp));
+
+ dumpf("#define TrFpcr 0x%lx\n", OFFSET(KTRAP_FRAME, Fpcr));
+ dumpf("#define TrPsr 0x%lx\n", OFFSET(KTRAP_FRAME, Psr));
+ dumpf("#define TrPreviousKsp 0x%lx\n", OFFSET(KTRAP_FRAME, PreviousKsp));
+ dumpf("#define TrFir 0x%lx\n", OFFSET(KTRAP_FRAME, Fir));
+ dumpf("#define TrExceptionRecord 0x%lx\n", OFFSET(KTRAP_FRAME, ExceptionRecord[0]));
+ dumpf("#define TrOldIrql 0x%lx\n", OFFSET(KTRAP_FRAME, OldIrql));
+ dumpf("#define TrPreviousMode 0x%lx\n", OFFSET(KTRAP_FRAME, PreviousMode));
+ dumpf("#define TrIntRa 0x%lx\n", OFFSET(KTRAP_FRAME, IntRa));
+ dumpf("#define TrTrapFrame 0x%lx\n",OFFSET(KTRAP_FRAME, TrapFrame));
+ dumpf("#define TrapFrameLength 0x%lx\n", (sizeof(KTRAP_FRAME) + 15) & (~15));
+
+ //
+ // Usermode callout frame definitions
+ //
+ DisableInc(HALALPHA);
+ genCom("Usermode callout frame definitions");
+
+ genDef(Cu, KCALLOUT_FRAME, F2);
+ genDef(Cu, KCALLOUT_FRAME, F3);
+ genDef(Cu, KCALLOUT_FRAME, F4);
+ genDef(Cu, KCALLOUT_FRAME, F5);
+ genDef(Cu, KCALLOUT_FRAME, F6);
+ genDef(Cu, KCALLOUT_FRAME, F7);
+ genDef(Cu, KCALLOUT_FRAME, F8);
+ genDef(Cu, KCALLOUT_FRAME, F9);
+ genDef(Cu, KCALLOUT_FRAME, S0);
+ genDef(Cu, KCALLOUT_FRAME, S1);
+ genDef(Cu, KCALLOUT_FRAME, S2);
+ genDef(Cu, KCALLOUT_FRAME, S3);
+ genDef(Cu, KCALLOUT_FRAME, S4);
+ genDef(Cu, KCALLOUT_FRAME, S5);
+ genDef(Cu, KCALLOUT_FRAME, FP);
+ genDef(Cu, KCALLOUT_FRAME, CbStk);
+ genDef(Cu, KCALLOUT_FRAME, InStk);
+ genDef(Cu, KCALLOUT_FRAME, TrFr);
+ genDef(Cu, KCALLOUT_FRAME, TrFir);
+ genDef(Cu, KCALLOUT_FRAME, Ra);
+ genDef(Cu, KCALLOUT_FRAME, A0);
+ genDef(Cu, KCALLOUT_FRAME, A1);
+ dumpf("#define CuFrameLength 0x%lx\n", sizeof(KCALLOUT_FRAME));
+
+ //
+ // Usermode callout user frame definitions.
+ //
+
+ genCom("Usermode callout user frame definitions");
+
+ genDef(Ck, UCALLOUT_FRAME, Buffer);
+ genDef(Ck, UCALLOUT_FRAME, Length);
+ genDef(Ck, UCALLOUT_FRAME, ApiNumber);
+ genDef(Ck, UCALLOUT_FRAME, Sp);
+ genDef(Ck, UCALLOUT_FRAME, Ra);
+
+ EnableInc(HALALPHA);
+
+
+ //
+ // Loader Parameter Block offset definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Loader Parameter Block Offset Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define LpbLoadOrderListHead 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, LoadOrderListHead));
+
+ dumpf("#define LpbMemoryDescriptorListHead 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, MemoryDescriptorListHead));
+
+ dumpf("#define LpbKernelStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, KernelStack));
+
+ dumpf( "#define LpbPrcb 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Prcb));
+
+ dumpf("#define LpbProcess 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Process));
+
+ dumpf("#define LpbThread 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Thread));
+
+ dumpf("#define LpbRegistryLength 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, RegistryLength));
+
+ dumpf("#define LpbRegistryBase 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, RegistryBase));
+
+ dumpf("#define LpbDpcStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.DpcStack));
+
+ dumpf("#define LpbFirstLevelDcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.FirstLevelDcacheSize));
+
+ dumpf("#define LpbFirstLevelDcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.FirstLevelDcacheFillSize));
+
+ dumpf("#define LpbFirstLevelIcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.FirstLevelIcacheSize));
+
+ dumpf("#define LpbFirstLevelIcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.FirstLevelIcacheFillSize));
+
+ dumpf("#define LpbGpBase 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.GpBase));
+
+ dumpf("#define LpbPanicStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.PanicStack));
+
+ dumpf("#define LpbPcrPage 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.PcrPage));
+
+ dumpf("#define LpbPdrPage 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.PdrPage));
+
+ dumpf("#define LpbSecondLevelDcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SecondLevelDcacheSize));
+
+ dumpf("#define LpbSecondLevelDcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SecondLevelDcacheFillSize));
+
+ dumpf("#define LpbSecondLevelIcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SecondLevelIcacheSize));
+
+ dumpf("#define LpbSecondLevelIcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SecondLevelIcacheFillSize));
+
+ dumpf("#define LpbPhysicalAddressBits 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.PhysicalAddressBits));
+
+ dumpf("#define LpbMaximumAddressSpaceNumber 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.MaximumAddressSpaceNumber));
+
+ dumpf("#define LpbSystemSerialNumber 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SystemSerialNumber[0]));
+
+ dumpf("#define LpbSystemType 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SystemType[0]));
+
+ dumpf("#define LpbSystemVariant 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SystemVariant));
+
+ dumpf("#define LpbSystemRevision 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.SystemRevision));
+
+ dumpf("#define LpbProcessorType 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.ProcessorType));
+
+ dumpf("#define LpbProcessorRevision 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.ProcessorRevision));
+
+ dumpf("#define LpbCycleClockPeriod 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.CycleClockPeriod));
+
+ dumpf("#define LpbPageSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.PageSize));
+
+ dumpf("#define LpbRestartBlock 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.RestartBlock));
+
+ dumpf("#define LpbFirmwareRestartAddress 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.FirmwareRestartAddress));
+
+ dumpf("#define LpbFirmwareRevisionId 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.FirmwareRevisionId));
+
+ dumpf("#define LpbPalBaseAddress 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Alpha.PalBaseAddress));
+
+ DisableInc( HALALPHA );
+
+ //
+ // Restart Block Structure and Alpha Save Area Structure.
+ //
+ // N.B. - The Alpha Save Area Structure Offsets are written as though
+ // they were offsets from the beginning of the Restart block.
+ //
+ EnableInc( HALALPHA );
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Restart Block Structure Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define RbSignature 0x%lx\n",
+ OFFSET(RESTART_BLOCK, Signature));
+
+ dumpf("#define RbLength 0x%lx\n",
+ OFFSET(RESTART_BLOCK, Length));
+
+ dumpf("#define RbVersion 0x%lx\n",
+ OFFSET(RESTART_BLOCK, Version));
+
+ dumpf("#define RbRevision 0x%lx\n",
+ OFFSET(RESTART_BLOCK, Revision));
+
+ dumpf("#define RbNextRestartBlock 0x%lx\n",
+ OFFSET(RESTART_BLOCK, NextRestartBlock));
+
+ dumpf("#define RbRestartAddress 0x%lx\n",
+ OFFSET(RESTART_BLOCK, RestartAddress));
+
+ dumpf("#define RbBootMasterId 0x%lx\n",
+ OFFSET(RESTART_BLOCK, BootMasterId));
+
+ dumpf("#define RbProcessorId 0x%lx\n",
+ OFFSET(RESTART_BLOCK, ProcessorId));
+
+ dumpf("#define RbBootStatus 0x%lx\n",
+ OFFSET(RESTART_BLOCK, BootStatus));
+
+ dumpf("#define RbCheckSum 0x%lx\n",
+ OFFSET(RESTART_BLOCK, CheckSum));
+
+ dumpf("#define RbSaveAreaLength 0x%lx\n",
+ OFFSET(RESTART_BLOCK, SaveAreaLength));
+
+ dumpf("#define RbSaveArea 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea));
+
+ dumpf("#define RbHaltReason 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, HaltReason) );
+
+ dumpf("#define RbLogoutFrame 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, LogoutFrame) );
+
+ dumpf("#define RbPalBase 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, PalBase) );
+
+ dumpf("#define RbIntV0 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntV0) );
+
+ dumpf("#define RbIntT0 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT0) );
+
+ dumpf("#define RbIntT1 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT1) );
+
+ dumpf("#define RbIntT2 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT2) );
+
+ dumpf("#define RbIntT3 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT3) );
+
+ dumpf("#define RbIntT4 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT4) );
+
+ dumpf("#define RbIntT5 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT5) );
+
+ dumpf("#define RbIntT6 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT6) );
+
+ dumpf("#define RbIntT7 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT7) );
+
+ dumpf("#define RbIntS0 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntS0) );
+
+ dumpf("#define RbIntS1 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntS1) );
+
+ dumpf("#define RbIntS2 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntS2) );
+
+ dumpf("#define RbIntS3 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntS3) );
+
+ dumpf("#define RbIntS4 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntS4) );
+
+ dumpf("#define RbIntS5 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntS5) );
+
+ dumpf("#define RbIntFp 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntFp) );
+
+ dumpf("#define RbIntA0 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntA0) );
+
+ dumpf("#define RbIntA1 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntA1) );
+
+ dumpf("#define RbIntA2 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntA2) );
+
+ dumpf("#define RbIntA3 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntA3) );
+
+ dumpf("#define RbIntA4 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntA4) );
+
+ dumpf("#define RbIntA5 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntA5) );
+
+ dumpf("#define RbIntT8 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT8) );
+
+ dumpf("#define RbIntT9 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT9) );
+
+ dumpf("#define RbIntT10 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT10) );
+
+ dumpf("#define RbIntT11 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT11) );
+
+ dumpf("#define RbIntRa 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntRa) );
+
+ dumpf("#define RbIntT12 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntT12) );
+
+ dumpf("#define RbIntAT 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntAT) );
+
+ dumpf("#define RbIntGp 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntGp) );
+
+ dumpf("#define RbIntSp 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntSp) );
+
+ dumpf("#define RbIntZero 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, IntZero) );
+
+ dumpf("#define RbFpcr 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Fpcr) );
+
+ dumpf("#define RbFltF0 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF0) );
+
+ dumpf("#define RbFltF1 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF1) );
+
+ dumpf("#define RbFltF2 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF2) );
+
+ dumpf("#define RbFltF3 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF3) );
+
+ dumpf("#define RbFltF4 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF4) );
+
+ dumpf("#define RbFltF5 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF5) );
+
+ dumpf("#define RbFltF6 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF6) );
+
+ dumpf("#define RbFltF7 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF7) );
+
+ dumpf("#define RbFltF8 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF8) );
+
+ dumpf("#define RbFltF9 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF9) );
+
+ dumpf("#define RbFltF10 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF10) );
+
+ dumpf("#define RbFltF11 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF11) );
+
+ dumpf("#define RbFltF12 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF12) );
+
+ dumpf("#define RbFltF13 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF13) );
+
+ dumpf("#define RbFltF14 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF14) );
+
+ dumpf("#define RbFltF15 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF15) );
+
+ dumpf("#define RbFltF16 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF16) );
+
+ dumpf("#define RbFltF17 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF17) );
+
+ dumpf("#define RbFltF18 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF18) );
+
+ dumpf("#define RbFltF19 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF19) );
+
+ dumpf("#define RbFltF20 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF20) );
+
+ dumpf("#define RbFltF21 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF21) );
+
+ dumpf("#define RbFltF22 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF22) );
+
+ dumpf("#define RbFltF23 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF23) );
+
+ dumpf("#define RbFltF24 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF24) );
+
+ dumpf("#define RbFltF25 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF25) );
+
+ dumpf("#define RbFltF26 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF26) );
+
+ dumpf("#define RbFltF27 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF27) );
+
+ dumpf("#define RbFltF28 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF28) );
+
+ dumpf("#define RbFltF29 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF29) );
+
+ dumpf("#define RbFltF30 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF30) );
+
+ dumpf("#define RbFltF31 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, FltF31) );
+
+ dumpf("#define RbAsn 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Asn) );
+
+ dumpf("#define RbGeneralEntry 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, GeneralEntry) );
+
+ dumpf("#define RbIksp 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Iksp) );
+
+ dumpf("#define RbInterruptEntry 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, InterruptEntry) );
+
+ dumpf("#define RbKgp 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Kgp) );
+
+ dumpf("#define RbMces 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Mces) );
+
+ dumpf("#define RbMemMgmtEntry 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, MemMgmtEntry) );
+
+ dumpf("#define RbPanicEntry 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, PanicEntry) );
+
+ dumpf("#define RbPcr 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Pcr) );
+
+ dumpf("#define RbPdr 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Pdr) );
+
+ dumpf("#define RbPsr 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Psr) );
+
+ dumpf("#define RbReiRestartAddress 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, ReiRestartAddress) );
+
+ dumpf("#define RbSirr 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Sirr) );
+
+ dumpf("#define RbSyscallEntry 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, SyscallEntry) );
+
+ dumpf("#define RbTeb 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Teb) );
+
+ dumpf("#define RbThread 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, Thread) );
+
+ dumpf("#define RbPerProcessorState 0x%lx\n",
+ OFFSET(RESTART_BLOCK, u.SaveArea) +
+ OFFSET(ALPHA_RESTART_SAVE_AREA, PerProcessorState) );
+
+
+ //
+ // Address space layout definitions
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Address Space Layout Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define KSEG0_BASE 0x%lx\n", KSEG0_BASE );
+ dumpf("#define KSEG2_BASE 0x%lx\n", KSEG2_BASE );
+ DisableInc( HALALPHA );
+
+ dumpf("#define SYSTEM_BASE 0x%lx\n", SYSTEM_BASE);
+ dumpf("#define PDE_BASE 0x%lx\n", PDE_BASE);
+ dumpf("#define PTE_BASE 0x%lx\n", PTE_BASE);
+
+ //
+ // Page table and page directory entry definitions
+ //
+
+ EnableInc( HALALPHA );
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Page Table and Directory Entry Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define PAGE_SIZE 0x%lx\n", PAGE_SIZE);
+ dumpf("#define PAGE_SHIFT 0x%lx\n", PAGE_SHIFT);
+ dumpf("#define PDI_SHIFT 0x%lx\n", PDI_SHIFT);
+ dumpf("#define PTI_SHIFT 0x%lx\n", PTI_SHIFT);
+ DisableInc( HALALPHA );
+
+ //
+ // Breakpoint instruction definitions
+ //
+
+ EnableInc( HALALPHA );
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Breakpoint Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define USER_BREAKPOINT 0x%lx\n", USER_BREAKPOINT);
+ dumpf("#define KERNEL_BREAKPOINT 0x%lx\n", KERNEL_BREAKPOINT);
+ dumpf("#define BREAKIN_BREAKPOINT 0x%lx\n", BREAKIN_BREAKPOINT);
+
+ dumpf("#define DEBUG_PRINT_BREAKPOINT 0x%lx\n", DEBUG_PRINT_BREAKPOINT);
+ dumpf("#define DEBUG_PROMPT_BREAKPOINT 0x%lx\n", DEBUG_PROMPT_BREAKPOINT);
+ dumpf("#define DEBUG_STOP_BREAKPOINT 0x%lx\n", DEBUG_STOP_BREAKPOINT);
+ dumpf("#define DEBUG_LOAD_SYMBOLS_BREAKPOINT 0x%lx\n", DEBUG_LOAD_SYMBOLS_BREAKPOINT);
+ dumpf("#define DEBUG_UNLOAD_SYMBOLS_BREAKPOINT 0x%lx\n", DEBUG_UNLOAD_SYMBOLS_BREAKPOINT);
+
+ DisableInc( HALALPHA );
+
+    //
+    // Trap code definitions
+    //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Trap Code Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define GENTRAP_INTEGER_OVERFLOW 0x%lx\n", GENTRAP_INTEGER_OVERFLOW);
+ dumpf("#define GENTRAP_INTEGER_DIVIDE_BY_ZERO 0x%lx\n", GENTRAP_INTEGER_DIVIDE_BY_ZERO);
+ dumpf("#define GENTRAP_FLOATING_OVERFLOW 0x%lx\n", GENTRAP_FLOATING_OVERFLOW);
+ dumpf("#define GENTRAP_FLOATING_DIVIDE_BY_ZERO 0x%lx\n", GENTRAP_FLOATING_DIVIDE_BY_ZERO);
+ dumpf("#define GENTRAP_FLOATING_UNDERFLOW 0x%lx\n", GENTRAP_FLOATING_UNDERFLOW);
+ dumpf("#define GENTRAP_FLOATING_INVALID_OPERAND 0x%lx\n", GENTRAP_FLOATING_INVALID_OPERAND);
+ dumpf("#define GENTRAP_FLOATING_INEXACT_RESULT 0x%lx\n", GENTRAP_FLOATING_INEXACT_RESULT);
+
+ //
+ // Miscellaneous definitions
+ //
+
+ EnableInc( HALALPHA );
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Miscellaneous Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define Executive 0x%lx\n", Executive);
+ dumpf("#define KernelMode 0x%lx\n", KernelMode);
+ dumpf("#define FALSE 0x%lx\n", FALSE);
+ dumpf("#define TRUE 0x%lx\n", TRUE);
+ DisableInc( HALALPHA );
+
+ dumpf("#define BASE_PRIORITY_THRESHOLD 0x%lx\n",
+ BASE_PRIORITY_THRESHOLD);
+
+ dumpf("#define EVENT_PAIR_INCREMENT 0x%lx\n",
+ EVENT_PAIR_INCREMENT);
+
+ dumpf("#define LOW_REALTIME_PRIORITY 0x%lx\n",
+ LOW_REALTIME_PRIORITY);
+
+ dumpf("#define MM_USER_PROBE_ADDRESS 0x%lx\n",
+ MM_USER_PROBE_ADDRESS);
+
+ dumpf("#define KERNEL_STACK_SIZE 0x%lx\n",
+ KERNEL_STACK_SIZE);
+
+ dumpf("#define KERNEL_LARGE_STACK_COMMIT 0x%lx\n",
+ KERNEL_LARGE_STACK_COMMIT);
+
+ dumpf("#define SET_LOW_WAIT_HIGH 0x%lx\n",
+ SET_LOW_WAIT_HIGH);
+
+ dumpf("#define SET_HIGH_WAIT_LOW 0x%lx\n",
+ SET_HIGH_WAIT_LOW);
+
+ dumpf("#define CLOCK_QUANTUM_DECREMENT 0x%lx\n",
+ CLOCK_QUANTUM_DECREMENT);
+
+ dumpf("#define READY_SKIP_QUANTUM 0x%lx\n",
+ READY_SKIP_QUANTUM);
+
+ dumpf("#define THREAD_QUANTUM 0x%lx\n",
+ THREAD_QUANTUM);
+
+ dumpf("#define WAIT_QUANTUM_DECREMENT 0x%lx\n",
+ WAIT_QUANTUM_DECREMENT);
+
+ dumpf("#define ROUND_TRIP_DECREMENT_COUNT 0x%lx\n",
+ ROUND_TRIP_DECREMENT_COUNT);
+
+ //
+ // Generate processor type definitions.
+ //
+
+ EnableInc( HALALPHA );
+ dumpf("#define PROCESSOR_ALPHA_21064 0x%lx\n",
+ PROCESSOR_ALPHA_21064);
+
+ dumpf("#define PROCESSOR_ALPHA_21164 0x%lx\n",
+ PROCESSOR_ALPHA_21164);
+
+ dumpf("#define PROCESSOR_ALPHA_21066 0x%lx\n",
+ PROCESSOR_ALPHA_21066);
+
+ dumpf("#define PROCESSOR_ALPHA_21068 0x%lx\n",
+ PROCESSOR_ALPHA_21068);
+ DisableInc( HALALPHA );
+
+ //
+ // Generate pte masks and offsets
+ //
+
+ pte.mask = 0;
+
+ pte.p.Valid = 0xffffffff;
+ dumpf( "#define PTE_VALID_MASK 0x%lx\n", pte.mask );
+ dumpf( "#define PTE_VALID 0x%lx\n", v(pte.mask) );
+ pte.p.Valid = 0;
+
+ pte.p.Owner = 0xffffffff;
+ dumpf( "#define PTE_OWNER_MASK 0x%lx\n", pte.mask );
+ dumpf( "#define PTE_OWNER 0x%lx\n", v(pte.mask) );
+ pte.p.Owner = 0;
+
+ pte.p.Dirty = 0xffffffff;
+ dumpf( "#define PTE_DIRTY_MASK 0x%lx\n", pte.mask );
+ dumpf( "#define PTE_DIRTY 0x%lx\n", v(pte.mask) );
+ pte.p.Dirty = 0;
+
+
+ pte.p.Global = 0xffffffff;
+ dumpf( "#define PTE_GLOBAL_MASK 0x%lx\n", pte.mask );
+ dumpf( "#define PTE_GLOBAL 0x%lx\n", v(pte.mask) );
+ pte.p.Global = 0;
+
+
+ pte.p.Write = 0xffffffff;
+ dumpf( "#define PTE_WRITE_MASK 0x%lx\n", pte.mask );
+ dumpf( "#define PTE_WRITE 0x%lx\n", v(pte.mask) );
+ pte.p.Write = 0;
+
+ pte.p.CopyOnWrite = 0xffffffff;
+ dumpf( "#define PTE_COPYONWRITE_MASK 0x%lx\n", pte.mask );
+ dumpf( "#define PTE_COPYONWRITE 0x%lx\n", v(pte.mask) );
+ pte.p.CopyOnWrite = 0;
+
+ pte.p.PageFrameNumber = 0xffffffff;
+ dumpf( "#define PTE_PFN_MASK 0x%lx\n", pte.mask );
+ dumpf( "#define PTE_PFN 0x%lx\n", v(pte.mask) );
+ pte.p.PageFrameNumber = 0;
+
+ psr.mask = 0;
+
+ psr.p.MODE = 0xffffffff;
+ dumpf( "#define PSR_MODE_MASK 0x%lx\n", psr.mask );
+ dumpf( "#define PSR_USER_MODE 0x%lx\n", psr.mask );
+ dumpf( "#define PSR_MODE 0x%lx\n", v(psr.mask) );
+ psr.p.MODE = 0;
+
+ psr.p.INTERRUPT_ENABLE = 0xffffffff;
+ dumpf( "#define PSR_IE_MASK 0x%lx\n", psr.mask );
+ dumpf( "#define PSR_IE 0x%lx\n", v(psr.mask) );
+ psr.p.INTERRUPT_ENABLE = 0;
+
+ psr.p.IRQL = 0xffffffff;
+ dumpf( "#define PSR_IRQL_MASK 0x%lx\n", psr.mask );
+ dumpf( "#define PSR_IRQL 0x%lx\n", v(psr.mask) );
+ psr.p.IRQL = 0;
+
+ ie.mask = 0;
+
+ ie.i.SoftwareInterruptEnables = 0xffffffff;
+ dumpf( "#define IE_SFW_MASK 0x%lx\n", ie.mask );
+ dumpf( "#define IE_SFW 0x%lx\n", v(ie.mask) );
+ ie.i.SoftwareInterruptEnables = 0;
+
+ ie.i.HardwareInterruptEnables = 0xffffffff;
+ dumpf( "#define IE_HDW_MASK 0x%lx\n", ie.mask );
+ dumpf( "#define IE_HDW 0x%lx\n", v(ie.mask) );
+ ie.i.HardwareInterruptEnables = 0;
+
+ EnableInc( HALALPHA );
+
+ mchk.mask = 0;
+
+ mchk.m.Correctable = 0xffffffff;
+ dumpf( "#define MCHK_CORRECTABLE_MASK 0x%lx\n", mchk.mask );
+ dumpf( "#define MCHK_CORRECTABLE 0x%lx\n", v(mchk.mask));
+ mchk.m.Correctable = 0;
+
+ mchk.m.Retryable = 0xffffffff;
+ dumpf( "#define MCHK_RETRYABLE_MASK 0x%lx\n", mchk.mask );
+ dumpf( "#define MCHK_RETRYABLE 0x%lx\n", v(mchk.mask) );
+ mchk.m.Retryable = 0;
+
+ mces.mask = 0;
+
+ mces.m.MachineCheck = 0xffffffff;
+ dumpf( "#define MCES_MCK_MASK 0x%lx\n", mces.mask );
+ dumpf( "#define MCES_MCK 0x%lx\n", v(mces.mask) );
+ mces.m.MachineCheck = 0;
+
+ mces.m.SystemCorrectable = 0xffffffff;
+ dumpf( "#define MCES_SCE_MASK 0x%lx\n", mces.mask );
+ dumpf( "#define MCES_SCE 0x%lx\n", v(mces.mask) );
+ mces.m.SystemCorrectable = 0;
+
+ mces.m.ProcessorCorrectable = 0xffffffff;
+ dumpf( "#define MCES_PCE_MASK 0x%lx\n", mces.mask );
+ dumpf( "#define MCES_PCE 0x%lx\n", v(mces.mask) );
+ mces.m.ProcessorCorrectable = 0;
+
+ mces.m.DisableProcessorCorrectable = 0xffffffff;
+ dumpf( "#define MCES_DPC_MASK 0x%lx\n", mces.mask );
+ dumpf( "#define MCES_DPC 0x%lx\n", v(mces.mask) );
+ mces.m.DisableProcessorCorrectable = 0;
+
+ mces.m.DisableSystemCorrectable = 0xffffffff;
+ dumpf( "#define MCES_DSC_MASK 0x%lx\n", mces.mask );
+ dumpf( "#define MCES_DSC 0x%lx\n", v(mces.mask) );
+ mces.m.DisableSystemCorrectable = 0;
+
+ mces.m.DisableMachineChecks = 0xffffffff;
+ dumpf( "#define MCES_DMCK_MASK 0x%lx\n", mces.mask );
+ dumpf( "#define MCES_DMCK 0x%lx\n", v(mces.mask) );
+ mces.m.DisableMachineChecks = 0;
+
+ DisableInc( HALALPHA );
+
+ excsum.mask = 0;
+
+ excsum.e.SoftwareCompletion = 0xffffffff;
+ dumpf( "#define EXCSUM_SWC_MASK 0x%lx\n", excsum.mask );
+ dumpf( "#define EXCSUM_SWC 0x%lx\n", v(excsum.mask) );
+ excsum.e.SoftwareCompletion = 0;
+
+ excsum.e.InvalidOperation = 0xffffffff;
+ dumpf( "#define EXCSUM_INV_MASK 0x%lx\n", excsum.mask );
+ dumpf( "#define EXCSUM_INV 0x%lx\n", v(excsum.mask) );
+ excsum.e.InvalidOperation = 0;
+
+ excsum.e.DivisionByZero = 0xffffffff;
+ dumpf( "#define EXCSUM_DZE_MASK 0x%lx\n", excsum.mask );
+ dumpf( "#define EXCSUM_DZE 0x%lx\n", v(excsum.mask) );
+ excsum.e.DivisionByZero = 0;
+
+ excsum.e.Overflow = 0xffffffff;
+ dumpf( "#define EXCSUM_OVF_MASK 0x%lx\n", excsum.mask );
+ dumpf( "#define EXCSUM_OVF 0x%lx\n", v(excsum.mask) );
+ excsum.e.Overflow = 0;
+
+ excsum.e.Underflow = 0xffffffff;
+ dumpf( "#define EXCSUM_UNF_MASK 0x%lx\n", excsum.mask );
+ dumpf( "#define EXCSUM_UNF 0x%lx\n", v(excsum.mask) );
+ excsum.e.Underflow = 0;
+
+ excsum.e.InexactResult = 0xffffffff;
+ dumpf( "#define EXCSUM_INE_MASK 0x%lx\n", excsum.mask );
+ dumpf( "#define EXCSUM_INE 0x%lx\n", v(excsum.mask) );
+ excsum.e.InexactResult = 0;
+
+ excsum.e.IntegerOverflow = 0xffffffff;
+ dumpf( "#define EXCSUM_IOV_MASK 0x%lx\n", excsum.mask );
+ dumpf( "#define EXCSUM_IOV 0x%lx\n", v(excsum.mask) );
+ excsum.e.IntegerOverflow = 0;
+
+
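
Every one of the pte, psr, ie, mchk, mces, and excsum blocks above leans on the same union trick: overlay the hardware bitfield layout with a plain integer, write all ones into one field, and read the integer back to recover that field's mask; the v() helper then produces the companion value that the generated header also wants. A minimal stand-alone sketch of the idea follows; the PTE layout and the meaning assumed for v() (shift the mask down to bit 0) are illustrative guesses, not the definitions this generator actually uses.

    #include <stdio.h>

    /* Hypothetical PTE layout, for illustration only. */
    typedef union _PTE_EXAMPLE {
        struct {
            unsigned Valid : 1;
            unsigned Owner : 1;
            unsigned Dirty : 1;
            unsigned Reserved : 29;
        } p;
        unsigned long mask;
    } PTE_EXAMPLE;

    /* Assumed meaning of v(): shift the mask right until the field sits at bit 0. */
    static unsigned long v(unsigned long mask)
    {
        while (mask != 0 && (mask & 1) == 0) {
            mask >>= 1;
        }
        return mask;
    }

    int main(void)
    {
        PTE_EXAMPLE pte;

        pte.mask = 0;
        pte.p.Dirty = 0xffffffff;                             /* saturate one field...  */
        printf("#define PTE_DIRTY_MASK 0x%lx\n", pte.mask);   /* ...then read its mask  */
        printf("#define PTE_DIRTY 0x%lx\n", v(pte.mask));
        pte.p.Dirty = 0;
        return 0;
    }
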
+ //
+ // Generate the call pal mnemonic to opcode definitions.
+ //
+
+ EnableInc( HALALPHA );
+
+ GenerateCallPalNames();
+
+ //
+ // Close header file.
+ //
+
+ fprintf(stderr, " Finished\n");
+ return;
+}
+
+#include "alphaops.h"
+
+//
+// N.B. any new call pal functions must be added to both alphaops.h
+// and to the call pal entry table below.
+//
+
+struct _CALLPAL_ENTRY{
+ SHORT CallPalFunction;
+ char *CallPalMnemonic;
+} CallPals[] = {
+ // Unprivileged Call Pals
+ { BPT_FUNC, BPT_FUNC_STR },
+ { CALLSYS_FUNC, CALLSYS_FUNC_STR },
+ { IMB_FUNC, IMB_FUNC_STR },
+ { GENTRAP_FUNC, GENTRAP_FUNC_STR },
+ { RDTEB_FUNC, RDTEB_FUNC_STR },
+ { KBPT_FUNC, KBPT_FUNC_STR },
+ { CALLKD_FUNC, CALLKD_FUNC_STR },
+ // Privileged Call Pals
+ { HALT_FUNC, HALT_FUNC_STR },
+ { RESTART_FUNC, RESTART_FUNC_STR },
+ { DRAINA_FUNC, DRAINA_FUNC_STR },
+ { REBOOT_FUNC, REBOOT_FUNC_STR },
+ { INITPAL_FUNC, INITPAL_FUNC_STR },
+ { WRENTRY_FUNC, WRENTRY_FUNC_STR },
+ { SWPIRQL_FUNC, SWPIRQL_FUNC_STR },
+ { RDIRQL_FUNC, RDIRQL_FUNC_STR },
+ { DI_FUNC, DI_FUNC_STR },
+ { EI_FUNC, EI_FUNC_STR },
+ { SWPPAL_FUNC, SWPPAL_FUNC_STR },
+ { SSIR_FUNC, SSIR_FUNC_STR },
+ { CSIR_FUNC, CSIR_FUNC_STR },
+ { RFE_FUNC, RFE_FUNC_STR },
+ { RETSYS_FUNC, RETSYS_FUNC_STR },
+ { SWPCTX_FUNC, SWPCTX_FUNC_STR },
+ { SWPPROCESS_FUNC, SWPPROCESS_FUNC_STR },
+ { RDMCES_FUNC, RDMCES_FUNC_STR },
+ { WRMCES_FUNC, WRMCES_FUNC_STR },
+ { TBIA_FUNC, TBIA_FUNC_STR },
+ { TBIS_FUNC, TBIS_FUNC_STR },
+ { TBISASN_FUNC, TBISASN_FUNC_STR },
+ { DTBIS_FUNC, DTBIS_FUNC_STR },
+ { RDKSP_FUNC, RDKSP_FUNC_STR },
+ { SWPKSP_FUNC, SWPKSP_FUNC_STR },
+ { RDPSR_FUNC, RDPSR_FUNC_STR },
+ { RDPCR_FUNC, RDPCR_FUNC_STR },
+ { RDTHREAD_FUNC, RDTHREAD_FUNC_STR },
+ { TBIM_FUNC, TBIM_FUNC_STR },
+ { TBIMASN_FUNC, TBIMASN_FUNC_STR },
+ { RDCOUNTERS_FUNC, RDCOUNTERS_FUNC_STR },
+ { RDSTATE_FUNC, RDSTATE_FUNC_STR },
+ { WRPERFMON_FUNC, WRPERFMON_FUNC_STR },
+ // 21064 (EV4) - specific functions
+ { INITPCR_FUNC, INITPCR_FUNC_STR },
+ // End of structure indicator
+ { -1, "" },
+};
+
+VOID
+GenerateCallPalNames( VOID )
+{
+ struct _CALLPAL_ENTRY *CallPal = CallPals;
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Call PAL mnemonics\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("// begin callpal\n" );
+ dumpf("\n" );
+
+ while( CallPal->CallPalFunction != -1 ){
+
+ dumpf( "#define %s 0x%lx\n",
+ CallPal->CallPalMnemonic,
+ CallPal->CallPalFunction );
+
+ CallPal++;
+ }
+
+ dumpf("\n" );
+ dumpf("// end callpal\n" );
+ dumpf("\n" );
+}
+
+VOID
+dumpf( const char *format, ... )
+
+{
+
+ va_list(arglist);
+
+ va_start(arglist, format);
+
+ if( OutputEnabled & KSALPHA ){
+ vfprintf( KsAlpha, format, arglist );
+ }
+
+ if( OutputEnabled & HALALPHA ){
+ vfprintf( HalAlpha, format, arglist );
+ }
+
+ va_end(arglist);
+}
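
One portability note on dumpf() above: when both output files are enabled, the same va_list is handed to vfprintf twice, and standard C leaves a va_list indeterminate after the first call; C99's va_copy is the portable way to traverse the arguments more than once. A hedged sketch of that shape is below; the two FILE pointers are placeholders, not the KsAlpha/HalAlpha handles or the OutputEnabled flags used by this module.

    #include <stdarg.h>
    #include <stdio.h>

    /* Placeholder streams standing in for the two generated header files. */
    static FILE *FirstFile;
    static FILE *SecondFile;

    static void
    dumpf_portable(const char *format, ...)
    {
        va_list arglist;
        va_list copy;

        va_start(arglist, format);

        if (FirstFile != NULL) {
            va_copy(copy, arglist);              /* copy before each traversal */
            vfprintf(FirstFile, format, copy);
            va_end(copy);
        }

        if (SecondFile != NULL) {
            va_copy(copy, arglist);
            vfprintf(SecondFile, format, copy);
            va_end(copy);
        }

        va_end(arglist);
    }

    int main(void)
    {
        FirstFile = stdout;
        SecondFile = stderr;
        dumpf_portable("#define EXAMPLE 0x%lx\n", 0x10UL);
        return 0;
    }
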
diff --git a/private/ntos/ke/alpha/getsetrg.c b/private/ntos/ke/alpha/getsetrg.c
new file mode 100644
index 000000000..f670fa603
--- /dev/null
+++ b/private/ntos/ke/alpha/getsetrg.c
@@ -0,0 +1,1081 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+Copyright (c) 1992 Digital Equipment Corporation
+
+Module Name:
+
+ getsetrg.c
+
+Abstract:
+
+    This module implements the code necessary to get and set register values.
+ These routines are used during the emulation of unaligned data references
+ and floating point exceptions.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ Thomas Van Baak (tvb) 14-Jul-1992
+
+ Adapted for NT/Alpha
+
+--*/
+
+#include "ki.h"
+
+ULONGLONG
+KiGetRegisterValue (
+ IN ULONG Register,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to get the value of a register from the specified
+ exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ returned. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The value of the specified register is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ switch (Register) {
+
+ //
+ // Integer register V0.
+ //
+
+ case 0:
+ return TrapFrame->IntV0;
+
+ //
+ // Integer register T0.
+ //
+
+ case 1:
+ return TrapFrame->IntT0;
+
+ //
+ // Integer register T1.
+ //
+
+ case 2:
+ return TrapFrame->IntT1;
+
+ //
+ // Integer register T2.
+ //
+
+ case 3:
+ return TrapFrame->IntT2;
+
+ //
+ // Integer register T3.
+ //
+
+ case 4:
+ return TrapFrame->IntT3;
+
+ //
+ // Integer register T4.
+ //
+
+ case 5:
+ return TrapFrame->IntT4;
+
+ //
+ // Integer register T5.
+ //
+
+ case 6:
+ return TrapFrame->IntT5;
+
+ //
+ // Integer register T6.
+ //
+
+ case 7:
+ return TrapFrame->IntT6;
+
+ //
+ // Integer register T7.
+ //
+
+ case 8:
+ return TrapFrame->IntT7;
+
+ //
+ // Integer register S0.
+ //
+
+ case 9:
+ return ExceptionFrame->IntS0;
+
+ //
+ // Integer register S1.
+ //
+
+ case 10:
+ return ExceptionFrame->IntS1;
+
+ //
+ // Integer register S2.
+ //
+
+ case 11:
+ return ExceptionFrame->IntS2;
+
+ //
+ // Integer register S3.
+ //
+
+ case 12:
+ return ExceptionFrame->IntS3;
+
+ //
+ // Integer register S4.
+ //
+
+ case 13:
+ return ExceptionFrame->IntS4;
+
+ //
+ // Integer register S5.
+ //
+
+ case 14:
+ return ExceptionFrame->IntS5;
+
+ //
+ // Integer register S6/Fp.
+ //
+ // N.B. Unlike the other S registers, S6 is obtained from the trap
+ // frame instead of the exception frame since it is used by the kernel
+ // as a trap frame pointer.
+ //
+
+ case 15:
+ return TrapFrame->IntFp;
+
+ //
+ // Integer register A0.
+ //
+
+ case 16:
+ return TrapFrame->IntA0;
+
+ //
+ // Integer register A1.
+ //
+
+ case 17:
+ return TrapFrame->IntA1;
+
+ //
+    // Integer register A2.
+ //
+
+ case 18:
+ return TrapFrame->IntA2;
+
+ //
+ // Integer register A3.
+ //
+
+ case 19:
+ return TrapFrame->IntA3;
+
+ //
+ // Integer register A4.
+ //
+
+ case 20:
+ return TrapFrame->IntA4;
+
+ //
+ // Integer register A5.
+ //
+
+ case 21:
+ return TrapFrame->IntA5;
+
+ //
+ // Integer register T8.
+ //
+
+ case 22:
+ return TrapFrame->IntT8;
+
+ //
+ // Integer register T9.
+ //
+
+ case 23:
+ return TrapFrame->IntT9;
+
+ //
+ // Integer register T10.
+ //
+
+ case 24:
+ return TrapFrame->IntT10;
+
+ //
+ // Integer register T11.
+ //
+
+ case 25:
+ return TrapFrame->IntT11;
+
+ //
+ // Integer register Ra.
+ //
+
+ case 26:
+ return TrapFrame->IntRa;
+
+ //
+ // Integer register T12.
+ //
+
+ case 27:
+ return TrapFrame->IntT12;
+
+ //
+ // Integer register At.
+ //
+
+ case 28:
+ return TrapFrame->IntAt;
+
+ //
+ // Integer register Gp.
+ //
+
+ case 29:
+ return TrapFrame->IntGp;
+
+ //
+ // Integer register Sp.
+ //
+
+ case 30:
+ return TrapFrame->IntSp;
+
+ //
+ // Integer register Zero.
+ //
+
+ case 31:
+ return 0;
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ return TrapFrame->FltF0;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ return TrapFrame->FltF1;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ return ExceptionFrame->FltF2;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ return ExceptionFrame->FltF3;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ return ExceptionFrame->FltF4;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ return ExceptionFrame->FltF5;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ return ExceptionFrame->FltF6;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ return ExceptionFrame->FltF7;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ return ExceptionFrame->FltF8;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ return ExceptionFrame->FltF9;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ return TrapFrame->FltF10;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ return TrapFrame->FltF11;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ return TrapFrame->FltF12;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ return TrapFrame->FltF13;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ return TrapFrame->FltF14;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ return TrapFrame->FltF15;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ return TrapFrame->FltF16;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ return TrapFrame->FltF17;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ return TrapFrame->FltF18;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ return TrapFrame->FltF19;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ return TrapFrame->FltF20;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ return TrapFrame->FltF21;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ return TrapFrame->FltF22;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ return TrapFrame->FltF23;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ return TrapFrame->FltF24;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ return TrapFrame->FltF25;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ return TrapFrame->FltF26;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ return TrapFrame->FltF27;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ return TrapFrame->FltF28;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ return TrapFrame->FltF29;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ return TrapFrame->FltF30;
+
+ //
+ // Floating register F31 (Zero).
+ //
+
+ case 63:
+ return 0;
+ }
+}
+
+VOID
+KiSetRegisterValue (
+ IN ULONG Register,
+ IN ULONGLONG Value,
+ OUT PKEXCEPTION_FRAME ExceptionFrame,
+ OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the value of a register in the specified
+ exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ stored. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ Value - Supplies the value to be stored in the specified register.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ switch (Register) {
+
+ //
+ // Integer register V0.
+ //
+
+ case 0:
+ TrapFrame->IntV0 = Value;
+ return;
+
+ //
+ // Integer register T0.
+ //
+
+ case 1:
+ TrapFrame->IntT0 = Value;
+ return;
+
+ //
+ // Integer register T1.
+ //
+
+ case 2:
+ TrapFrame->IntT1 = Value;
+ return;
+
+ //
+ // Integer register T2.
+ //
+
+ case 3:
+ TrapFrame->IntT2 = Value;
+ return;
+
+ //
+ // Integer register T3.
+ //
+
+ case 4:
+ TrapFrame->IntT3 = Value;
+ return;
+
+ //
+ // Integer register T4.
+ //
+
+ case 5:
+ TrapFrame->IntT4 = Value;
+ return;
+
+ //
+ // Integer register T5.
+ //
+
+ case 6:
+ TrapFrame->IntT5 = Value;
+ return;
+
+ //
+ // Integer register T6.
+ //
+
+ case 7:
+ TrapFrame->IntT6 = Value;
+ return;
+
+ //
+ // Integer register T7.
+ //
+
+ case 8:
+ TrapFrame->IntT7 = Value;
+ return;
+
+ //
+ // Integer register S0.
+ //
+
+ case 9:
+ ExceptionFrame->IntS0 = Value;
+ return;
+
+ //
+ // Integer register S1.
+ //
+
+ case 10:
+ ExceptionFrame->IntS1 = Value;
+ return;
+
+ //
+ // Integer register S2.
+ //
+
+ case 11:
+ ExceptionFrame->IntS2 = Value;
+ return;
+
+ //
+ // Integer register S3.
+ //
+
+ case 12:
+ ExceptionFrame->IntS3 = Value;
+ return;
+
+ //
+ // Integer register S4.
+ //
+
+ case 13:
+ ExceptionFrame->IntS4 = Value;
+ return;
+
+ //
+ // Integer register S5.
+ //
+
+ case 14:
+ ExceptionFrame->IntS5 = Value;
+ return;
+
+ //
+ // Integer register S6/Fp.
+ //
+ // N.B. Unlike the other S registers, S6 is stored back in the trap
+ // frame instead of the exception frame since it is used by the kernel
+ // as a trap frame pointer.
+ //
+
+ case 15:
+ TrapFrame->IntFp = Value;
+ return;
+
+ //
+ // Integer register A0.
+ //
+
+ case 16:
+ TrapFrame->IntA0 = Value;
+ return;
+
+ //
+ // Integer register A1.
+ //
+
+ case 17:
+ TrapFrame->IntA1 = Value;
+ return;
+
+ //
+ // Integer register A2.
+ //
+
+ case 18:
+ TrapFrame->IntA2 = Value;
+ return;
+
+ //
+ // Integer register A3.
+ //
+
+ case 19:
+ TrapFrame->IntA3 = Value;
+ return;
+
+ //
+ // Integer register A4.
+ //
+
+ case 20:
+ TrapFrame->IntA4 = Value;
+ return;
+
+ //
+ // Integer register A5.
+ //
+
+ case 21:
+ TrapFrame->IntA5 = Value;
+ return;
+
+ //
+ // Integer register T8.
+ //
+
+ case 22:
+ TrapFrame->IntT8 = Value;
+ return;
+
+ //
+ // Integer register T9.
+ //
+
+ case 23:
+ TrapFrame->IntT9 = Value;
+ return;
+
+ //
+ // Integer register T10.
+ //
+
+ case 24:
+ TrapFrame->IntT10 = Value;
+ return;
+
+ //
+ // Integer register T11.
+ //
+
+ case 25:
+ TrapFrame->IntT11 = Value;
+ return;
+
+ //
+ // Integer register Ra.
+ //
+
+ case 26:
+ TrapFrame->IntRa = Value;
+ return;
+
+ //
+ // Integer register T12.
+ //
+
+ case 27:
+ TrapFrame->IntT12 = Value;
+ return;
+
+ //
+ // Integer register At.
+ //
+
+ case 28:
+ TrapFrame->IntAt = Value;
+ return;
+
+ //
+ // Integer register Gp.
+ //
+
+ case 29:
+ TrapFrame->IntGp = Value;
+ return;
+
+ //
+ // Integer register Sp.
+ //
+
+ case 30:
+ TrapFrame->IntSp = Value;
+ return;
+
+ //
+ // Integer register Zero.
+ //
+
+ case 31:
+ return;
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ TrapFrame->FltF0 = Value;
+ return;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ TrapFrame->FltF1 = Value;
+ return;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ ExceptionFrame->FltF2 = Value;
+ return;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ ExceptionFrame->FltF3 = Value;
+ return;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ ExceptionFrame->FltF4 = Value;
+ return;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ ExceptionFrame->FltF5 = Value;
+ return;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ ExceptionFrame->FltF6 = Value;
+ return;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ ExceptionFrame->FltF7 = Value;
+ return;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ ExceptionFrame->FltF8 = Value;
+ return;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ ExceptionFrame->FltF9 = Value;
+ return;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ TrapFrame->FltF10 = Value;
+ return;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ TrapFrame->FltF11 = Value;
+ return;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ TrapFrame->FltF12 = Value;
+ return;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ TrapFrame->FltF13 = Value;
+ return;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ TrapFrame->FltF14 = Value;
+ return;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ TrapFrame->FltF15 = Value;
+ return;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ TrapFrame->FltF16 = Value;
+ return;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ TrapFrame->FltF17 = Value;
+ return;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ TrapFrame->FltF18 = Value;
+ return;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ TrapFrame->FltF19 = Value;
+ return;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ TrapFrame->FltF20 = Value;
+ return;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ TrapFrame->FltF21 = Value;
+ return;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ TrapFrame->FltF22 = Value;
+ return;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ TrapFrame->FltF23 = Value;
+ return;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ TrapFrame->FltF24 = Value;
+ return;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ TrapFrame->FltF25 = Value;
+ return;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ TrapFrame->FltF26 = Value;
+ return;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ TrapFrame->FltF27 = Value;
+ return;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ TrapFrame->FltF28 = Value;
+ return;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ TrapFrame->FltF29 = Value;
+ return;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ TrapFrame->FltF30 = Value;
+ return;
+
+ //
+ // Floating register F31 (Zero).
+ //
+
+ case 63:
+ return;
+ }
+}
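
The get/set pair above is the register-file abstraction that the unaligned-access and floating-point emulation code builds on: fetch operands by register number, compute, and store the result without caring which frame a register happens to live in. The convention it encodes is worth spelling out: numbers 0-31 are integer registers and 32-63 are floating registers; 31 and 63 read as zero and ignore writes; S0-S5 (9-14) and F2-F9 (34-41) are kept in the exception frame, while everything else writable, including S6/Fp (15), is kept in the trap frame. The small stand-alone model below restates just that mapping; the enum and function names are invented for illustration.

    #include <stdio.h>

    typedef enum _REGISTER_HOME {
        RegisterReadsAsZero,
        RegisterInExceptionFrame,
        RegisterInTrapFrame
    } REGISTER_HOME;

    static REGISTER_HOME RegisterHome(unsigned Register)
    {
        if (Register == 31 || Register == 63) {
            return RegisterReadsAsZero;
        }
        if ((Register >= 9 && Register <= 14) ||      /* integer S0-S5  */
            (Register >= 34 && Register <= 41)) {     /* floating F2-F9 */
            return RegisterInExceptionFrame;
        }
        return RegisterInTrapFrame;                   /* includes S6/Fp (15) */
    }

    int main(void)
    {
        static const char *Names[] = {
            "reads as zero", "exception frame", "trap frame"
        };
        unsigned Probe[] = { 0, 9, 15, 31, 34, 42, 63 };
        unsigned i;

        for (i = 0; i < sizeof(Probe) / sizeof(Probe[0]); i += 1) {
            printf("register %2u: %s\n", Probe[i], Names[RegisterHome(Probe[i])]);
        }
        return 0;
    }
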
diff --git a/private/ntos/ke/alpha/initkr.c b/private/ntos/ke/alpha/initkr.c
new file mode 100644
index 000000000..187a69da4
--- /dev/null
+++ b/private/ntos/ke/alpha/initkr.c
@@ -0,0 +1,499 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ initkr.c
+
+Abstract:
+
+ This module contains the code to initialize the kernel data structures
+ and to initialize the idle thread, its process, and the processor control
+ block.
+
+Author:
+
+ David N. Cutler (davec) 11-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ Joe Notarangelo 21-April-1992
+ very minor changes for ALPHA
+ - system time to 64bit integer
+ - some data moved out of pcr
+--*/
+
+#include "ki.h"
+
+//
+// Global Data
+//
+
+//
+// The Alpha architecture includes a feature called address space
+// numbers (ASN).  Some MIPS processors support a similar feature called
+// Process IDs (PID). NT supports both using the nomenclature Process
+// ID.
+//
+
+//
+// Maximum PID that can be assigned to any process
+//
+
+ULONG KiMaximumPid = ALPHA_AXP_MAXIMUM_ASN;
+
+//
+// Spinlock for protecting the synchronization of address space
+// numbers when the address space number wraps and a tbiap operation
+// needs to be performed across all processors.
+//
+
+KSPIN_LOCK KiSynchronizeAsnsLock;
+
+
+
+VOID
+KiInitializeKernel (
+ IN PKPROCESS Process,
+ IN PKTHREAD Thread,
+ IN PVOID IdleStack,
+ IN PKPRCB Prcb,
+ IN CCHAR Number,
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function gains control after the system has been bootstrapped and
+ before the system has been initialized. Its function is to initialize
+ the kernel data structures, initialize the idle thread and process objects,
+ initialize the processor control block, call the executive initialization
+ routine, and then return to the system startup routine. This routine is
+ also called to initialize the processor specific structures when a new
+ processor is brought on line.
+
+Arguments:
+
+ Process - Supplies a pointer to a control object of type process for
+ the specified processor.
+
+ Thread - Supplies a pointer to a dispatcher object of type thread for
+ the specified processor.
+
+    IdleStack - Supplies a pointer to the base of the real kernel stack for
+ idle thread on the specified processor.
+
+ Prcb - Supplies a pointer to a processor control block for the specified
+ processor.
+
+ Number - Supplies the number of the processor that is being
+ initialized.
+
+ LoaderBlock - Supplies a pointer to the loader parameter block.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ UCHAR DataByte;
+ ULONG DataLong;
+ LONG Index;
+ KIRQL OldIrql;
+ PRESTART_BLOCK RestartBlock;
+
+ //
+ // Save the address of the loader parameter block.
+ //
+
+ KeLoaderBlock = LoaderBlock;
+
+ //
+ // Set the appropriate member in the active processors set.
+ //
+
+ SetMember(Number, KeActiveProcessors);
+
+ //
+ // Set the number of processors based on the maximum of the current
+ // number of processors and the current processor number.
+ //
+
+ if ((Number + 1) > KeNumberProcessors) {
+ KeNumberProcessors = Number + 1;
+ }
+
+ //
+ // Set the maximum address space number to the minimum of all
+ // maximum address space numbers passed via the loader block.
+ // (Note that NT calls this feature PID, while the Alpha architecture
+ // calls it ASN).
+ //
+
+ if( KiMaximumPid > LoaderBlock->u.Alpha.MaximumAddressSpaceNumber ){
+ KiMaximumPid = LoaderBlock->u.Alpha.MaximumAddressSpaceNumber;
+    }
+
+    //
+ // Initialize the passive release, APC, and DPC interrupt vectors.
+ //
+
+ PCR->InterruptRoutine[0] = KiPassiveRelease;
+ PCR->InterruptRoutine[APC_LEVEL] = KiApcInterrupt;
+ PCR->InterruptRoutine[DISPATCH_LEVEL] = KiDispatchInterrupt;
+ PCR->ReservedVectors =
+ (1 << PASSIVE_LEVEL) | (1 << APC_LEVEL) | (1 << DISPATCH_LEVEL);
+
+ //
+ // Initialize the processor id fields in the PCR.
+ //
+
+ PCR->Number = Number;
+ PCR->SetMember = 1 << Number;
+ PCR->NotMember = ~PCR->SetMember;
+
+
+ //
+ // Initialize the processor block.
+ //
+
+ Prcb->MinorVersion = PRCB_MINOR_VERSION;
+ Prcb->MajorVersion = PRCB_MAJOR_VERSION;
+ Prcb->BuildType = 0;
+
+#if DBG
+
+ Prcb->BuildType |= PRCB_BUILD_DEBUG;
+
+#endif
+
+#ifdef NT_UP
+
+ Prcb->BuildType |= PRCB_BUILD_UNIPROCESSOR;
+
+#endif
+
+ Prcb->CurrentThread = Thread;
+ Prcb->NextThread = (PKTHREAD)NULL;
+ Prcb->IdleThread = Thread;
+ Prcb->Number = Number;
+ Prcb->SetMember = 1 << Number;
+
+ KeInitializeDpc(&Prcb->QuantumEndDpc,
+ (PKDEFERRED_ROUTINE)KiQuantumEnd,
+ NIL);
+
+#if !defined(NT_UP)
+
+ Prcb->TargetSet = 0;
+ Prcb->WorkerRoutine = NULL;
+ Prcb->RequestSummary = 0;
+ Prcb->IpiFrozen = 0;
+
+#if NT_INST
+
+ Prcb->IpiCounts = &KiIpiCounts[Number];
+
+#endif //NT_INST
+
+#endif //NT_UP
+
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+
+ //
+ // Initialize DPC listhead and lock.
+ //
+
+ InitializeListHead(&Prcb->DpcListHead);
+ KeInitializeSpinLock(&Prcb->DpcLock);
+
+ //
+ // Set address of processor block.
+ //
+
+ KiProcessorBlock[Number] = Prcb;
+
+ //
+ // Set address of process object in thread object.
+ //
+
+ Thread->ApcState.Process = Process;
+
+ //
+ // Set the appropriate member in the active processors set.
+ //
+
+ SetMember( Number, KeActiveProcessors );
+
+ //
+ // Set the number of processors based on the maximum of the current
+ // number of processors and the current processor number.
+ //
+
+ if( (Number+1) > KeNumberProcessors ){
+ KeNumberProcessors = Number + 1;
+ }
+
+ //
+ // Set global processor architecture, level and revision. The
+ // latter two are the least common denominator on an MP system.
+ //
+
+ KeProcessorArchitecture = PROCESSOR_ARCHITECTURE_ALPHA;
+ KeFeatureBits = 0;
+ if ( KeProcessorLevel == 0 ||
+ KeProcessorLevel > (USHORT)PCR->ProcessorType
+ ) {
+ KeProcessorLevel = (USHORT)PCR->ProcessorType;
+ }
+ if ( KeProcessorRevision == 0 ||
+ KeProcessorRevision > (USHORT)PCR->ProcessorRevision
+ ) {
+ KeProcessorRevision = (USHORT)PCR->ProcessorRevision;
+ }
+
+ //
+ // Initialize all interrupt vectors to transfer control to the unexpected
+ // interrupt routine.
+ //
+ // N.B. This interrupt object is never actually "connected" to an interrupt
+ // vector via KeConnectInterrupt. It is initialized and then connected
+ // by simply storing the address of the dispatch code in the interrupt
+ // vector.
+ //
+
+
+ if (Number == 0) {
+
+ //
+        // Initialize the address of the interrupt dispatch routine.
+ //
+
+ KxUnexpectedInterrupt.DispatchAddress = KiUnexpectedInterrupt;
+
+ //
+ // Initialize the context swap spinlock.
+ //
+
+ KeInitializeSpinLock(&KiContextSwapLock);
+
+ //
+ // Copy the interrupt dispatch code template into the interrupt object
+ // and flush the dcache on all processors that the current thread can
+ // run on to ensure that the code is actually in memory.
+ //
+
+ for (Index = 0; Index < DISPATCH_LENGTH; Index += 1) {
+ KxUnexpectedInterrupt.DispatchCode[Index] = KiInterruptTemplate[Index];
+ }
+
+ //
+ // Sweep the instruction cache on the current processor.
+ //
+
+ KiImb();
+ }
+
+ for (Index = DISPATCH_LEVEL+1; Index < MAXIMUM_VECTOR; Index += 1) {
+ PCR->InterruptRoutine[Index] = (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+
+ //
+ // Raise IRQL to APC level.
+ //
+
+ KeRaiseIrql(APC_LEVEL, &OldIrql);
+
+ //
+ // If the initial processor is being initialized, then initialize the
+ // per system data structures.
+ //
+
+ if (Number == 0) {
+
+ //
+ // Initialize the address of the restart block for the boot master.
+ //
+
+ Prcb->RestartBlock = SYSTEM_BLOCK->RestartBlock;
+
+ //
+ // Initialize the kernel debugger if enabled by the load options.
+ //
+
+ if (KdInitSystem(LoaderBlock, FALSE) == FALSE) {
+ KeBugCheck(PHASE0_INITIALIZATION_FAILED);
+ }
+
+#if DBG
+
+ //
+ // Allow a breakin to the kernel debugger if one is pending.
+ //
+
+ if (KdPollBreakIn() != FALSE){
+ DbgBreakPointWithStatus(DBG_STATUS_CONTROL_C);
+ }
+
+#endif //DBG
+
+ //
+ // Initialize processor block array.
+ //
+
+ for (Index = 1; Index < MAXIMUM_PROCESSORS; Index += 1) {
+ KiProcessorBlock[Index] = (PKPRCB)NULL;
+ }
+
+ //
+ // Initialize default DMA coherency value for Alpha
+ //
+ KiDmaIoCoherency = DMA_READ_DCACHE_INVALIDATE | DMA_WRITE_DCACHE_SNOOP;
+
+ //
+ // Perform architecture independent initialization.
+ //
+
+ KiInitSystem();
+
+ //
+ // Initialize idle thread process object and then set:
+ //
+ // 1. all the quantum values to the maximum possible.
+ // 2. the process in the balance set.
+ // 3. the active processor mask to the specified processor.
+ //
+
+ KeInitializeProcess(Process,
+ (KPRIORITY)0,
+ (KAFFINITY)(0xffffffff),
+ (PULONG)(PDE_BASE + ((PDE_BASE >> PDI_SHIFT - 2) & 0xffc)),
+ FALSE);
+
+ Process->ThreadQuantum = MAXCHAR;
+
+ //
+ // Initialize the spinlock for synchronizing ASNs.
+ //
+
+ KeInitializeSpinLock( &KiSynchronizeAsnsLock );
+
+ }
+
+ //
+ // Initialize idle thread object and then set:
+ //
+ // 1. the initial kernel stack to the specified idle stack.
+ // 2. the next processor number to the specified processor.
+ // 3. the thread priority to the highest possible value.
+ // 4. the state of the thread to running.
+ // 5. the thread affinity to the specified processor.
+ // 6. the specified processor member in the process active processors
+ // set.
+ //
+
+ KeInitializeThread(Thread, (PVOID)((ULONG)IdleStack - PAGE_SIZE),
+ (PKSYSTEM_ROUTINE)NULL, (PKSTART_ROUTINE)NULL,
+ (PVOID)NULL, (PCONTEXT)NULL, (PVOID)NULL, Process);
+
+ Thread->InitialStack = IdleStack;
+ Thread->StackBase = IdleStack;
+ Thread->StackLimit = (PVOID)((ULONG)IdleStack - KERNEL_STACK_SIZE);
+ Thread->NextProcessor = Number;
+ Thread->Priority = HIGH_PRIORITY;
+ Thread->State = Running;
+ Thread->Affinity = (KAFFINITY)(1 << Number);
+ Thread->WaitIrql = DISPATCH_LEVEL;
+
+ //
+ // If the current processor is the boot master then set the appropriate
+ // bit in the active summary of the idle process.
+ //
+
+ if( Number == 0 ){
+ SetMember(Number, Process->ActiveProcessors);
+ }
+
+ //
+ // call the executive initialization routine.
+ //
+
+ try {
+ ExpInitializeExecutive(Number, LoaderBlock);
+ }
+ except( EXCEPTION_EXECUTE_HANDLER ) {
+ KeBugCheck (PHASE0_EXCEPTION);
+ }
+
+ //
+ // If the initial processor is being initialized, then compute the
+ // timer table reciprocal value and reset the PRCB values for
+ // the controllable DPC behavior in order to reflect any registry
+ // overrides
+ //
+
+ if (Number == 0) {
+ KiTimeIncrementReciprocal = KiComputeReciprocal((LONG)KeMaximumIncrement,
+ &KiTimeIncrementShiftCount);
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+ }
+
+    //
+ // Raise IRQL to dispatch level and set the priority of the idle thread
+ // to zero. This will have the effect of immediately causing the phase
+ // one initialization thread to get scheduled for execution. The idle
+ // thread priority is then set to the lowest realtime priority. This is
+    // necessary so that mutexes acquired at DPC level do not cause the active
+ // matrix to get corrupted.
+ //
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeSetPriorityThread(Thread, (KPRIORITY)0);
+ Thread->Priority = LOW_REALTIME_PRIORITY;
+
+ //
+ // Raise IRQL to the highest level.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ //
+ // If a restart block exists for the current processor then set boot
+ // completed.
+ //
+
+#if !defined(NT_UP)
+
+ RestartBlock = Prcb->RestartBlock;
+
+ if( RestartBlock != NULL ){
+ RestartBlock->BootStatus.BootFinished = 1;
+ }
+
+ //
+ // If the current processor is a secondary processor then set the
+ // appropriate bit in the idle summary.
+ //
+
+ if( Number != 0 ){
+ SetMember( Number, KiIdleSummary );
+ }
+
+#endif //NT_UP
+
+ return;
+}
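
Several steps in KiInitializeKernel above track processors as single bits in a mask: PCR->SetMember and the idle thread's affinity are both computed as 1 << Number, and SetMember() is used to fold that bit into sets such as KeActiveProcessors and KiIdleSummary. The sketch below shows that bookkeeping in isolation; the type and the two helpers are illustrative stand-ins, not the kernel's SetMember macro or KAFFINITY definition.

    #include <stdio.h>

    typedef unsigned long KAFFINITY_EXAMPLE;    /* one bit per processor */

    /* Assumed shape of the SetMember() bookkeeping: OR the processor's bit in. */
    static void SetMemberExample(int Number, KAFFINITY_EXAMPLE *Set)
    {
        *Set |= (KAFFINITY_EXAMPLE)1 << Number;
    }

    static int IsMemberExample(int Number, KAFFINITY_EXAMPLE Set)
    {
        return (int)((Set >> Number) & 1);
    }

    int main(void)
    {
        KAFFINITY_EXAMPLE ActiveProcessors = 0;

        /* One call per processor, as each pass through KiInitializeKernel makes. */
        SetMemberExample(0, &ActiveProcessors);
        SetMemberExample(2, &ActiveProcessors);

        printf("active mask = 0x%lx\n", ActiveProcessors);   /* prints 0x5 */
        printf("cpu 1 active? %d\n", IsMemberExample(1, ActiveProcessors));
        return 0;
    }
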
diff --git a/private/ntos/ke/alpha/intobj.c b/private/ntos/ke/alpha/intobj.c
new file mode 100644
index 000000000..36a7af8f1
--- /dev/null
+++ b/private/ntos/ke/alpha/intobj.c
@@ -0,0 +1,435 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ intobj.c
+
+Abstract:
+
+ This module implements the kernel interrupt object. Functions are provided
+ to initialize, connect, and disconnect interrupt objects.
+
+Author:
+
+ David N. Cutler (davec) 3-Apr-1990
+
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+
+VOID
+KeInitializeInterrupt (
+ IN PKINTERRUPT Interrupt,
+ IN PKSERVICE_ROUTINE ServiceRoutine,
+ IN PVOID ServiceContext,
+ IN PKSPIN_LOCK SpinLock OPTIONAL,
+ IN ULONG Vector,
+ IN KIRQL Irql,
+ IN KIRQL SynchronizeIrql,
+ IN KINTERRUPT_MODE InterruptMode,
+ IN BOOLEAN ShareVector,
+ IN CCHAR ProcessorNumber,
+ IN BOOLEAN FloatingSave
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel interrupt object. The service routine,
+ service context, spin lock, vector, IRQL, Synchronized IRQL, and floating
+ context save flag are initialized.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+ ServiceRoutine - Supplies a pointer to a function that is to be
+ executed when an interrupt occurs via the specified interrupt
+ vector.
+
+ ServiceContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the ServiceRoutine parameter.
+
+ SpinLock - Supplies an optional pointer to an executive spin lock.
+
+ Vector - Supplies the index of the entry in the Interrupt Dispatch Table
+ that is to be associated with the ServiceRoutine function.
+
+ Irql - Supplies the request priority of the interrupting source.
+
+ SynchronizeIrql - The request priority that the interrupt should be
+ synchronized with.
+
+ InterruptMode - Supplies the mode of the interrupt; LevelSensitive or
+ Latched.
+
+ ShareVector - Supplies a boolean value that specifies whether the
+ vector can be shared with other interrupt objects or not. If FALSE
+        then the vector may not be shared, if TRUE it may be.
+
+ ProcessorNumber - Supplies the number of the processor to which the
+ interrupt will be connected.
+
+ FloatingSave - Supplies a boolean value that determines whether the
+ floating point registers and pipe line are to be saved before calling
+ the ServiceRoutine function.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Index;
+
+ //
+ // Initialize standard control object header.
+ //
+
+ Interrupt->Type = InterruptObject;
+ Interrupt->Size = sizeof(KINTERRUPT);
+
+ //
+ // Initialize the address of the service routine, the service context,
+ // the address of the spin lock, the address of the actual spin lock
+ // that will be used, the vector number, the IRQL of the interrupting
+ // source, the Synchronized IRQL of the interrupt object, the interrupt
+ // mode, the processor number, and the floating context save flag.
+ //
+
+ Interrupt->ServiceRoutine = ServiceRoutine;
+ Interrupt->ServiceContext = ServiceContext;
+
+ if (ARGUMENT_PRESENT(SpinLock)) {
+ Interrupt->ActualLock = SpinLock;
+ } else {
+ Interrupt->SpinLock = 0;
+ Interrupt->ActualLock = &Interrupt->SpinLock;
+ }
+
+ Interrupt->Vector = Vector;
+ Interrupt->Irql = Irql;
+ Interrupt->SynchronizeIrql = SynchronizeIrql;
+ Interrupt->Mode = InterruptMode;
+ Interrupt->ShareVector = ShareVector;
+ Interrupt->Number = ProcessorNumber;
+ Interrupt->FloatingSave = FloatingSave;
+
+ //
+ // Copy the interrupt dispatch code template into the interrupt object
+ // and flush the dcache on all processors that the current thread can
+ // run on to ensure that the code is actually in memory.
+ //
+
+ for (Index = 0; Index < DISPATCH_LENGTH; Index += 1) {
+ Interrupt->DispatchCode[Index] = KiInterruptTemplate[Index];
+ }
+
+ KeSweepIcache(FALSE);
+
+ //
+ // Set the connected state of the interrupt object to FALSE.
+ //
+
+ Interrupt->Connected = FALSE;
+ return;
+}
+
+BOOLEAN
+KeConnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function connects an interrupt object to the interrupt vector
+ specified by the interrupt object. If the interrupt object is already
+ connected, or an attempt is made to connect to an interrupt that cannot
+ be connected, then a value of FALSE is returned. Else the specified
+ interrupt object is connected to the interrupt vector, the connected
+ state is set to TRUE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is already connected or an attempt is made to
+ connect to an interrupt vector that cannot be connected, then a value
+ of FALSE is returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ KAFFINITY Affinity;
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ KIRQL Irql;
+ CHAR Number;
+ KIRQL OldIrql;
+ ULONG Vector;
+
+ //
+ // If the interrupt object is already connected, the interrupt vector
+ // number is invalid, an attempt is being made to connect to a vector
+ // that cannot be connected, the interrupt request level is invalid,
+ // the processor number is invalid, of the interrupt vector is less
+    // the processor number is invalid, or the interrupt vector is less
+    // than or equal to the highest level and is not equal to the specified
+ // object to the specified vector and establish the proper interrupt
+ // dispatcher.
+ //
+
+ Connected = FALSE;
+ Irql = Interrupt->Irql;
+ Number = Interrupt->Number;
+ Vector = Interrupt->Vector;
+ if (((Vector >= MAXIMUM_VECTOR) || (Irql > HIGH_LEVEL) ||
+ ((Vector <= HIGH_LEVEL) &&
+ (((1 << Vector & PCR->ReservedVectors) != 0))) ||
+ (Number >= KeNumberProcessors)) == FALSE) {
+
+        //
+ // Set affinity to the specified processor.
+ //
+
+ Affinity = KeSetAffinityThread(KeGetCurrentThread(),
+ (KAFFINITY)(1 << Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the specified interrupt vector is not connected, then
+ // connect the interrupt vector to the interrupt object dispatch
+ // code, establish the dispatcher address, and set the new
+ // interrupt mode and enable masks. Else if the interrupt is
+ // already chained, then add the new interrupt object at the end
+ // of the chain. If the interrupt vector is not chained, then
+ // start a chain with the previous interrupt object at the front
+ // of the chain. The interrupt mode of all interrupt objects in
+ // a chain must be the same.
+ //
+
+ if (Interrupt->Connected == FALSE) {
+ if ( PCR->InterruptRoutine[Vector] ==
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode) ) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ if (Interrupt->FloatingSave) {
+ Interrupt->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupt->Irql == Interrupt->SynchronizeIrql) {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+ } else {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interrupt->DispatchCode);
+
+ HalEnableSystemInterrupt(Vector, Irql, Interrupt->Mode);
+
+ } else {
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+
+ if (Interrupt->Mode == Interruptx->Mode) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ ASSERT (Irql <= KiSynchIrql);
+ if (Interruptx->DispatchAddress != KiChainedDispatch) {
+ InitializeListHead(&Interruptx->InterruptListEntry);
+ Interruptx->DispatchAddress = KiChainedDispatch;
+ }
+
+ InsertTailList(&Interruptx->InterruptListEntry,
+ &Interrupt->InterruptListEntry);
+ }
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set affinity back to the original value.
+ //
+
+ KeSetAffinityThread(KeGetCurrentThread(), Affinity);
+ }
+
+ //
+ // Return whether interrupt was connected to the specified vector.
+ //
+
+ return Connected;
+}
+
+BOOLEAN
+KeDisconnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function disconnects an interrupt object from the interrupt vector
+ specified by the interrupt object. If the interrupt object is not
+ connected, then a value of FALSE is returned. Else the specified interrupt
+ object is disconnected from the interrupt vector, the connected state is
+ set to FALSE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is not connected, then a value of FALSE is
+ returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ KAFFINITY Affinity;
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ PKINTERRUPT Interrupty;
+ KIRQL Irql;
+ KIRQL OldIrql;
+ ULONG Vector;
+
+ //
+ // Set affinity to the specified processor.
+ //
+
+ Affinity = KeSetAffinityThread(KeGetCurrentThread(),
+ (KAFFINITY)(1 << Interrupt->Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the interrupt object is connected, then disconnect it from the
+ // specified vector.
+ //
+
+ Connected = Interrupt->Connected;
+ if (Connected != FALSE) {
+ Irql = Interrupt->Irql;
+ Vector = Interrupt->Vector;
+
+ //
+ // If the specified interrupt vector is not connected to the chained
+ // interrupt dispatcher, then disconnect it by setting its dispatch
+ // address to the unexpected interrupt routine. Else remove the
+ // interrupt object from the interrupt chain. If there is only
+ // one entry remaining in the list, then reestablish the dispatch
+ // address.
+ //
+
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+
+ if (Interruptx->DispatchAddress == KiChainedDispatch) {
+ ASSERT (Irql <= KiSynchIrql);
+ if (Interrupt == Interruptx) {
+ Interruptx = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT, InterruptListEntry);
+ Interruptx->DispatchAddress = KiChainedDispatch;
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interruptx->DispatchCode);
+
+ }
+
+ RemoveEntryList(&Interrupt->InterruptListEntry);
+ Interrupty = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT,
+ InterruptListEntry);
+
+ if (Interruptx == Interrupty) {
+ if (Interrupty->FloatingSave) {
+ Interrupty->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupty->Irql == Interrupty->SynchronizeIrql) {
+ Interrupty->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+
+ } else {
+ Interrupty->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interrupty->DispatchCode);
+
+ }
+
+ } else {
+ HalDisableSystemInterrupt(Vector, Irql);
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+
+ KeSweepIcache(TRUE);
+ Interrupt->Connected = FALSE;
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set affinity back to the original value.
+ //
+
+ KeSetAffinityThread(KeGetCurrentThread(), Affinity);
+
+ //
+ // Return whether interrupt was disconnected from the specified vector.
+ //
+
+ return Connected;
+}
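
KeConnectInterrupt and KeDisconnectInterrupt above never store the KINTERRUPT pointer itself in PCR->InterruptRoutine[]; they store the address of the DispatchCode array embedded in the object and use CONTAINING_RECORD to get back to the owner. The fragment below demonstrates that idiom on a made-up structure; the macro is written with offsetof and only mirrors the spirit of the NT definition.

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as the NT CONTAINING_RECORD macro, expressed with offsetof. */
    #define CONTAINING_RECORD_EXAMPLE(address, type, field) \
        ((type *)((char *)(address) - offsetof(type, field)))

    /* Stand-in for a KINTERRUPT with an embedded dispatch-code array. */
    typedef struct _FAKE_INTERRUPT {
        int Vector;
        unsigned long DispatchCode[4];
    } FAKE_INTERRUPT;

    int main(void)
    {
        FAKE_INTERRUPT Interrupt = { 5, { 0, 0, 0, 0 } };

        /* The vector table only ever sees a pointer to the embedded code... */
        unsigned long *Routine = &Interrupt.DispatchCode[0];

        /* ...and the owner is recovered the way KeConnectInterrupt does it. */
        FAKE_INTERRUPT *Owner =
            CONTAINING_RECORD_EXAMPLE(Routine, FAKE_INTERRUPT, DispatchCode);

        printf("recovered vector = %d\n", Owner->Vector);    /* prints 5 */
        return 0;
    }
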
diff --git a/private/ntos/ke/alpha/intsup.s b/private/ntos/ke/alpha/intsup.s
new file mode 100644
index 000000000..0b03ae379
--- /dev/null
+++ b/private/ntos/ke/alpha/intsup.s
@@ -0,0 +1,899 @@
+// TITLE("Interrupt Object Support Routines")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// intsup.s
+//
+// Abstract:
+//
+// This module implements the code necessary to support interrupt objects.
+// It contains the interrupt dispatch code and the code template that gets
+// copied into an interrupt object.
+//
+// Author:
+//
+// David N. Cutler (davec) 2-Apr-1990
+// Joe Notarangelo 07-Apr-1992 (based on xxintsup.s by Dave Cutler)
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+ SBTTL( "Synchronize Execution" )
+//++
+//
+// BOOLEAN
+// KeSynchronizeExecution (
+// IN PKINTERRUPT Interrupt,
+// IN PKSYNCHRONIZE_ROUTINE SynchronizeRoutine,
+// IN PVOID SynchronizeContext
+// )
+//
+// Routine Description:
+//
+// This function synchronizes the execution of the specified routine with the
+// execution of the service routine associated with the specified interrupt
+// object.
+//
+// Arguments:
+//
+// Interrupt (a0) - Supplies a pointer to a control object of type interrupt.
+//
+// SynchronizeRoutine (a1) - Supplies a pointer to a function whose execution
+// is to be synchronized with the execution of the service routine associated
+// with the specified interrupt object.
+//
+// SynchronizeContext (a2) - Supplies a pointer to an arbitrary data structure
+// which is to be passed to the function specified by the SynchronizeRoutine
+// parameter.
+//
+// Return Value:
+//
+// The value returned by the SynchronizeRoutine function is returned as the
+// function value.
+//
+//--
+
+
+ .struct 0
+SyS0: .space 8 // saved integer register s0
+SyIrql: .space 4 // saved IRQL value
+ .space 4 // fill for alignment
+SyRa: .space 8 // saved return address
+SyA0: .space 8 // saved argument registers a0 - a2
+SyA1: .space 8 //
+SyA2: .space 8 //
+SyFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KeSynchronizeExecution, SyFrameLength, zero)
+
+ lda sp, -SyFrameLength(sp) // allocate stack frame
+ stq ra, SyRa(sp) // save return address
+ stq s0, SyS0(sp) // save integer register s0
+
+ PROLOGUE_END
+
+ stq a1, SyA1(sp) // save synchronization routine address
+ stq a2, SyA2(sp) // save synchronization routine context
+
+//
+// Raise IRQL to the synchronization level and acquire the associated
+// spin lock.
+//
+
+#if !defined(NT_UP)
+
+ ldl s0, InActualLock(a0) // get address of spin lock
+
+#endif
+
+ ldq_u t1, InSynchronizeIrql(a0)
+ extbl t1, InSynchronizeIrql % 8, a0 // get synchronization IRQL
+ SWAP_IRQL // raise irql
+ stl v0, SyIrql(sp) // save old irql
+
+#if !defined(NT_UP)
+
+10: ldl_l t0, 0(s0) // get current lock value
+ bis s0, zero, t1 // set lock ownership value
+ bne t0, 15f // if ne, spin lock owned
+ stl_c t1, 0(s0) // set spin lock owned
+ beq t1, 15f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+
+#endif
+
+//
+// Call specified routine passing the specified context parameter.
+//
+
+ ldl t5, SyA1(sp) // get synchronize routine address
+    ldq     a0, SyA2(sp)            // get synchronize routine context
+ jsr ra, (t5) // call routine
+//
+// Release spin lock, lower IRQL to its previous level, and return the value
+// returned by the specified routine.
+//
+
+#if !defined(NT_UP)
+
+ mb // synchronize all previous writes
+ // before the spinlock is released
+ stl zero, 0(s0) // set spin lock not owned
+
+#endif
+
+ ldl a0, SyIrql(sp) // get saved IRQL
+ extbl a0, 0, a0 // this is a uchar
+ bis v0, zero, s0 // save return value
+ SWAP_IRQL // lower IRQL to previous level
+ bis s0, zero, v0 // restore return value
+ ldq s0, SyS0(sp) // restore s0
+ ldq ra, SyRa(sp) // restore ra
+ lda sp, SyFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+15: ldl t0, 0(s0) // read current lock value
+ beq t0, 10b // if lock available, retry spinlock
+ br zero, 15b // spin in cache until lock available
+
+#endif
+
+ .end KeSynchronizeExecution
+
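
Stripped of the Alpha calling-convention details, KeSynchronizeExecution above is: raise IRQL to the interrupt object's SynchronizeIrql, acquire the interrupt's spin lock (MP builds only), call the supplied routine, release the lock, restore the previous IRQL, and return the routine's result. A free-standing C sketch of that shape follows; the IRQL and lock helpers are single-threaded stand-ins for SWAP_IRQL and the ldl_l/stl_c sequence, not the kernel primitives.

    #include <stdio.h>

    typedef unsigned char KIRQL_EXAMPLE;
    typedef volatile long SPINLOCK_EXAMPLE;
    typedef int (*SYNC_ROUTINE_EXAMPLE)(void *Context);

    typedef struct _INTERRUPT_EXAMPLE {
        KIRQL_EXAMPLE SynchronizeIrql;
        SPINLOCK_EXAMPLE *ActualLock;
    } INTERRUPT_EXAMPLE;

    static KIRQL_EXAMPLE CurrentIrqlExample;

    /* Single-threaded stand-ins for SWAP_IRQL and the spin-lock acquire/release. */
    static KIRQL_EXAMPLE SwapIrqlExample(KIRQL_EXAMPLE NewIrql)
    {
        KIRQL_EXAMPLE OldIrql = CurrentIrqlExample;
        CurrentIrqlExample = NewIrql;
        return OldIrql;
    }

    static void AcquireLockExample(SPINLOCK_EXAMPLE *Lock)
    {
        while (*Lock != 0) { }      /* real code uses ldl_l/stl_c and mb */
        *Lock = 1;
    }

    static void ReleaseLockExample(SPINLOCK_EXAMPLE *Lock)
    {
        *Lock = 0;
    }

    static int
    SynchronizeExecutionExample(
        INTERRUPT_EXAMPLE *Interrupt,
        SYNC_ROUTINE_EXAMPLE SynchronizeRoutine,
        void *SynchronizeContext
        )
    {
        KIRQL_EXAMPLE OldIrql;
        int Result;

        OldIrql = SwapIrqlExample(Interrupt->SynchronizeIrql);
        AcquireLockExample(Interrupt->ActualLock);

        Result = SynchronizeRoutine(SynchronizeContext);

        ReleaseLockExample(Interrupt->ActualLock);
        SwapIrqlExample(OldIrql);
        return Result;
    }

    static int SampleRoutine(void *Context)
    {
        printf("running at synchronize IRQL %u: %s\n",
               (unsigned)CurrentIrqlExample, (const char *)Context);
        return 1;
    }

    int main(void)
    {
        SPINLOCK_EXAMPLE Lock = 0;
        INTERRUPT_EXAMPLE Interrupt = { 5, &Lock };

        return SynchronizeExecutionExample(&Interrupt, SampleRoutine, "hello") ? 0 : 1;
    }
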
+ SBTTL( "Dispatch Chained Interrupt" )
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to more than one interrupt object. Its
+// function is to walk the list of connected interrupt objects and call
+// each interrupt service routine. If the mode of the interrupt is latched,
+// then a complete traversal of the chain must be performed. If any of the
+// routines require saving the volatile floating point machine state, then
+// it is only saved once.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s6/fp - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ChS0: .space 8 // saved integer registers s0 - s5
+ChS1: .space 8 //
+ChS2: .space 8 //
+ChS3: .space 8 //
+ChS4: .space 8 //
+ChS5: .space 8 //
+ChRa: .space 8 // saved return address
+ChIrql: .space 4 // saved IRQL value
+ChSpinL: .space 4 // address of spin lock
+ChFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiChainedDispatch, ChFrameLength, zero)
+
+ lda sp, -ChFrameLength(sp) // allocate stack frame
+ stq ra, ChRa(sp) // save return address
+ stq s0, ChS0(sp) // save integer registers s0 - s6
+ stq s1, ChS1(sp) //
+ stq s2, ChS2(sp) //
+ stq s3, ChS3(sp) //
+ stq s4, ChS4(sp) //
+ stq s5, ChS5(sp) //
+
+ PROLOGUE_END
+
+// usage:
+// s0 = address of listhead
+// s1 = address of current item in list
+// s2 = floating status saved flag
+// s3 = mode of interrupt
+// s4 = irql of interrupt source
+// s5 = synchronization level requested for current list item
+
+//
+// Initialize loop variables.
+//
+
+ addl a0, InInterruptListEntry, s0 // set address of listhead
+ bis s0, zero, s1 // set address of first entry
+ bis zero, zero, s2 // clear floating state saved flag
+ ldq_u t0, InMode(a0)
+ extbl t0, InMode % 8, s3 // get mode of interrupt
+ ldq_u t1, InIrql(a0)
+ extbl t1, InIrql % 8, s4 // get interrupt source IRQL
+
+//
+// Walk the list of connected interrupt objects and call the respective
+// interrupt service routines.
+//
+
+10: subl s1, InInterruptListEntry, a0 // compute intr object address
+ ldq_u t2, InFloatingSave(a0)
+ extbl t2, InFloatingSave % 8, t0 // get floating save flag
+ bne s2, 20f // if ne, floating state already saved
+ beq t0, 20f // if eq, don't save floating state
+
+//
+// Save volatile floating registers in trap frame.
+//
+
+ bsr ra, KiSaveVolatileFloatState
+
+ ldil s2, 1 // set floating state saved flag
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+20: ldq_u t1, InSynchronizeIrql(a0)
+ extbl t1, InSynchronizeIrql % 8, s5 // get synchronization IRQL
+ cmpeq s4, s5, t0 // synchronization = source level?
+ bne t0, 25f // if ne[true], IRQL levels are same
+ bis s5, zero, a0 // set synchronization IRQL
+ SWAP_IRQL
+ stl v0, ChIrql(sp) // save old IRQL
+ subq s1, InInterruptListEntry, a0 // recompute intr obj address
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+25: //
+
+#if !defined(NT_UP)
+
+ ldl t5, InActualLock(a0) // get address of spin lock
+30: ldl_l t1, 0(t5) // get current lock value
+ bis t5, zero, t2 // set ownership value
+ bne t1, 35f // if ne, spin lock owned
+ stl_c t2, 0(t5) // set spin lock owned
+ beq t2, 35f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+
+ stl t5, ChSpinL(sp) // save spin lock address
+
+#endif
+
+ ldl t5, InServiceRoutine(a0) // get address of service routine
+ ldl a1, InServiceContext(a0) // get service context
+ jsr ra, (t5)
+//
+// Release the service routine spin lock.
+//
+
+#if !defined(NT_UP)
+
+ ldl t5, ChSpinL(sp) // get address of spin lock
+ mb // synchronize all previous writes
+ // before the spinlock is released
+ stl zero, 0(t5) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the interrupt source level if synchronization level is not
+// the same as the interrupt source level.
+//
+
+ cmpeq s4, s5, t0 // synchronization = source level
+ bne t0, 37f // if ne[true], IRQL levels are same
+ bis s4, zero, a0 // set interrupt source IRQL
+ SWAP_IRQL // lower to interrupt source IRQL
+
+//
+// Get next list entry and check for end of loop.
+//
+
+37: ldl s1, LsFlink(s1) // get next interrupt object address
+ beq v0, 40f // if eq, interrupt not handled
+ beq s3, 50f // if eq, level sensitive interrupt
+40: cmpeq s0, s1, t0 // s0 = s1?
+ beq t0, 10b // if eq[false], not end of list
+
+//
+// Either the interrupt is level sensitive and has been handled or the end of
+// the interrupt object chain has been reached. Check to determine if floating
+// machine state needs to be restored.
+//
+
+50: beq s2, 60f // if eq, floating state not saved
+
+//
+// Restore volatile floating registers from trap frame.
+//
+
+ bsr ra, KiRestoreVolatileFloatState
+
+//
+// Restore integer registers s0 - s5, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+60: ldq s0, ChS0(sp) // restore integer registers s0 - s5
+ ldq s1, ChS1(sp) //
+ ldq s2, ChS2(sp) //
+ ldq s3, ChS3(sp) //
+ ldq s4, ChS4(sp) //
+ ldq s5, ChS5(sp) //
+
+ ldq ra, ChRa(sp) // restore return address
+ lda sp, ChFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+35: ldl t1, 0(t5) // read current lock value
+ beq t1, 30b // if lock available, retry spinlock
+ br zero, 35b // spin in cache until lock available
+
+#endif
+ .end KiChainedDispatch
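+
+// The chain traversal above corresponds roughly to the following C sketch.
+// The field names mirror the In* offsets used in this module; the spin lock
+// and floating state helpers are assumed prototypes, and the sketch ignores
+// the trap frame argument.
+//
+// VOID
+// ChainedDispatchSketch (
+//     IN PKINTERRUPT Interrupt
+//     )
+// {
+//     PLIST_ENTRY ListHead = &Interrupt->InterruptListEntry;
+//     PLIST_ENTRY Entry = ListHead;
+//     BOOLEAN FloatSaved = FALSE;
+//     BOOLEAN Handled = FALSE;
+//     KIRQL OldIrql;
+//
+//     do {
+//         PKINTERRUPT Current = CONTAINING_RECORD(Entry,
+//                                                 KINTERRUPT,
+//                                                 InterruptListEntry);
+//
+//         if ((FloatSaved == FALSE) && (Current->FloatingSave != FALSE)) {
+//             KiSaveVolatileFloatState();             // assumed prototype
+//             FloatSaved = TRUE;
+//         }
+//
+//         if (Current->SynchronizeIrql != Interrupt->Irql) {
+//             KeRaiseIrql(Current->SynchronizeIrql, &OldIrql);
+//         }
+//
+//         KiAcquireSpinLock(Current->ActualLock);     // assumed helper
+//         Handled = Current->ServiceRoutine(Current, Current->ServiceContext);
+//         KiReleaseSpinLock(Current->ActualLock);     // assumed helper
+//
+//         if (Current->SynchronizeIrql != Interrupt->Irql) {
+//             KeLowerIrql(Interrupt->Irql);
+//         }
+//
+//         Entry = Entry->Flink;
+//     } while (((Handled == FALSE) || (Interrupt->Mode == Latched)) &&
+//              (Entry != ListHead));
+//
+//     if (FloatSaved != FALSE) {
+//         KiRestoreVolatileFloatState();              // assumed prototype
+//     }
+// }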
+
+ SBTTL( "Floating Dispatch" )
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to save the volatile floating machine state and then call the specified
+// interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s6/fp - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+FlS0: .space 8 // saved integer registers s0 - s1
+FlS1: .space 8 //
+FlIrql: .space 4 // saved IRQL value
+ .space 4 // for alignment
+FlRa: .space 8 // saved return address
+FlFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiFloatingDispatch, FlFrameLength, zero)
+
+ lda sp, -FlFrameLength(sp) // allocate stack frame
+ stq ra, FlRa(sp) // save return address
+ stq s0, FlS0(sp) // save integer registers s0 - s1
+
+#if !defined(NT_UP)
+
+ stq s1, FlS1(sp) //
+
+#endif
+
+ PROLOGUE_END
+
+//
+// Save volatile floating registers f0 - f19 in trap frame.
+//
+
+ bsr ra, KiSaveVolatileFloatState
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+ bis a0, zero, s0 // save address of interrupt object
+ ldq_u t2, InSynchronizeIrql(s0)
+ extbl t2, InSynchronizeIrql % 8, a0 // get synchronization IRQL
+ ldq_u t3, InIrql(s0)
+ extbl t3, InIrql % 8, t0 // get interrupt source IRQL
+ cmpeq a0, t0, t1 // synchronize = source IRQL ?
+ bne t1, 10f // if ne[true], IRQL levels are the same
+ SWAP_IRQL
+ stl v0, FlIrql(sp) // save old irql
+10: bis s0, zero, a0 // restore address of intr object
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if !defined(NT_UP)
+
+ ldl s1, InActualLock(a0) // get address of spin lock
+20: ldl_l t1, 0(s1) // get current lock value
+ bis s1, s1, t2 // set ownership value
+ bne t1, 25f // if ne, spin lock owned
+ stl_c t2, 0(s1) // set spin lock owned
+ beq t2, 25f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+
+#endif
+
+ ldl t5, InServiceRoutine(a0) // get address of service routine
+ ldl a1, InServiceContext(a0) // get service context
+ jsr ra, (t5) // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if !defined(NT_UP)
+
+ mb // synchronize all previous writes
+ // before the spinlock is released
+ stl zero, 0(s1) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the interrupt source level if synchronization level is not
+// the same as the interrupt source level.
+//
+
+ ldq_u t3, InIrql(s0)
+ extbl t3, InIrql % 8, a0 // get interrupt source IRQL
+ ldq_u t4, InSynchronizeIrql(s0)
+ extbl t4, InSynchronizeIrql % 8, t0 // get synchronization IRQL
+ cmpeq a0, t0, t1 // synchronize = source IRQL?
+ bne t1, 30f // if ne[true], IRQL levels are the same
+ SWAP_IRQL // lower to interrupt source IRQL
+
+//
+// Restore volatile floating registers f0 - f19 from trap frame.
+//
+
+30: bsr ra, KiRestoreVolatileFloatState
+
+//
+// Restore integer registers s0 - s1, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ ldq s0, FlS0(sp) // restore integer registers s0 - s1
+
+#if !defined(NT_UP)
+
+ ldq s1, FlS1(sp) //
+
+#endif
+
+ ldq ra, FlRa(sp) // restore return address
+ lda sp, FlFrameLength(sp) // deallocate stack frame
+ ret zero, (ra)
+
+#if !defined(NT_UP)
+
+25: ldl t1, 0(s1) // read current lock value
+ beq t1, 20b // if lock available, retry spinlock
+ br zero, 25b // spin in cache until lock available
+
+#endif
+
+ .end KiFloatingDispatch
+
+ SBTTL( "Interrupt Dispatch - Raise IRQL" )
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// N.B. This routine raises the interrupt level to the synchronization
+// level specified in the interrupt object.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s6/fp - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space 8 // insure octaword alignment
+RdS0: .space 8 // saved integer registers s0
+RdIrql: .space 4 // saved IRQL value
+ .space 4 // for alignment
+RdRa: .space 8 // saved return address
+RdFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiInterruptDispatchRaise, RdFrameLength, zero)
+
+ lda sp, -RdFrameLength(sp) // allocate stack frame
+ stq ra, RdRa(sp) // save return address
+ stq s0, RdS0(sp) // save integer registers s0
+
+ PROLOGUE_END
+
+
+//
+// Raise IRQL to synchronization level
+//
+
+ bis a0, zero, s0 // save address of interrupt object
+ ldq_u t3, InSynchronizeIrql(s0) // get synchronization IRQL
+ extbl t3, InSynchronizeIrql % 8, a0
+ SWAP_IRQL
+ stl v0, RdIrql(sp) // save old irql
+10: bis s0, zero, a0 // restore address of intr object
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if !defined(NT_UP)
+
+ ldl t3, InActualLock(a0)
+20: ldl_l t1, 0(t3) // get current lock value
+ bis t3, t3, t2 // set lock ownership value
+ bne t1, 25f // if ne, spin lock owned
+ stl_c t2, 0(t3) // set spin lock owned
+ beq t2, 25f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+
+#endif
+
+ ldl t5, InServiceRoutine(a0) // get address of service routine
+ ldl a1, InServiceContext(a0) // get service context
+ jsr ra, (t5) // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if !defined(NT_UP)
+
+ ldl t2, InActualLock(s0)
+ mb // synchronize all previous writes
+ // before the spinlock is released
+ stl zero, 0(t2) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the previous level.
+//
+ ldl a0, RdIrql(sp) // get previous IRQL
+ SWAP_IRQL // lower to interrupt source IRQL
+
+//
+// Restore integer register s0, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+30: ldq s0, RdS0(sp) // restore integer register s0
+
+ ldq ra, RdRa(sp) // restore return address
+ lda sp, RdFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+
+25: ldl t1, 0(t3) // read current lock value
+ beq t1, 20b // if lock available, retry spinlock
+ br zero, 25b // spin in cache until lock available
+
+#endif
+
+ .end KiInterruptDispatchRaise
+
+ SBTTL( "Interrupt Dispatch - Same IRQL" )
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s6/fp - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+#if defined(NT_UP)
+ LEAF_ENTRY(KiInterruptDispatchSame)
+
+ ldl t5, InServiceRoutine(a0) // get address of service routine
+ ldl a1, InServiceContext(a0) // get service context
+ jsr zero, (t5) // jump to service routine
+#else
+ .struct 0
+ .space 8 // insure octaword alignment
+SdS0: .space 8 // saved integer registers s0
+SdIrql: .space 4 // saved IRQL value
+ .space 4 // for alignment
+SdRa: .space 8 // saved return address
+SdFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiInterruptDispatchSame, SdFrameLength, zero)
+
+ lda sp, -SdFrameLength(sp) // allocate stack frame
+ stq ra, SdRa(sp) // save return address
+ stq s0, SdS0(sp) // save integer registers s0
+
+ PROLOGUE_END
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+ ldl t3, InActualLock(a0)
+ bis a0, zero, s0 // save interrupt object
+20: ldl_l t1, 0(t3) // get current lock value
+ bis t3, t3, t2 // set lock ownership value
+ bne t1, 25f // if ne, spin lock owned
+ stl_c t2, 0(t3) // set spin lock owned
+ beq t2, 25f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+
+ ldl t5, InServiceRoutine(a0) // get address of service routine
+ ldl a1, InServiceContext(a0) // get service context
+ jsr ra, (t5) // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+ ldl t2, InActualLock(s0)
+ mb // synchronize all previous writes
+ // before the spinlock is released
+ stl zero, 0(t2) // set spin lock not owned
+
+//
+// Restore integer registers s0, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+30: ldq s0, SdS0(sp) // restore integer register s0
+
+ ldq ra, SdRa(sp) // restore return address
+ lda sp, SdFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+
+25: ldl t1, 0(t3) // read current lock value
+ beq t1, 20b // if lock available, retry spinlock
+ br zero, 25b // spin in cache until lock available
+
+#endif
+
+ .end KiInterruptDispatchSame
+
+ SBTTL( "Interrupt Template" )
+//++
+//
+// Routine Description:
+//
+// This routine is a template that is copied into each interrupt object. Its
+// function is to determine the address of the respective interrupt object
+// and then transfer control to the appropriate interrupt dispatcher.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt template within an interrupt
+// object.
+//
+// s6/fp - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiInterruptTemplate)
+
+ .set noreorder
+ .set noat
+ ldl t5, InDispatchAddress - InDispatchCode(a0) // get dispatcher adr
+ subl a0, InDispatchCode, a0 // compute address of interrupt object
+ jmp zero, (t5) // transfer to dispatch routine
+ bis zero, zero, zero // nop for alignment
+ .set at
+ .set reorder
+
+ .end KiInterruptTemplate
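+
+// In C terms, the address arithmetic above recovers the interrupt object
+// from the address of the template that was copied into it, and then
+// indirects through the stored dispatch address. TemplateAddress stands
+// for the entry address supplied in a0, and the dispatcher type name is
+// an assumption for illustration only.
+//
+//     typedef VOID (*PINTERRUPT_DISPATCHER)(PKINTERRUPT);  // assumed type
+//
+//     PKINTERRUPT Interrupt;
+//     PINTERRUPT_DISPATCHER Dispatcher;
+//
+//     Interrupt = (PKINTERRUPT)((PUCHAR)TemplateAddress -
+//                               FIELD_OFFSET(KINTERRUPT, DispatchCode));
+//     Dispatcher = (PINTERRUPT_DISPATCHER)Interrupt->DispatchAddress;
+//     Dispatcher(Interrupt);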
+
+ SBTTL( "Disable Interrupts" )
+//++
+//
+// Routine Description:
+//
+// This routine disables interrupts on the current processor and
+// returns the previous state of the interrupt enable bit.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// A boolean value, if true interrupts were previously turned on,
+// false indicates interrupts were previously off.
+//
+//--
+
+ LEAF_ENTRY(KiDisableInterrupts)
+
+ GET_CURRENT_PROCESSOR_STATUS_REGISTER // v0 = current PSR
+ DISABLE_INTERRUPTS // disable all interrupts
+ and v0, PSR_IE_MASK, v0 // isolate interrupt enable
+ srl v0, PSR_IE, v0 // shift to bit 0
+ ret zero, (ra)
+
+ .end KiDisableInterrupts
+
+ SBTTL( "Restore Interrupts" )
+//++
+//
+// Routine Description:
+//
+// This routine enables interrupts according to the previous
+// interrupt enable passed as input.
+//
+// Arguments:
+//
+// a0 - Supplies previous interrupt enable state (returned by
+// KiDisableInterrupts)
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRestoreInterrupts)
+
+ beq a0, 10f // if eq, then interrupts disabled
+ ENABLE_INTERRUPTS
+ ret zero, (ra)
+
+10:
+ DISABLE_INTERRUPTS
+ ret zero, (ra)
+
+ .end KiRestoreInterrupts
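+
+// Together the two routines above form a save/restore pair for the
+// processor interrupt enable bit. A C-level usage sketch (the caller and
+// the critical section shown are illustrative only):
+//
+//     BOOLEAN Enabled;
+//
+//     Enabled = KiDisableInterrupts();        // capture previous IE state
+//
+//     /* work that must run with interrupts disabled */
+//
+//     KiRestoreInterrupts(Enabled);           // re-enable only if it was on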
+
+ SBTTL( "Unexpected Interrupts" )
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is not connected to an interrupt object. Its function
+// is to report the error and dismiss the interrupt.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// N.B. - This routine relies upon a private convention with the
+// interrupt exception dispatcher that register t12 contains the
+// interrupt vector of the unexpected interrupt. This convention
+// will only work if the first level dispatch causes the
+// unexpected interrupt.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+// t12 - Supplies the interrupt vector.
+//
+// s6/fp - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space 8 // filler for 16 byte alignment
+UiRa: .space 8 // return address
+UiFrameLength:
+
+ NESTED_ENTRY(KiUnexpectedInterrupt, UiFrameLength, zero)
+
+ lda sp, -UiFrameLength(sp) // allocate stack frame
+ stq ra, UiRa(sp) // save return address
+
+ PROLOGUE_END //
+
+ ldil a0, 0xfacefeed // ****** temp ******
+ bis t12, zero, a1 // pass interrupt vector
+ bis zero, zero, a2 // zero remaining parameters
+ bis zero, zero, a3 //
+ bis zero, zero, a4 //
+ bis zero, zero, a5 //
+
+ bsr ra, KeBugCheckEx // perform system crash
+
+ .end KiUnexpectedInterrupt
+
+
+ SBTTL( KiPassiveRelease )
+//++
+//
+// Routine Description:
+//
+// KiPassiveRelease passively releases an interrupt that cannot or will
+// not be serviced at this time; no service action is required, so the
+// routine simply returns.
+//
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiPassiveRelease )
+
+ ret zero, (ra)
+
+ .end KiPassiveRelease
+
+
diff --git a/private/ntos/ke/alpha/ipi.c b/private/ntos/ke/alpha/ipi.c
new file mode 100644
index 000000000..e717a57ee
--- /dev/null
+++ b/private/ntos/ke/alpha/ipi.c
@@ -0,0 +1,179 @@
+/*++
+
+Copyright (c) 1993 Microsoft Corporation
+Copyright (c) 1993 Digital Equipment Corporation
+
+Module Name:
+
+ ipi.c
+
+Abstract:
+
+ This module implements Alpha AXP-specific interprocessor interrupt
+ routines.
+
+Author:
+
+ David N. Cutler 24-Apr-1993
+ Joe Notarangelo 29-Nov-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiRestoreProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the current
+ processor context structure in the processor block to the
+ specified trap and exception frames.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from the processor context structure
+ // to the specified trap and exception frames
+ //
+
+#if !defined(NT_UP)
+
+ Prcb = KeGetCurrentPrcb();
+ KeContextToKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame,
+ CONTEXT_FULL,
+ KernelMode);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSaveProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the specified trap
+ and exception frames to the processor context structure in the current
+ processor block.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from specified trap and exception
+ // frames to the current processor context structure.
+ //
+
+#if !defined(NT_UP)
+
+ Prcb = KeGetCurrentPrcb();
+ Prcb->ProcessorState.ContextFrame.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame);
+
+#endif
+
+ return;
+}
+
+BOOLEAN
+KiIpiServiceRoutine (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+
+ This function is called at IPI_LEVEL to process any outstanding
+ interprocess request for the current processor.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame
+
+Return Value:
+
+ A value of TRUE is returned if one or more requests were serviced.
+ Otherwise, FALSE is returned.
+
+--*/
+
+{
+
+ ULONG RequestSummary;
+
+ //
+ // Process any outstanding interprocessor requests.
+ //
+
+ RequestSummary = KiIpiProcessRequests();
+
+ //
+ // If freeze is requested, then freeze target execution.
+ //
+
+ if ((RequestSummary & IPI_FREEZE) != 0) {
+ KiFreezeTargetExecution(TrapFrame, ExceptionFrame);
+ }
+
+ //
+ // Return whether any requests were processed.
+ //
+
+ return (RequestSummary & ~IPI_FREEZE) != 0;
+}
diff --git a/private/ntos/ke/alpha/irql.s b/private/ntos/ke/alpha/irql.s
new file mode 100644
index 000000000..0c1911fcf
--- /dev/null
+++ b/private/ntos/ke/alpha/irql.s
@@ -0,0 +1,101 @@
+// TITLE("Manipulate Interrupt Request Level")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+// Copyright (c) 1992 Microsoft Corporation
+//
+// Module Name:
+//
+// irql.s
+//
+// Abstract:
+//
+// This module implements the code necessary to lower and raise the current
+// Interrupt Request Level (IRQL).
+//
+//
+// Author:
+//
+// David N. Cutler (davec) 12-Aug-1990
+// Joe Notarangelo 06-Apr-1992
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+//++
+//
+// VOID
+// KeLowerIrql (
+// KIRQL NewIrql
+// )
+//
+// Routine Description:
+//
+// This function lowers the current IRQL to the specified value.
+//
+// Arguments:
+//
+// NewIrql (a0) - Supplies the new IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeLowerIrql)
+
+ SWAP_IRQL // a0 = new, on return v0 = old irql
+ ret zero, (ra) // return
+
+ .end KeLowerIrql
+
+//++
+//
+// VOID
+// KeRaiseIrql (
+// KIRQL NewIrql,
+// PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to the specified value and returns
+// the old IRQL value.
+//
+// Arguments:
+//
+// NewIrql (a0) - Supplies the new IRQL value.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the old
+// IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeRaiseIrql)
+
+ bis a1, zero, t0 // save pointer to old irql
+ SWAP_IRQL // a0 = new, on return v0 = old irql
+
+ ldq_u t1, 0(t0) // get quadword around old irql
+ bic t0, 0x3, t3 // get containing longword address
+ insbl v0, t0, t2 // put destination byte into position
+ mskbl t1, t0, t1 // clear destination byte
+ bis t1, t2, t1 // merge destination byte
+ extll t1, t3, t1 // get appropriate longword
+ stl t1, 0(t3) // store longword containing the byte
+
+ ret zero, (ra) // return
+
+ .end KeRaiseIrql
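+
+// The ldq_u/insbl/mskbl/extll sequence above emulates a byte store, since
+// the base Alpha architecture provides no byte store instruction. In C
+// terms the intended effect is simply *OldIrql = PreviousIrql, where
+// PreviousIrql stands for the old IRQL returned in v0. A longword-level
+// model of what the code actually does (assuming 32-bit pointers, as on
+// this port) is:
+//
+//     ULONG *Aligned = (ULONG *)((ULONG)OldIrql & ~3);
+//     ULONG Shift = ((ULONG)OldIrql & 3) * 8;
+//
+//     *Aligned = (*Aligned & ~((ULONG)0xff << Shift)) |
+//                ((ULONG)PreviousIrql << Shift);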
diff --git a/private/ntos/ke/alpha/miscs.s b/private/ntos/ke/alpha/miscs.s
new file mode 100644
index 000000000..d95f292ef
--- /dev/null
+++ b/private/ntos/ke/alpha/miscs.s
@@ -0,0 +1,363 @@
+// TITLE("Miscellaneous Kernel Functions")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// miscs.s
+//
+// Abstract:
+//
+// This module implements machine dependent miscellaneous kernel functions.
+// Functions are provided to request a software interrupt, continue thread
+// execution, and perform last chance exception processing.
+//
+// Author:
+//
+// David N. Cutler (davec) 31-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+// Thomas Van Baak (tvb) 29-Jul-1992
+//
+// Adapted for Alpha AXP.
+//
+//--
+
+#include "ksalpha.h"
+
+ SBTTL("Request Software Interrupt")
+//++
+//
+// VOID
+// KiRequestSoftwareInterrupt (
+// KIRQL RequestIrql
+// )
+//
+// Routine Description:
+//
+// This function requests a software interrupt at the specified IRQL
+// level.
+//
+// Arguments:
+//
+// RequestIrql (a0) - Supplies the requested IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRequestSoftwareInterrupt)
+
+ //
+ // If we are already in an interrupt routine, do not
+ // request an interrupt from the PAL. Indicate the
+ // interrupt has been requested in the PRCB and the
+ // interrupt exit code will dispatch the software
+ // interrupt directly.
+ //
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = PRCB
+
+ ldl t0, PbInterruptActive(v0)
+ beq t0, 10f // no interrupt active, request
+ // interrupt from PAL
+ blbs a0, 10f // APC interrupt requested, use PAL
+ stl a0, PbSoftwareInterrupts(v0) // set interrupt request bit in PRCB
+ ret zero, (ra)
+
+10:
+ REQUEST_SOFTWARE_INTERRUPT
+
+ ret zero, (ra) // return
+
+ .end KiRequestSoftwareInterrupt
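+
+// The decision above can be summarized in C as follows. The PRCB field
+// names mirror the Pb* offsets used in this module, the PAL request is
+// modelled as an assumed helper, and the real code tests the low bit of
+// the IRQL rather than comparing against APC_LEVEL.
+//
+// VOID
+// RequestSoftwareInterruptSketch (
+//     IN KIRQL RequestIrql
+//     )
+// {
+//     PKPRCB Prcb = KeGetCurrentPrcb();
+//
+//     if ((Prcb->InterruptActive != 0) && (RequestIrql != APC_LEVEL)) {
+//
+//         //
+//         // An interrupt is in progress; record the request in the PRCB
+//         // and let the interrupt exit path dispatch it directly.
+//         //
+//
+//         Prcb->SoftwareInterrupts = RequestIrql;
+//
+//     } else {
+//         PalRequestSoftwareInterrupt(RequestIrql);   // assumed helper
+//     }
+// }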
+
+ SBTTL("Continue Execution System Service")
+//++
+//
+// NTSTATUS
+// NtContinue (
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN TestAlert
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to continue execution after
+// an exception has occurred. Its function is to transfer information from
+// the specified context record into the trap frame that was built when the
+// system service was executed, and then exit the system as if an exception
+// had occurred.
+//
+// Arguments:
+//
+// ContextRecord (a0) - Supplies a pointer to a context record.
+//
+// TestAlert (a1) - Supplies a boolean value that specifies whether alert
+// should be tested for the previous processor mode.
+//
+// N.B. Register fp is assumed to contain the address of a trap frame.
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record is misaligned or is not accessible, then the appropriate
+// status code is returned.
+//
+//--
+
+ NESTED_ENTRY(NtContinue, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate exception frame
+ stq ra, ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the nonvolatile machine state so that it can be restored by exception
+// exit if it is not overwritten by the specified context record.
+//
+
+ stq s0, ExIntS0(sp) // save nonvolatile integer state
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+
+ stt f2, ExFltF2(sp) // save nonvolatile floating state
+ stt f3, ExFltF3(sp) //
+ stt f4, ExFltF4(sp) //
+ stt f5, ExFltF5(sp) //
+ stt f6, ExFltF6(sp) //
+ stt f7, ExFltF7(sp) //
+ stt f8, ExFltF8(sp) //
+ stt f9, ExFltF9(sp) //
+
+//
+// Transfer information from the context frame to the exception and trap
+// frames.
+//
+
+ mov a1, s0 // preserve test alert argument in s0
+ mov sp, a1 // set address of exception frame
+ mov fp, a2 // set address of trap frame
+ bsr ra, KiContinue // transfer context to kernel frames
+
+//
+// If the kernel continuation routine returns success, then exit via the
+// exception exit code. Otherwise return to the system service dispatcher.
+//
+
+ bne v0, 20f // if ne, transfer failed
+
+//
+// Check to determine if alert should be tested for the previous processor
+// mode and restore the previous mode in the thread object.
+//
+
+ GET_CURRENT_THREAD // (PALcode) result in v0
+
+ ldl t3, TrTrapFrame(fp) // get old trap frame address
+ ldl t2, TrPreviousMode(fp) // get old previous mode
+ LoadByte(a0, ThPreviousMode(v0)) // get current previous mode
+ stl t3, ThTrapFrame(v0) // restore old trap frame address
+ StoreByte( t2, ThPreviousMode(v0) ) // restore old previous mode
+ beq s0, 10f // if eq, don't test for alert
+
+ bsr ra, KeTestAlertThread // test alert for current thread
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+10: br zero, KiExceptionExit // finish in exception exit
+
+//
+// Context record is misaligned or not accessible.
+//
+
+20: ldq ra, ExIntRa(sp) // restore return address
+ lda sp, ExceptionFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+ .end NtContinue
+
+ SBTTL("Raise Exception System Service")
+//++
+//
+// NTSTATUS
+// NtRaiseException (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN FirstChance
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to raise an exception.
+// The exception can be raised as a first or second chance exception.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// ContextRecord (a1) - Supplies a pointer to a context record.
+//
+// FirstChance (a2) - Supplies a boolean value that determines whether
+// this is the first (TRUE) or second (FALSE) chance for dispatching
+// the exception.
+//
+// N.B. Register fp is assumed to contain the address of a trap frame.
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record or exception record is misaligned or is not accessible,
+// then the appropriate status code is returned.
+//
+//--
+
+ NESTED_ENTRY(NtRaiseException, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate exception frame
+ stq ra, ExIntRa(sp) // save return address
+ stq s0, ExIntS0(sp) // save S0 and S1 in the prologue
+ stq s1, ExIntS1(sp) // so that Get/SetContext can find
+ // the right ones.
+ PROLOGUE_END
+
+//
+// Save the nonvolatile machine state so that it can be restored by exception
+// exit if it is not overwritten by the specified context record.
+//
+
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+
+ stt f2, ExFltF2(sp) // save nonvolatile floating state
+ stt f3, ExFltF3(sp) //
+ stt f4, ExFltF4(sp) //
+ stt f5, ExFltF5(sp) //
+ stt f6, ExFltF6(sp) //
+ stt f7, ExFltF7(sp) //
+ stt f8, ExFltF8(sp) //
+ stt f9, ExFltF9(sp) //
+
+//
+// Call the raise exception kernel routine which will marshall the arguments
+// and then call the exception dispatcher.
+//
+// KiRaiseException requires five arguments: the first two are the same as
+// the first two of this function and the other three are set here.
+//
+
+ mov a2, a4 // set first chance argument
+ mov sp, a2 // set address of exception frame
+ mov fp, a3 // set address of trap frame
+ bsr ra, KiRaiseException // call raise exception routine
+
+//
+// If the raise exception routine returns success, then exit via the exception
+// exit code. Otherwise return to the system service dispatcher.
+//
+ bis v0, zero, t0 // save v0
+ ldl t1, TrTrapFrame(fp) // get old trap frame address
+ GET_CURRENT_THREAD // get current thread in v0
+
+ bne t0, 10f // if ne, dispatch not successful
+ stl t1, ThTrapFrame(v0) // restore old trap frame address
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+ br zero, KiExceptionExit // finish in exception exit
+
+//
+// The context or exception record is misaligned or not accessible, or the
+// exception was not handled.
+//
+
+10:
+ ldq ra, ExIntRa(sp) // restore return address
+ lda sp, ExceptionFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+ .end NtRaiseException
+
+
+ SBTTL("Instruction Memory Barrier")
+//++
+//
+// VOID
+// KiImb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This routine is called to flush the instruction cache on the
+// current processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiImb )
+
+ IMB // flush the icache via PALcode
+
+ ret zero, (ra) // return
+
+ .end KiImb
+
+
+ SBTTL("Memory Barrier")
+//++
+//
+// VOID
+// KiMb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This routine is called to issue a memory barrier on the current
+// processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiMb )
+
+ mb // memory barrier
+
+ ret zero, (ra) // return
+
+ .end KiMb
+
diff --git a/private/ntos/ke/alpha/mpipi.s b/private/ntos/ke/alpha/mpipi.s
new file mode 100644
index 000000000..95e65f477
--- /dev/null
+++ b/private/ntos/ke/alpha/mpipi.s
@@ -0,0 +1,546 @@
+// TITLE("Interprocessor Interrupt support routines")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+// Copyright (c) 1993 Digital Equipment Corporation
+//
+// Module Name:
+//
+// mpipi.s
+//
+// Abstract:
+//
+// This module implements the Alpha AXP specific functions required to
+// support multiprocessor systems.
+//
+// Author:
+//
+// David N. Cutler (davec) 22-Apr-1993
+// Joe Notarangelo 29-Nov-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+ SBTTL("Interprocess Interrupt Processing")
+//++
+//
+// VOID
+// KeIpiInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// );
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interprocessor interrupt.
+// Its function is to process all interprocessor immediate and packet
+// requests.
+//
+// Arguments:
+//
+// TrapFrame (fp/s6) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KeIpiInterrupt, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate exception frame
+ stq ra, ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Process all interprocessor requests.
+//
+
+ bsr ra, KiIpiProcessRequests // process requests
+ and v0, IPI_FREEZE, t0 // check if freeze is requested
+ beq t0, 10f // if eq, no freeze requested
+
+//
+// Save the volatile floating state, the nonvolatile floating state,
+// and the nonvolatile integer state.
+//
+
+ bsr ra, KiSaveVolatileFloatState // save volatile float in trap
+
+ bsr ra, KiSaveNonVolatileFloatState // save nv float in exception
+
+ stq s0, ExIntS0(sp) // save nonvolatile integer state
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+ stq fp, ExIntFp(sp) //
+
+//
+// Freeze the execution of the current processor.
+//
+
+ bis fp, zero, a0 // set address of trap frame
+ bis sp, zero, a1 // set address of exception frame
+ bsr ra, KiFreezeTargetExecution // freeze current processor
+
+//
+// Restore the volatile floating state, the nonvolatile floating state,
+// and the nonvolatile integer state.
+//
+
+ ldq s0, ExIntS0(sp) // restore nonvolatile integer state
+ ldq s1, ExIntS1(sp) //
+ ldq s2, ExIntS2(sp) //
+ ldq s3, ExIntS3(sp) //
+ ldq s4, ExIntS4(sp) //
+ ldq s5, ExIntS5(sp) //
+ ldq fp, ExIntFp(sp) //
+
+ bsr ra, KiRestoreVolatileFloatState // restore volatile float
+
+ bsr ra, KiRestoreNonVolatileFloatState // restore nv float state
+
+
+//
+// Cleanup and return to the caller.
+//
+
+10:
+ ldq ra, ExIntRa(sp) // restore return address
+ lda sp, ExceptionFrameLength(sp) // deallocate exception frame
+ ret zero, (ra) // return
+
+ .end KeIpiInterrupt
+
+ SBTTL("Processor Request")
+//++
+//
+// ULONG
+// KiIpiProcessRequests (
+// VOID
+// );
+//
+// Routine Description:
+//
+// This routine processes interprocessor requests and returns a summary
+// of the requests that were processed.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The request summary is returned as the function value.
+//
+//--
+
+ .struct 0
+PrS0: .space 8 // saved integer register s0
+PrS1: .space 8 // saved integer register s1
+ .space 8 // fill
+PrRa: .space 8 // saved return address
+PrFrameLength:
+
+ NESTED_ENTRY(KiIpiProcessRequests, PrFrameLength, zero)
+
+ lda sp, -PrFrameLength(sp) // allocate stack frame
+ stq s0, PrS0(sp) // save integer register s0
+
+
+#if NT_INST
+
+ stq s1, PrS1(sp) // save integer register s1
+
+#endif
+
+ stq ra, PrRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Read request summary and write a zero result interlocked.
+//
+ mb // get consistent view of memory
+
+ GET_PROCESSOR_CONTROL_REGION_BASE // v0 = Pcr base address
+ ldl t0, PcPrcb(v0) // get current processor block address
+
+#if NT_INST
+
+ ldl s1, PbIpiCounts(t0) // get interrupt count structure
+
+#endif
+
+10:
+ ldq_l s0, PbRequestSummary(t0) // get request summary and entry address
+ bis zero, zero, t1 // set zero value for store
+ stq_c t1, PbRequestSummary(t0) // zero request summary, conditionally
+ beq t1, 15f // if eq, store conditional failed
+ sra s0, 32, a0 // shift out entry address
+
+//
+// Check for Packet ready.
+//
+// If a packet is ready, then get the address of the requested function
+// and call the function passing the address of the packet address as a
+// parameter.
+//
+
+ and s0, IPI_PACKET_READY, t2 // check for packet ready
+ beq t2, 20f // if eq, no packet ready
+ ldl t2, PbWorkerRoutine(a0) // get address of worker function
+ ldl a1, PbCurrentPacket(a0) // get request parameters
+ ldl a2, PbCurrentPacket +4(a0)
+ ldl a3, PbCurrentPacket +8(a0)
+ jsr ra, (t2) // call worker routine
+
+ mb
+
+#if NT_INST
+
+ ldl t1, IcPacket(s1) // increment number of packet requests
+ addl t1, 1, t1 //
+ stl t1, IcPacket(s1) //
+
+#endif
+
+//
+// Check for APC interrupt request.
+//
+// If an APC interrupt is requested, then request a software interrupt at
+// APC level on the current processor.
+//
+
+20:
+ and s0, IPI_APC, t1 // check for APC interrupt request
+ beq t1, 30f // if eq no APC interrupt requested
+ ldil a0, APC_LEVEL // set interrupt request level
+
+ REQUEST_SOFTWARE_INTERRUPT // request APC interrupt
+
+#if NT_INST
+
+ ldl t1, IcAPC(s1) // increment number of APC requests
+ addl t1, 1, t1 //
+ stl t1, IcAPC(s1) //
+
+#endif
+
+//
+// Check for DPC interrupt request.
+//
+// If a DPC interrupt is requested, then request a software interrupt at
+// DPC level on the current processor.
+//
+
+30:
+ and s0, IPI_DPC, t1 // check for DPC interrupt request
+ beq t1, 40f // if eq, no DPC interrupt requested
+ ldil a0, DISPATCH_LEVEL // set interrupt request level
+
+ REQUEST_SOFTWARE_INTERRUPT
+
+#if NT_INST
+
+ ldl t1, IcDPC(s1) // increment number of DPC requests
+ addl t1, 1, t1 //
+ stl t1, IcDPC(s1) //
+
+#endif
+
+//
+// Set function return value, restore registers, and return.
+//
+
+40:
+ bis s0, zero, v0 // set function return value
+
+ ldq s0, PrS0(sp) // restore integer register s0
+
+#if NT_INST
+
+ and v0, IPI_FREEZE, t1 // check if freeze requested
+ beq t1, 50f // if eq, no freeze requested
+
+ ldl t1, IcFreeze(s1) // increment number of freeze requests
+ addl t1, 1, t1 //
+ stl t1, IcFreeze(s1) //
+50:
+ ldq s1, PrS1(sp) // restore integer register s1
+
+#endif
+
+ ldq ra, PrRa(sp) // restore return address
+ lda sp, PrFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+15:
+ br zero, 10b // store conditional failed, retry
+
+ .end KiIpiProcessRequests
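+
+// A C sketch of the request decoding performed above. The 64-bit request
+// summary carries the request flag bits in the low longword and, when
+// IPI_PACKET_READY is set, the address of the sending processor's PRCB in
+// the upper longword. The interlocked exchange is an illustrative stand-in
+// for the ldq_l/stq_c retry loop, and the field names mirror the Pb*
+// offsets used in this module.
+//
+// ULONG
+// IpiProcessRequestsSketch (
+//     VOID
+//     )
+// {
+//     PKPRCB Prcb = KeGetCurrentPrcb();
+//     ULONGLONG Summary;
+//     PKPRCB Source;
+//
+//     Summary = InterlockedExchange64(&Prcb->RequestSummary, 0);  // assumed
+//
+//     if ((Summary & IPI_PACKET_READY) != 0) {
+//         Source = (PKPRCB)(ULONG)(Summary >> 32);
+//         Source->WorkerRoutine(Source,
+//                               Source->CurrentPacket[0],
+//                               Source->CurrentPacket[1],
+//                               Source->CurrentPacket[2]);
+//     }
+//
+//     if ((Summary & IPI_APC) != 0) {
+//         KiRequestSoftwareInterrupt(APC_LEVEL);
+//     }
+//
+//     if ((Summary & IPI_DPC) != 0) {
+//         KiRequestSoftwareInterrupt(DISPATCH_LEVEL);
+//     }
+//
+//     return (ULONG)Summary;
+// }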
+
+ SBTTL("Send Interprocess Request")
+//++
+//
+// VOID
+// KiIpiSend (
+// IN KAFINITY TargetProcessors,
+// IN KIPI_REQUEST IpiRequest
+// );
+//
+// Routine Description:
+//
+// This routine requests the specified operation on the target set of
+// processors.
+//
+// Arguments:
+//
+// TargetProcessors (a0) - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// IpiRequest (a1) - Supplies the request operation mask.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiIpiSend)
+
+#if !defined(NT_UP)
+
+ bis a0, zero, t0 // copy target processor set
+ lda t1, KiProcessorBlock // get processor block array address
+
+10:
+
+ blbc t0, 30f // if clear, target processor not in set
+
+ ldl t2, 0(t1) // get target processor block address
+
+//
+// Merge the new request into the target processor request summary.
+// The store is conditional to ensure that no updates are lost.
+//
+
+20:
+ ldq_l t3, PbRequestSummary(t2) // get target request summary
+ bis t3, a1, t4 // merge new request with summary
+ stq_c t4, PbRequestSummary(t2) // set new request summary
+ beq t4, 25f // if eq, store conditional failed
+
+
+30:
+ srl t0, 1, t0 // shift to next target
+ lda t1, 4(t1) // get next processor block element
+ bne t0, 10b // if ne, more targets requested
+
+ mb
+
+ ldl t0, __imp_HalRequestIpi
+ jmp zero, (t0) // request IPI interrupt on targets
+
+#else
+
+ ret zero, (ra) // simply return for uni-processor
+
+#endif
+
+25:
+ br zero, 20b // store conditional failed, retry
+
+ .end KiIpiSend
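+
+// The ldq_l/stq_c retry loop above amounts to an atomic OR of the request
+// bits into each selected target's request summary, followed by a single
+// HAL request to interrupt the targets. A C sketch, with the interlocked
+// helper as an illustrative stand-in for the load-locked/store-conditional
+// sequence:
+//
+// VOID
+// IpiSendSketch (
+//     IN KAFFINITY TargetProcessors,
+//     IN KIPI_REQUEST IpiRequest
+//     )
+// {
+//     KAFFINITY Remaining = TargetProcessors;
+//     ULONG Index = 0;
+//
+//     while (Remaining != 0) {
+//         if ((Remaining & 1) != 0) {
+//             InterlockedOr64(&KiProcessorBlock[Index]->RequestSummary,
+//                             (LONGLONG)IpiRequest);  // assumed helper
+//         }
+//
+//         Remaining >>= 1;
+//         Index += 1;
+//     }
+//
+//     HalRequestIpi(TargetProcessors);                // interrupt targets
+// }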
+
+ SBTTL("Send Interprocess Request Packet")
+//++
+//
+// VOID
+// KiIpiSendPacket (
+// IN KAFFINITY TargetProcessors,
+// IN PKIPI_WORKER WorkerFunction,
+// IN PVOID Parameter1,
+// IN PVOID Parameter2,
+// IN PVOID Parameter3
+// );
+//
+// Routine Description:
+//
+// This routine executes the specified worker function on the specified
+// set of processors.
+//
+// Arguments:
+//
+// TargetProcessors (a0) - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// WorkerFunction (a1) - Supplies the address of the worker function.
+//
+// Parameter1 - Parameter3 - Supplies arguments for worker.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+
+ LEAF_ENTRY(KiIpiSendPacket)
+
+#if !defined(NT_UP)
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = Prcb base address
+
+ bis a0, zero, t1 // copy target processor set
+ lda t2, KiProcessorBlock // get processor block array address
+
+//
+// Store function address and parameters in the packet area of the PRCB on
+// the current processor.
+//
+ stl a0, PbTargetSet(v0) // set target processor set
+ stl a1, PbWorkerRoutine(v0) // set worker function address
+ stl a2, PbCurrentPacket(v0) // store worker function parameters
+ stl a3, PbCurrentPacket +4(v0)
+ stl a4, PbCurrentPacket +8(v0)
+
+//
+// Ensure the packet area writes get to memory before any
+// request summary is indicated
+//
+ mb
+
+//
+// Loop through the target processors and send the packet to the specified
+// recipients.
+//
+
+10:
+ blbc t1, 30f // if eq, target not specified
+ ldl t0, 0(t2) // get target processor block address
+
+ sll v0, 32, t3 // shift packet address to upper 32 bits
+ bis t3, IPI_PACKET_READY, t3 // set packet ready in low 32 bits
+20:
+ ldq_l t4, PbRequestSummary(t0) // get request summary of target
+ and t4, IPI_PACKET_READY, t6 // check if target packet busy
+ bne t6, 25f // if ne, target packet busy
+ bis t4, t3, t4 // set entry address in request summary
+ stq_c t4, PbRequestSummary(t0) // store request summary and address
+ beq t4, 20b // if eq, store conditional failed
+
+30:
+ lda t2, 4(t2) // advance to next array element
+ srl t1, 1, t1 // shift to next target
+ bne t1, 10b // if ne, more targets to process
+
+//
+// Ensure writes get to memory
+//
+ mb
+
+ ldl t0, __imp_HalRequestIpi
+ jmp zero, (t0) // request IPI interrupt on targets
+
+25:
+//
+// Packet not ready, spin in cache until it looks available.
+//
+ ldq t4, PbRequestSummary(t0) // get request summary of target
+ and t4, IPI_PACKET_READY, t6 // check if target packet busy
+ beq t6, 20b // looks available, try again
+ br zero, 25b // spin again
+
+#else
+ ret zero, (ra)
+
+#endif //!NT_UP
+
+ .end KiIpiSendPacket
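+
+// The 64-bit value stored into each target's request summary above packs
+// the sending processor's PRCB address into the upper longword and the
+// packet ready flag into the lower longword, so the receiver can locate
+// the packet parameters. SenderPrcb below is an illustrative name for the
+// current processor's PRCB address:
+//
+//     ULONGLONG Entry;
+//
+//     Entry = ((ULONGLONG)(ULONG)SenderPrcb << 32) | IPI_PACKET_READY;
+//
+// If a target already has IPI_PACKET_READY set, the sender spins reading
+// the summary from its cache until the previous packet has been consumed,
+// then retries the store.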
+
+ SBTTL("Save Processor Control State")
+//++
+//
+// VOID
+// KiSaveProcessorState (
+// IN PKPROCESSOR_STATE ProcessorState
+// );
+//
+// Routine Description:
+//
+// This routine saves the processor's control state for the debugger.
+//
+// Arguments:
+//
+// ProcessorState (a0) - Supplies a pointer to a KPROCESSOR_STATE structure.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiSaveProcessorControlState)
+
+ ret zero, (ra) // return
+
+ .end KiSaveProcessorControlState
+
+#if !defined(NT_UP)
+
+
+ SBTTL("Signal Packet Done")
+//++
+//
+// VOID
+// KiIpiSignalPacketDone (
+// IN PVOID SignalDone
+// );
+//
+// Routine Description:
+//
+// This routine signals that a processor has completed a packet by
+// clearing the calling processor's set member in the requesting
+// processor's target set.
+//
+// Arguments:
+//
+// SignalDone (a0) - Supplies a pointer to the processor block of the
+// sending processor.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiIpiSignalPacketDone)
+
+ GET_PROCESSOR_CONTROL_REGION_BASE
+ ldl a1, PcSetMember(v0) // get processor set member
+ mb
+
+10:
+ ldl_l a2, PbTargetSet(a0) // get request target set
+ bic a2, a1, a2 // clear processor set member
+ stl_c a2, PbTargetSet(a0) // store target set
+ beq a2, 15f // if eq, store conditional failed
+
+
+
+ ret zero, (ra) // return
+
+15:
+ br zero, 10b // store conditional failed, retry
+
+ .end KiIpiSignalPacketDone
+#endif
diff --git a/private/ntos/ke/alpha/pcr.s b/private/ntos/ke/alpha/pcr.s
new file mode 100644
index 000000000..36c012a74
--- /dev/null
+++ b/private/ntos/ke/alpha/pcr.s
@@ -0,0 +1,214 @@
+// TITLE("Processor Control Registers")
+//++
+//
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// pcr.s
+//
+// Abstract:
+//
+// This module implements the code necessary to access the
+// processor control registers (pcr) on an alpha processor.
+// On MIPS processors the pcr (which contains processor-specific data)
+// was mapped into the virtual address space using a fixed tb entry.
+// Alpha has no fixed tb entries, so pcr data is obtained via routine
+// interfaces whose implementation varies depending upon whether the
+// system is a multiprocessor or a uniprocessor.
+//
+// N.B.
+// ***********************************************************************
+// There is a clone of this file in NTOS\KD\ALPHA\KDPPCR.S. Whenever this
+// file is modified, a corresponding change should be made to KDPPCR.S.
+// ***********************************************************************
+//
+// Author:
+//
+// Joe Notarangelo 15-Apr-1992
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+//++
+//
+// KIRQL
+// KeGetCurrentIrql(
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function returns the current irql of the processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Current processor irql.
+//
+//--
+
+ LEAF_ENTRY(KeGetCurrentIrql)
+
+
+ GET_CURRENT_IRQL // v0 = current irql
+
+ ret zero, (ra) // return
+
+
+ .end KeGetCurrentIrql
+
+
+
+//++
+//
+// PPRCB
+// KeGetCurrentPrcb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function returns the current processor control block for this
+// processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Pointer to current processor's prcb.
+//
+//--
+
+ LEAF_ENTRY(KeGetCurrentPrcb)
+
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = prcb base
+
+ ret zero, (ra) // return
+
+
+ .end KeGetCurrentPrcb
+
+
+
+//++
+//
+// PKTHREAD
+// KeGetCurrentThread (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function returns the current thread running on this processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Pointer to current thread.
+//
+//--
+
+ LEAF_ENTRY(KeGetCurrentThread)
+
+
+ GET_CURRENT_THREAD // v0 = current thread address
+
+ ret zero, (ra) // return
+
+
+ .end KeGetCurrentThread
+
+
+//++
+//
+// PKPCR
+// KeGetPcr(
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function returns the base address of the processor control
+// region for the current processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Pointer to the processor control region for the current processor.
+//
+//--
+
+ LEAF_ENTRY(KeGetPcr)
+
+
+ GET_PROCESSOR_CONTROL_REGION_BASE // v0 = pcr base address
+
+ ret zero, (ra) // return
+
+ .end KeGetPcr
+
+
+
+//++
+//
+// BOOLEAN
+// KeIsExecutingDpc(
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function returns the DPC Active flag on the current processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Current DPC Active flag. This flag indicates if a DPC routine is
+// currently running on this processor.
+//
+//--
+
+ LEAF_ENTRY(KeIsExecutingDpc)
+
+#if !defined(NT_UP)
+ DISABLE_INTERRUPTS // disable interrupts to prevent context
+ // switch to another processor
+#endif
+ GET_PROCESSOR_CONTROL_REGION_BASE // get PCR address
+ ldl t0, PcPrcb(v0) // get PRCB address
+ ldl v0, PbDpcRoutineActive(t0) // get DPC routine active flag
+#if !defined(NT_UP)
+ ENABLE_INTERRUPTS // re-enable interrupts now that the
+ // DPC active flag has been read
+#endif
+ ret zero, (ra) // return
+
+ .end KeIsExecutingDpc
+
+
+
+
+
diff --git a/private/ntos/ke/alpha/regsav.s b/private/ntos/ke/alpha/regsav.s
new file mode 100644
index 000000000..9cd37fbdf
--- /dev/null
+++ b/private/ntos/ke/alpha/regsav.s
@@ -0,0 +1,407 @@
+// TITLE("Register Save and Restore")
+//++
+//
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// regsav.s
+//
+// Abstract:
+//
+// Implements save/restore general purpose processor
+// registers during exception handling
+//
+// Author:
+//
+// Joe Notarangelo 06-May-1992
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+
+#include "ksalpha.h"
+
+
+ SBTTL( "Generate Trap Frame" )
+//++
+//
+// Routine Description:
+//
+// Save volatile register state (integer/float) in
+// a trap frame.
+//
+// Note: control registers, ra, sp, fp, gp have already
+// been saved, argument registers a0-a3 have also been saved.
+//
+// Arguments:
+//
+// fp - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiGenerateTrapFrame )
+
+ stq v0, TrIntV0(fp) // save integer register v0
+ stq t0, TrIntT0(fp) // save integer registers t0 - t7
+ stq t1, TrIntT1(fp) //
+ stq t2, TrIntT2(fp) //
+ stq t3, TrIntT3(fp) //
+ stq t4, TrIntT4(fp) //
+ stq t5, TrIntT5(fp) //
+ stq t6, TrIntT6(fp) //
+ stq t7, TrIntT7(fp) //
+ stq a4, TrIntA4(fp) // save integer registers a4 - a5
+ stq a5, TrIntA5(fp) //
+ stq t8, TrIntT8(fp) // save integer registers t8 - t12
+ stq t9, TrIntT9(fp) //
+ stq t10, TrIntT10(fp) //
+ stq t11, TrIntT11(fp) //
+ stq t12, TrIntT12(fp) //
+
+ .set noat
+ stq AT, TrIntAt(fp) // save integer register AT
+ .set at
+
+
+ br zero, KiSaveVolatileFloatState // save volatile float state
+
+ .end KiGenerateTrapFrame
+
+
+
+ SBTTL( "Restore Trap Frame" )
+//++
+//
+// Routine Description:
+//
+// Restore volatile register state (integer/float) from
+// a trap frame
+//
+// Note: control registers, ra, sp, fp, gp will be
+// restored by the PALcode, as will argument registers a0-a3.
+//
+// Arguments:
+//
+// fp - Supplies a pointer to trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiRestoreTrapFrame )
+
+ ldq v0, TrIntV0(fp) // restore integer register v0
+ ldq t0, TrIntT0(fp) // restore integer registers t0 - t7
+ ldq t1, TrIntT1(fp) //
+ ldq t2, TrIntT2(fp) //
+ ldq t3, TrIntT3(fp) //
+ ldq t4, TrIntT4(fp) //
+ ldq t5, TrIntT5(fp) //
+ ldq t6, TrIntT6(fp) //
+ ldq t7, TrIntT7(fp) //
+ ldq a4, TrIntA4(fp) // restore integer registers a4 - a5
+ ldq a5, TrIntA5(fp) //
+ ldq t8, TrIntT8(fp) // restore integer registers t8 - t12
+ ldq t9, TrIntT9(fp) //
+ ldq t10, TrIntT10(fp) //
+ ldq t11, TrIntT11(fp) //
+ ldq t12, TrIntT12(fp) //
+
+ .set noat
+ ldq AT, TrIntAt(fp) // restore integer register AT
+ .set at
+
+//
+// Restore the volatile floating register state
+//
+
+ br zero, KiRestoreVolatileFloatState
+
+ .end KiRestoreTrapFrame
+
+
+
+ SBTTL( "Save Volatile Floating Registers" )
+//++
+//
+// Routine Description:
+//
+// Save volatile floating registers in a trap frame.
+//
+// Arguments:
+//
+// fp - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiSaveVolatileFloatState )
+
+ //
+ // asaxp is broken, it does not know that mf_fpcr f0
+ // destroys f0.
+ //
+.set noreorder
+ stt f0, TrFltF0(fp) // save floating register f0
+ mf_fpcr f0 // save fp control register
+.set reorder
+ stt f0, TrFpcr(fp) //
+ stt f1, TrFltF1(fp) // save floating register f1
+ stt f10, TrFltF10(fp) // save floating registers f10 - f30
+ stt f11, TrFltF11(fp) //
+ stt f12, TrFltF12(fp) //
+ stt f13, TrFltF13(fp) //
+ stt f14, TrFltF14(fp) //
+ stt f15, TrFltF15(fp) //
+ stt f16, TrFltF16(fp) //
+ stt f17, TrFltF17(fp) //
+ stt f18, TrFltF18(fp) //
+ stt f19, TrFltF19(fp) //
+ stt f20, TrFltF20(fp) //
+ stt f21, TrFltF21(fp) //
+ stt f22, TrFltF22(fp) //
+ stt f23, TrFltF23(fp) //
+ stt f24, TrFltF24(fp) //
+ stt f25, TrFltF25(fp) //
+ stt f26, TrFltF26(fp) //
+ stt f27, TrFltF27(fp) //
+ stt f28, TrFltF28(fp) //
+ stt f29, TrFltF29(fp) //
+ stt f30, TrFltF30(fp) //
+
+ ret zero, (ra) // return
+
+ .end KiSaveVolatileFloatState
+
+
+ SBTTL( "Restore Volatile Floating State" )
+//++
+//
+// Routine Description:
+//
+// Restore volatile floating registers from a trap frame.
+//
+//
+// Arguments:
+//
+// fp - pointer to trap frame
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiRestoreVolatileFloatState )
+
+ ldt f0, TrFpcr(fp) // restore fp control register
+ mt_fpcr f0 //
+ ldt f0, TrFltF0(fp) // restore floating registers f0 - f1
+ ldt f1, TrFltF1(fp) //
+ ldt f10, TrFltF10(fp) // restore floating registers f10 - f30
+ ldt f11, TrFltF11(fp) //
+ ldt f12, TrFltF12(fp) //
+ ldt f13, TrFltF13(fp) //
+ ldt f14, TrFltF14(fp) //
+ ldt f15, TrFltF15(fp) //
+ ldt f16, TrFltF16(fp) //
+ ldt f17, TrFltF17(fp) //
+ ldt f18, TrFltF18(fp) //
+ ldt f19, TrFltF19(fp) //
+ ldt f20, TrFltF20(fp) //
+ ldt f21, TrFltF21(fp) //
+ ldt f22, TrFltF22(fp) //
+ ldt f23, TrFltF23(fp) //
+ ldt f24, TrFltF24(fp) //
+ ldt f25, TrFltF25(fp) //
+ ldt f26, TrFltF26(fp) //
+ ldt f27, TrFltF27(fp) //
+ ldt f28, TrFltF28(fp) //
+ ldt f29, TrFltF29(fp) //
+ ldt f30, TrFltF30(fp) //
+
+ ret zero, (ra) // return
+
+ .end KiRestoreVolatileFloatState
+
+
+ SBTTL( "Save Non-Volatile Floating State" )
+//++
+//
+// Routine Description:
+//
+// Save nonvolatile floating registers in
+// an exception frame
+//
+//
+// Arguments:
+//
+// sp - pointer to exception frame
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiSaveNonVolatileFloatState )
+
+ stt f2, ExFltF2(sp) // save floating registers f2 - f9
+ stt f3, ExFltF3(sp) //
+ stt f4, ExFltF4(sp) //
+ stt f5, ExFltF5(sp) //
+ stt f6, ExFltF6(sp) //
+ stt f7, ExFltF7(sp) //
+ stt f8, ExFltF8(sp) //
+ stt f9, ExFltF9(sp) //
+
+ ret zero, (ra) // return
+
+ .end KiSaveNonVolatileFloatState
+
+
+ SBTTL( "Restore Non-Volatile Floating State" )
+//++
+//
+// Routine Description:
+//
+// Restore nonvolatile floating registers from an exception frame.
+//
+//
+// Arguments:
+//
+// sp - Supplies a pointer to an exception frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+
+ LEAF_ENTRY( KiRestoreNonVolatileFloatState )
+
+ ldt f2, ExFltF2(sp) // restore floating registers f2 - f9
+ ldt f3, ExFltF3(sp) //
+ ldt f4, ExFltF4(sp) //
+ ldt f5, ExFltF5(sp) //
+ ldt f6, ExFltF6(sp) //
+ ldt f7, ExFltF7(sp) //
+ ldt f8, ExFltF8(sp) //
+ ldt f9, ExFltF9(sp) //
+
+ ret zero, (ra) // return
+
+ .end KiRestoreNonVolatileFloatState
+
+
+ SBTTL( "Save Volatile Integer State" )
+//++
+//
+// Routine Description:
+//
+// Save volatile integer register state in a trap frame.
+//
+// Note: control registers, ra, sp, fp, gp have already been saved
+// as have argument registers a0-a3.
+//
+// Arguments:
+//
+// fp - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiSaveVolatileIntegerState)
+
+ stq v0, TrIntV0(fp) // save integer register v0
+ stq t0, TrIntT0(fp) // save integer registers t0 - t7
+ stq t1, TrIntT1(fp) //
+ stq t2, TrIntT2(fp) //
+ stq t3, TrIntT3(fp) //
+ stq t4, TrIntT4(fp) //
+ stq t5, TrIntT5(fp) //
+ stq t6, TrIntT6(fp) //
+ stq t7, TrIntT7(fp) //
+ stq a4, TrIntA4(fp) // save integer registers a4 - a5
+ stq a5, TrIntA5(fp) //
+ stq t8, TrIntT8(fp) // save integer registers t8 - t12
+ stq t9, TrIntT9(fp) //
+ stq t10, TrIntT10(fp) //
+ stq t11, TrIntT11(fp) //
+ stq t12, TrIntT12(fp) //
+
+ .set noat
+ stq AT, TrIntAt(fp) // save integer register AT
+ .set at
+
+ ret zero, (ra) // return
+
+ .end KiSaveVolatileIntegerState
+
+
+
+ SBTTL( "Restore Volatile Integer State" )
+//++
+//
+// Routine Description:
+//
+// Restore volatile integer register state from a trap frame.
+//
+// Note: control registers, ra, sp, fp, gp and argument registers
+// a0 - a3 will be restored by the PALcode.
+//
+// Arguments:
+//
+// fp - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiRestoreVolatileIntegerState)
+
+ ldq v0, TrIntV0(fp) // restore integer register v0
+ ldq t0, TrIntT0(fp) // restore integer registers t0 - t7
+ ldq t1, TrIntT1(fp) //
+ ldq t2, TrIntT2(fp) //
+ ldq t3, TrIntT3(fp) //
+ ldq t4, TrIntT4(fp) //
+ ldq t5, TrIntT5(fp) //
+ ldq t6, TrIntT6(fp) //
+ ldq t7, TrIntT7(fp) //
+ ldq a4, TrIntA4(fp) // restore integer registers a4 - a5
+ ldq a5, TrIntA5(fp) //
+ ldq t8, TrIntT8(fp) // restore integer registers t8 - t12
+ ldq t9, TrIntT9(fp) //
+ ldq t10, TrIntT10(fp) //
+ ldq t11, TrIntT11(fp) //
+ ldq t12, TrIntT12(fp) //
+
+ .set noat
+ ldq AT, TrIntAt(fp) // restore integer register AT
+ .set at
+
+ ret zero, (ra) // return
+
+ .end KiRestoreVolatileIntegerState
diff --git a/private/ntos/ke/alpha/services.stb b/private/ntos/ke/alpha/services.stb
new file mode 100644
index 000000000..ed7e69b0c
--- /dev/null
+++ b/private/ntos/ke/alpha/services.stb
@@ -0,0 +1,66 @@
+//++
+//
+// Copyright (c) 1989 Microsoft Corporation
+//
+// Module Name:
+//
+// sysstubs.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch stub procedures.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// User or kernel mode.
+//
+// Revision History:
+//
+// Joe Notarangelo 08-Jul-1992
+// alpha version
+//--
+
+#include "ksalpha.h"
+
+#define STUBS_BEGIN1( t ) .rdata
+#define STUBS_BEGIN2( t ) .align 4
+#define STUBS_BEGIN3( t )
+#define STUBS_BEGIN4( t )
+#define STUBS_BEGIN5( t )
+#define STUBS_BEGIN6( t )
+#define STUBS_BEGIN7( t )
+#define STUBS_BEGIN8( t )
+
+#define STUBS_END
+
+#define SYSSTUBS_ENTRY1( ServiceNumber, Name, NumArgs ) LEAF_ENTRY(Zw##Name)
+#define SYSSTUBS_ENTRY2( ServiceNumber, Name, NumArgs ) ldiq v0, ServiceNumber
+#define SYSSTUBS_ENTRY3( ServiceNumber, Name, NumArgs ) SYSCALL
+#define SYSSTUBS_ENTRY4( ServiceNumber, Name, NumArgs ) .end Zw##Name ;
+#define SYSSTUBS_ENTRY5( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY6( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY7( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY8( ServiceNumber, Name, NumArgs )
+
+#define USRSTUBS_ENTRY1( ServiceNumber, Name, NumArgs) LEAF_ENTRY(Zw##Name)
+#define USRSTUBS_ENTRY2( ServiceNumber, Name, NumArgs) ALTERNATE_ENTRY(Nt##Name)
+#define USRSTUBS_ENTRY3( ServiceNumber, Name, NumArgs) ldiq v0, ServiceNumber
+#define USRSTUBS_ENTRY4( ServiceNumber, Name, NumArgs) SYSCALL
+#define USRSTUBS_ENTRY5( ServiceNumber, Name, NumArgs) .end Zw##Name ;
+#define USRSTUBS_ENTRY6( ServiceNumber, Name, NumArgs)
+#define USRSTUBS_ENTRY7( ServiceNumber, Name, NumArgs)
+#define USRSTUBS_ENTRY8( ServiceNumber, Name, NumArgs)
+
+
+ STUBS_BEGIN1( "System Service Stub Procedures" )
+ STUBS_BEGIN2( "System Service Stub Procedures" )
+ STUBS_BEGIN3( "System Service Stub Procedures" )
+ STUBS_BEGIN4( "System Service Stub Procedures" )
+ STUBS_BEGIN5( "System Service Stub Procedures" )
+ STUBS_BEGIN6( "System Service Stub Procedures" )
+ STUBS_BEGIN7( "System Service Stub Procedures" )
+ STUBS_BEGIN8( "System Service Stub Procedures" )
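
For readers unfamiliar with the gensrv scheme: the generated include expands the numbered macros above once per system service, stamping out one small stub per Nt/Zw pair. Purely as an illustration (the service name and number below are hypothetical, not taken from the real generated table), a USRSTUBS entry expands to roughly the following Alpha assembly:

        LEAF_ENTRY(ZwExampleService)        // USRSTUBS_ENTRY1
        ALTERNATE_ENTRY(NtExampleService)   // USRSTUBS_ENTRY2
        ldiq    v0, 42                      // USRSTUBS_ENTRY3 - load the service number
        SYSCALL                             // USRSTUBS_ENTRY4 - trap to the service dispatcher
        .end    ZwExampleService            // USRSTUBS_ENTRY5

The SYSSTUBS variant defines only the Zw name; the USRSTUBS variant additionally publishes the Nt alias through ALTERNATE_ENTRY.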
diff --git a/private/ntos/ke/alpha/sources b/private/ntos/ke/alpha/sources
new file mode 100644
index 000000000..0523febc9
--- /dev/null
+++ b/private/ntos/ke/alpha/sources
@@ -0,0 +1,41 @@
+MSC_WARNING_LEVEL=/W3 /WX
+GPSIZE=32
+ALPHA_SOURCES=..\alpha\byteme.s \
+ ..\alpha\xxalign.s \
+ ..\alpha\alignem.c \
+ ..\alpha\allproc.c \
+ ..\alpha\apcint.s \
+ ..\alpha\apcuser.c \
+ ..\alpha\buserror.c \
+ ..\alpha\byteem.c \
+ ..\alpha\callback.c \
+ ..\alpha\callout.s \
+ ..\alpha\clock.s \
+ ..\alpha\ctxsw.s \
+ ..\alpha\dmpstate.c \
+ ..\alpha\exceptn.c \
+ ..\alpha\floatem.c \
+ ..\alpha\flush.c \
+ ..\alpha\flushtb.c \
+ ..\alpha\getsetrg.c \
+ ..\alpha\initkr.c \
+ ..\alpha\intobj.c \
+ ..\alpha\intsup.s \
+ ..\alpha\ipi.c \
+ ..\alpha\irql.s \
+ ..\alpha\miscs.s \
+ ..\alpha\mpipi.s \
+ ..\alpha\pcr.s \
+ ..\alpha\regsav.s \
+ ..\alpha\spinlock.s \
+ ..\alpha\start.s \
+ ..\alpha\sysstubs.s \
+ ..\alpha\systable.s \
+ ..\alpha\tb.s \
+ ..\alpha\threadbg.s \
+ ..\alpha\thredini.c \
+ ..\alpha\timindex.s \
+ ..\alpha\trap.s \
+ ..\alpha\trigger.c \
+ ..\alpha\vdm.c
+
diff --git a/private/ntos/ke/alpha/spinlock.s b/private/ntos/ke/alpha/spinlock.s
new file mode 100644
index 000000000..183742bf7
--- /dev/null
+++ b/private/ntos/ke/alpha/spinlock.s
@@ -0,0 +1,571 @@
+// TITLE("Spin Locks")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// spinlock.s
+//
+// Abstract:
+//
+// This module implements the routines for acquiring and releasing
+// spin locks.
+//
+// Author:
+//
+// David N. Cutler (davec) 23-Mar-1990
+// Joe Notarangelo 06-Apr-1992
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+//++
+//
+// VOID
+// KeInitializeSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function initializes an executive spin lock.
+//
+// Argument:
+//
+// SpinLock (a0) - Supplies a pointer to the executive spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KeInitializeSpinLock )
+
+ stl zero, 0(a0) // set spin lock not owned
+ ret zero, (ra) // return
+
+ .end KeInitializeSpinLock
+
+
+//++
+//
+// VOID
+// KeAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and acquires
+// the specified executive spinlock.
+//
+// Arguments:
+//
+//    SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the
+//        previous IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeAcquireSpinLock)
+
+//
+// Raise IRQL to DISPATCH_LEVEL and acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+// N.B. The previous IRQL must not be stored until the lock is owned.
+//
+// N.B. The longword surrounding the previous IRQL must not be read
+// until the lock is owned.
+//
+
+
+
+ bis a0, zero, t5 // t5 = address of spin lock
+ ldil a0, DISPATCH_LEVEL // set new IRQL
+ bis a1, zero, t0 // t0 = a1, a1 may be destroyed
+ SWAP_IRQL // swap irql, on return v0 = old irql
+
+
+//
+// Acquire the specified spinlock.
+//
+// N.B. The code below intentionally branches forward when the lock acquire
+//      fails because forward branches are statically predicted not taken.
+//
+
+#if !defined(NT_UP)
+
+10: //
+ ldl_l t3, 0(t5) // get current lock value
+ bis t5, zero, t4 // set ownership value
+ bne t3, 15f // if ne => lock owned
+ stl_c t4, 0(t5) // set lock owned
+ beq t4, 15f // if eq => stx_c failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+#endif
+//
+// Save the old Irql at the address saved by the caller.
+// Insure that the old Irql is updated with longword granularity.
+//
+
+ ldq_u t1, 0(t0) // read quadword surrounding KIRQL
+ bic t0, 3, t2 // get address of containing longword
+ mskbl t1, t0, t1 // clear KIRQL byte in quadword
+ insbl v0, t0, v0 // get new KIRQL to correct byte
+ bis t1, v0, t1 // merge KIRQL into quadword
+    extll   t1, t2, t1              // get longword containing KIRQL
+
+ stl t1, 0(t2) // store containing longword
+ ret zero, (ra) // return
+
+
+#if !defined(NT_UP)
+15: //
+ ldl t3, 0(t5) // get current lock value
+ beq t3, 10b // retry acquire lock if unowned
+ br zero, 15b // loop in cache until lock free
+#endif
+ .end KeAcquireSpinLock
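
To make the control flow above easier to follow, here is a rough C rendering of the MP acquire path. It is a sketch only: the KIRQL_SKETCH type, the DISPATCH_LEVEL_SKETCH value, and the KeSwapIrqlSketch helper standing in for the SWAP_IRQL PALcode call are assumptions made for illustration, and C11 atomics stand in for the ldl_l/stl_c/mb sequence.

    #include <stdatomic.h>
    #include <stdint.h>

    typedef uint8_t KIRQL_SKETCH;
    #define DISPATCH_LEVEL_SKETCH 2            /* assumed value, for illustration only */

    /* Hypothetical stand-in for the SWAP_IRQL PALcode call: sets the new IRQL
       and returns the previous one. */
    extern KIRQL_SKETCH KeSwapIrqlSketch(KIRQL_SKETCH NewIrql);

    static void AcquireSpinLockSketch(atomic_uintptr_t *SpinLock,
                                      KIRQL_SKETCH *OldIrql)
    {
        /* Raise to DISPATCH_LEVEL first, exactly as the assembly does. */
        KIRQL_SKETCH previous = KeSwapIrqlSketch(DISPATCH_LEVEL_SKETCH);

        for (;;) {
            uintptr_t expected = 0;

            /* ldl_l/stl_c analogue: succeed only if the lock is still free,
               storing a nonzero ownership value (here, the lock address). */
            if (atomic_compare_exchange_weak_explicit(
                    SpinLock, &expected, (uintptr_t)SpinLock,
                    memory_order_acquire, memory_order_relaxed)) {
                break;
            }

            /* The 15: path above: spin on plain reads, in cache, until the
               lock looks free, then retry the interlocked attempt. */
            while (atomic_load_explicit(SpinLock, memory_order_relaxed) != 0) {
            }
        }

        /* Only after ownership is held may the caller's KIRQL variable be
           written; the assembly does this as a read-modify-write of the
           containing longword because the original Alpha architecture has no
           byte store instruction. */
        *OldIrql = previous;
    }

The release path is the mirror image: a memory barrier, a plain store of zero into the lock, and a swap back to the saved IRQL.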
+
+
+ SBTTL("Acquire SpinLock and Raise to Synch")
+//++
+//
+// KIRQL
+// KeAcquireSpinLockRaiseToSynch (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to synchronization level and
+// acquires the specified spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to the spinlock that is to be
+// acquired.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KeAcquireSpinLockRaiseToSynch)
+
+#if !defined(NT_UP)
+ bis a0, zero, t5
+ ldl a0, KiSynchIrql
+10:
+//
+// Raise IRQL and attempt to acquire the specified spinlock.
+//
+ SWAP_IRQL // save previous IRQL in v0
+ ldl_l t3, 0(t5) // get current lock value
+ bis t5, zero, t4 // set ownership value
+ bne t3, 25f // if ne, lock owned
+ stl_c t4, 0(t5) // set lock owned
+ beq t4, 25f // if eq, stl_c failed
+ mb // synchronize subsequent reads
+
+ ret zero, (ra)
+
+25:
+//
+// Spinlock is owned, lower IRQL and spin in cache
+// until it looks free.
+//
+ bis v0, zero, a0
+ SWAP_IRQL
+ bis v0, zero, a0
+
+26:
+ ldl t3, 0(t5) // get current lock value
+ beq t3, 10b // retry acquire if unowned
+ br zero, 26b // loop in cache until free
+
+#else
+ ldl a0, KiSynchIrql
+ SWAP_IRQL
+ ret zero, (ra) // return
+ .end KeAcquireSpinLockRaiseToSynch
+#endif
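
The contention path here differs from KeAcquireSpinLock in one respect: while the lock is owned, IRQL is dropped back to the caller's level and the routine spins on plain reads, re-raising only when the lock looks free. Reusing the hypothetical helpers introduced after KeAcquireSpinLock above, the MP path behaves roughly like this sketch:

    /* Sketch only; KIRQL_SKETCH, KeSwapIrqlSketch and the atomic lock type are
       the illustrative stand-ins defined after KeAcquireSpinLock. The
       KiSynchIrqlSketch variable stands in for the KiSynchIrql global. */
    extern KIRQL_SKETCH KiSynchIrqlSketch;

    static KIRQL_SKETCH AcquireSpinLockRaiseToSynchSketch(atomic_uintptr_t *SpinLock)
    {
        for (;;) {
            KIRQL_SKETCH previous = KeSwapIrqlSketch(KiSynchIrqlSketch);
            uintptr_t expected = 0;

            if (atomic_compare_exchange_weak_explicit(
                    SpinLock, &expected, (uintptr_t)SpinLock,
                    memory_order_acquire, memory_order_relaxed)) {
                return previous;            /* lock held, IRQL at synch level */
            }

            /* Lock is owned: lower IRQL again so interrupts are not held off
               while spinning, then wait for the lock to look free. */
            KeSwapIrqlSketch(previous);
            while (atomic_load_explicit(SpinLock, memory_order_relaxed) != 0) {
            }
        }
    }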
+
+//++
+//
+// KIRQL
+// KeAcquireSpinLockRaiseToDpc (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to dispatcher level and acquires
+// the specified spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to the spinlock that is to be
+// acquired.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+#if !defined(NT_UP)
+ ALTERNATE_ENTRY(KeAcquireSpinLockRaiseToDpc)
+
+ bis a0, zero, t5
+ ldil a0, DISPATCH_LEVEL
+ br 10b // finish in common code
+ .end KeAcquireSpinLockRaiseToSynch
+#else
+ LEAF_ENTRY(KeAcquireSpinLockRaiseToDpc)
+
+ ldil a0, DISPATCH_LEVEL // set new IRQL
+ SWAP_IRQL // old irql in v0
+ ret zero, (ra)
+
+ .end KeAcquireSpinLockRaiseToDpc
+#endif
+
+
+
+//++
+//
+// VOID
+// KeReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function releases an executive spin lock and lowers the IRQL
+// to its previous value.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spin lock.
+//
+// OldIrql (a1) - Supplies the previous IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ LEAF_ENTRY(KeReleaseSpinLock)
+
+//
+// Release the specified spinlock.
+//
+
+#if !defined(NT_UP)
+
+ mb // synchronize all previous writes
+ // before the spinlock is released
+ stl zero, 0(a0) // set spin lock not owned
+
+#endif
+
+10:
+
+//
+// Lower the IRQL to the specified level.
+//
+// N.B. The lower IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ bis a1, zero, a0 // a0 = new irql
+ SWAP_IRQL // change to new irql
+
+ ret zero, (ra) // return
+
+ .end KeReleaseSpinLock
+
+//++
+//
+// BOOLEAN
+// KeTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and attempts
+// to acquires the specified executive spinlock. If the spinlock can be
+// acquired, then TRUE is returned. Otherwise, the IRQL is restored to
+// its previous value and FALSE is returned.
+//
+// Arguments:
+//
+//    SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the
+//        previous IRQL value.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(KeTryToAcquireSpinLock)
+
+//
+// Raise IRQL to DISPATCH_LEVEL and try to acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ bis a0, zero, t5 // t5 = address of spin lock
+ ldil a0, DISPATCH_LEVEL // new irql
+ bis a1, zero, t11 // t11 = a1, a1 may be clobbered
+ SWAP_IRQL // a0 = new, on return v0 = old irql
+
+
+//
+// Try to acquire the specified spinlock.
+//
+// N.B. A noninterlocked test is done before the interlocked attempt. This
+// allows spinning without interlocked cycles.
+//
+
+#if !defined(NT_UP)
+
+ ldl t0, 0(t5) // get current lock value
+ bne t0, 20f // if ne, lock owned
+10: ldl_l t0, 0(t5) // get current lock value
+ bis t5, zero, t3 // t3 = ownership value
+ bne t0, 20f // if ne, spin lock owned
+ stl_c t3, 0(t5) // set lock owned
+ beq t3, 15f // if eq, store conditional failure
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+#endif
+
+//
+// The attempt to acquire the specified spin lock succeeded.
+//
+
+//
+// Save the old Irql at the address saved by the caller.
+// Insure that the old Irql is updated with longword granularity.
+//
+
+ ldq_u t1, 0(t11) // read quadword containing KIRQL
+ bic t11, 3, t2 // get address of containing longword
+ mskbl t1, t11, t1 // clear byte position of KIRQL
+ bis v0, zero, a0 // save old irql
+ insbl v0, t11, v0 // get KIRQL to correct byte
+ bis t1, v0, t1 // merge KIRQL into quadword
+ extll t1, t2, t1 // extract containing longword
+ stl t1, 0(t2) // store containing longword
+
+ ldil v0, TRUE // set return value
+ ret zero, (ra) // return
+
+//
+// The attempt to acquire the specified spin lock failed. Lower IRQL to its
+// previous value and return FALSE.
+//
+// N.B. The lower IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+#if !defined(NT_UP)
+
+20: //
+ bis v0, zero, a0 // set old IRQL value
+
+ SWAP_IRQL // change back to old irql(a0)
+
+ ldil v0, FALSE // set return to failed
+ ret zero, (ra) // return
+
+
+15: //
+ br zero, 10b // retry spinlock
+
+#endif
+
+ .end KeTryToAcquireSpinLock
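
The try variant has one extra obligation on its failure path: the IRQL raise must be undone before FALSE is returned. Continuing with the same illustrative helpers as the sketches above:

    /* Sketch only, reusing KIRQL_SKETCH, DISPATCH_LEVEL_SKETCH and
       KeSwapIrqlSketch from the KeAcquireSpinLock sketch. Returns nonzero
       (TRUE) if the lock was taken, zero (FALSE) otherwise. */
    static int TryToAcquireSpinLockSketch(atomic_uintptr_t *SpinLock,
                                          KIRQL_SKETCH *OldIrql)
    {
        KIRQL_SKETCH previous = KeSwapIrqlSketch(DISPATCH_LEVEL_SKETCH);
        uintptr_t expected = 0;

        /* Noninterlocked test first, then a single interlocked attempt. */
        if (atomic_load_explicit(SpinLock, memory_order_relaxed) == 0 &&
            atomic_compare_exchange_strong_explicit(
                SpinLock, &expected, (uintptr_t)SpinLock,
                memory_order_acquire, memory_order_relaxed)) {
            *OldIrql = previous;            /* publish only once the lock is owned */
            return 1;
        }

        KeSwapIrqlSketch(previous);         /* undo the raise on failure */
        return 0;
    }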
+
+//++
+//
+// KIRQL
+// KiAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function acquires a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiAcquireSpinLock)
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockAtDpcLevel)
+
+#if !defined(NT_UP)
+
+ GET_CURRENT_THREAD // v0 = current thread address
+10: //
+ ldl_l t2, 0(a0) // get current lock value
+ bis v0, zero, t3 // set ownership value
+ bne t2, 15f // if ne, spin lock owned
+ stl_c t3, 0(a0) // set spin lock owned
+ beq t3, 15f // if eq, store conditional failure
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+ ret zero, (ra) // return
+
+15: //
+ ldl t2, 0(a0) // get current lock value
+ beq t2, 10b // retry acquire lock if unowned
+ br zero, 15b // loop in cache until lock free
+
+#else
+
+ ret zero, (ra) // return
+
+#endif
+
+ .end KiAcquireSpinLock
+
+//++
+//
+// VOID
+// KiReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function releases a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiReleaseSpinLock)
+
+ ALTERNATE_ENTRY(KeReleaseSpinLockFromDpcLevel)
+
+#if !defined(NT_UP)
+
+ mb // synchronize all previous writes
+ // before the spinlock is released
+ stl zero, 0(a0) // set spin lock not owned
+
+#endif
+
+ ret zero, (ra) // return
+
+ .end KiReleaseSpinLock
+
+//++
+//
+// KIRQL
+// KiTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function attempts to acquire the specified kernel spinlock. If
+// the spinlock can be acquired, then TRUE is returned. Otherwise, FALSE
+// is returned.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(KiTryToAcquireSpinLock)
+
+#if !defined(NT_UP)
+
+ GET_CURRENT_THREAD // v0 = current thread address
+10: //
+ ldl_l t2, 0(a0) // get current lock value
+ bis v0, zero, t3 // set ownership value
+ bne t2, 20f // if ne, spin lock owned
+ stl_c t3, 0(a0) // set spin lock owned
+ beq t3, 15f // if eq, stl_c failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+ ldil v0, TRUE // set success return value
+ ret zero, (ra) // return
+
+20: //
+ ldil v0, FALSE // set failure return value
+ ret zero, (ra) // return
+
+15: //
+ br zero, 10b // retry
+
+
+#else
+
+ ldil v0, TRUE // set success return value
+ ret zero, (ra) // return
+
+#endif
+
+ .end KiTryToAcquireSpinLock
diff --git a/private/ntos/ke/alpha/start.s b/private/ntos/ke/alpha/start.s
new file mode 100644
index 000000000..cd78fc6f0
--- /dev/null
+++ b/private/ntos/ke/alpha/start.s
@@ -0,0 +1,785 @@
+// TITLE( "Start System" )
+//++
+//
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module:
+//
+// start.s
+//
+// Abstract:
+//
+// This module implements the code necessary to initially start NT on an
+// Alpha; it includes the routine that first receives control when the
+// loader executes the kernel.
+//
+// Author:
+//
+// Joe Notarangelo 02-Apr-1992
+//
+// Environment:
+//
+// Kernel Mode only.
+//
+// Revision History:
+//
+//
+//--
+
+#include "ksalpha.h"
+
+#define TotalFrameLength (KERNEL_STACK_SIZE - (TrapFrameLength + \
+ ExceptionFrameLength) )
+//
+// Global Variables
+//
+
+ .data
+
+#ifdef NT_UP
+
+//
+// These global variables are useful only for uni-processor systems
+// as they are per-processor values on MP systems.
+//
+
+ .globl KiPcrBaseAddress
+KiPcrBaseAddress:
+ .long 0 : 1
+
+ .globl KiCurrentThread
+KiCurrentThread:
+ .long 0 : 1
+
+#endif //NT_UP
+
+
+ SBTTL( "System Startup" )
+//++
+//
+// Routine Description:
+//
+// This routine represents the final stage of the loader. It is
+// responsible for installing the loaded PALcode image and transferring
+// control to the startup code in the kernel.
+//
+// KiSystemStartupContinue is the routine called when NT begins execution.
+// The first code that must be executed is the PALcode, which must be entered
+// in PAL mode. The PALcode will return to the address in the return
+// address register (ra). This function sets ra to the beginning of the
+// native system code that normally executes to set up the NT operating
+// environment - so that the PAL "returns" to the normal system start code.
+//
+// N.B. This code assumes that the I-cache is coherent.
+//
+// N.B. This routine does not execute in the context of the operating
+// system but instead executes in the context of the firmware
+// PAL environment. This routine can only use those services
+// guaranteed to exist in the firmware. The only PAL services
+// that can be counted on are: swppal, imb, and halt.
+//
+// Arguments:
+//
+// LoaderBlock (a0) - Supplies pointer to Loader Parameter Block.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+SsRa: .space 8 // Save ra
+ .space 8 // for stack alignment
+SsFrameLength:
+
+ NESTED_ENTRY(KiSystemStartup, SsFrameLength, ra)
+
+ ALTERNATE_ENTRY( KiStartProcessor )
+
+ lda sp, -SsFrameLength(sp) // allocate stack frame
+ stq ra, SsRa(sp) // save ra
+
+ PROLOGUE_END
+
+ //
+ // Prepare arguments for SWPPAL and Kernel. This assumes that
+ // the SWPPAL does not destroy any of the argument registers.
+ //
+ // a0 = Physical base address of PAL.
+ // a1 = PCR page frame number.
+    //  a2 = Pointer to loader parameter block.
+ // ra = Address to return to from pal.
+ // Equals kernel start address.
+ //
+
+ bis a0, zero, a2 // copy Loader Block to a2
+ ldl a1, LpbPcrPage(a2) // get pcr page number
+ ldl a0, LpbPalBaseAddress(a2) // get PAL base address
+ sll a0, 32+3, a0 // strip off top bits
+ srl a0, 32+3, a0 // clear upper lw and kseg bits
+ lda ra, KiSystemStartupContinue // store OS start address in ra
+
+ //
+ // Jump to PAL via SWPPAL. Then return to continuation address in OS.
+ //
+
+ // a0 = new PAL base address
+ // ra = continuation address
+ SWPPAL // swap PAL images
+
+ //
+ // We should never get here!
+ //
+
+ ldq ra, SsRa(sp) // Restore ra
+ lda sp, SsFrameLength(sp) // Restore stack pointer
+ ret zero, (ra) // shouldn't get here
+
+ .end KiSystemStartup
+
+ SBTTL( "System Startup Continue" )
+//++
+//
+// Routine Description:
+//
+// KiSystemStartupContinue is the routine called when NT begins execution
+// after loading the Kernel environment from the PAL.
+// Its function is to register exception routines and system values with
+// the PALcode, call kernel initialization, and fall into the idle thread
+// code.
+//
+// Arguments:
+//
+// PalBaseAddress(a0) - Supplies base address of the operating system
+// PALcode.
+//
+// PcrPage(a1) - Supplies the PFN of the PCR page.
+//
+// LoaderBlock(a2) - Supplies a pointer to the loader parameter block.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+SscRa: .space 8 // return address
+Fill: .space 8 // filler for alignment
+SscFrameLength: // size of stack frame
+
+ NESTED_ENTRY( KiSystemStartupContinue, SscFrameLength, ra )
+
+ lda sp, -SscFrameLength(sp) // allocate stack frame
+ stq ra, SscRa(sp) // save ra
+
+ PROLOGUE_END
+
+//
+// Establish kernel stack pointer and kernel global pointer from
+// parameter block.
+//
+
+ ldl sp, LpbKernelStack(a2) // establish kernel sp
+ ldl gp, LpbGpBase(a2) // establish kernel gp
+
+
+//
+// Initialize PAL values, sp, gp, pcr, pdr, initial thread
+//
+
+ bis a2, zero, s2 // save pointer to loader block
+ ldl s0, LpbPcrPage(s2) // get pcr page number
+ ldl a0, LpbPdrPage(s2) // get pdr page number
+ ldl a1, LpbThread(s2) // get idle thread address
+ ldil t0, KSEG0_BASE // kseg0 base address
+ bis a0, zero, s3 // save copy of pdr page number
+ sll a0, PAGE_SHIFT, a0 // physical address of pdr
+ sll s0, PAGE_SHIFT, s0 // physical address of pcr
+ bis a0, t0, a0 // kseg0 address of pdr
+ bis s0, t0, s0 // kseg0 address of pcr
+ bis a0, zero, s1 // save copy of pdr address
+ bis zero, zero, a2 // zero Teb for initial thread
+ ldl a3, LpbPanicStack(s2) // get Interrupt stack base
+ ldil a4, TotalFrameLength // set maximum kernel stack size
+
+
+ // sp - initial kernel sp
+ // gp - system gp
+ // a0 - pdr kseg0 address
+ // a1 - thread kseg0 address
+ // a2 - Teb address for initial thread
+ // a3 - Interrupt stack base
+ // a4 - Maximum kernel stack size
+ INITIALIZE_PAL
+
+
+#ifdef NT_UP
+
+//
+// Save copies of the per-processor values in global variables for
+// uni-processor systems.
+//
+
+ lda t0, KiPcrBaseAddress // get address of PCR address
+ stl s0, 0(t0) // save PCR address
+ ldl t1, LpbThread(s2) // get address of idle thread
+ lda t0, KiCurrentThread // get address of thread address
+ stl t1, 0(t0) // save idle address as thread
+
+#endif //NT_UP
+
+//
+// Establish recursive mapping of pde for ptes and hyperspace
+// N.B. - page table page for hyperspace is page after pdr page
+//
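+// The stores below make the page directory map itself: the PDE at index
+// (PTE_BASE >> PDI_SHIFT) is pointed back at the PDR page, so all page
+// tables appear to the kernel as a linear array at PTE_BASE. The sll/srl
+// pair zero-extends the 32-bit PTE_BASE value and shifts it right by
+// (PDI_SHIFT - 2), which is the directory index times four, i.e. the byte
+// offset of the longword PDE within the PDR. The entry after the self-map
+// maps hyperspace, whose page table page physically follows the PDR page.
+//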
+
+ ldil t0, PTE_BASE // get pte base
+ sll t0, 32, t0 // clean upper bits
+ srl t0, 32+PDI_SHIFT-2, t0 // get offset of pde
+ bic t0, 3, t0 // longword aligned, clear low bits
+ addq t0, s1, t0 // kseg0 addr of pde
+ sll s3, PTE_PFN, t1 // shift pfn into place
+ bis t1, PTE_VALID_MASK, t1 // set valid bit
+ bis t1, PTE_DIRTY_MASK, t1 // set dirty bit
+ stl t1, 0(t0) // store pde for pdr
+
+ ldil t2, (1 << PTE_PFN) // increment pfn by 1
+ addq t1, t2, t1 //
+ stl t1, 4(t0) // store hyperspace pde
+
+//
+// Establish mapping for special user data page.
+// N.B. - page table page for this is page after hyperspace page table page
+// actual data page is the next page.
+//
+ ldil t0, SharedUserData // get shared data base
+ zap t0, 0xf0, t3 // clean upper bits
+ srl t3, PDI_SHIFT-2, t0 // get offset of pde
+ bic t0, 3, t0 // longword aligned, clear low bits
+ addq t0, s1, t0 // kseg0 addr of pde
+ addq t1, t2, t1 // increment pfn by 1
+ stl t1, 0(t0) // store user data page pde
+
+ zap t3, 0xf8, t3 // clean upper bits
+ srl t3, PTI_SHIFT-2, t3 // get offset of pte
+ bic t3, 3, t3 // longword aligned, clear low bits
+ addq t3, s1, t3
+ ldil t4, 2*PAGE_SIZE
+ addq t3, t4, t3 // kseg0 addr of pte
+ addq t1, t2, t1 // increment pfn by 1
+ stl t1, 0(t3)
+
+
+
+
+//
+// Register kernel exception entry points with the PALcode
+//
+
+ lda a0, KiPanicException // bugcheck entry point
+ ldil a1, entryBugCheck //
+ WRITE_KERNEL_ENTRY_POINT //
+
+ lda a0, KiGeneralException // general exception entry point
+ ldil a1, entryGeneral //
+ WRITE_KERNEL_ENTRY_POINT //
+
+ lda a0, KiMemoryManagementException // memory mgmt exception entry
+ ldil a1, entryMM //
+ WRITE_KERNEL_ENTRY_POINT //
+
+ lda a0, KiInterruptException // interrupt exception entry point
+ ldil a1, entryInterrupt //
+ WRITE_KERNEL_ENTRY_POINT //
+
+ lda a0, KiSystemServiceException // syscall entry point
+ ldil a1, entrySyscall //
+ WRITE_KERNEL_ENTRY_POINT //
+
+
+//
+// Initialize fields in the pcr
+//
+
+ ldil t1, PCR_MINOR_VERSION // get minor version
+ ldil t2, PCR_MAJOR_VERSION // get major version
+ stl t1, PcMinorVersion(s0) // store minor version number
+ stl t2, PcMajorVersion(s0) // store major version number
+
+ ldl t0, LpbThread(s2) // save idle thread in pcr
+ stl t0, PcIdleThread(s0) //
+
+ ldl t0, LpbPanicStack(s2) // save panic stack in pcr
+ stl t0, PcPanicStack(s0) //
+
+ ldl t0, LpbProcessorType(s2) // save processor type in pcr
+ stl t0, PcProcessorType(s0) //
+
+ ldl t0, LpbProcessorRevision(s2) // save processor revision
+ stl t0, PcProcessorRevision(s0) //
+
+ ldl t0, LpbPhysicalAddressBits(s2) // save physical address bits
+ stl t0, PcPhysicalAddressBits(s0) //
+
+ ldl t0, LpbMaximumAddressSpaceNumber(s2) // save max asn
+ stl t0, PcMaximumAddressSpaceNumber(s0) //
+
+ ldl t0, LpbFirstLevelDcacheSize(s2) // save first level dcache size
+ stl t0, PcFirstLevelDcacheSize(s0) //
+
+ ldl t0, LpbFirstLevelDcacheFillSize(s2) // save dcache fill size
+ stl t0, PcFirstLevelDcacheFillSize(s0) //
+
+ ldl t0, LpbFirstLevelIcacheSize(s2) // save first level icache size
+ stl t0, PcFirstLevelIcacheSize(s0) //
+
+ ldl t0, LpbFirstLevelIcacheFillSize(s2) // save icache fill size
+ stl t0, PcFirstLevelIcacheFillSize(s0) //
+
+ ldl t0, LpbSystemType(s2) // save system type
+ stl t0, PcSystemType(s0) //
+ ldl t0, LpbSystemType+4(s2) //
+ stl t0, PcSystemType+4(s0) //
+
+ ldl t0, LpbSystemVariant(s2) // save system variant
+ stl t0, PcSystemVariant(s0) //
+
+ ldl t0, LpbSystemRevision(s2) // save system revision
+ stl t0, PcSystemRevision(s0) //
+
+ ldl t0, LpbSystemSerialNumber(s2) // save system serial number
+ stl t0, PcSystemSerialNumber(s0) //
+ ldl t0, LpbSystemSerialNumber+4(s2) //
+ stl t0, PcSystemSerialNumber+4(s0) //
+ ldl t0, LpbSystemSerialNumber+8(s2) //
+ stl t0, PcSystemSerialNumber+8(s0) //
+ ldl t0, LpbSystemSerialNumber+12(s2) //
+ stl t0, PcSystemSerialNumber+12(s0) //
+
+ ldl t0, LpbCycleClockPeriod(s2) // save cycle counter period
+ stl t0, PcCycleClockPeriod(s0) //
+
+ ldl t0, LpbRestartBlock(s2) // save Restart Block address
+ stl t0, PcRestartBlock(s0) //
+
+ ldq t0, LpbFirmwareRestartAddress(s2) // save firmware restart
+ stq t0, PcFirmwareRestartAddress(s0) //
+
+ ldq t0, LpbFirmwareRevisionId(s2) // save firmware revision
+ stq t0, PcFirmwareRevisionId(s0) //
+
+ ldl t0, LpbDpcStack(s2) // save Dpc Stack
+ stl t0, PcDpcStack(s0) //
+
+ ldl t0, LpbPrcb(s2) // save Prcb
+ stl t0, PcPrcb(s0) //
+
+ stl zero, PbDpcRoutineActive(t0) // clear DPC Active flag
+
+ stl zero, PcMachineCheckError(s0) // indicate no HAL mchk handler
+
+//
+// Set system service dispatch address limits used by get and set context.
+//
+
+ lda t0, KiSystemServiceDispatchStart // set start address of range
+ stl t0, PcSystemServiceDispatchStart(s0) //
+ lda t0, KiSystemServiceDispatchEnd // set end address of range
+ stl t0, PcSystemServiceDispatchEnd(s0) //
+
+//
+// Setup arguments and call kernel initialization routine.
+//
+
+ ldl s0, LpbProcess(s2) // get idle process address
+ ldl s1, LpbThread(s2) // get idle thread address
+ bis s0, zero, a0 // a0 = idle process address
+ bis s1, zero, a1 // a1 = idle thread address
+ ldl a2, LpbKernelStack(s2) // a2 = idle thread stack
+ ldl a3, LpbPrcb(s2) // a3 = processor block address
+ LoadByte(a4, PbNumber(a3)) // a4 = processor number
+ bis s2, zero, a5 // a5 = loader parameter block
+ bsr ra, KiInitializeKernel // initialize system data
+
+//
+// Control is returned to the idle thread with IRQL at HIGH_LEVEL.
+// Lower IRQL to DISPATCH_LEVEL and set the wait IRQL of the idle thread.
+//
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // get prcb
+ bis v0, zero, s0 // s0 = prcb address
+
+ lda s3, PbDpcListHead(s0) // get DPC listhead address
+
+#if !defined(NT_UP)
+
+ lda s5, KiDispatcherLock // get address of dispatcher lock
+
+#endif
+
+ ldil a0, DISPATCH_LEVEL // get dispatch level IRQL
+ StoreByte( a0, ThWaitIrql(s1) ) // set wait IRQL of idle thread
+ bsr ra, KeLowerIrql // lower IRQL
+
+
+ ENABLE_INTERRUPTS // enable interrupts
+
+ bis zero, zero, s2 // clear breakin loop counter
+
+ bis zero, zero, ra // set bogus RA to stop debugger
+
+ br zero, KiIdleLoop
+ .end KiSystemStartupContinue
+
+//
+// The following code represents the idle thread for a processor. The
+// idle thread executes at IRQL DISPATCH_LEVEL and continually polls for work
+// to do. Control may be given to this loop either as a result of a return
+// from the system initialization routine or as the result of starting up
+// another processor in a multiprocessor configuration.
+//
+ NESTED_ENTRY(KiIdleLoop, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate context frame
+ stq ra, ExIntRa(sp) // set bogus RA to stop debugger
+ stq s0, ExIntS0(sp) // save integer registers s0 - s5
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+#if !defined(NT_UP)
+ stq s5, ExIntS5(sp) //
+#endif
+
+ PROLOGUE_END
+
+ lda t0, KiIdleReturn // set return address from SwapContext
+ stq t0, ExSwapReturn(sp) // directly into exception frame
+
+ bsr ra, KiSaveNonVolatileFloatState
+
+//
+// restore registers we need after swap context
+//
+KiIdleReturn:
+//
+// Lower IRQL back to DISPATCH_LEVEL
+//
+ ldil a0, DISPATCH_LEVEL
+ SWAP_IRQL
+
+#if DBG
+ bis zero, zero, s2 // reset breakin loop counter
+#endif
+
+//
+// N.B. The address of the current processor block (s0) is preserved across
+// the switch from idle call.
+//
+ ldq s3, ExIntS3(sp) // restore address of DPC listhead
+
+#if !defined(NT_UP)
+ ldl t2, KeNumberProcessors // get number of processors
+ stq t2, ExIntS0(sp) // store number of processors
+ ldq s5, ExIntS5(sp) // restore address of dispatcher lock
+#endif
+
+IdleLoop:
+
+#if DBG
+
+ subl s2, 1, s2 // decrement breakin loop counter
+ bge s2, 5f // if ge, not time for breakin check
+ ldil s2, 200 * 1000 // set breakin loop counter
+ bsr ra, KdPollBreakIn // check if breakin is requested
+ beq v0, 5f // if eq, then no breakin requested
+ lda a0, DBG_STATUS_CONTROL_C
+ bsr ra, DbgBreakPointWithStatus
+
+5:
+
+#endif //DBG
+
+//
+// Disable interrupts and check if there is any work in the DPC list
+// of the current processor or a target processor.
+//
+
+CheckDpcList:
+ ENABLE_INTERRUPTS // give interrupts a chance
+ DISABLE_INTERRUPTS // to interrupt spinning
+
+//
+// Process the deferred procedure call list for the current processor.
+//
+ ldl t0, PbDpcQueueDepth(s0) // get current queue depth
+ beq t0, CheckNextThread // if eq, DPC list is empty
+
+//
+// Clear dispatch interrupt.
+//
+ ldil a0, DISPATCH_LEVEL
+ ldl t0, PbSoftwareInterrupts(s0) // clear any pending SW interrupts.
+ bic t0, a0, t1
+ stl t1, PbSoftwareInterrupts(s0)
+ DEASSERT_SOFTWARE_INTERRUPT // clear any PAL-requested interrupts.
+
+ bis zero, zero, s2 // clear breakin loop counter
+ bsr ra, KiRetireDpcList
+
+//
+// Check if a thread has been selected to run on this processor.
+//
+
+CheckNextThread:
+
+ ldl a0, PbNextThread(s0) // get address of next thread object
+ beq a0, IdleProcessor // if eq, no thread selected
+
+//
+// A thread has been selected for execution on this processor. Acquire
+// dispatcher database lock, get the thread address again (it may have
+// changed), clear the address of the next thread in the processor block,
+// and call swap context to start execution of the selected thread.
+//
+// N.B. If the dispatcher database lock cannot be obtained immediately,
+// then attempt to process another DPC rather than spinning on the
+// dispatcher database lock.
+//
+
+#if !defined(NT_UP)
+
+130:
+ ldl_l t0, 0(s5) // get current lock value
+ bis s5, zero, t1 // set lock ownership value
+ bne t0, CheckDpcList // if ne, spin lock owned, go try the DPC list again
+ stl_c t1, 0(s5) // set spin lock owned
+ beq t1, 135f // if eq, store conditional failed
+ mb // synchronize subsequent reads after
+ // the spinlock is acquired
+#endif
+
+//
+// Raise IRQL to sync level and re-enable interrupts
+//
+ ldl a0, KiSynchIrql
+ SWAP_IRQL
+ ENABLE_INTERRUPTS
+
+ ldl s2, PbNextThread(s0) // get address of next thread object
+ ldl s1, PbIdleThread(s0) // get address of current thread
+ stl zero, PbNextThread(s0) // clear next thread address
+ stl s2, PbCurrentThread(s0) // set address of current thread object
+
+//
+// Set new thread's state to running. Note this must be done
+// under the dispatcher lock so that KiSetPriorityThread sees
+// the correct state.
+//
+ ldil t0, Running
+ StoreByte( t0, ThState(s2) )
+
+#if !defined(NT_UP)
+//
+// Acquire the context swap lock so the address space of the old thread
+// cannot be deleted and then release the dispatcher database lock. In
+// this case the old thread is the idle thread, but the context swap code
+// releases the context swap lock so it must be acquired.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
+ lda t0, KiContextSwapLock // get context swap lock address
+140:
+ ldl_l t1, 0(t0) // get current lock value
+ bis t0, zero, t2 // set ownership value
+ bne t1, 145f // if ne, lock already owned
+ stl_c t2, 0(t0) // set lock ownership value
+ beq t2, 145f // if eq, store conditional failed
+ mb // synchronize reads and writes
+ stl zero, 0(s5) // set lock not owned
+#endif
+
+ bsr ra, SwapFromIdle // swap context to new thread
+
+//
+// Note control returns directly from SwapFromIdle to the top
+// of the loop (KiIdleReturn) since SwapContext gets ra directly from ExSwapReturn(sp)
+// which was explicitly set when the idle loop was originally entered.
+//
+
+IdleProcessor:
+//
+// There are no entries in the DPC list and a thread has not been selected
+// for execution on this processor. Call the HAL so power management can
+// be performed.
+//
+
+//
+// N.B. The HAL is called with interrupts disabled. The HAL will return
+// with interrupts enabled.
+//
+ bsr ra, HalProcessorIdle // notify HAL of idle state
+ br zero, IdleLoop // restart idle loop
+
+
+#if !defined(NT_UP)
+
+135:
+//
+// Conditional store of dispatcher lock failed. Retry. Do not
+// spin in cache here. If the lock is owned, we want to check
+// the DPC list again.
+//
+ ENABLE_INTERRUPTS
+ DISABLE_INTERRUPTS
+ br zero, 130b
+
+145:
+ ldl t1, 0(t0) // spin in cache until lock free
+ beq t1, 140b // retry spin lock
+ br zero, 145b
+
+#endif
+    .end    KiIdleLoop
+
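
Stripped of the locking and DBG details, the idle loop above reduces to a small polling loop. The sketch below is illustrative only; the PRCB fields are simplified and the helpers are hypothetical stand-ins for the assembly paths they summarize (KiRetireDpcList, the dispatcher-lock/SwapContext path, and the HalProcessorIdle call).

    /* Illustrative idle-loop skeleton; not the real PRCB layout. */
    typedef struct _PRCB_SKETCH {
        volatile long DpcQueueDepth;
        void *volatile NextThread;
    } PRCB_SKETCH;

    extern void EnableInterruptsSketch(void);
    extern void DisableInterruptsSketch(void);
    extern void RetireDpcListSketch(PRCB_SKETCH *Prcb);        /* KiRetireDpcList */
    extern void TrySwapToNextThreadSketch(PRCB_SKETCH *Prcb);  /* dispatcher lock + SwapContext */
    extern void ProcessorIdleSketch(void);                     /* HalProcessorIdle */

    static void IdleLoopSketch(PRCB_SKETCH *Prcb)
    {
        for (;;) {
            /* Briefly open an interrupt window, then sample with interrupts off. */
            EnableInterruptsSketch();
            DisableInterruptsSketch();

            if (Prcb->DpcQueueDepth != 0) {
                RetireDpcListSketch(Prcb);     /* drain this processor's DPC list */
            }

            if (Prcb->NextThread != NULL) {
                /* If the dispatcher lock cannot be taken immediately, the real
                   code goes back to the DPC check instead of spinning on it;
                   after a successful switch, control eventually returns here
                   the next time the idle thread runs. */
                TrySwapToNextThreadSketch(Prcb);
                continue;
            }

            ProcessorIdleSketch();             /* no work: let the HAL idle the processor */
        }
    }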
+
+ SBTTL("Retire Deferred Procedure Call List")
+//++
+//
+// Routine Description:
+//
+// This routine is called to retire the specified deferred procedure
+// call list. DPC routines are called using the idle thread (current)
+// stack.
+//
+// N.B. Interrupts must be disabled and the DPC list lock held on entry
+// to this routine. Control is returned to the caller with the same
+// conditions true.
+//
+// Arguments:
+//
+// s0 - Address of the processor control block.
+//
+// Return value:
+//
+// None.
+//
+//--
+ .struct 0
+DpRa: .space 8 // return address
+ .space 8 // fill
+
+#if DBG
+
+DpStart:.space 8 // DPC start time in ticks
+DpFunct:.space 8 // DPC function address
+DpCount:.space 8 // interrupt count at start of DPC
+DpTime: .space 8 // interrupt time at start of DPC
+
+#endif
+
+DpcFrameLength: // DPC frame length
+ NESTED_ENTRY(KiRetireDpcList, DpcFrameLength, zero)
+
+ lda sp, -DpcFrameLength(sp) // allocate stack frame
+ stq ra, DpRa(sp) // save return address
+
+ PROLOGUE_END
+
+5:
+ stl sp, PbDpcRoutineActive(s0) // set DPC routine active
+
+//
+// Process the DPC list.
+//
+10: ldl t0, PbDpcQueueDepth(s0) // get current DPC queue depth
+ beq t0, 60f // if eq, list is empty
+ lda t2, PbDpcListHead(s0) // compute DPC list head address
+
+20:
+#if !defined(NT_UP)
+
+ ldl_l t1, PbDpcLock(s0) // get current lock value
+ bis s0, zero, t3 // set lock ownership value
+ bne t1, 25f // if ne, spin lock owned
+ stl_c t3, PbDpcLock(s0) // set spin lock owned
+ beq t3, 25f // if eq, store conditional failed
+ mb
+ ldl t0, PbDpcQueueDepth(s0) // get current DPC queue depth
+ beq t0, 50f // if eq, DPC list is empty
+
+#endif
+
+ ldl a0, LsFlink(t2) // get address of next entry
+ ldl t1, LsFlink(a0) // get address of next entry
+ lda a0, -DpDpcListEntry(a0) // compute address of DPC object
+ stl t1, LsFlink(t2) // set address of next in header
+ stl t2, LsBlink(t1) // set address of previous in next
+ ldl a1, DpDeferredContext(a0) // get deferred context argument
+ ldl a2, DpSystemArgument1(a0) // get first system argument
+ ldl a3, DpSystemArgument2(a0) // get second system argument
+ ldl t1, DpDeferredRoutine(a0) // get deferred routine address
+ stl zero, DpLock(a0) // clear DPC inserted state
+ subl t0, 1, t0 // decrement DPC queue depth
+ stl t0, PbDpcQueueDepth(s0) // update DPC queue depth
+
+#if !defined(NT_UP)
+
+ mb // synchronize previous writes
+ stl zero, PbDpcLock(s0) // set spinlock not owned
+
+#endif
+ ENABLE_INTERRUPTS // enable interrupts
+
+ jsr ra, (t1)
+
+ DISABLE_INTERRUPTS
+ br zero, 10b
+
+//
+// Unlock DPC list and clear DPC active.
+//
+50:
+#if !defined(NT_UP)
+ mb // synchronize previous writes
+ stl zero, PbDpcLock(s0) // set spin lock not owned
+#endif
+
+60:
+ stl zero, PbDpcRoutineActive(s0) // clear DPC routine active
+ stl zero, PbDpcInterruptRequested(s0) // clear DPC interrupt requested
+
+//
+// Check one last time that the DPC list is empty. This is required to
+// close a race condition with the DPC queuing code where it appears that
+// a DPC routine is active (and thus an interrupt is not requested), but
+// this code has decided the DPC list is empty and is clearing the DPC
+// active flag.
+//
+#if !defined(NT_UP)
+ mb
+#endif
+ ldl t0, PbDpcQueueDepth(s0) // get current DPC queue depth
+ beq t0, 70f // if eq, DPC list is still empty
+
+ stl sp, PbDpcRoutineActive(s0) // set DPC routine active
+ lda t2, PbDpcListHead(s0) // compute DPC list head address
+ br zero, 20b
+
+70:
+ ldq ra, DpRa(sp) // restore RA
+ lda sp, DpcFrameLength(sp) // deallocate stack frame
+ ret zero, (ra) // return
+
+#if !defined(NT_UP)
+25:
+ ldl t1, PbDpcLock(s0) // spin in cache until lock free
+ beq t1, 20b // retry spinlock
+ br zero, 25b
+
+#endif
+ .end KiRetireDpcList
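
The body of this routine follows a common pattern: pop the next DPC under the per-processor DPC lock, call it with interrupts enabled, and close the race with the queuing code by re-checking the queue depth after the active flag is cleared. Filling in the RetireDpcListSketch helper assumed by the idle-loop sketch earlier (field and helper names remain illustrative; the real code walks the LIST_ENTRY links in the PRCB directly):

    /* Illustrative only; builds on the PRCB_SKETCH and interrupt helpers
       declared in the idle-loop sketch above. */
    typedef struct _DPC_SKETCH {
        void (*DeferredRoutine)(struct _DPC_SKETCH *Dpc, void *DeferredContext,
                                void *SystemArgument1, void *SystemArgument2);
        void *DeferredContext;
        void *SystemArgument1;
        void *SystemArgument2;
    } DPC_SKETCH;

    extern DPC_SKETCH *RemoveHeadDpcLockedSketch(PRCB_SKETCH *Prcb); /* PbDpcLock + unlink */
    extern void SetDpcRoutineActiveSketch(PRCB_SKETCH *Prcb, int Active);
    extern void ClearDpcInterruptRequestedSketch(PRCB_SKETCH *Prcb);

    void RetireDpcListSketch(PRCB_SKETCH *Prcb)
    {
        do {
            DPC_SKETCH *Dpc;

            SetDpcRoutineActiveSketch(Prcb, 1);

            while ((Dpc = RemoveHeadDpcLockedSketch(Prcb)) != NULL) {
                EnableInterruptsSketch();      /* DPC routines run with interrupts on */
                Dpc->DeferredRoutine(Dpc, Dpc->DeferredContext,
                                     Dpc->SystemArgument1, Dpc->SystemArgument2);
                DisableInterruptsSketch();
            }

            SetDpcRoutineActiveSketch(Prcb, 0);
            ClearDpcInterruptRequestedSketch(Prcb);

            /* A DPC may have been queued while the flags were being cleared,
               and no interrupt was requested because this routine still
               looked active, so check the depth one more time. */
        } while (Prcb->DpcQueueDepth != 0);
    }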
diff --git a/private/ntos/ke/alpha/table.stb b/private/ntos/ke/alpha/table.stb
new file mode 100644
index 000000000..c4cac362a
--- /dev/null
+++ b/private/ntos/ke/alpha/table.stb
@@ -0,0 +1,84 @@
+6 // This is the number of argument registers for Alpha (used by gensrv).
+//++
+//
+// Copyright (c) 1989 Microsoft Corporation
+//
+// Module Name:
+//
+// systable.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch table.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+// Joe Notarangelo 08-Jul-1992
+// alpha version
+// - 6 argument registers for alpha
+// - size of allocation for each table entry address = 64 bits
+//       (this was necessary with BL3 of the compiler; it may no longer be
+//        the case, but it shouldn't hurt since the system service exception
+//        code loads a longword anyway)
+//
+// Thomas Van Baak (tvb) 04-Sep-1992
+//
+// The first line of this file was "6 8" which the new gensrv used
+// to set the number of in-register arguments (6) and the size of a
+// register (8).
+//
+// Now instead, the ARGTBL_ENTRY macro itself converts i386/Mips 4
+// byte units into Alpha 8 byte units. This way, if an old version
+// of gensrv is used, we still get the proper byte counts for Alpha.
+//
+//--
+
+//
+// To add a system service, simply add the name of the service to the table
+// below. If the system service has in-memory arguments, then immediately
+// follow the name of the service with a comma and the number of bytes of
+// in-memory arguments, e.g. CreateObject,40.
+//
+
+#define TABLE_BEGIN1( t ) .rdata
+#define TABLE_BEGIN2( t ) .align 4
+#define TABLE_BEGIN3( t ) .globl KiServiceTable
+#define TABLE_BEGIN4( t ) KiServiceTable:
+#define TABLE_BEGIN5( t )
+#define TABLE_BEGIN6( t )
+#define TABLE_BEGIN7( t )
+#define TABLE_BEGIN8( t )
+
+#define TABLE_ENTRY( l,bias,numargs ) .long Nt##l+bias
+
+#define TABLE_END( n ) .sdata ; .globl KiServiceLimit ; KiServiceLimit: .long n + 1
+
+#define ARGTBL_BEGIN .rdata ; .align 4 ; .globl KiArgumentTable ; KiArgumentTable:
+
+//
+// Convert gensrv 4-byte units to Alpha 8-bytes-per-register units.
+//
+
+#define ARGTBL_ENTRY( e0,e1,e2,e3,e4,e5,e6,e7 ) \
+ .byte e0 *2, e1 *2, e2 *2, e3 *2, e4 *2, e5 *2, e6 *2, e7 *2
+
+#define ARGTBL_END
+
+
+ TABLE_BEGIN1( "System Service Dispatch Table" )
+ TABLE_BEGIN2( "System Service Dispatch Table" )
+ TABLE_BEGIN3( "System Service Dispatch Table" )
+ TABLE_BEGIN4( "System Service Dispatch Table" )
+ TABLE_BEGIN5( "System Service Dispatch Table" )
+ TABLE_BEGIN6( "System Service Dispatch Table" )
+ TABLE_BEGIN7( "System Service Dispatch Table" )
+ TABLE_BEGIN8( "System Service Dispatch Table" )
+ \ No newline at end of file
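
To see what these macros produce, consider a hypothetical gensrv entry ExampleService whose in-memory argument size comes out to 16 bytes in the 4-byte units the older gensrv emits. The dispatch-table and argument-table macros would then assemble to roughly:

        .long   NtExampleService+0      // TABLE_ENTRY: dispatch address in KiServiceTable
        .byte   16*2                    // one ARGTBL_ENTRY element: doubled to 32 bytes
                                        // for Alpha's 8-byte argument registers

Each ARGTBL_ENTRY invocation actually packs eight such bytes, one per service, and TABLE_END emits the KiServiceLimit count of table entries.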
diff --git a/private/ntos/ke/alpha/tb.s b/private/ntos/ke/alpha/tb.s
new file mode 100644
index 000000000..04aa5d821
--- /dev/null
+++ b/private/ntos/ke/alpha/tb.s
@@ -0,0 +1,281 @@
+// TITLE("Flush Translation Buffers")
+//++
+//
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// tb.s
+//
+// Abstract:
+//
+// This module implements the code to flush the tbs on the current
+// processor.
+//
+// Author:
+//
+// Joe Notarangelo 21-apr-1992
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+ SBTTL( "Flush All Translation Buffers" )
+//++
+//
+// VOID
+// KiFlushEntireTb(
+// )
+//
+// Routine Description:
+//
+// This function flushes the data and instruction tbs on the current
+// processor. All entries are flushed with the exception of any entries
+// that are fixed in the tb (either in hdw or via sfw).
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ LEAF_ENTRY(KiFlushEntireTb)
+
+ TB_INVALIDATE_ALL // invalidate all tb entries
+ ret zero, (ra) // return
+
+ .end KiFlushEntireTb
+
+ SBTTL( "Flush All Translation Buffers" )
+//++
+//
+// VOID
+// KeFlushCurrentTb(
+// )
+//
+// Routine Description:
+//
+// This function flushes the data and instruction tbs on the current
+// processor. All entries are flushed with the exception of any entries
+// that are fixed in the tb (either in hdw or via sfw).
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ LEAF_ENTRY(KeFlushCurrentTb)
+
+ TB_INVALIDATE_ALL // invalidate all tb entries
+ ret zero, (ra) // return
+
+ .end KeFlushCurrentTb
+
+ SBTTL( "Flush A Single Translation Buffer" )
+//++
+//
+// VOID
+// KiFlushSingleTb (
+//     IN BOOLEAN Invalid,
+//     IN PVOID Virtual
+//     )
+//
+// Routine Description:
+//
+// This function flushes a single entry from both the instruction
+// and data translation buffers. Note: it may flush more than just
+// the single entry.
+//
+// Arguments:
+//
+//    Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to be
+// flushed from the buffers.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushSingleTb)
+
+ bis a1, zero, a0 // a0 = va to flush
+ TB_INVALIDATE_SINGLE // flush va(a0) from tbs
+
+ ret zero, (ra) // return
+
+ .end KiFlushSingleTb
+
+
+ SBTTL( "Flush Multiple Translation Buffer" )
+//++
+//
+// VOID
+// KiFlushMultipleTb (
+// IN BOOLEAN Invalid,
+// IN PVOID *Virtual,
+// IN ULONG Count
+// )
+//
+// Routine Description:
+//
+// This function flushes multiple entries from the translation buffer.
+//
+// Arguments:
+//
+// Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies a pointer to an array of virtual addresses of
+// the entries that are flushed from the translation buffer.
+//
+// Count (a2) - Supplies the number of TB entries to flush.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushMultipleTb)
+
+ bis a1, zero, a0 // a0 = pointer to array
+ bis a2, zero, a1 // a1 = count
+ TB_INVALIDATE_MULTIPLE // invalidate the entries
+ ret zero, (ra) // return
+
+ .end KiFlushMultipleTb
+
+
+ SBTTL( "Flush Multiple Translation Buffer by PID" )
+//++
+//
+// VOID
+// KiFlushMultipleTbByPid (
+// IN BOOLEAN Invalid,
+// IN PVOID *Virtual,
+// IN ULONG Count,
+// IN ULONG Pid
+// )
+//
+// Routine Description:
+//
+// This function flushes multiple entries from the translation buffer.
+//
+// Arguments:
+//
+// Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies a pointer to an array of virtual addresses of
+// the entries that are flushed from the translation buffer.
+//
+// Count (a2) - Supplies the number of TB entries to flush.
+//
+// Pid (a3) - Supplies the PID for which the multiple TB entries are
+// flushed.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushMultipleTbByPid)
+
+ bis a1, zero, a0 // a0 = pointer to array
+ bis a2, zero, a1 // a1 = count
+ bis a3, zero, a2 // a2 = PID
+ TB_INVALIDATE_MULTIPLE_ASN // invalidate the entries
+
+ ret zero, (ra) // return
+
+ .end KiFlushMultipleTbByPid
+
+
+ SBTTL( "Flush A Single Translation Buffer" )
+//++
+//
+// VOID
+// KiFlushSingleTbByPid (
+//     IN BOOLEAN Invalid,
+//     IN PVOID Virtual,
+//     IN ULONG Asn
+//     )
+//
+// Routine Description:
+//
+// This function flushes a single entry from both the instruction
+// and data translation buffers. Note: it may flush more than just
+// the single entry.
+//
+// Arguments:
+//
+//    Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to be
+// flushed from the buffers.
+//
+// Asn (a2) - Supplies the address space number (aka PID) of the process
+// for which the virtual address is to be flushed.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushSingleTbByPid)
+
+ bis a1, zero, a0 // a0 = va to flush
+ bis a2, zero, a1 // a1 = Asn for flush
+ TB_INVALIDATE_SINGLE_ASN // flush va(a0) from tbs
+
+ ret zero, (ra) // return
+
+ .end KiFlushSingleTbByPid
+
+
+//++
+//
+// VOID
+// KiFlushSingleDataTb(
+// )
+//
+// Routine Description:
+//
+// This function flushes a single entry for the data tb only.
+//
+// Arguments:
+//
+// VirtualAddress(a0) - Supplies the virtual address of the entry
+// that is to be flushed from the data translations.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushSingleDataTb)
+
+ DATA_TB_INVALIDATE_SINGLE // flush va(a0) from data tbs
+
+ ret zero, (ra) // return
+
+ .end KiFlushSingleDataTb
diff --git a/private/ntos/ke/alpha/threadbg.s b/private/ntos/ke/alpha/threadbg.s
new file mode 100644
index 000000000..8daf36aa8
--- /dev/null
+++ b/private/ntos/ke/alpha/threadbg.s
@@ -0,0 +1,152 @@
+// TITLE("Thread Startup")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+// Copyright (c) 1992, 1993 Digital Equipment Corporation
+//
+// Module Name:
+//
+// threadbg.s
+//
+// Abstract:
+//
+// This module implements the Alpha machine dependent code necessary to
+// start up a thread in kernel mode.
+//
+// Author:
+//
+// David N. Cutler (davec) 28-Mar-1990
+// Joe Notarangelo 21-Apr-1992
+//
+// Environment:
+//
+// Kernel mode only, IRQL APC_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through thread startup
+// and to support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiThreadDispatch, ExceptionFrameLength, zero)
+
+ lda sp, -ExceptionFrameLength(sp) // allocate exception frame
+ stq ra, ExIntRa(sp) // save return address
+ stq s0, ExIntS0(sp) // save integer regs s0-s5
+ stq s1, ExIntS1(sp)
+ stq s2, ExIntS2(sp)
+ stq s3, ExIntS3(sp)
+ stq s4, ExIntS4(sp)
+ stq s5, ExIntS5(sp)
+
+ stt f2, ExFltF2(sp) // save floating regs f2 - f9
+ stt f3, ExFltF3(sp)
+ stt f4, ExFltF4(sp)
+ stt f5, ExFltF5(sp)
+ stt f6, ExFltF6(sp)
+ stt f7, ExFltF7(sp)
+ stt f8, ExFltF8(sp)
+ stt f9, ExFltF9(sp)
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// This routine is called at thread startup. Its function is to call the
+// initial thread procedure. If control returns from the initial thread
+// procedure and a user mode context was established when the thread
+// was initialized, then the user mode context is restored and control
+// is transferred to user mode. Otherwise, a bug check will occur.
+//
+//
+// Arguments:
+//
+// sp - Supplies a pointer to the exception frame which contains the
+// startup parameters.
+//
+// Within Exception frame:
+//
+//     s0 - Supplies a boolean value that specifies whether a user mode
+// thread context was established when the thread was initialized.
+//
+// s1 - Supplies the starting context parameter for the initial thread
+// procedure.
+//
+// s2 - Supplies the starting address of the initial thread routine.
+//
+// s3 - Supplies the starting address of the initial system routine.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiThreadStartup)
+
+//
+// Capture the arguments for startup from the exception frame.
+// After the arguments are captured, deallocate the exception frame.
+//
+
+ ldq s0, ExIntS0(sp) // capture user context boolean
+ ldq s1, ExIntS1(sp) // set startup context parameter
+ ldq s2, ExIntS2(sp) // set address of thread routine
+ ldq s3, ExIntS3(sp) // capture startup routine address
+ ldq s4, ExIntS4(sp) // restore s4
+ ldq s5, ExIntS5(sp) // restore s5
+ ldq fp, ExIntFp(sp) // restore trap frame pointer
+
+ lda sp, ExceptionFrameLength(sp) // deallocate context frame
+
+//
+// Lower Irql to APC level.
+//
+
+ ldil a0, APC_LEVEL // lower IRQL to APC level
+ SWAP_IRQL
+
+//
+// Jump to the startup routine with the address of the thread routine and
+// the startup context parameter.
+//
+
+ bis s2, zero, a0 // set address of thread routine
+ bis s1, zero, a1 // set startup context parameter
+ jsr ra, (s3) // call system startup routine
+
+//
+// If we return and no user context was supplied then we have trouble.
+//
+
+ beq s0, 20f // if eq, no user context
+
+//
+// Finish in common exception exit code which will restore the nonvolatile
+// registers and exit to user mode.
+//
+
+ br zero, KiExceptionExit // finish in exception exit code
+
+//
+// An attempt was made to enter user mode for a thread that has no user mode
+// context. Generate a bug check.
+//
+
+20: ldil a0, NO_USER_MODE_CONTEXT // set bug check code
+ bsr ra, KeBugCheck // call bug check routine
+
+ .end KiThreadDispatch
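
In C terms, the startup sequence KiThreadStartup performs is roughly the following. The helper names are illustrative, and the four values it consumes from s0-s3 are exactly the IntS0-IntS3 slots that KiInitializeContextThread (in thredini.c, which follows) plants in the context-switch frame.

    /* Illustrative sketch of the KiThreadStartup flow; the types, helpers and
       bug check code are stand-ins, not the real kernel definitions. */
    typedef void (*PKSTART_ROUTINE_SKETCH)(void *StartContext);
    typedef void (*PKSYSTEM_ROUTINE_SKETCH)(PKSTART_ROUTINE_SKETCH StartRoutine,
                                            void *StartContext);

    extern void LowerIrqlToApcLevelSketch(void);
    extern void ExitToUserModeSketch(void);          /* the KiExceptionExit path */
    extern void BugCheckSketch(unsigned long Code);
    #define NO_USER_MODE_CONTEXT_SKETCH 0xE          /* illustrative value */

    static void ThreadStartupSketch(void *UserContextRecord,                  /* s0 */
                                    void *StartContext,                       /* s1 */
                                    PKSTART_ROUTINE_SKETCH StartRoutine,      /* s2 */
                                    PKSYSTEM_ROUTINE_SKETCH SystemRoutine)    /* s3 */
    {
        LowerIrqlToApcLevelSketch();

        /* For a kernel-only thread the system routine normally never returns. */
        SystemRoutine(StartRoutine, StartContext);

        if (UserContextRecord != NULL) {
            ExitToUserModeSketch();         /* restore the user-mode trap frame */
        } else {
            BugCheckSketch(NO_USER_MODE_CONTEXT_SKETCH);
        }
    }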
diff --git a/private/ntos/ke/alpha/thredini.c b/private/ntos/ke/alpha/thredini.c
new file mode 100644
index 000000000..81a439664
--- /dev/null
+++ b/private/ntos/ke/alpha/thredini.c
@@ -0,0 +1,327 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ thredini.c
+
+Abstract:
+
+ This module implements the machine dependent functions to set the initial
+ context and data alignment handling mode for a process or thread object.
+
+Author:
+
+ David N. Cutler (davec) 1-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ Joe Notarangelo 21-Apr-1992
+ very minor changes for ALPHA
+ 1. psr definition
+ 2. pte and mask (from 3ffffc to 1ffffc), mips page size is 4k
+ our first alpha page size is 8k
+ mips code shifts right 10 (12-2) and then turns off the
+ upper 10 bits, alpha shifts right 11 (13-2) and so must
+ turn off upper 11 bits
+ 3. Insert register values into context structure as quadwords
+ to insure that the written values are in canonical form
+
+ Thomas Van Baak (tvb) 9-Oct-1992
+
+ Adapted for Alpha AXP.
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macros are used to check that an input object is
+// really the proper type.
+//
+
+#define ASSERT_PROCESS(E) { \
+ ASSERT((E)->Header.Type == ProcessObject); \
+}
+
+#define ASSERT_THREAD(E) { \
+ ASSERT((E)->Header.Type == ThreadObject); \
+}
+
+VOID
+KiInitializeContextThread (
+ IN PKTHREAD Thread,
+ IN PKSYSTEM_ROUTINE SystemRoutine,
+ IN PKSTART_ROUTINE StartRoutine OPTIONAL,
+ IN PVOID StartContext OPTIONAL,
+ IN PCONTEXT ContextRecord OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the machine dependent context of a thread object.
+
+ N.B. This function does not check the accessibility of the context record.
+// It is assumed that the caller of this routine is either prepared to
+ handle access violations or has probed and copied the context record
+ as appropriate.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ SystemRoutine - Supplies a pointer to the system function that is to be
+ called when the thread is first scheduled for execution.
+
+ StartRoutine - Supplies an optional pointer to a function that is to be
+ called after the system has finished initializing the thread. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ StartContext - Supplies an optional pointer to an arbitrary data structure
+ which will be passed to the StartRoutine as a parameter. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+// ContextRecord - Supplies an optional pointer to a context frame which contains
+ the initial user mode state of the thread. This parameter is specified
+ if the thread is a user thread and will execute in user mode. If this
+ parameter is not specified, then the Teb parameter is ignored.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKEXCEPTION_FRAME CxFrame;
+ PKEXCEPTION_FRAME ExFrame;
+ ULONG InitialStack;
+ PKTRAP_FRAME TrFrame;
+
+ //
+ // If a context frame is specified, then initialize a trap frame and
+    // an exception frame with the specified user mode context.
+ //
+
+ InitialStack = (ULONG)Thread->InitialStack;
+ if (ARGUMENT_PRESENT(ContextRecord)) {
+ TrFrame = (PKTRAP_FRAME)(((InitialStack) -
+ sizeof(KTRAP_FRAME)) & 0xfffffff0);
+ ExFrame = (PKEXCEPTION_FRAME)(((ULONG)TrFrame -
+ sizeof(KEXCEPTION_FRAME)) & 0xfffffff0);
+ CxFrame = (PKEXCEPTION_FRAME)(((ULONG)ExFrame -
+ sizeof(KEXCEPTION_FRAME)) & 0xfffffff0);
+
+ //
+ // Zero the exception and trap frames and copy information from the
+ // specified context frame to the trap and exception frames.
+ //
+
+ RtlZeroMemory((PVOID)ExFrame, sizeof(KEXCEPTION_FRAME));
+ RtlZeroMemory((PVOID)TrFrame, sizeof(KTRAP_FRAME));
+ KeContextToKframes(TrFrame, ExFrame,
+ ContextRecord,
+ ContextRecord->ContextFlags | CONTEXT_CONTROL,
+ UserMode);
+
+ //
+ // If the FPCR quadword in the specified context record is zero,
+ // then assume it is a default value and force floating point round
+ // to nearest mode (the hardware default mode is chopped rounding
+ // which is not desirable for NT). It would be nice to initialize
+ // the SoftFpcr here also but not all threads have a Teb.
+ //
+
+ if (TrFrame->Fpcr == 0) {
+ ((PFPCR)(&TrFrame->Fpcr))->DynamicRoundingMode = ROUND_TO_NEAREST;
+ }
+
+ //
+ // Set the saved previous processor mode in the trap frame and the
+ // previous processor mode in the thread object to user mode.
+ //
+
+ TrFrame->PreviousMode = UserMode;
+ Thread->PreviousMode = UserMode;
+
+ //
+ // Initialize the return address in the exception frame.
+ //
+
+ ExFrame->IntRa = 0;
+
+ } else {
+ ExFrame = NULL;
+ CxFrame = (PKEXCEPTION_FRAME)(((InitialStack) -
+ sizeof(KEXCEPTION_FRAME)) & 0xfffffff0);
+
+ //
+ // Set the previous mode in thread object to kernel.
+ //
+
+ Thread->PreviousMode = KernelMode;
+ }
+
+ //
+ // Initialize context switch frame and set thread start up parameters.
+ //
+ // N.B. ULONG becomes canonical longword with (ULONGLONG)(LONG) cast.
+ //
+
+ CxFrame->SwapReturn = (ULONGLONG)(LONG)KiThreadStartup;
+ if (ExFrame == NULL) {
+ CxFrame->IntFp = (ULONGLONG)(LONG)ExFrame;
+
+ } else {
+ CxFrame->IntFp = (ULONGLONG)(LONG)TrFrame;
+ }
+
+ CxFrame->IntS0 = (ULONGLONG)(LONG)ContextRecord;
+ CxFrame->IntS1 = (ULONGLONG)(LONG)StartContext;
+ CxFrame->IntS2 = (ULONGLONG)(LONG)StartRoutine;
+ CxFrame->IntS3 = (ULONGLONG)(LONG)SystemRoutine;
+
+ CxFrame->Psr = 0; // clear everything
+ ((PSR *)(&CxFrame->Psr))->INTERRUPT_ENABLE = 1;
+ ((PSR *)(&CxFrame->Psr))->IRQL = DISPATCH_LEVEL;
+ ((PSR *)(&CxFrame->Psr))->MODE = 0;
+
+ //
+ // Set the initial kernel stack pointer.
+ //
+
+ Thread->KernelStack = (PVOID)(ULONGLONG)(LONG)CxFrame;
+ return;
+}
+
+BOOLEAN
+KeSetAutoAlignmentProcess (
+ IN PKPROCESS Process,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ process and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the process. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_PROCESS(Process);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Process->AutoAlignment;
+ Process->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
+
+BOOLEAN
+KeSetAutoAlignmentThread (
+ IN PKTHREAD Thread,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ thread and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the thread. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_THREAD(Thread);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Thread->AutoAlignment;
+ Thread->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
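+
+//
+// Usage sketch (illustrative only): a kernel-mode caller could enable
+// automatic alignment fixups for the current thread and later restore the
+// previous setting as follows.
+//
+//     BOOLEAN Previous;
+//
+//     Previous = KeSetAutoAlignmentThread(KeGetCurrentThread(), TRUE);
+//     ... perform work that may reference misaligned data ...
+//     KeSetAutoAlignmentThread(KeGetCurrentThread(), Previous);
+//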
diff --git a/private/ntos/ke/alpha/timindex.s b/private/ntos/ke/alpha/timindex.s
new file mode 100644
index 000000000..ca3c5fece
--- /dev/null
+++ b/private/ntos/ke/alpha/timindex.s
@@ -0,0 +1,111 @@
+// TITLE("Compute Timer Table Index")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// timindex.s
+//
+// Abstract:
+//
+// This module implements the code necessary to compute the timer table
+// index for a timer.
+//
+// Author:
+//
+// David N. Cutler (davec) 17-May-1993
+// Joe Notarangelo 20-Jul-1993 (Alpha AXP version)
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+ SBTTL("Compute Timer Table Index")
+//++
+//
+// ULONG
+// KiComputeTimerTableIndex (
+// IN LARGE_INTEGER Interval,
+// IN LARGE_INTEGER CurrentTime,
+// IN PKTIMER Timer
+// )
+//
+// Routine Description:
+//
+// This function computes the timer table index for the specified timer
+// object and stores the due time in the timer object.
+//
+// N.B. The interval parameter is guaranteed to be negative since it is
+// expressed as relative time.
+//
+// The formula for due time calculation is:
+//
+// Due Time = Current Time - Interval
+//
+// The formula for the index calculation is:
+//
+// Index = (Due Time / Maximum Time) & (Table Size - 1)
+//
+// The index division is performed using reciprocal multiplication.
+//
+// Arguments:
+//
+// Interval (a0) - Supplies the relative time at which the timer is
+// to expire.
+//
+// CurrentTime (a1) - Supplies the current interrupt time.
+//
+// Timer (a2) - Supplies a pointer to a dispatch object of type timer.
+//
+// Return Value:
+//
+// The timer table index is returned as the function value and the due
+// time is stored in the timer object.
+//
+//--
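+
+//
+// The computation below corresponds to the following C sketch (illustrative
+// only; MultiplyHigh64 is a stand-in for the umulh instruction, and the
+// reciprocal and shift count globals are assumed to be precomputed so that
+// the multiply/shift pair divides by the maximum time increment):
+//
+//     DueTime = CurrentTime - Interval;
+//     Timer->DueTime = DueTime;
+//     Index = (ULONG)((MultiplyHigh64(DueTime, KiTimeIncrementReciprocal) >>
+//                      KiTimeIncrementShiftCount) & (TIMER_TABLE_SIZE - 1));
+//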
+
+ LEAF_ENTRY(KiComputeTimerTableIndex)
+
+//
+// Compute the due time and store in the timer object.
+//
+
+ subq a1, a0, t0 // compute due time
+ stq t0, TiDueTime(a2) // set due time of timer object
+
+//
+// Capture global values for magic divide, the reciprocal multiply value
+// and the shift count.
+//
+
+ lda t2, KiTimeIncrementReciprocal // get address of reciprocal
+ ldq t1, 0(t2) // get timer reciprocal for magic divide
+ lda t2, KiTimeIncrementShiftCount // get address of shift count
+ ldq_u t10, 0(t2) // read surrounding quadword
+ extbl t10, t2, t10 // extract shift count for magic divide
+
+//
+// Do the reciprocal multiply and capture the upper 64 bits of the
+// 128 bit product with umulh instruction.
+//
+
+ umulh t0, t1, t11 // t11 = upper 64 bits of product
+
+//
+// Right shift the result by the specified shift count and mask off extra
+// bits.
+//
+
+ srl t11, t10, t0 // t0 = division result
+ ldil t3, TIMER_TABLE_SIZE - 1 // get mask value
+ and t0, t3, v0 // v0 = mask result
+ ret zero, (ra) // return
+
+ .end KiComputeTimerTableIndex
diff --git a/private/ntos/ke/alpha/trap.s b/private/ntos/ke/alpha/trap.s
new file mode 100644
index 000000000..d63baddb7
--- /dev/null
+++ b/private/ntos/ke/alpha/trap.s
@@ -0,0 +1,1423 @@
+// TITLE( "Kernel Trap Handler" )
+//++
+// Copyright (c) 1990 Microsoft Corporation
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// trap.s
+//
+//
+// Abstract:
+//
+//    Implements trap routines for Alpha. These are the
+//    entry points that the PALcode calls for exception
+//    processing.
+//
+//
+// Author:
+//
+// David N. Cutler (davec) 4-Apr-1990
+// Joe Notarangelo 06-Feb-1992
+//
+//
+// Environment:
+//
+// Kernel mode only.
+//
+//
+// Revision History:
+//
+// Nigel Haslock 05-May-1995 preserve fpcr across system calls
+//
+//--
+
+
+#include "ksalpha.h"
+
+//
+// Define exception handler frame
+//
+
+ .struct 0
+HdRa: .space 8 // return address
+ .space 3*8 // round to cache block
+HandlerFrameLength:
+
+
+ SBTTL( "General Exception Dispatch" )
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+// N.B. The volatile registers must be saved in this prologue because
+// the compiler will occasionally generate code that uses volatile
+// registers to save the contents of nonvolatile registers when
+// a function only calls another function with a known register
+// signature (such as _OtsDivide)
+//
+//--
+
+ NESTED_ENTRY( KiGeneralExceptionDispatch, TrapFrameLength, zero )
+
+ .set noreorder
+ stq sp, TrIntSp(sp) // save stack pointer
+ stq ra, TrIntRa(sp) // save return address
+ stq ra, TrFir(sp) // save return address
+ stq fp, TrIntFp(sp) // save frame pointer
+ stq gp, TrIntGp(sp) // save global pointer
+ bis sp, sp, fp // set frame pointer
+ .set reorder
+
+ stq v0, TrIntV0(sp) // save integer register v0
+ stq t0, TrIntT0(sp) // save integer registers t0 - t7
+ stq t1, TrIntT1(sp) //
+ stq t2, TrIntT2(sp) //
+ stq t3, TrIntT3(sp) //
+ stq t4, TrIntT4(sp) //
+ stq t5, TrIntT5(sp) //
+ stq t6, TrIntT6(sp) //
+ stq t7, TrIntT7(sp) //
+ stq a4, TrIntA4(sp) // save integer registers a4 - a5
+ stq a5, TrIntA5(sp) //
+ stq t8, TrIntT8(sp) // save integer registers t8 - t12
+ stq t9, TrIntT9(sp) //
+ stq t10, TrIntT10(sp) //
+ stq t11, TrIntT11(sp) //
+ stq t12, TrIntT12(sp) //
+
+ .set noat
+ stq AT, TrIntAt(sp) // save integer register AT
+ .set at
+
+ PROLOGUE_END
+
+
+
+//++
+//
+// Routine Description:
+//
+// PALcode dispatches to this kernel entry point when a "general"
+// exception occurs. These general exceptions are any exception
+// other than an interrupt, system service call or memory management
+// fault. The types of exceptions that will dispatch through this
+// routine will be: breakpoints, unaligned accesses, machine check
+// errors, illegal instruction exceptions, and arithmetic exceptions.
+// The purpose of this routine is to save the volatile state and
+// enter the common exception dispatch code.
+//
+// Arguments:
+//
+// fp - Supplies pointer to the trap frame.
+// sp - Supplies pointer to the exception frame.
+// a0 = pointer to exception record
+// a3 = previous psr
+//
+// Note: control registers, ra, sp, fp, gp have already been saved
+// argument registers a0-a3 have been saved as well
+//
+//--
+
+ ALTERNATE_ENTRY( KiGeneralException )
+
+ bsr ra, KiGenerateTrapFrame // store volatile state
+ br ra, KiExceptionDispatch // handle the exception
+
+ .end KiGeneralExceptionDispatch
+
+
+ SBTTL( "Exception Dispatch" )
+//++
+//
+// Routine Description:
+//
+// This routine begins the common code for raising an exception.
+// The routine saves the non-volatile state and dispatches to the
+// next level exception dispatcher.
+//
+// Arguments:
+//
+// fp - points to trap frame
+// sp - points to exception frame
+// a0 = pointer to exception record
+// a3 = psr
+//
+// gp, ra - saved in trap frame
+// a0-a3 - saved in trap frame
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KiExceptionDispatch, ExceptionFrameLength, zero )
+
+//
+// Build exception frame
+//
+
+ lda sp, -ExceptionFrameLength(sp)
+ stq ra, ExIntRa(sp) // save ra
+ stq s0, ExIntS0(sp) // save integer registers s0 - s5
+ stq s1, ExIntS1(sp) //
+ stq s2, ExIntS2(sp) //
+ stq s3, ExIntS3(sp) //
+ stq s4, ExIntS4(sp) //
+ stq s5, ExIntS5(sp) //
+ stt f2, ExFltF2(sp) // save floating registers f2 - f9
+ stt f3, ExFltF3(sp) //
+ stt f4, ExFltF4(sp) //
+ stt f5, ExFltF5(sp) //
+ stt f6, ExFltF6(sp) //
+ stt f7, ExFltF7(sp) //
+ stt f8, ExFltF8(sp) //
+ stt f9, ExFltF9(sp) //
+
+ PROLOGUE_END
+
+
+ ldil a4, TRUE // a4 = set first chance to true
+ and a3, PSR_MODE_MASK, a3 // a3 = previous mode
+ bis fp, zero, a2 // a2 = pointer to trap frame
+ bis sp, zero, a1 // a1 = pointer to exception frame
+ bsr ra, KiDispatchException // handle exception
+
+
+ SBTTL( "Exception Exit" )
+//++
+//
+// Routine Description:
+//
+// This routine is called to exit from an exception.
+//
+// N.B. This transfer of control occurs from:
+//
+// 1. fall-through from above
+// 2. exit from continue system service
+// 3. exit from raise exception system service
+// 4. exit into user mode from thread startup
+//
+// Arguments:
+//
+// fp - pointer to trap frame
+// sp - pointer to exception frame
+//
+// Return Value:
+//
+// Does not return.
+//
+//--
+
+
+ ALTERNATE_ENTRY(KiExceptionExit)
+
+ ldq s0, ExIntS0(sp) // restore integer registers s0 - s5
+ ldq s1, ExIntS1(sp) //
+ ldq s2, ExIntS2(sp) //
+ ldq s3, ExIntS3(sp) //
+ ldq s4, ExIntS4(sp) //
+ ldq s5, ExIntS5(sp) //
+
+ ldl a0, TrPsr(fp) // get previous psr
+
+ bsr ra, KiRestoreNonVolatileFloatState // restore nv float state
+
+ ALTERNATE_ENTRY(KiAlternateExit)
+
+ //
+ // on entry:
+ // a0 = previous psr
+ //
+
+ //
+ // rti will do the following for us:
+ //
+ // set sfw interrupt requests as per a1
+ // restore previous irql and mode from previous psr
+ // restore registers, a0-a3, fp, sp, ra, gp
+ // return to saved exception address in the trap frame
+ //
+ // here, we need to restore the trap frame and determine
+ // if we must request an APC interrupt
+ //
+
+ bis zero, zero, a1 // a1 = 0, no sfw interrupt requests
+ blbc a0, 30f // if kernel skip apc check
+
+ //
+ // should an apc interrupt be generated?
+ //
+
+ GET_CURRENT_THREAD // v0 = current thread addr
+ ldq_u t1, ThApcState+AsUserApcPending(v0) // get user APC pending
+ extbl t1, (ThApcState+AsUserApcPending) % 8, t0 //
+ ZeroByte( ThAlerted(v0) ) // clear kernel mode alerted
+ cmovne t0, APC_INTERRUPT, a1 // if pending set APC interrupt
+
+30:
+
+ bsr ra, KiRestoreTrapFrame // restore volatile state
+
+
+ // a0 = previous psr
+ // a1 = sfw interrupt requests
+ RETURN_FROM_TRAP_OR_INTERRUPT // return from trap
+
+ .end KiExceptionDispatch
+
+
+ SBTTL( "Memory Management Exception Dispatch" )
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+// N.B. The volatile registers must be saved in this prologue because
+// the compiler will occasionally generate code that uses volatile
+// registers to save the contents of nonvolatile registers when
+// a function only calls another function with a known register
+// signature (such as _OtsMove)
+//--
+
+ NESTED_ENTRY( KiMemoryManagementDispatch, TrapFrameLength, zero )
+
+ .set noreorder
+ stq sp, TrIntSp(sp) // save stack pointer
+ stq ra, TrIntRa(sp) // save return address
+ stq ra, TrFir(sp) // save return address
+ stq fp, TrIntFp(sp) // save frame pointer
+ stq gp, TrIntGp(sp) // save global pointer
+ bis sp, sp, fp // set frame pointer
+ .set reorder
+ stq v0, TrIntV0(sp) // save integer register v0
+ stq t0, TrIntT0(sp) // save integer registers t0 - t7
+ stq t1, TrIntT1(sp) //
+ stq t2, TrIntT2(sp) //
+ stq t3, TrIntT3(sp) //
+ stq t4, TrIntT4(sp) //
+ stq t5, TrIntT5(sp) //
+ stq t6, TrIntT6(sp) //
+ stq t7, TrIntT7(sp) //
+ stq a4, TrIntA4(sp) // save integer registers a4 - a5
+ stq a5, TrIntA5(sp) //
+ stq t8, TrIntT8(sp) // save integer registers t8 - t12
+ stq t9, TrIntT9(sp) //
+ stq t10, TrIntT10(sp) //
+ stq t11, TrIntT11(sp) //
+ stq t12, TrIntT12(sp) //
+
+ .set noat
+ stq AT, TrIntAt(sp) // save integer register AT
+ .set at
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// This routine is called from the PALcode when a translation not valid
+//    fault or an access violation is encountered. This routine calls
+//    MmAccessFault to attempt to resolve the fault. If the fault
+// cannot be resolved then the routine will dispatch to the exception
+// dispatcher so the exception can be raised.
+//
+// Arguments:
+//
+// fp - points to trap frame
+// sp - points to trap frame
+// a0 = store indicator, 1 = store, 0 = load
+// a1 = bad va
+// a2 = previous mode
+// a3 = previous psr
+//
+// gp, ra - saved in trap frame
+// a0-a3 - saved in trap frame
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY( KiMemoryManagementException )
+
+ bsr ra, KiGenerateTrapFrame // store volatile state
+
+ //
+ // save parameters in exception record
+ //
+
+ stl a0, TrExceptionRecord + ErExceptionInformation(fp)
+ stl a1, TrExceptionRecord + ErExceptionInformation+4(fp)
+
+ //
+ // save previous psr in case needed after call
+ //
+
+ stl a3, TrExceptionRecord + ErExceptionCode(fp)
+
+ //
+    // call memory management to handle the access fault
+ //
+
+ bsr ra, MmAccessFault // memory management fault handler
+
+ //
+ // Check if working set watch is enabled.
+ //
+ ldl t0, PsWatchEnabled // get working set watch enable flag
+ bis v0, zero, a0 // get status of fault resolution
+ blt v0, 40f // if fault status ltz, unsuccessful
+ beq t0, 35f // if eq. zero, watch not enabled
+ ldl a1, TrExceptionRecord + ErExceptionAddress(fp) // get exception address
+ ldl a2, TrExceptionRecord + ErExceptionInformation + 4(fp) // set bad address
+ bsr ra, PsWatchWorkingSet // record working set information.
+
+35:
+ //
+ // check if debugger has any
+ // breakpoints that should be inserted
+ //
+ ldl t0, KdpOweBreakpoint // get owned breakpoint flag
+ zap t0, 0xfe, t1 // mask off high bytes
+ beq t1, 37f
+
+ bsr ra, KdSetOwedBreakpoints
+37:
+ //
+ // if success then mem mgmt handled the exception, otherwise
+ // fill in remainder of the exception record and attempt
+ // to dispatch the exception
+ //
+
+ ldl a0, TrPsr(fp) // get previous psr
+ br zero, KiAlternateExit // exception handled
+
+ //
+ // failure returned from MmAccessFault
+ //
+ // status = STATUS_IN_PAGE_ERROR | 0x10000000
+    // is a special status that indicates a page fault at Irql > APC_LEVEL
+ // the following statuses can be forwarded:
+ // STATUS_ACCESS_VIOLATION
+ // STATUS_GUARD_PAGE_VIOLATION
+ // STATUS_STACK_OVERFLOW
+ // all other status will be set to:
+ // STATUS_IN_PAGE_ERROR
+ //
+ // dispatch exception via common code in KiDispatchException
+ // Following must be done:
+ // allocate exception frame via sp
+ // complete data in ExceptionRecord
+ // a0 points to ExceptionRecord
+ // a1 points to ExceptionFrame
+ // a2 points to TrapFrame
+ // a3 = previous psr
+ //
+ // Exception record information has the following values
+ // offset value
+ // 0 read vs write indicator (set on entry)
+ // 4 bad virtual address (set on entry)
+ // 8 real status (only if status was not "recognized")
+ //
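+    //
+    // In C terms (an illustrative sketch only; the field names follow the
+    // EXCEPTION_RECORD structure), the status mapping performed below is:
+    //
+    //     if ((Status == STATUS_ACCESS_VIOLATION) ||
+    //         (Status == STATUS_GUARD_PAGE_VIOLATION) ||
+    //         (Status == STATUS_STACK_OVERFLOW)) {
+    //         ExceptionRecord->NumberParameters = 2;
+    //     } else {
+    //         ExceptionRecord->ExceptionInformation[2] = Status;
+    //         ExceptionRecord->NumberParameters = 3;
+    //         Status = STATUS_IN_PAGE_ERROR;
+    //     }
+    //     ExceptionRecord->ExceptionCode = Status;
+    //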
+
+40:
+ //
+ // Check for special status that indicates a page fault at
+ // Irql above APC_LEVEL.
+ //
+
+ ldil t1, STATUS_IN_PAGE_ERROR | 0x10000000 // get special status
+ cmpeq v0, t1, t2 // status = special?
+ bne t2, 60f // if ne[true], handle it
+
+ //
+ // Check for expected return statuses.
+ //
+
+ addq fp, TrExceptionRecord, a0 // get exception record addr
+ bis zero, 2, t0 // number of exception params
+ ldil t1, STATUS_ACCESS_VIOLATION // get access violation code
+ cmpeq v0, t1, t2 // status was access violation?
+ bne t2, 50f // if ne [true], dispatch
+ ldil t1, STATUS_GUARD_PAGE_VIOLATION // get guard page vio. code
+ cmpeq v0, t1, t2 // status was guard page vio.?
+ bne t2, 50f // if ne [true], dispatch
+ ldil t1, STATUS_STACK_OVERFLOW // get stack overflow code
+ cmpeq v0, t1, t2 // status was stack overflow?
+ bne t2, 50f // if ne [true], dispatch
+
+ //
+ // Status is not recognized, save real status, bump the number
+ // of exception parameters, and set status to STATUS_IN_PAGE_ERROR
+ //
+
+ stl v0, ErExceptionInformation+8(a0) // save real status code
+ bis zero, 3, t0 // set number of params
+ ldil v0, STATUS_IN_PAGE_ERROR // set status to in page error
+
+50:
+ ldl a3, ErExceptionCode(a0) // restore previous psr
+ stl v0, ErExceptionCode(a0) // save exception status code
+ stl zero, ErExceptionFlags(a0) // zero flags
+ stl zero, ErExceptionRecord(a0) // zero record pointer
+ stl t0, ErNumberParameters(a0) // save in exception record
+
+ br ra, KiExceptionDispatch // does not return
+
+ //
+ // Handle the special case status returned from MmAccessFault,
+ // we have taken a page fault at Irql > APC_LEVEL.
+ // Call KeBugCheckEx with the following parameters:
+ // a0 = bugcheck code = IRQL_NOT_LESS_OR_EQUAL
+ // a1 = bad virtual address
+ // a2 = current Irql
+ // a3 = load/store indicator
+ // a4 = exception pc
+ //
+60:
+
+ ldil a0, IRQL_NOT_LESS_OR_EQUAL // set bugcheck code
+ ldl a1, TrExceptionRecord + ErExceptionInformation+4(fp) // bad va
+ ldl a2, TrExceptionRecord + ErExceptionCode(fp) // read psr
+ srl a2, PSR_IRQL, a2 // extract Irql
+ ldl a3, TrExceptionRecord + ErExceptionInformation(fp) // ld vs st
+ ldq a4, TrFir(fp) // read exception pc
+
+ br ra, KeBugCheckEx // handle bugcheck
+
+ .end KiMemoryManagementDispatch
+
+
+
+ SBTTL( "Primary Interrupt Dispatch" )
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+// N.B. The volatile registers must be saved in this prologue because
+// the compiler will occasionally generate code that uses volatile
+// registers to save the contents of nonvolatile registers when
+// a function only calls another function with a known register
+// signature (such as _OtsMove)
+//
+//--
+
+ EXCEPTION_HANDLER(KiInterruptHandler)
+
+ NESTED_ENTRY(KiInterruptDistribution, TrapFrameLength, zero);
+
+ .set noreorder
+ stq sp,TrIntSp(sp) // save stack pointer
+ stq ra,TrIntRa(sp) // save return address
+ stq ra,TrFir(sp) // save return address
+ stq fp,TrIntFp(sp) // save frame pointer
+ stq gp,TrIntGp(sp) // save general pointer
+ bis sp, sp, fp // set frame pointer
+ .set reorder
+ stq v0, TrIntV0(sp) // save integer register v0
+ stq t0, TrIntT0(sp) // save integer registers t0 - t7
+ stq t1, TrIntT1(sp) //
+ stq t2, TrIntT2(sp) //
+ stq t3, TrIntT3(sp) //
+ stq t4, TrIntT4(sp) //
+ stq t5, TrIntT5(sp) //
+ stq t6, TrIntT6(sp) //
+ stq t7, TrIntT7(sp) //
+ stq a4, TrIntA4(sp) // save integer registers a4 - a5
+ stq a5, TrIntA5(sp) //
+ stq t8, TrIntT8(sp) // save integer registers t8 - t12
+ stq t9, TrIntT9(sp) //
+ stq t10, TrIntT10(sp) //
+ stq t11, TrIntT11(sp) //
+ stq t12, TrIntT12(sp) //
+
+ .set noat
+ stq AT, TrIntAt(sp) // save integer register AT
+ .set at
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// The PALcode dispatches to this routine when an enabled interrupt
+// is asserted.
+//
+// When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to determine the highest priority
+// pending interrupt, raise the IRQL to the level of the highest interrupt,
+// and then dispatch the interrupt to the proper service routine.
+//
+//
+// Arguments:
+//
+// a0 - interrupt vector
+// a1 - pcr base pointer
+// a3 - previous psr
+// gp - Supplies a pointer to the system short data area.
+// fp - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiInterruptException)
+
+ bsr ra, KiSaveVolatileIntegerState // save integer registers
+10:
+
+//
+// Count the number of interrupts
+//
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = PRCB
+ ldl t0, PbInterruptCount(v0) // get current count of interrupts
+ addl t0, 1, t1 // increment count
+ stl t1, PbInterruptCount(v0) // save new interrupt count
+
+//
+// If interrupt vector > DISPATCH_LEVEL, indicate interrupt active in PRCB
+//
+ cmpule a0, DISPATCH_LEVEL, t4 // compare vector to DISPATCH_LEVEL
+ bne t4, 12f // if ne, <= DISPATCH_LEVEL
+ ldl t2, PbInterruptActive(v0) // get current interrupt active
+ addl t2, 1, t3 // increment
+ stl t3, PbInterruptActive(v0) // store new interrupt active
+
+12:
+
+ s4addl a0, a1, a0 // convert index to offset + PCR base
+ ldl a0, PcInterruptRoutine(a0) // get service routine address
+ jsr ra, (a0) // call interrupt service routine
+
+//
+// Restore state and exit interrupt.
+//
+
+ ldl a0, TrPsr(fp) // get previous processor status
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = PRCB
+ ldl t0, PbInterruptActive(v0) // get current interrupt active
+ beq t0, 50f // if eq, original vector <= DISPATCH_LEVEL
+ subl t0, 1, t1 // decrement
+ stl t1, PbInterruptActive(v0)
+ bne t1, 50f // if an interrupt is still active,
+ // skip the SW interrupt check
+//
+// If a dispatch interrupt is pending, lower IRQL to DISPATCH_LEVEL, and
+// directly call the dispatch interrupt handler.
+//
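+// In outline (an illustrative C sketch; SwapIrql and RequestSoftwareInterrupt
+// stand in for the SWAP_IRQL and REQUEST_SOFTWARE_INTERRUPT PAL services used
+// below):
+//
+//     if (Prcb->SoftwareInterrupts != 0) {
+//         Prcb->SoftwareInterrupts = 0;
+//         if (ReturnIrql < DISPATCH_LEVEL) {
+//             Prcb->DpcBypassCount += 1;
+//             SwapIrql(DISPATCH_LEVEL);          // lower IRQL via PAL
+//             KiDispatchInterrupt();             // drain the DPC list now
+//         } else {
+//             RequestSoftwareInterrupt(DISPATCH_LEVEL);
+//         }
+//     }
+//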
+ ldl t2, PbSoftwareInterrupts(v0) // get pending SW interrupts
+ beq t2, 50f // skip if no pending SW interrupts
+ stl zero, PbSoftwareInterrupts(v0) // clear pending SW interrupts
+ and a0, PSR_IRQL_MASK, a1 // extract IRQL from PSR
+ cmpult a1, DISPATCH_LEVEL << PSR_IRQL, t3 // check return IRQL
+ beq t3, 70f // if not lt DISPATCH_LEVEL, can't bypass
+//
+// Update count of bypassed dispatch interrupts
+//
+ ldl t4, PbDpcBypassCount(v0) // get old bypass count
+ addl t4, 1, t5 // increment
+ stl t5, PbDpcBypassCount(v0) // store new bypass count
+
+ ldil a0, DISPATCH_LEVEL
+ SWAP_IRQL // lower IRQL to DISPATCH_LEVEL
+ bsr ra, KiDispatchInterrupt // directly dispatch interrupt
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // v0 = PRCB
+45:
+ ldl a0, TrPsr(fp) // restore a0.
+50:
+//
+// Check if an APC interrupt should be generated.
+//
+
+ bis zero, zero, a1 // clear sfw interrupt request
+ blbc a0, 60f // if kernel no apc
+
+ GET_CURRENT_THREAD // v0 = current thread address
+
+ ldq_u t1, ThApcState+AsUserApcPending(v0) // get user APC pending
+ extbl t1, (ThApcState+AsUserApcPending) % 8, t0 //
+ ZeroByte( ThAlerted(v0) ) // clear kernel mode alerted
+
+ cmovne t0, APC_INTERRUPT, a1 // if pending set APC interrupt
+
+
+60:
+ bsr ra, KiRestoreVolatileIntegerState // restore volatile state
+
+ // a0 = previous mode
+ // a1 = sfw interrupt requests
+ RETURN_FROM_TRAP_OR_INTERRUPT // return from trap/interrupt
+
+70:
+//
+// Previous IRQL is >= DISPATCH_LEVEL, so a pending software interrupt cannot
+// be short-circuited. Request a software interrupt from the PAL.
+//
+ ldil a0, DISPATCH_LEVEL
+ REQUEST_SOFTWARE_INTERRUPT // request interrupt from PAL
+ br zero, 45b // rejoin common code
+ .end KiInterruptDistribution
+
+
+//++
+//
+// EXCEPTION_DISPOSITION
+// KiInterruptHandler (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN ULONG EstablisherFrame,
+// IN OUT PCONTEXT ContextRecord,
+//    IN OUT PDISPATCHER_CONTEXT DispatcherContext
+//    )
+//
+// Routine Description:
+//
+// Control reaches here when an exception is not handled by an interrupt
+// service routine or an unwind is initiated in an interrupt service
+// routine that would result in an unwind through the interrupt dispatcher.
+// This is considered to be a fatal system error and bug check is called.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// EstablisherFrame (a1) - Supplies the frame pointer of the establisher
+// of this exception handler.
+//
+// N.B. This is not actually the frame pointer of the establisher of
+// this handler. It is actually the stack pointer of the caller
+// of the system service. Therefore, the establisher frame pointer
+// is not used and the address of the trap frame is determined by
+// examining the saved fp register in the context record.
+//
+// ContextRecord (a2) - Supplies a pointer to a context record.
+//
+// DispatcherContext (a3) - Supplies a pointer to the dispatcher context
+// record.
+//
+// Return Value:
+//
+// There is no return from this routine.
+//
+//--
+
+ NESTED_ENTRY(KiInterruptHandler, HandlerFrameLength, zero)
+
+ lda sp, -HandlerFrameLength(sp) // allocate stack frame
+ stq ra, HdRa(sp) // save return address
+
+ PROLOGUE_END
+
+ ldl t0, ErExceptionFlags(a0) // get exception flags
+ ldil a0, INTERRUPT_UNWIND_ATTEMPTED // assume unwind in progress
+ and t0, EXCEPTION_UNWIND, t1 // check if unwind in progress
+ bne t1, 10f // if ne, unwind in progress
+ ldil a0, INTERRUPT_EXCEPTION_NOT_HANDLED // set bug check code
+10: bsr ra, KeBugCheck // call bug check routine
+
+
+ .end KiInterruptHandler
+
+
+ SBTTL( "System Service Dispatch" )
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+ .struct 0
+ScThread: .space 4 // thread address
+ .space 3 * 4 // pad to octaword
+SyscallFrameLength:
+
+ EXCEPTION_HANDLER(KiSystemServiceHandler)
+
+ NESTED_ENTRY(KiSystemServiceDispatch, TrapFrameLength, zero);
+
+ .set noreorder
+ stq sp, TrIntSp - TrapFrameLength(sp) // save stack pointer
+ lda sp, -TrapFrameLength(sp) // allocate stack frame
+ stq ra,TrIntRa(sp) // save return address
+ stq ra,TrFir(sp) // save return address
+ stq fp,TrIntFp(sp) // save frame pointer
+ stq gp,TrIntGp(sp) // save general pointer
+ bis sp, sp, fp // set frame pointer
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+//    Control reaches here when a system service call PAL call has been executed.
+// When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to call the specified system service.
+//
+//
+// Arguments:
+//
+// v0 - Supplies the system service code.
+// t0 - Previous processor mode
+// t1 - Current thread address
+// gp - Supplies a pointer to the system short data area.
+// fp - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+//
+// register usage
+// t0 - system service number, address in argument table
+// t1 - service limit number, argument table address, previous sp
+// t2 - previous mode, user probe address
+// t3 - address system service
+// t4 - system service table address, in mem argument flag, temp
+// t5 = address of routine to jump to
+//
+
+ ALTERNATE_ENTRY(KiSystemServiceException)
+
+ START_REGION(KiSystemServiceDispatchStart)
+
+ mf_fpcr f0
+ stt f0, TrFpcr(fp) // save fp control register
+
+ lda sp, -SyscallFrameLength(sp) // allocate local frame
+ stl t1, ScThread(sp) // save thread value
+
+//
+// If the system service code is negative, then the service is a fast path
+// event pair client/server service. This service is only executed from
+// user mode and its performance must be as fast as possible. Therefore,
+// the path to execute this service has been specialized for performance.
+//
+
+ bge v0, StandardService // if service number ge then standard
+
+ ldl a0, EtEventPair(t1) // get address of event pair object
+ and v0, 1, t10 // test if set low or set high
+ addl a0, EpEventHigh, a1 // assume set low wait high service
+ addl a0, EpEventLow, a0 //
+ cmovne t10, a0, t2 // if ne, set high wait low service
+ cmovne t10, a1, a0 // swap arguments
+ cmovne t10, t2, a1 //
+
+ beq a0, 20f // if eq, no event pair associated
+
+ bis zero, 1, a2 // previous mode = user
+ jsr ra, KiSetServerWaitClientEvent // call the kernel service
+
+10:
+ ldt f0, TrFpcr(fp)
+ mt_fpcr f0 // restore fp control register
+
+ ldl a0, TrPsr(fp) // get previous processor status
+ ldl t5, ScThread(sp) // get current thread address
+//
+// Check if an APC interrupt should be generated.
+//
+
+ bis zero, zero, a1 // clear sfw interrupt request
+
+ ldq_u t1, ThApcState+AsUserApcPending(t5) // get user APC pending
+ extbl t1, (ThApcState+AsUserApcPending) % 8, t0 //
+ ZeroByte( ThAlerted(t5) ) // clear kernel mode alerted
+
+ cmovne t0, APC_INTERRUPT, a1 // if pending set APC interrupt
+
+
+ // a0 = previous psr
+ // a1 = sfw interrupt requests
+ RETURN_FROM_SYSTEM_CALL // return to caller
+
+//
+// No event pair is associated with the thread, set the status and
+// return back.
+//
+
+20:
+ ldil v0, STATUS_NO_EVENT_PAIR // set service status
+ br zero, 10b // return from the service
+
+
+//
+// A standard system service has been executed.
+//
+// v0 = service number
+// t0 = previous mode
+// t1 = current thread address
+//
+
+StandardService:
+
+ ldq_u t4, ThPreviousMode(t1) // get old previous thread mode
+ ldl t5, ThTrapFrame(t1) // get current trap frame address
+ extbl t4, ThPreviousMode % 8, t3
+ stl t3, TrPreviousMode(fp) // save old previous mode of thread
+ StoreByte( t0, ThPreviousMode(t1) ) // set new previous mode in thread
+ stl t5, TrTrapFrame(fp) // save current trap frame address
+
+//
+// If the specified system service number is not within range, then
+// attempt to convert the thread to a GUI thread and retry the service
+// dispatch.
+//
+// N.B. The argument registers a0-a3, the system service number in v0,
+// and the thread address in t1 must be preserved while attempting
+// to convert the thread to a GUI thread.
+//
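+// The descriptor lookup below corresponds to the following C sketch
+// (illustrative only; the descriptor type and field names are inferred from
+// the Sd* offsets used in this routine):
+//
+//     Descriptor = (PKSERVICE_TABLE_DESCRIPTOR)((PUCHAR)Thread->ServiceTable +
+//         ((ServiceNumber >> SERVICE_TABLE_SHIFT) & SERVICE_TABLE_MASK));
+//     Index = ServiceNumber & SERVICE_NUMBER_MASK;
+//     if (Index < Descriptor->Limit) {
+//         ServiceRoutine = Descriptor->Base[Index];
+//     }
+//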
+
+ ALTERNATE_ENTRY(KiSystemServiceRepeat)
+
+ stl fp, ThTrapFrame(t1) // save address of trap frame
+ ldl t10, ThServiceTable(t1) // get service descriptor table address
+ srl v0, SERVICE_TABLE_SHIFT, t2 // isolate service descriptor offset
+ and t2, SERVICE_TABLE_MASK, t2 //
+ addl t2, t10, t10 // compute service descriptor address
+ ldl t3, SdLimit(t10) // get service number limit
+ and v0, SERVICE_NUMBER_MASK, t7 // isolate service table offset
+
+ cmpult t7, t3, t4 // check if valid service number
+ beq t4, 80f // if eq[false] not valid
+
+ ldl t4, SdBase(t10) // get service table address
+
+ s4addl t7, t4, t3 // compute address in service table
+ ldl t5, 0(t3) // get address of service routine
+
+#if DBG
+ ldl t6, SdCount(t10) // get service count table address
+ beq t6, 5f // if eq, table not defined
+ s4addl t7, t6, t6 // compute system service offset value
+ ldl t11, 0(t6) // increment system service count
+ addl t11, 1, t11
+ stl t11, 0(t6) // store result
+5:
+#endif
+
+//
+// If the system service is a GUI service and the GDI user batch queue is
+// not empty, then call the appropriate service to flush the user batch.
+//
+
+ cmpeq t2, SERVICE_TABLE_TEST, t2 // check if GUI system service
+ beq t2, 15f // if eq, not GUI system service
+ ldl t3, ThTeb(t1) // get current thread TEB address
+ stq t5, TrIntT5(fp) // save service routine address
+ ldl t4, TeGdiBatchCount(t3) // get number of batched GDI calls
+ beq t4, 15f // if eq, no batched calls
+ ldl t5, KeGdiFlushUserBatch // get address of flush routine
+ stq a0, TrIntA0(fp) // save possible arguments
+ stq a1, TrIntA1(fp) //
+ stq a2, TrIntA2(fp) //
+ stq a3, TrIntA3(fp) //
+ stq a4, TrIntA4(fp) //
+ stq a5, TrIntA5(fp) //
+ stq t10, TrIntT10(fp) // save service descriptor address
+ stq t7, TrIntT7(fp) // save service table offset
+ jsr ra, (t5) // flush GDI user batch
+ ldq t5, TrIntT5(fp) // restore service routine address
+ ldq a0, TrIntA0(fp) // restore possible arguments
+ ldq a1, TrIntA1(fp) //
+ ldq a2, TrIntA2(fp) //
+ ldq a3, TrIntA3(fp) //
+ ldq a4, TrIntA4(fp) //
+ ldq a5, TrIntA5(fp) //
+ ldq t10, TrIntT10(fp) // restore service descriptor address
+ ldq t7, TrIntT7(fp) // restore service table offset
+
+15:
+ blbc t5, 30f // if clear no in-memory arguments
+
+ ldl t10, SdNumber(t10) // get argument table address
+ addl t7, t10, t11 // compute address in argument table
+
+//
+// The following code captures arguments that were passed in memory on the
+// caller's stack. This is necessary to ensure that the caller does not modify
+// the arguments after they have been probed and is also necessary in kernel
+// mode because a trap frame has been allocated on the stack.
+//
+// If the previous mode is user, then the user stack is probed for readability.
+//
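+// In outline (an illustrative C sketch; the variable names are not from this
+// module), the copy rounds the in-memory argument size up to a 32-byte
+// hexaword multiple, carves that much space from the kernel stack, and copies
+// the arguments from the previous stack:
+//
+//     Length = (MemoryArgumentBytes + 31) & ~31;
+//     KernelSp -= Length;
+//     RtlCopyMemory(KernelSp, PreviousSp, Length);
+//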
+
+ ldl t10, TrIntSp(fp) // get previous stack pointer
+ beq t0, 10f // if eq, previous mode was kernel
+
+ ldil t2, MM_USER_PROBE_ADDRESS
+ cmpult t10, t2, t4 // check if stack in user region
+ cmoveq t4, t2, t10 // set invalid user stack address
+ // if stack not lt MM_USER_PROBE
+
+10: ldq_u t4, 0(t11)
+ extbl t4, t11, t9 // get number of memory arguments * 8
+
+ addl t9, 0x1f, t3 // round up to hexaword (32 bytes)
+ bic t3, 0x1f, t3 // insure hexaword alignment
+
+ subl sp, t3, sp // allocate space on kernel stack
+
+ bis sp, zero, t2 // set destination copy address
+ addl t2, t3, t4 // compute destination end address
+
+ START_REGION(KiSystemServiceStartAddress)
+
+ //
+ // This code is set up to load the cache block in the first
+ // instruction and then perform computations that do not require
+ // the cache while waiting for the data. In addition, the stores
+ // are setup so they will be in order.
+ //
+
+20: ldq t6, 24(t10) // get argument from previous stack
+ addl t10, 32, t10 // next hexaword on previous stack
+ addl t2, 32, t2 // next hexaword on kernel stack
+ cmpeq t2, t4, t11 // at end address?
+ stq t6, -8(t2) // store argument on kernel stack
+ ldq t7, -16(t10) // argument from previous stack
+ ldq t8, -24(t10) // argument from previous stack
+ ldq t9, -32(t10) // argument from previous stack
+ stq t7, -16(t2) // save argument on kernel stack
+ stq t8, -24(t2) // save argument on kernel stack
+ stq t9, -32(t2) // save argument on kernel stack
+ beq t11, 20b // if eq[false] get next block
+
+ END_REGION(KiSystemServiceEndAddress)
+
+ bic t5, 3, t5 // clean lower bits of service addr
+
+//
+// Call system service.
+//
+
+30: jsr ra, (t5)
+
+//
+// Exit handling for standard system service.
+//
+
+ ALTERNATE_ENTRY(KiSystemServiceExit)
+//
+// Restore old trap frame address from the current trap frame.
+//
+
+//
+// Update the number of system calls
+//
+
+ bis v0, zero, t1 // save return status
+
+ GET_PROCESSOR_CONTROL_BLOCK_BASE // get processor block address
+
+ ldl t2, -SyscallFrameLength + ScThread(fp) // get current thread address
+ ldl t3, TrTrapFrame(fp) // get old trap frame address
+ ldl t10, PbSystemCalls(v0) // increment number of calls
+ addl t10, 1, t10 //
+ stl t10, PbSystemCalls(v0) // store result
+ stl t3, ThTrapFrame(t2) // restore old trap frame address
+ bis t1, zero, v0 // restore return status
+
+ ldt f0, TrFpcr(fp)
+ mt_fpcr f0 // restore fp control register
+
+ ldl a0, TrPsr(fp) // get previous processor status
+
+ ldl t5, TrPreviousMode(fp) // get old previous mode
+
+ StoreByte( t5, ThPreviousMode(t2) ) // store previous mode in thread
+
+//
+// Check if an APC interrupt should be generated.
+//
+
+ bis zero, zero, a1 // clear sfw interrupt request
+ blbc a0, 70f // if kernel mode skip apc check
+
+ ldq_u t1, ThApcState+AsUserApcPending(t2) // get user APC pending
+ extbl t1, (ThApcState+AsUserApcPending) % 8, t0 //
+ ZeroByte( ThAlerted(t2) ) // clear kernel mode alerted
+
+ cmovne t0, APC_INTERRUPT, a1 // if pending set APC interrupt
+
+70:
+
+ // a0 = previous psr
+ // a1 = sfw interrupt requests
+ RETURN_FROM_SYSTEM_CALL // return to caller
+
+//
+// The specified system service number is not within range. Attempt to
+// convert the thread to a GUI thread if the specified system service is
+// a GUI service.
+//
+// N.B. The argument registers a0-a5 and the system service number in v0
+// must be preserved if an attempt is made to convert the thread to
+// a GUI thread.
+//
+
+80: cmpeq t2, SERVICE_TABLE_TEST, t2 // check if GUI system service
+ beq t2, 55f // if eq, not GUI system service
+ stq v0, TrIntV0(fp) // save system service number
+ stq a0, TrIntA0(fp) // save argument register a0
+ stq a1, TrIntA1(fp) // save argument registers a1-a5
+ stq a2, TrIntA2(fp)
+ stq a3, TrIntA3(fp)
+ stq a4, TrIntA4(fp)
+ stq a5, TrIntA5(fp)
+ bsr ra, PsConvertToGuiThread // attempt to convert to GUI thread
+ bis v0, zero, t0 // save completion status
+ addq sp, SyscallFrameLength, fp // reset trap frame address
+ GET_CURRENT_THREAD
+ bis v0, zero, t1 // get current thread address
+ ldq v0, TrIntV0(fp) // restore system service number
+ ldq a0, TrIntA0(fp) // restore argument registers a0-a5
+ ldq a1, TrIntA1(fp)
+ ldq a2, TrIntA2(fp)
+ ldq a3, TrIntA3(fp)
+ ldq a4, TrIntA4(fp)
+ ldq a5, TrIntA5(fp)
+ beq t0, KiSystemServiceRepeat // if eq, successful conversion
+
+//
+// Return invalid system service status for invalid service code.
+//
+55:
+ ldil v0, STATUS_INVALID_SYSTEM_SERVICE // completion status
+ br zero, KiSystemServiceExit //
+
+ START_REGION(KiSystemServiceDispatchEnd)
+
+ .end KiSystemServiceDispatch
+
+
+//++
+//
+// EXCEPTION_DISPOSITION
+// KiSystemServiceHandler (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN ULONG EstablisherFrame,
+// IN OUT PCONTEXT ContextRecord,
+// IN OUT PDISPATCHER_CONTEXT DispatcherContext
+// )
+//
+// Routine Description:
+//
+//    Control reaches here when an exception is raised in a system service
+// or the system service dispatcher, and for an unwind during a kernel
+// exception.
+//
+// If an unwind is being performed and the system service dispatcher is
+//    the target of the unwind, then an exception occurred while attempting
+//    to copy the user's in-memory argument list. Control is transferred to
+//    the system service exit by returning a continue execution disposition
+// value.
+//
+// If an unwind is being performed and the previous mode is user, then
+// bug check is called to crash the system. It is not valid to unwind
+// out of a system service into user mode.
+//
+// If an unwind is being performed, the previous mode is kernel, the
+// system service dispatcher is not the target of the unwind, and the
+// thread does not own any mutexes, then the previous mode field from
+// the trap frame is restored to the thread object. Otherwise, bug
+// check is called to crash the system. It is invalid to unwind out of
+// a system service while owning a mutex.
+//
+// If an exception is being raised and the exception PC is within the
+// range of the system service dispatcher in-memory argument copy code,
+// then an unwind to the system service exit code is initiated.
+//
+// If an exception is being raised and the exception PC is not within
+// the range of the system service dispatcher, and the previous mode is
+//    not user, then a continue search disposition value is returned. Otherwise,
+// a system service has failed to handle an exception and bug check is
+// called. It is invalid for a system service not to handle all exceptions
+// that can be raised in the service.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// EstablisherFrame (a1) - Supplies the frame pointer of the establisher
+// of this exception handler.
+//
+// N.B. This is not actually the frame pointer of the establisher of
+// this handler. It is actually the stack pointer of the caller
+// of the system service. Therefore, the establisher frame pointer
+// is not used and the address of the trap frame is determined by
+// examining the saved fp register in the context record.
+//
+// ContextRecord (a2) - Supplies a pointer to a context record.
+//
+// DispatcherContext (a3) - Supplies a pointer to the dispatcher context
+// record.
+//
+// Return Value:
+//
+// If bug check is called, there is no return from this routine and the
+//    system is crashed. If an exception occurred while attempting to copy
+//    the user's in-memory argument list, then there is no return from this
+// routine, and unwind is called. Otherwise, ExceptionContinueSearch is
+// returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiSystemServiceHandler)
+
+ lda sp, -HandlerFrameLength(sp) // allocate stack frame
+ stq ra, HdRa(sp) // save return address
+
+ PROLOGUE_END
+
+ ldl t0, ErExceptionFlags(a0) // get exception flags
+ and t0, EXCEPTION_UNWIND, t1 // check if unwind in progress
+ bne t1, 40f // if ne, unwind in progress
+
+//
+// An exception is in progress.
+//
+// If the exception PC is within the in-memory argument copy code of the
+// system service dispatcher, then call unwind to transfer control to the
+// system service exit code. Otherwise, check if the previous mode is user
+// or kernel mode.
+//
+//
+
+ ldl t0, ErExceptionAddress(a0) // get address of exception
+ lda t1, KiSystemServiceStartAddress // address of system service
+ cmpult t0, t1, t3 // check if before start range
+
+ lda t2, KiSystemServiceEndAddress // end address
+
+ bne t3, 10f // if ne, before start of range
+ cmpult t0, t2, t3 // check if before end of range
+ bne t3, 30f // if ne, before end of range
+
+//
+// If the previous mode was kernel mode, then a continue search disposition
+// value is returned. Otherwise, the exception was raised in a system service
+// and was not handled by that service. Call bug check to crash the system.
+//
+
+10:
+ GET_CURRENT_THREAD // v0 = current thread address
+ ldq_u t4, ThPreviousMode(v0) // get previous mode from thread
+ extbl t4, ThPreviousMode % 8, t1
+ bne t1, 20f // if ne, previous mode was user
+
+//
+// Previous mode is kernel mode.
+//
+
+ ldil v0, ExceptionContinueSearch // set disposition code
+ lda sp, HandlerFrameLength(sp) // deallocate stack frame
+ jmp zero, (ra) // return
+
+//
+// Previous mode is user mode. Call bug check to crash the system.
+//
+
+20:
+ ldil a0, SYSTEM_SERVICE_EXCEPTION // set bug check code
+ bsr ra, KeBugCheck // call bug check routine
+
+//
+// The exception was raised in the system service dispatcher. Unwind to the
+// the system service exit code.
+//
+
+30: ldl a3, ErExceptionCode(a0) // set return value
+ bis zero, zero, a2 // set exception record address
+ bis a1, zero, a0 // set target frame address
+
+ lda a1, KiSystemServiceExit // set target PC address
+ bsr ra, RtlUnwind // unwind to system service exit
+
+//
+// An unwind is in progress.
+//
+// If a target unwind is being performed, then continue execution is returned
+// to transfer control to the system service exit code. Otherwise, restore the
+// previous mode if the previous mode is not user and there are no mutexes owned
+// by the current thread.
+//
+
+40:    and t0, EXCEPTION_TARGET_UNWIND, t1 // check if target unwind in progress
+ bne t1, 60f // if ne, target unwind in progress
+
+//
+// An unwind is being performed through the system service dispatcher. If the
+// previous mode is not kernel or the current thread owns one or more mutexes,
+// then call bug check and crash the system. Otherwise, restore the previous
+// mode in the current thread object.
+//
+
+ GET_CURRENT_THREAD // v0 = current thread address
+ ldl t1, CxIntFp(a2) // get address of trap frame
+ ldq_u t4, ThPreviousMode(v0) // get previous mode from thread
+ extbl t4, ThPreviousMode % 8, t3
+ ldl t4,TrPreviousMode(t1) // get previous mode from trap frame
+ bne t3, 50f // if ne, previous mode was user
+
+//
+// Restore the previous mode from the trap frame to the thread object and
+// continue the unwind operation.
+//
+
+ StoreByte( t4, ThPreviousMode(v0) ) // restore previous mode from trap frame
+
+ ldil v0, ExceptionContinueSearch // set disposition value
+ lda sp, HandlerFrameLength(sp) // deallocate stack frame
+ jmp zero, (ra) // return
+
+//
+// An attempt is being made to unwind into user mode. Call bug check to crash
+// the system.
+//
+
+50:
+ ldil a0, SYSTEM_UNWIND_PREVIOUS_USER // set bug check code
+ bsr ra, KeBugCheck // call bug check
+
+//
+// A target unwind is being performed. Return a continue search disposition
+// value.
+//
+
+60:
+ ldil v0, ExceptionContinueSearch // set disposition value
+ lda sp, HandlerFrameLength(sp) // deallocate stack frame
+ jmp zero, (ra) // return
+
+ .end KiSystemServiceHandler
+
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//--
+
+ NESTED_ENTRY( KiPanicDispatch, TrapFrameLength, zero )
+
+ .set noreorder
+ stq sp, TrIntSp(sp) // save stack pointer
+ stq ra, TrIntRa(sp) // save return address
+ stq ra, TrFir(sp) // save return address
+ stq fp, TrIntFp(sp) // save frame pointer
+ stq gp, TrIntGp(sp) // save global pointer
+ bis sp, sp, fp // set frame pointer
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// PALcode dispatches to this entry point when a panic situation
+// is detected while in PAL mode. The panic situation may be that
+// the kernel stack is about to overflow/underflow or there may be
+// a condition that was not expected to occur while in PAL mode
+//    (e.g. an arithmetic exception while in PAL). This entry point is
+// here to help us debug the condition.
+//
+// Arguments:
+//
+// fp - points to trap frame
+// sp - points to exception frame
+// a0 = Bug check code
+// a1 = Exception address
+// a2 = Bugcheck parameter
+// a3 = Bugcheck parameter
+//
+// gp, ra - saved in trap frame
+// a0-a3 - saved in trap frame
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY( KiPanicException )
+
+ stq ra, TrIntRa(fp) // PAL is supposed to do this, but it doesn't!
+//
+// Save state, volatile float and integer state via KiGenerateTrapFrame
+//
+
+ bsr ra, KiGenerateTrapFrame // save volatile state
+
+//
+// Dispatch to KeBugCheckEx, does not return
+//
+
+ br ra, KeBugCheckEx // do the bugcheck
+
+ .end KiPanicDispatch
+
+
+//++
+//
+// VOID
+// KiBreakinBreakpoint(
+// VOID
+// );
+//
+// Routine Description:
+//
+// This routine issues a breakin breakpoint.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY( KiBreakinBreakpoint )
+
+ BREAK_BREAKIN // execute breakin breakpoint
+ ret zero, (ra) // return to caller
+
+ .end KiBreakinBreakpoint
diff --git a/private/ntos/ke/alpha/trigger.c b/private/ntos/ke/alpha/trigger.c
new file mode 100644
index 000000000..b8f6749a3
--- /dev/null
+++ b/private/ntos/ke/alpha/trigger.c
@@ -0,0 +1,580 @@
+/*++
+
+Copyright (c) 1993 Digital Equipment Corporation
+
+Module Name:
+
+ trigger.c
+
+Abstract:
+
+ This module implements functions that handle synchronous and asynchronous
+ arithmetic exceptions. The Alpha SRM specifies certain code generation
+ rules which if followed allow this code (in conjunction with internal
+ processor register state) to effect a precise, synchronous exception
+ given an imprecise, asynchronous exception. This capability is required
+ for software emulation of the IEEE single and double floating operations.
+
+Author:
+
+ Thomas Van Baak (tvb) 5-Mar-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#include "alphaops.h"
+
+//
+// Define forward referenced function prototypes.
+//
+
+BOOLEAN
+KiLocateTriggerPc (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKTRAP_FRAME TrapFrame
+ );
+
+//
+// Define debugging macros.
+//
+
+#if DBG
+
+extern ULONG RtlDebugFlags;
+#define DBGPRINT ((RtlDebugFlags & 0x4) != 0) && DbgPrint
+
+#else
+
+#define DBGPRINT 0 && DbgPrint
+
+#endif
+
+//
+// Define non-IEEE (a/k/a `high performance') arithmetic exception types.
+// The PALcode exception record is extended by one word and the 4th word
+// contains the reason the arithmetic exception is not an IEEE exception.
+//
+
+#define NON_IEEE(ExceptionRecord, Reason) \
+ (ExceptionRecord)->NumberParameters = 4; \
+ (ExceptionRecord)->ExceptionInformation[3] = (Reason);
+
+#define TRIGGER_FLOATING_REGISTER_MASK_CLEAR 1
+#define TRIGGER_INTEGER_REGISTER_MASK_SET 2
+#define TRIGGER_NO_SOFTWARE_COMPLETION 3
+#define TRIGGER_INVALID_INSTRUCTION_FOUND 4
+#define TRIGGER_INSTRUCTION_FETCH_ERROR 5
+#define TRIGGER_INSTRUCTION_NOT_FOUND 6
+#define TRIGGER_SOURCE_IS_DESTINATION 7
+#define TRIGGER_WRONG_INSTRUCTION 8
+
+BOOLEAN
+KiFloatingException (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN BOOLEAN ImpreciseTrap,
+ IN OUT PULONG SoftFpcrCopy
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate a floating operation and convert the
+ exception status to the proper value. If the exception is a fault, the
+ faulting floating point instruction is emulated. If the exception is an
+ imprecise trap, an attempt is made to locate and to emulate the original
+ trapping floating point instruction.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ImpreciseTrap - Supplies a boolean value that specifies whether the
+ exception is an imprecise trap.
+
+ SoftFpcrCopy - Supplies a pointer to a longword variable that receives
+ a copy of the software FPCR.
+
+Return Value:
+
+ A value of TRUE is returned if the floating exception is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Status;
+ PSW_FPCR SoftwareFpcr;
+ PTEB Teb;
+
+ try {
+
+ //
+ // Obtain a copy of the software FPCR longword from the TEB.
+ //
+
+ Teb = NtCurrentTeb();
+ *SoftFpcrCopy = Teb->FpSoftwareStatusRegister;
+ SoftwareFpcr = (PSW_FPCR)SoftFpcrCopy;
+ DBGPRINT("KiFloatingException: SoftFpcr = %.8lx\n", *SoftFpcrCopy);
+
+#if DBG
+ //
+ // If the floating emulation inhibit flag is set, then bypass all
+ // software emulation by the kernel and return FALSE to raise the
+ // original PALcode exception.
+ //
+ // N.B. This is for user-mode development and testing and is not
+ // part of the API.
+ //
+
+ if (SoftwareFpcr->NoSoftwareEmulation != 0) {
+ DBGPRINT("KiFloatingException: NoSoftwareEmulation\n");
+ return FALSE;
+ }
+#endif
+
+ //
+ // If the arithmetic exception is an imprecise trap, the address of
+ // the trapping instruction is somewhere before the exception address.
+ //
+ // Otherwise the exception is a fault and the address of the faulting
+ // instruction is the exception address.
+ //
+
+ if (ImpreciseTrap != FALSE) {
+
+ //
+ // If the arithmetic trap ignore mode is enabled, then do not
+ // spend time to locate or to emulate the trapping instruction,
+ // leave unpredictable results in the destination register, do
+ // not set correct IEEE sticky bits in the software FPCR, leave
+ // the hardware FPCR sticky status bits as they are, and return
+ // TRUE to continue execution. It is assumed that user code will
+ // check the hardware FPCR exception status bits to determine if
+ // the instruction succeeded or not (Insignia SoftPc feature).
+ //
+
+ if (SoftwareFpcr->ArithmeticTrapIgnore != 0) {
+ return TRUE;
+ }
+
+ //
+ // Attempt to locate the trapping instruction. If the instruction
+ // stream is such that this is not possible or was not intended,
+ // then set an exception code that best reflects the exception
+ // summary register bits and return FALSE to raise the exception.
+ //
+ // Otherwise emulate the trigger instruction in order to compute
+ // the correct destination result value, the correct IEEE status
+ // bits, and raise any enabled IEEE exceptions.
+ //
+
+ if (KiLocateTriggerPc(ExceptionRecord, TrapFrame) == FALSE) {
+ KiSetFloatingStatus(ExceptionRecord);
+ return FALSE;
+ }
+ Status = KiEmulateFloating(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ SoftwareFpcr);
+
+ } else {
+
+ //
+ // Attempt to emulate the faulting instruction in order to perform
+ // floating operations not supported by EV4, to compute the correct
+ // destination result value, the correct IEEE status bits, and
+ // raise any enabled IEEE exceptions.
+ //
+
+ Status = KiEmulateFloating(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ SoftwareFpcr);
+
+ //
+ // If the emulation resulted in a floating point exception and
+ // the arithmetic trap ignore mode is enabled, then set the return
+ // value to TRUE to suppress the exception and continue execution.
+ //
+
+ if ((Status == FALSE) &&
+ (SoftwareFpcr->ArithmeticTrapIgnore != 0) &&
+ (ExceptionRecord->ExceptionCode != STATUS_ILLEGAL_INSTRUCTION)) {
+ Status = TRUE;
+ }
+ }
+
+ //
+ // Store the updated software FPCR longword in the TEB.
+ //
+
+ Teb->FpSoftwareStatusRegister = *SoftFpcrCopy;
+ DBGPRINT("KiFloatingException: SoftFpcr = %.8lx\n", *SoftFpcrCopy);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // An exception occurred accessing the TEB.
+ //
+
+ ExceptionRecord->ExceptionCode = GetExceptionCode();
+ return FALSE;
+ }
+
+ return Status;
+}
+
+BOOLEAN
+KiLocateTriggerPc (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to try to determine the precise location of the
+ instruction that caused an arithmetic exception. The instruction that
+ caused the trap to occur is known as the trigger instruction. On entry,
+ the actual address of the trigger instruction is unknown and the exception
+ address is the continuation address. The continuation address is the
+ address of the instruction that would have executed had the trap not
+ occurred. The instructions following the trigger instruction up to the
+ continuation address are known as the trap shadow of the trigger
+ instruction.
+
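+    For example (an illustrative scenario), if a floating add issued with the
+    /S software completion qualifier traps and two unrelated instructions
+    execute before the trap is delivered, the exception address points just
+    past those two instructions; the two instructions form the trap shadow,
+    and the backup scan walks from the continuation address toward lower
+    addresses until the destination register of the floating add accounts
+    for the bit set in the floating register write mask.
+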
+ Alpha AXP produces imprecise, asynchronous arithmetic exceptions. The
+ exceptions are imprecise because the exception address when a trap is
+ taken may be more than one instruction beyond the address of the
+ instruction that actually caused the trap to occur.
+
+ The arithmetic exceptions are traps (rather than faults) because the
+ exception address is not the address of the trapping instruction
+ itself, but the address of the next instruction to execute, which is
+ always (at least) one instruction beyond the address of the trapping
+ instruction.
+
+ It is possible for multiple exceptions to occur and result in a single
+ trap. This function only determines the address of the first trapping
+ instruction.
+
+ Unpredictable values may have been stored in the destination register
+    of trapping instructions. Thus, to ensure that the trigger instruction
+ can be located, and that the trigger instruction and any instructions
+ in the trap shadow can be re-executed, certain restrictions are placed
+ on the type of instructions or the mix of operands in the trap shadow.
+
+ The code generation rules serve only to guarantee that the instruction
+ backup algorithm and subsequent re-execution can always be successful.
+ Hence the restrictions on such constructs as branches, jumps, and the
+ re-use of source or destination operands within the trap shadow.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ If the trigger PC was precisely determined, the exception address in
+ the exception record is set to the trigger PC, the continuation address
+ in the trap frame is updated, and a value of TRUE is returned. Otherwise
+ no values are stored and a value of FALSE is returned.
+
+--*/
+
+{
+
+ PEXC_SUM ExceptionSummary;
+ ULONG Fa;
+ ULONG Fb;
+ ULONG Fc;
+ ULONG FloatRegisterTrashMask;
+ ULONG FloatRegisterWriteMask;
+ ALPHA_INSTRUCTION Instruction;
+ ULONG IntegerRegisterWriteMask;
+ ULONG Opcode;
+ ULONG TrapShadowLowLimit;
+ ULONG TriggerPc;
+
+ //
+ // Obtain a copy of the float and integer register write mask registers
+ // and the exception summary register from the exception record built by
+ // PALcode.
+ //
+
+ FloatRegisterWriteMask = ExceptionRecord->ExceptionInformation[0];
+ IntegerRegisterWriteMask = ExceptionRecord->ExceptionInformation[1];
+ ExceptionSummary = (PEXC_SUM)&(ExceptionRecord->ExceptionInformation[2]);
+ DBGPRINT("KiLocateTriggerPc: WriteMask %.8lx.%.8lx, ExceptionSummary %.8lx\n",
+ FloatRegisterWriteMask, IntegerRegisterWriteMask,
+ *(PULONG)ExceptionSummary);
+
+ if (FloatRegisterWriteMask == 0) {
+
+ //
+ // It should not be possible to have a floating point exception without
+ // at least one of the destination float register bits set. The trap
+ // shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: FloatRegisterWriteMask clear\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_FLOATING_REGISTER_MASK_CLEAR);
+ return FALSE;
+ }
+ if (IntegerRegisterWriteMask != 0) {
+
+ //
+ // It is not possible to precisely locate the trigger instruction
+ // when the integer overflow bit is set. The trap shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: IntegerRegisterMask set.\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_INTEGER_REGISTER_MASK_SET);
+ return FALSE;
+ }
+ if (ExceptionSummary->SoftwareCompletion == 0) {
+
+ //
+ // The exception summary software completion bit is the AND of the
+ // /S bits of all trapping instructions in the trap shadow. Since
+ // the software completion bit is not set, it can be assumed the
+ // code that was executing does not want precise exceptions, or if
+ // it does, the code does not comply with the Alpha AXP guidelines
+ // for locating the trigger PC. The trap shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: SoftwareCompletion clear\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_NO_SOFTWARE_COMPLETION);
+ return FALSE;
+ }
+
+ //
+ // Search for the trigger instruction starting with the instruction before
+ // the continuation PC (the instruction pointed to by Fir either did not
+ // complete or did not even start). Bound the search at an arbitrary
+ // limit of N instructions back from the current PC to prevent unbounded
+ // searches. The search is complete when all trapping destination register
+ // bits in the float write mask register have been accounted for.
+ //
+
+ FloatRegisterTrashMask = 0;
+ TriggerPc = (ULONG)TrapFrame->Fir;
+ TrapShadowLowLimit = TriggerPc - (500 * sizeof(ULONG));
+
+ try {
+ do {
+ TriggerPc -= 4;
+ if (TriggerPc < TrapShadowLowLimit) {
+
+ //
+ // The trigger PC is too far away from the exception PC to
+ // be reasonable. The trap shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: Trap shadow too long\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_INSTRUCTION_NOT_FOUND);
+ return FALSE;
+ }
+
+ Instruction.Long = ProbeAndReadUlong((PULONG)TriggerPc);
+
+ //
+ // Examine the opcode of this instruction to determine if the
+ // trap shadow is invalid.
+ //
+
+ Opcode = Instruction.Memory.Opcode;
+ if (Opcode == JMP_OP) {
+
+ //
+ // This is one of the jump instructions: jump, return, or
+ // either form of jsr. The trap shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: Jump within Trap Shadow\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_INVALID_INSTRUCTION_FOUND);
+ return FALSE;
+
+ } else if ((Opcode >= BR_OP) && (Opcode <= BGT_OP)) {
+
+ //
+ // The instruction is one of the 16 branch opcodes, which consist
+ // of BR, BSR, the 6 floating point branch instructions, and the
+ // 8 integer branch instructions. The trap shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: Branch within Trap Shadow\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_INVALID_INSTRUCTION_FOUND);
+ return FALSE;
+
+ } else if ((Instruction.Memory.Opcode == MEMSPC_OP) &&
+ ((Instruction.Memory.MemDisp == TRAPB_FUNC) ||
+ (Instruction.Memory.MemDisp == EXCB_FUNC))) {
+
+ //
+ // The instruction is a type of TRAPB instruction. The trap
+ // shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: Trapb within Trap Shadow\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_INVALID_INSTRUCTION_FOUND);
+ return FALSE;
+
+ } else if (Opcode == CALLPAL_OP) {
+
+ //
+ // The instruction is a Call PAL. The trap shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: Call PAL within Trap Shadow\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_INVALID_INSTRUCTION_FOUND);
+ return FALSE;
+
+ } else if ((Opcode == IEEEFP_OP) || (Opcode == FPOP_OP)) {
+
+ //
+ // The instruction is an IEEE floating point instruction.
+ // Decode the destination register of the floating point
+ // instruction in order to check against the register mask.
+ //
+
+ Fc = Instruction.FpOp.Fc;
+ if (Fc != FZERO_REG) {
+ FloatRegisterTrashMask |= (1 << Fc);
+ }
+ FloatRegisterWriteMask &= ~(1 << Fc);
+ }
+
+ } while (FloatRegisterWriteMask != 0);
+
+ //
+ // If the instruction thought to be the trigger instruction does not
+ // have the /S bit set, then the trap shadow is invalid (some other
+ // instruction must have caused the software completion bit to be set).
+ //
+
+ if ((Instruction.FpOp.Function & FP_TRAP_ENABLE_S) == 0) {
+ DBGPRINT("KiLocateTriggerPc: Trigger instruction missing /S\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_WRONG_INSTRUCTION);
+ return FALSE;
+ }
+
+ //
+ // If either of the operand registers of the trigger instruction is
+ // also the destination register of the trigger instruction or any
+ // instruction in the trap shadow, then the trap shadow is invalid.
+ // This is because the original value of the operand register(s) may
+ // have been destroyed making it impossible to re-execute the trigger
+ // instruction.
+ //
+
+ Fa = Instruction.FpOp.Fa;
+ Fb = Instruction.FpOp.Fb;
+ if ((FloatRegisterTrashMask & ((1 << Fa) | (1 << Fb))) != 0) {
+ DBGPRINT("KiLocateTriggerPc: Source is destination\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_SOURCE_IS_DESTINATION);
+ return FALSE;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ //
+ // An exception occurred while fetching the value of the
+ // next previous instruction. The trap shadow is invalid.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: Instruction fetch error\n");
+ NON_IEEE(ExceptionRecord, TRIGGER_INSTRUCTION_FETCH_ERROR);
+ return FALSE;
+ }
+
+ //
+ // The trigger instruction was successfully located. Set the precise
+ // exception address in the exception record, set the new continuation
+ // address in the trap frame, and return a value of TRUE.
+ //
+
+ DBGPRINT("KiLocateTriggerPc: Exception PC = %.8lx, Trigger PC = %.8lx\n",
+ ExceptionRecord->ExceptionAddress, TriggerPc);
+ ExceptionRecord->ExceptionAddress = (PVOID)TriggerPc;
+ TrapFrame->Fir = (ULONGLONG)(LONG)(TriggerPc + 4);
+ return TRUE;
+}
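+
+//
+// Worked example of the backup algorithm above (illustrative only): if the
+// PALcode reported float register write mask has bits set for, say, F2 and
+// F5, the scan starts at the instruction before the continuation PC and
+// walks backward. Each IEEE floating instruction encountered clears its
+// destination register bit from the write mask and records it in the trash
+// mask. The instruction that clears the last remaining bit is taken to be
+// the trigger instruction, provided it has the /S qualifier and none of its
+// source registers appear in the trash mask.
+//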
+
+VOID
+KiSetFloatingStatus (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to convert the exception summary register bits
+ into a status code value.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PEXC_SUM ExceptionSummary;
+
+ //
+ // Perform the following triage on the exception summary register to
+ // report the type of exception, even though the PC reported is
+ // imprecise.
+ //
+
+ DBGPRINT("KiSetFloatingStatus: ExceptionSummary = %.8lx\n",
+ ExceptionRecord->ExceptionInformation[2]);
+
+ ExceptionSummary = (PEXC_SUM)(&ExceptionRecord->ExceptionInformation[2]);
+ if (ExceptionSummary->InvalidOperation != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+
+ } else if (ExceptionSummary->DivisionByZero != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+
+ } else if (ExceptionSummary->Overflow != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+
+ } else if (ExceptionSummary->Underflow != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+
+ } else if (ExceptionSummary->InexactResult != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+
+ } else if (ExceptionSummary->IntegerOverflow != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_OVERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_STACK_CHECK;
+ }
+}
diff --git a/private/ntos/ke/alpha/vdm.c b/private/ntos/ke/alpha/vdm.c
new file mode 100644
index 000000000..165ac94d5
--- /dev/null
+++ b/private/ntos/ke/alpha/vdm.c
@@ -0,0 +1,54 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ VDM.C
+
+Abstract:
+
+ This module contains stubs for the x86-only APIs NtInitializeVDM and
+ NtVdmStartExecution.
+
+Author:
+
+ Dave Hastings (daveh) 2 Apr 1991
+
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+
+
+NTSTATUS
+NtInitializeVDM(
+ VOID
+ )
+{
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+NtVdmStartExecution (
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns STATUS_NOT_IMPLEMENTED
+
+Arguments:
+
+Return Value:
+
+ STATUS_NOT_IMPLEMENTED
+--*/
+{
+
+ return STATUS_NOT_IMPLEMENTED;
+
+}
diff --git a/private/ntos/ke/alpha/xxalign.s b/private/ntos/ke/alpha/xxalign.s
new file mode 100644
index 000000000..3997dd85e
--- /dev/null
+++ b/private/ntos/ke/alpha/xxalign.s
@@ -0,0 +1,405 @@
+// TITLE("Alignment emulation")
+//++
+//
+//
+// Copyright (c) 1992 Digital Equipment Corporation
+//
+// Module Name:
+//
+// align.s
+//
+// Abstract:
+//
+// This module implements the code to complete unaligned access
+// emulation.
+//
+// Author:
+//
+// Joe Notarangelo 14-May-1992
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksalpha.h"
+
+//++
+//
+// UQUAD
+// KiEmulateLoadLong(
+// IN PULONG UnalignedAddress
+// )
+//
+// Routine Description:
+//
+// This routine returns the longword value stored at the unaligned
+// address passed in UnalignedAddress.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to long data value.
+//
+// Return Value:
+//
+// The longword value at the address pointed to by UnalignedAddress.
+//
+//--
+
+ LEAF_ENTRY(KiEmulateLoadLong)
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u v0, 3(a0) // get 2nd quadword
+
+ extll t0, a0, t0 // extract bytes from low quadword
+ extlh v0, a0, v0 // extract bytes from high quadword
+ bis v0, t0, v0 // v0 = longword
+
+ addl v0, zero, v0 // ensure canonical longword form
+
+ ret zero, (ra) // return
+
+ .end KiEmulateLoadLong
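+
+//
+// Worked example (illustrative only): for an unaligned address whose low
+// three bits are 6, the longword spans two aligned quadwords. The first
+// ldq_u fetches the quadword containing the two low bytes of the value,
+// the second ldq_u (at offset 3) fetches the next quadword containing the
+// two high bytes, extll/extlh shift each fragment into its proper byte
+// lanes, and the bis merges them into the complete longword.
+//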
+
+
+
+//++
+//
+// UQUAD
+// KiEmulateLoadQuad(
+// IN PUQUAD UnalignedAddress
+// )
+//
+// Routine Description:
+//
+// This routine returns the quadword value stored at the unaligned
+// address passed in UnalignedAddress.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to quad data value.
+//
+// Return Value:
+//
+// The quadword value at the address pointed to by UnalignedAddress.
+//
+//--
+
+ LEAF_ENTRY(KiEmulateLoadQuad)
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u v0, 7(a0) // get 2nd quadword
+
+ extql t0, a0, t0 // extract bytes from low quadword
+ extqh v0, a0, v0 // extract bytes from high quadword
+ bis v0, t0, v0 // v0 = quadword
+
+ ret zero, (ra) // return
+
+ .end KiEmulateLoadQuad
+
+//++
+//
+// VOID
+// KiEmulateStoreLong(
+// IN PULONG UnalignedAddress
+// IN UQUAD Data
+// )
+//
+// Routine Description:
+//
+// This routine stores the longword in Data to the UnalignedAddress.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to longword destination.
+// Data(a1) - Supplies data value to store.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiEmulateStoreLong)
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u t1, 3(a0) // get 2nd quadword
+
+ inslh a1, a0, t2 // get bytes for high quadword
+ insll a1, a0, t3 // get bytes for low quadword
+
+ msklh t1, a0, t1 // clear corresponding bytes
+ mskll t0, a0, t0 // clear corresponding bytes
+
+ bis t1, t2, t1 // merge in bytes for high qw
+ bis t0, t3, t0 // merge in bytes for low qw
+
+ stq_u t1, 3(a0) // must store high first in case
+ stq_u t0, 0(a0) // address was actually aligned
+
+ ret zero, (ra) // return
+
+ .end KiEmulateStoreLong
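+
+//
+// N.B. The store ordering above matters when the longword does not cross a
+// quadword boundary: in that case both ldq_u instructions fetch the same
+// quadword, the high-half insert and mask leave t1 unmodified, and t0 holds
+// the merged result. Storing t1 first and t0 last therefore leaves the
+// merged data in memory, whereas the reverse order would overwrite it with
+// the original contents. The same reasoning applies to the quadword and
+// floating point store routines below.
+//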
+
+
+//++
+//
+// VOID
+// KiEmulateStoreQuad(
+// IN PUQUAD UnalignedAddress
+// IN UQUAD Data
+// )
+//
+// Routine Description:
+//
+// This routine stores the quadword in Data to the UnalignedAddress.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to quadword destination.
+// Data(a1) - Supplies the data value to store.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiEmulateStoreQuad)
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u t1, 7(a0) // get 2nd quadword
+
+ insqh a1, a0, t2 // get bytes for high quadword
+ insql a1, a0, t3 // get bytes for low quadword
+
+ mskqh t1, a0, t1 // clear corresponding bytes
+ mskql t0, a0, t0 // clear corresponding bytes
+
+ bis t1, t2, t1 // merge in bytes for high qw
+ bis t0, t3, t0 // merge in bytes for low qw
+
+ stq_u t1, 7(a0) // must store high first in case
+ stq_u t0, 0(a0) // address was actually aligned
+
+ ret zero, (ra) // return
+
+ .end KiEmulateStoreQuad
+
+
+//++
+//
+// UQUAD
+// KiEmulateLoadFloatIEEESingle(
+// IN PULONG UnalignedAddress
+// )
+//
+// Routine Description:
+//
+// This routine returns the IEEE Single value stored at the unaligned
+// address passed in UnalignedAddress.
+//
+// N.B. The value is returned as the memory format T-formatted
+// interpretation of the read S-format value.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to float single data.
+//
+// Return Value:
+//
+// The single float value at the address pointed to by UnalignedAddress.
+//
+//--
+
+ .struct 0
+FlTemp: .space 8 // temporary memory
+ .space 8 // filler for alignment
+FlFrameLength: // length of stack frame
+
+ LEAF_ENTRY(KiEmulateLoadFloatIEEESingle)
+
+ lda sp, -FlFrameLength(sp) // allocate temp space
+
+ //
+ // get the value into an integer register
+ //
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u v0, 3(a0) // get 2nd quadword
+
+ extll t0, a0, t0 // extract bytes from low quadword
+ extlh v0, a0, v0 // extract bytes from high quadword
+ bis v0, t0, v0 // v0 = longword
+
+
+ //
+ // v0 is now in S memory format; however, the return from exception
+ // sequence will restore the floating registers in T memory format,
+ // so convert v0 to T memory format.
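+ //
+ // N.B. The lds instruction converts the S memory format value to
+ // register format as it is loaded; storing the register back with
+ // stt then produces the quadword T memory format image that is
+ // returned in v0.
+ //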
+
+ stl v0, FlTemp(sp) // store bytes, S-mem-format
+ lds f0, FlTemp(sp) // now S-reg-format
+ stt f0, FlTemp(sp) // write as T-mem-format
+ ldq v0, FlTemp(sp) // return as T-mem-format
+
+ lda sp, FlFrameLength(sp) // deallocate stack frame
+
+ ret zero, (ra) // return
+
+ .end KiEmulateLoadFloatIEEESingle
+
+
+
+//++
+//
+// UQUAD
+// KiEmulateLoadFloatIEEEDouble(
+// IN PUQUAD UnalignedAddress
+// )
+//
+// Routine Description:
+//
+// This routine returns the quadword value stored at the unaligned
+// address passed in UnalignedAddress.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to double float data value.
+//
+// Return Value:
+//
+// The double float value at the address pointed to by UnalignedAddress.
+//
+//--
+
+ LEAF_ENTRY(KiEmulateLoadFloatIEEEDouble)
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u v0, 7(a0) // get 2nd quadword
+
+ extql t0, a0, t0 // extract bytes from low quadword
+ extqh v0, a0, v0 // extract bytes from high quadword
+ bis v0, t0, v0 // v0 = quadword
+
+ ret zero, (ra) // return
+
+ .end KiEmulateLoadFloatIEEEDouble
+
+//++
+//
+// VOID
+// KiEmulateStoreFloatIEEESingle(
+// IN PULONG UnalignedAddress
+// IN UQUAD Data
+// )
+//
+// Routine Description:
+//
+// This routine stores the float value in Data to the UnalignedAddress.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to float destination.
+// Data(a1) - Supplies the data value to store.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+FsTemp: .space 8 // temporary memory
+ .space 8 // filler for alignment
+FsFrameLength: // length of stack frame
+
+ LEAF_ENTRY(KiEmulateStoreFloatIEEESingle)
+
+ lda sp, -FsFrameLength(sp) // allocate stack space
+
+ //
+ // a1 is an integer version of the T-memory format
+ // convert it to integer version of S-memory format
+ //
+
+ stq a1, FsTemp(sp) // store bytes, T-mem-format
+ ldt f10, FsTemp(sp) // load back in now in S-reg-format
+ sts f10, FsTemp(sp) // now in S-mem-format
+ ldl a1, FsTemp(sp) // now integer version of S-mem
+
+
+ //
+ // now problem is just to store an unaligned longword
+ //
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u t1, 3(a0) // get 2nd quadword
+
+ inslh a1, a0, t2 // get bytes for high quadword
+ insll a1, a0, t3 // get bytes for low quadword
+
+ msklh t1, a0, t1 // clear corresponding bytes
+ mskll t0, a0, t0 // clear corresponding bytes
+
+ bis t1, t2, t1 // merge in bytes for high qw
+ bis t0, t3, t0 // merge in bytes for low qw
+
+ stq_u t1, 3(a0) // must store high first in case
+ stq_u t0, 0(a0) // was actually aligned
+
+ lda sp, FsFrameLength(sp) // restore stack frame
+
+ ret zero, (ra) // return
+
+ .end KiEmulateStoreFloatIEEESingle
+
+
+//++
+//
+// VOID
+// KiEmulateStoreFloatIEEEDouble(
+// IN PUQUAD UnalignedAddress
+// IN UQUAD Data
+// )
+//
+// Routine Description:
+//
+// This routine stores the quadword in Data to the UnalignedAddress.
+//
+// Arguments:
+//
+// UnalignedAddress(a0) - Supplies a pointer to double float destination.
+// Data(a1) - Supplies the data value to store.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiEmulateStoreFloatIEEEDouble)
+
+ ldq_u t0, 0(a0) // get 1st quadword
+ ldq_u t1, 7(a0) // get 2nd quadword
+
+ insqh a1, a0, t2 // get bytes for high quadword
+ insql a1, a0, t3 // get bytes for low quadword
+
+ mskqh t1, a0, t1 // clear corresponding bytes
+ mskql t0, a0, t0 // clear corresponding bytes
+
+ bis t1, t2, t1 // merge in bytes for high qw
+ bis t0, t3, t0 // merge in bytes for low qw
+
+ stq_u t1, 7(a0) // must store high first in case
+ stq_u t0, 0(a0) // was actually aligned
+
+ ret zero, (ra) // return
+
+ .end KiEmulateStoreFloatIEEEDouble
+
diff --git a/private/ntos/ke/apcobj.c b/private/ntos/ke/apcobj.c
new file mode 100644
index 000000000..1ecdf57ea
--- /dev/null
+++ b/private/ntos/ke/apcobj.c
@@ -0,0 +1,364 @@
+/*++
+
+Copyright (c) 1989-1994 Microsoft Corporation
+
+Module Name:
+
+ apcobj.c
+
+Abstract:
+
+ This module implements the kernel APC object. Functions are provided
+ to initialize, flush, insert, and remove APC objects.
+
+Author:
+
+ David N. Cutler (davec) 5-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input apc is
+// really a kapc and not something else, like deallocated pool.
+//
+
+#define ASSERT_APC(E) { \
+ ASSERT((E)->Type == ApcObject); \
+}
+
+
+VOID
+KeInitializeApc (
+ IN PRKAPC Apc,
+ IN PRKTHREAD Thread,
+ IN KAPC_ENVIRONMENT Environment,
+ IN PKKERNEL_ROUTINE KernelRoutine,
+ IN PKRUNDOWN_ROUTINE RundownRoutine OPTIONAL,
+ IN PKNORMAL_ROUTINE NormalRoutine OPTIONAL,
+ IN KPROCESSOR_MODE ApcMode OPTIONAL,
+ IN PVOID NormalContext OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel APC object. The thread, kernel
+ routine, and optionally a normal routine, processor mode, and normal
+ context parameter are stored in the APC object.
+
+Arguments:
+
+ Apc - Supplies a pointer to a control object of type APC.
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Environment - Supplies the environment in which the APC will execute.
+ Valid values for this parameter are: OriginalApcEnvironment,
+ AttachedApcEnvironment, or CurrentApcEnvironment.
+
+ KernelRoutine - Supplies a pointer to a function that is to be
+ executed at IRQL APC_LEVEL in kernel mode.
+
+ RundownRoutine - Supplies an optional pointer to a function that is to be
+ called if the APC is in a thread's APC queue when the thread terminates.
+
+ NormalRoutine - Supplies an optional pointer to a function that is
+ to be executed at IRQL 0 in the specified processor mode. If this
+ parameter is not specified, then the ProcessorMode and NormalContext
+ parameters are ignored.
+
+ ApcMode - Supplies the processor mode in which the function specified
+ by the NormalRoutine parameter is to be executed.
+
+ NormalContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the NormalRoutine parameter.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ASSERT(Environment <= CurrentApcEnvironment);
+
+ //
+ // Initialize standard control object header.
+ //
+
+ Apc->Type = ApcObject;
+ Apc->Size = sizeof(KAPC);
+
+ //
+ // Initialize the APC environment, thread address, kernel routine address,
+ // rundown routine address, normal routine address, processor mode, and
+ // normal context parameter. If the normal routine address is null, then
+ // the processor mode is defaulted to KernelMode and the APC is a special
+ // APC. Otherwise, the processor mode is taken from the argument list.
+ //
+
+ if (Environment == CurrentApcEnvironment) {
+ Apc->ApcStateIndex = Thread->ApcStateIndex;
+
+ } else {
+
+ ASSERT(Environment <= Thread->ApcStateIndex);
+
+ Apc->ApcStateIndex = Environment;
+ }
+
+ Apc->Thread = Thread;
+ Apc->KernelRoutine = KernelRoutine;
+ Apc->RundownRoutine = RundownRoutine;
+ Apc->NormalRoutine = NormalRoutine;
+ if (ARGUMENT_PRESENT(NormalRoutine)) {
+ Apc->ApcMode = ApcMode;
+ Apc->NormalContext = NormalContext;
+
+ } else {
+ Apc->ApcMode = KernelMode;
+ Apc->NormalContext = NIL;
+ }
+
+ Apc->Inserted = FALSE;
+ return;
+}
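+
+//
+// Usage sketch (illustrative only, not part of this module): a caller that
+// owns an APC object and a target thread might initialize and queue a
+// special kernel APC as follows; SampleApc and SampleKernelRoutine are
+// hypothetical names supplied by the caller.
+//
+//     KeInitializeApc(&SampleApc,
+//                     Thread,
+//                     OriginalApcEnvironment,
+//                     SampleKernelRoutine,
+//                     NULL,
+//                     NULL,
+//                     KernelMode,
+//                     NULL);
+//
+//     KeInsertQueueApc(&SampleApc, NULL, NULL, 0);
+//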
+
+PLIST_ENTRY
+KeFlushQueueApc (
+ IN PKTHREAD Thread,
+ IN KPROCESSOR_MODE ApcMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the APC queue selected by the specified processor
+ mode for the specified thread. An APC queue is flushed by removing the
+ listhead from the list, scanning the APC entries in the list, setting
+ their inserted variables to FALSE, and then returning the address of the
+ doubly linked list as the function value.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ ApcMode - Supplies the processor mode of the APC queue that is to
+ be flushed.
+
+Return Value:
+
+ The address of the first entry in the list of APC objects that were flushed
+ from the specified APC queue.
+
+--*/
+
+{
+
+ PKAPC Apc;
+ PLIST_ENTRY FirstEntry;
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level, lock dispatcher database, and
+ // lock the APC queue.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ KiAcquireSpinLock(&Thread->ApcQueueLock);
+
+ //
+ // Get address of first APC in the list and check if the list is
+ // empty or contains entries that should be flushed. If entries
+ // should be flushed, then scan the list of APC objects and set their
+ // inserted state to FALSE.
+ //
+
+ FirstEntry = Thread->ApcState.ApcListHead[ApcMode].Flink;
+ if (FirstEntry == &Thread->ApcState.ApcListHead[ApcMode]) {
+ FirstEntry = (PLIST_ENTRY)NULL;
+
+ } else {
+ RemoveEntryList(&Thread->ApcState.ApcListHead[ApcMode]);
+ NextEntry = FirstEntry;
+ do {
+ Apc = CONTAINING_RECORD(NextEntry, KAPC, ApcListEntry);
+ Apc->Inserted = FALSE;
+ NextEntry = NextEntry->Flink;
+ } while (NextEntry != FirstEntry);
+ }
+
+ //
+ // Unlock the APC queue, unlock the dispatcher database, lower IRQL to
+ // its previous value, and return address of first entry in list of APC
+ // objects that were flushed.
+ //
+
+ KiReleaseSpinLock(&Thread->ApcQueueLock);
+ KiUnlockDispatcherDatabase(OldIrql);
+ return FirstEntry;
+}
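+
+//
+// Usage sketch (illustrative only, not part of this module): the value
+// returned by KeFlushQueueApc is the first entry of a detached circular
+// list, so a caller might process the flushed APC objects as follows,
+// where FirstEntry is the returned value and Apc and NextEntry are caller
+// locals.
+//
+//     if (FirstEntry != NULL) {
+//         NextEntry = FirstEntry;
+//         do {
+//             Apc = CONTAINING_RECORD(NextEntry, KAPC, ApcListEntry);
+//             NextEntry = NextEntry->Flink;
+//
+//             //
+//             // Dispose of the APC, for example, by calling its rundown
+//             // routine if one was specified.
+//             //
+//
+//         } while (NextEntry != FirstEntry);
+//     }
+//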
+
+BOOLEAN
+KeInsertQueueApc (
+ IN PRKAPC Apc,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2,
+ IN KPRIORITY Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts an APC object into the APC queue specified by the
+ thread and processor mode fields of the APC object. If the APC object
+ is already in an APC queue or APC queuing is disabled, then no operation
+ is performed. Otherwise the APC object is inserted in the specified queue
+ and appropriate scheduling decisions are made.
+
+Arguments:
+
+ Apc - Supplies a pointer to a control object of type APC.
+
+ SystemArgument1, SystemArgument2 - Supply a set of two arguments that
+ contain untyped data provided by the executive.
+
+ Increment - Supplies the priority increment that is to be applied if
+ queuing the APC causes a thread wait to be satisfied.
+
+Return Value:
+
+ If the APC object is already in an APC queue or APC queuing is disabled,
+ then a value of FALSE is returned. Otherwise a value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Inserted;
+ KIRQL OldIrql;
+ PRKTHREAD Thread;
+
+ ASSERT_APC(Apc);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If APC queuing is disabled, then set inserted to FALSE. Else save
+ // system parameter values in APC object, and attempt to queue APC.
+ //
+
+ Thread = Apc->Thread;
+ if (Thread->ApcQueueable == FALSE) {
+ Inserted = FALSE;
+
+ } else {
+ Apc->SystemArgument1 = SystemArgument1;
+ Apc->SystemArgument2 = SystemArgument2;
+ Inserted = KiInsertQueueApc(Apc, Increment);
+ }
+
+ //
+ // Unlock the dispatcher database, lower IRQL to its previous value,
+ // and return whether APC object was inserted in APC queue.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Inserted;
+}
+
+BOOLEAN
+KeRemoveQueueApc (
+ IN PKAPC Apc
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes an APC object from an APC queue. If the APC object
+ is not in an APC queue, then no operation is performed. Otherwise the
+ APC object is removed from its current queue and its inserted state is
+ set FALSE.
+
+Arguments:
+
+ Apc - Supplies a pointer to a control object of type APC.
+
+Return Value:
+
+ If the APC object is not in an APC queue, then a value of FALSE is returned.
+ Otherwise a value of TRUE is returned.
+
+--*/
+
+{
+
+ PKAPC_STATE ApcState;
+ BOOLEAN Inserted;
+ KIRQL OldIrql;
+ PRKTHREAD Thread;
+
+ ASSERT_APC(Apc);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level, lock dispatcher database, and
+ // lock the APC queue.
+ //
+
+ Thread = Apc->Thread;
+ KiLockDispatcherDatabase(&OldIrql);
+ KiAcquireSpinLock(&Thread->ApcQueueLock);
+
+ //
+ // If the APC object is in an APC queue, then remove it from the queue
+ // and set its inserted state to FALSE. If the queue becomes empty, set
+ // the APC pending state to FALSE.
+ //
+
+ Inserted = Apc->Inserted;
+ if (Inserted != FALSE) {
+ Apc->Inserted = FALSE;
+ ApcState = Thread->ApcStatePointer[Apc->ApcStateIndex];
+ RemoveEntryList(&Apc->ApcListEntry);
+ if (IsListEmpty(&ApcState->ApcListHead[Apc->ApcMode]) != FALSE) {
+ if (Apc->ApcMode == KernelMode) {
+ ApcState->KernelApcPending = FALSE;
+
+ } else {
+ ApcState->UserApcPending = FALSE;
+ }
+ }
+ }
+
+ //
+ // Unlock the APC queue, unlock the dispatcher database, lower IRQL to
+ // its previous value, and return whether an APC object was removed from
+ // the APC queue.
+ //
+
+ KiReleaseSpinLock(&Thread->ApcQueueLock);
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Inserted;
+}
diff --git a/private/ntos/ke/apcsup.c b/private/ntos/ke/apcsup.c
new file mode 100644
index 000000000..024012eba
--- /dev/null
+++ b/private/ntos/ke/apcsup.c
@@ -0,0 +1,342 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ apcsup.c
+
+Abstract:
+
+ This module contains the support routines for the APC object. Functions
+ are provided to insert in an APC queue and to deliver user and kernel
+ mode APC's.
+
+Author:
+
+ David N. Cutler (davec) 14-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiDeliverApc (
+ IN KPROCESSOR_MODE PreviousMode,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called from the APC interrupt code and when one or
+ more of the APC pending flags are set at system exit and the previous
+ IRQL is zero. All special kernel APC's are delivered first, followed
+ by normal kernel APC's if one is not already in progress, and finally
+ if the user APC queue is not empty, the user APC pending flag is set,
+ and the previous mode is user, then a user APC is delivered. On entry
+ to this routine IRQL is set to APC_LEVEL.
+
+ N.B. The exception frame and trap frame addresses are only guaranteed
+ to be valid if, and only if, the previous mode is user.
+
+Arguments:
+
+ PreviousMode - Supplies the previous processor mode.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKAPC Apc;
+ PKKERNEL_ROUTINE KernelRoutine;
+ PLIST_ENTRY NextEntry;
+ PVOID NormalContext;
+ PKNORMAL_ROUTINE NormalRoutine;
+ KIRQL OldIrql;
+ PVOID SystemArgument1;
+ PVOID SystemArgument2;
+ PKTHREAD Thread;
+
+ //
+ // Raise IRQL to dispatcher level and lock the APC queue.
+ //
+
+ Thread = KeGetCurrentThread();
+ KiLockApcQueue(Thread, &OldIrql);
+
+ //
+ // Get address of current thread object, clear kernel APC pending, and
+ // check if any kernel mode APC's can be delivered.
+ //
+
+ Thread->ApcState.KernelApcPending = FALSE;
+ while (IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]) == FALSE) {
+ NextEntry = Thread->ApcState.ApcListHead[KernelMode].Flink;
+ Apc = CONTAINING_RECORD(NextEntry, KAPC, ApcListEntry);
+ KernelRoutine = Apc->KernelRoutine;
+ NormalRoutine = Apc->NormalRoutine;
+ NormalContext = Apc->NormalContext;
+ SystemArgument1 = Apc->SystemArgument1;
+ SystemArgument2 = Apc->SystemArgument2;
+ if (NormalRoutine == (PKNORMAL_ROUTINE)NULL) {
+
+ //
+ // First entry in the kernel APC queue is a special kernel APC.
+ // Remove the entry from the APC queue, set its inserted state
+ // to FALSE, release the APC queue lock, and call the kernel
+ // routine. On return, raise IRQL to dispatcher level and lock
+ // the APC queue.
+ //
+
+ RemoveEntryList(NextEntry);
+ Apc->Inserted = FALSE;
+ KiUnlockApcQueue(Thread, OldIrql);
+ (KernelRoutine)(Apc, &NormalRoutine, &NormalContext,
+ &SystemArgument1, &SystemArgument2);
+ KiLockApcQueue(Thread, &OldIrql);
+
+ } else {
+
+ //
+ // First entry in the kernel APC queue is a normal kernel APC.
+ // If there is not a normal kernel APC in progress and kernel
+ // APC's are not disabled, then remove the entry from the APC
+ // queue, set its inserted state to FALSE, release the APC queue
+ // lock, call the specified kernel routine, set kernel APC in
+ // progress, lower the IRQL to zero, and call the normal kernel
+ // APC routine. On return raise IRQL to dispatcher level, lock
+ // the APC queue, and clear kernel APC in progress.
+ //
+
+ if ((Thread->ApcState.KernelApcInProgress == FALSE) &&
+ (Thread->KernelApcDisable == 0)) {
+ RemoveEntryList(NextEntry);
+ Apc->Inserted = FALSE;
+ KiUnlockApcQueue(Thread, OldIrql);
+ (KernelRoutine)(Apc, &NormalRoutine, &NormalContext,
+ &SystemArgument1, &SystemArgument2);
+ if (NormalRoutine != (PKNORMAL_ROUTINE)NULL) {
+ Thread->ApcState.KernelApcInProgress = TRUE;
+ KeLowerIrql(0);
+ (NormalRoutine)(NormalContext, SystemArgument1,
+ SystemArgument2);
+ KeRaiseIrql(APC_LEVEL, &OldIrql);
+ }
+
+ KiLockApcQueue(Thread, &OldIrql);
+ Thread->ApcState.KernelApcInProgress = FALSE;
+
+ } else {
+ KiUnlockApcQueue(Thread, OldIrql);
+ return;
+ }
+ }
+ }
+
+ //
+ // Kernel APC queue is empty. If the previous mode is user, user APC
+ // pending is set, and the user APC queue is not empty, then remove
+ // the first entry from the user APC queue, set its inserted state to
+ // FALSE, clear user APC pending, release the APC queue lock,
+ // and call the specified kernel routine. If the normal routine address
+ // is not NULL on return from the kernel routine, then initialize the
+ // user mode APC context and return. Otherwise, check to determine if
+ // another user mode APC can be processed.
+ //
+
+ if ((IsListEmpty(&Thread->ApcState.ApcListHead[UserMode]) == FALSE) &&
+ (PreviousMode == UserMode) && (Thread->ApcState.UserApcPending == TRUE)) {
+ Thread->ApcState.UserApcPending = FALSE;
+ NextEntry = Thread->ApcState.ApcListHead[UserMode].Flink;
+ Apc = CONTAINING_RECORD(NextEntry, KAPC, ApcListEntry);
+ KernelRoutine = Apc->KernelRoutine;
+ NormalRoutine = Apc->NormalRoutine;
+ NormalContext = Apc->NormalContext;
+ SystemArgument1 = Apc->SystemArgument1;
+ SystemArgument2 = Apc->SystemArgument2;
+ RemoveEntryList(NextEntry);
+ Apc->Inserted = FALSE;
+ KiUnlockApcQueue(Thread, OldIrql);
+ (KernelRoutine)(Apc, &NormalRoutine, &NormalContext,
+ &SystemArgument1, &SystemArgument2);
+
+ if (NormalRoutine == (PKNORMAL_ROUTINE)NULL) {
+ KeTestAlertThread(UserMode);
+
+ } else {
+ KiInitializeUserApc(ExceptionFrame, TrapFrame, NormalRoutine,
+ NormalContext, SystemArgument1, SystemArgument2);
+ }
+
+ } else {
+ KiUnlockApcQueue(Thread, OldIrql);
+ }
+
+ return;
+}
+
+BOOLEAN
+FASTCALL
+KiInsertQueueApc (
+ IN PKAPC Apc,
+ IN KPRIORITY Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts an APC object into a thread's APC queue. The address
+ of the thread object, the APC queue, and the type of APC are all derived
+ from the APC object. If the APC object is already in an APC queue, then
+ no operation is performed and a function value of FALSE is returned. Else
+ the APC is inserted in the specified APC queue, its inserted state is set
+ to TRUE, and a function value of TRUE is returned. The APC will actually
+ be delivered when proper enabling conditions exist.
+
+Arguments:
+
+ Apc - Supplies a pointer to a control object of type APC.
+
+ Increment - Supplies the priority increment that is to be applied if
+ queuing the APC causes a thread wait to be satisfied.
+
+Return Value:
+
+ If the APC object is already in an APC queue, then a value of FALSE is
+ returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ KPROCESSOR_MODE ApcMode;
+ PKAPC ApcEntry;
+ PKAPC_STATE ApcState;
+ BOOLEAN Inserted;
+ PLIST_ENTRY ListEntry;
+ PKTHREAD Thread;
+
+ //
+ // If the APC object is already in an APC queue, then set inserted to
+ // FALSE. Else insert the APC object in the proper queue, set the APC
+ // inserted state to TRUE, check to determine if the APC should be delivered
+ // immediately, and set inserted to TRUE.
+ //
+ // For multiprocessor performance, the following code utilizes the fact
+ // that kernel APC disable count is incremented before checking whether
+ // the kernel APC queue is nonempty.
+ //
+ // See KeLeaveCriticalRegion().
+ //
+
+ Thread = Apc->Thread;
+ KiAcquireSpinLock(&Thread->ApcQueueLock);
+ if (Apc->Inserted) {
+ Inserted = FALSE;
+
+ } else {
+ ApcState = Thread->ApcStatePointer[Apc->ApcStateIndex];
+
+ //
+ // Insert the APC after all other special APC entries selected by
+ // the processor mode if the normal routine value is null. Else
+ // insert the APC object at the tail of the APC queue selected by
+ // the processor mode unless the APC mode is user and the address
+ // of the special APC routine is exit thread, in which case insert
+ // the APC at the front of the list and set user APC pending.
+ //
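+ //
+ // For example, if the selected queue already contains special APCs
+ // S1 and S2 followed by normal APCs N1 and N2, a newly inserted
+ // special APC is placed between S2 and N1, while a newly inserted
+ // normal APC is appended after N2 (unless it is the user mode exit
+ // APC case described above, which is inserted at the front of the
+ // queue and sets user APC pending).
+ //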
+
+ ApcMode = Apc->ApcMode;
+ if (Apc->NormalRoutine != NULL) {
+ if ((ApcMode != KernelMode) && (Apc->KernelRoutine == PsExitSpecialApc)) {
+ Thread->ApcState.UserApcPending = TRUE;
+ InsertHeadList(&ApcState->ApcListHead[ApcMode],
+ &Apc->ApcListEntry);
+
+ } else {
+ InsertTailList(&ApcState->ApcListHead[ApcMode],
+ &Apc->ApcListEntry);
+ }
+
+ } else {
+ ListEntry = ApcState->ApcListHead[ApcMode].Flink;
+ while (ListEntry != &ApcState->ApcListHead[ApcMode]) {
+ ApcEntry = CONTAINING_RECORD(ListEntry, KAPC, ApcListEntry);
+ if (ApcEntry->NormalRoutine != NULL) {
+ break;
+ }
+
+ ListEntry = ListEntry->Flink;
+ }
+
+ ListEntry = ListEntry->Blink;
+ InsertHeadList(ListEntry, &Apc->ApcListEntry);
+ }
+
+ Apc->Inserted = TRUE;
+
+ //
+ // If the APC index from the APC object matches the APC Index of
+ // the thread, then check to determine if the APC should interrupt
+ // thread execution or sequence the thread out of a wait state.
+ //
+
+ if (Apc->ApcStateIndex == Thread->ApcStateIndex) {
+
+ //
+ // If the processor mode of the APC is kernel, then check if
+ // the APC should either interrupt the thread or sequence the
+ // thread out of a Waiting state. Else check if the APC should
+ // sequence the thread out of an alertable Waiting state.
+ //
+
+ if (ApcMode == KernelMode) {
+ Thread->ApcState.KernelApcPending = TRUE;
+ if (Thread->State == Running) {
+ KiRequestApcInterrupt(Thread->NextProcessor);
+
+ } else if ((Thread->State == Waiting) &&
+ (Thread->WaitIrql == 0) &&
+ ((Apc->NormalRoutine == NULL) ||
+ ((Thread->KernelApcDisable == 0) &&
+ (Thread->ApcState.KernelApcInProgress == FALSE)))) {
+ KiUnwaitThread(Thread, STATUS_KERNEL_APC, Increment);
+ }
+
+ } else if ((Thread->State == Waiting) &&
+ (Thread->WaitMode == UserMode) &&
+ (Thread->Alertable)) {
+ Thread->ApcState.UserApcPending = TRUE;
+ KiUnwaitThread(Thread, STATUS_USER_APC, Increment);
+ }
+ }
+
+ Inserted = TRUE;
+ }
+
+ //
+ // Unlock the APC queue lock, and return whether the APC object was
+ // inserted in an APC queue.
+ //
+
+ KiReleaseSpinLock(&Thread->ApcQueueLock);
+ return Inserted;
+}
diff --git a/private/ntos/ke/balmgr.c b/private/ntos/ke/balmgr.c
new file mode 100644
index 000000000..92cf281ae
--- /dev/null
+++ b/private/ntos/ke/balmgr.c
@@ -0,0 +1,824 @@
+/*++
+
+Copyright (c) 1991-1994 Microsoft Corporation
+
+Module Name:
+
+ balmgr.c
+
+Abstract:
+
+ This module implements the NT balance set manager. Normally the kernel
+ does not contain "policy" code. However, the balance set manager needs
+ to be able to traverse the kernel data structures and, therefore, the
+ code has been located as logically part of the kernel.
+
+ The balance set manager performs the following operations:
+
+ 1. Makes the kernel stack of threads that have been waiting for a
+ certain amount of time, nonresident.
+
+ 2. Removes processes from the balance set when memory gets tight
+ and brings processes back into the balance set when there is
+ more memory available.
+
+ 3. Makes the kernel stack resident for threads whose wait has been
+ completed, but whose stack is nonresident.
+
+ 4. Arbitrarily boosts the priority of a selected set of threads
+ to prevent priority inversion in variable priority levels.
+
+ In general, the balance set manager only is active during periods when
+ memory is tight.
+
+Author:
+
+ David N. Cutler (davec) 13-Jul-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define balance set wait object types.
+//
+
+typedef enum _BALANCE_OBJECT {
+ TimerExpiration,
+ WorkingSetManagerEvent,
+ MaximumObject
+ } BALANCE_OBJECT;
+
+//
+// Define maximum number of thread stacks that can be out swapped in
+// a single time period.
+//
+
+#define MAXIMUM_THREAD_STACKS 20
+
+//
+// Define periodic wait interval value.
+//
+
+#define PERIODIC_INTERVAL (1 * 1000 * 1000 * 10)
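+
+//
+// N.B. The interval is expressed in 100-nanosecond units, so the value
+// above corresponds to a one second period; it is negated when the timer
+// is set in order to specify a relative, rather than absolute, due time.
+//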
+
+//
+// Define amount of time a thread can be in the ready state without having
+// is priority boosted (approximately 4 seconds).
+//
+
+#define READY_WITHOUT_RUNNING (4 * 75)
+
+//
+// Define kernel stack protect time. For small systems the protect time
+// is 3 seconds. For all other systems, the protect time is 7 seconds.
+//
+
+#define SMALL_SYSTEM_STACK_PROTECT_TIME (3 * 75)
+#define STACK_PROTECT_TIME (7 * 75)
+#define STACK_SCAN_PERIOD 4
+ULONG KiStackProtectTime;
+
+//
+// Define number of threads to scan each period and the priority boost bias.
+//
+
+#define THREAD_BOOST_BIAS 2
+#define THREAD_BOOST_PRIORITY (LOW_REALTIME_PRIORITY - THREAD_BOOST_BIAS)
+#define THREAD_SCAN_PRIORITY (THREAD_BOOST_PRIORITY - 1)
+#define THREAD_READY_COUNT 10
+#define THREAD_SCAN_COUNT 16
+
+//
+// Define local procedure prototypes.
+//
+
+VOID
+KiInSwapKernelStacks (
+ IN KIRQL PreviousIrql
+ );
+
+VOID
+KiInSwapProcesses (
+ IN KIRQL PreviousIrql
+ );
+
+VOID
+KiOutSwapKernelStacks (
+ IN KIRQL PreviousIrql
+ );
+
+VOID
+KiOutSwapProcesses (
+ IN KIRQL PreviousIrql
+ );
+
+VOID
+KiScanReadyQueues (
+ VOID
+ );
+
+//
+// Define thread table index static data.
+//
+
+ULONG KiReadyQueueIndex = 1;
+
+//
+// Define swap request flag.
+//
+
+BOOLEAN KiStackOutSwapRequest = FALSE;
+
+VOID
+KeBalanceSetManager (
+ IN PVOID Context
+ )
+
+/*++
+
+Routine Description:
+
+ This function is the startup code for the balance set manager. The
+ balance set manager thread is created during system initialization
+ and begins execution in this function.
+
+Arguments:
+
+ Context - Supplies a pointer to an arbitrary data structure (NULL).
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LARGE_INTEGER DueTime;
+ KTIMER PeriodTimer;
+ KIRQL OldIrql;
+ ULONG StackScanPeriod;
+ NTSTATUS Status;
+ KWAIT_BLOCK WaitBlockArray[MaximumObject];
+ PVOID WaitObjects[MaximumObject];
+
+ //
+ // Raise the thread priority to the lowest realtime level.
+ //
+
+ KeSetPriorityThread(KeGetCurrentThread(), LOW_REALTIME_PRIORITY);
+
+ //
+ // Initialize the periodic timer, set it to expire one period from
+ // now, and set the stack scan period.
+ //
+
+ KeInitializeTimer(&PeriodTimer);
+ DueTime.QuadPart = - PERIODIC_INTERVAL;
+ KeSetTimer(&PeriodTimer, DueTime, NULL);
+ StackScanPeriod = STACK_SCAN_PERIOD;
+
+ //
+ // Initialize the wait objects array.
+ //
+
+ WaitObjects[TimerExpiration] = (PVOID)&PeriodTimer;
+ WaitObjects[WorkingSetManagerEvent] = (PVOID)&MmWorkingSetManagerEvent;
+
+ //
+ // Loop forever processing balance set manager events.
+ //
+
+ do {
+
+ //
+ // Wait for the memory management working set manager event or the
+ // expiration of the periodic timeout at which the balance set
+ // manager runs.
+ //
+
+ Status = KeWaitForMultipleObjects(MaximumObject,
+ &WaitObjects[0],
+ WaitAny,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL,
+ &WaitBlockArray[0]);
+
+ //
+ // Switch on the wait status.
+ //
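+ //
+ // N.B. Because the wait above is a WaitAny wait, the wait status is
+ // the zero based index of the object that satisfied the wait, which
+ // is why the BALANCE_OBJECT enumeration values can be used directly
+ // as the case labels below.
+ //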
+
+ switch (Status) {
+
+ //
+ // Periodic timer expiration.
+ //
+
+ case TimerExpiration:
+
+ //
+ // Attempt to initiate outswapping of kernel stacks.
+ //
+
+ StackScanPeriod -= 1;
+ if (StackScanPeriod == 0) {
+ StackScanPeriod = STACK_SCAN_PERIOD;
+ KiLockDispatcherDatabase(&OldIrql);
+ if (KiStackOutSwapRequest == FALSE) {
+ KiStackOutSwapRequest = TRUE;
+ KiUnlockDispatcherDatabase(OldIrql);
+ KeSetEvent(&KiSwapEvent, 0, FALSE);
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+ }
+
+ //
+ // Adjust the depth of lookaside lists.
+ //
+
+ ExAdjustLookasideDepth();
+
+ //
+ // Scan ready queues and boost thread priorities as appropriate.
+ //
+
+ KiScanReadyQueues();
+
+ //
+ // Execute the virtual memory working set manager.
+ //
+
+ MmWorkingSetManager();
+
+ //
+ // Set the timer to expire at the next periodic interval.
+ //
+
+ KeSetTimer(&PeriodTimer, DueTime, NULL);
+ break;
+
+ //
+ // Working set manager event.
+ //
+
+ case WorkingSetManagerEvent:
+
+ //
+ // Call the working set manager to trim working sets.
+ //
+
+ MmWorkingSetManager();
+ break;
+
+ //
+ // Illegal return status.
+ //
+
+ default:
+ KdPrint(("BALMGR: Illegal wait status, %lx =\n", Status));
+ break;
+ }
+
+ } while (TRUE);
+ return;
+}
+
+VOID
+KeSwapProcessOrStack (
+ IN PVOID Context
+ )
+
+/*++
+
+Routine Description:
+
+ This thread controls the swapping of processes and kernel stacks. The
+ order of evaluation is:
+
+ Outswap kernel stacks
+ Outswap processes
+ Inswap processes
+ Inswap kernel stacks
+
+Arguments:
+
+ Context - Supplies a pointer to the routine context - not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ NTSTATUS Status;
+
+ //
+ // Raise the thread priority to the lowest realtime level + 7 (i.e.,
+ // priority 23).
+ //
+
+ KeSetPriorityThread(KeGetCurrentThread(), LOW_REALTIME_PRIORITY + 7);
+
+ if (MmQuerySystemSize() == MmSmallSystem) {
+ KiStackProtectTime = SMALL_SYSTEM_STACK_PROTECT_TIME;
+ } else {
+ KiStackProtectTime = STACK_PROTECT_TIME;
+ }
+
+ //
+ // Loop forever processing swap events.
+ //
+
+ do {
+
+ //
+ // Wait for a swap event to occur.
+ //
+
+ Status = KeWaitForSingleObject(&KiSwapEvent,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Loop until all of the four possible actions cannot be initiated.
+ //
+
+ do {
+
+ //
+ // If a request has been made to out swap kernel stacks, then
+ // attempt to outswap kernel stacks. Otherwise, if the process
+ // out swap list is not empty, then initiate process outswapping.
+ // Otherwise, if the process inswap list is not empty, then start
+ // process inswapping. Otherwise, if the kernal stack inswap list
+ // is not active, then initiate kernel stack inswapping. Otherwise,
+ // no work is available.
+ //
+
+ if (KiStackOutSwapRequest != FALSE) {
+ KiStackOutSwapRequest = FALSE;
+ KiOutSwapKernelStacks(OldIrql);
+ continue;
+
+ } else if (IsListEmpty(&KiProcessOutSwapListHead) == FALSE) {
+ KiOutSwapProcesses(OldIrql);
+ continue;
+
+ } else if (IsListEmpty(&KiProcessInSwapListHead) == FALSE) {
+ KiInSwapProcesses(OldIrql);
+ continue;
+
+ } else if (IsListEmpty(&KiStackInSwapListHead) == FALSE) {
+ KiInSwapKernelStacks(OldIrql);
+ continue;
+
+ } else {
+ break;
+ }
+ } while (TRUE);
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ } while (TRUE);
+ return;
+}
+
+VOID
+KiInSwapKernelStacks (
+ IN KIRQL PreviousIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function in swaps the kernel stack for threads whose wait has been
+ completed and whose kernel stack is nonresident.
+
+ N.B. The dispatcher database lock is held on entry to this routine and
+ must be held on exit from this routine.
+
+Arguments:
+
+ PreviousIrql - Supplies the previous IRQL.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+ PKTHREAD Thread;
+
+ //
+ // Process the stack in swap list and for each thread removed from the
+ // list, make its kernel stack resident, and ready it for execution.
+ //
+
+ OldIrql = PreviousIrql;
+ NextEntry = KiStackInSwapListHead.Flink;
+ while (NextEntry != &KiStackInSwapListHead) {
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, WaitListEntry);
+ RemoveEntryList(NextEntry);
+ KiUnlockDispatcherDatabase(OldIrql);
+ MmInPageKernelStack(Thread);
+ KiLockDispatcherDatabase(&OldIrql);
+ Thread->KernelStackResident = TRUE;
+ KiReadyThread(Thread);
+ NextEntry = KiStackInSwapListHead.Flink;
+ }
+
+ return;
+}
+
+VOID
+KiInSwapProcesses (
+ IN KIRQL PreviousIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function in swaps processes.
+
+ N.B. The dispatcher database lock is held on entry to this routine and
+ must be held on exit from this routine.
+
+Arguments:
+
+ PreviousIrql - Supplies the previous IRQL.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+
+ //
+ // Process the process in swap list and for each process removed from
+ // the list, make the process resident, and process its ready list.
+ //
+
+ OldIrql = PreviousIrql;
+ NextEntry = KiProcessInSwapListHead.Flink;
+ while (NextEntry != &KiProcessInSwapListHead) {
+ Process = CONTAINING_RECORD(NextEntry, KPROCESS, SwapListEntry);
+ RemoveEntryList(NextEntry);
+ KiUnlockDispatcherDatabase(OldIrql);
+ MmInSwapProcess(Process);
+ KiLockDispatcherDatabase(&OldIrql);
+ Process->State = ProcessInMemory;
+ NextEntry = Process->ReadyListHead.Flink;
+ while (NextEntry != &Process->ReadyListHead) {
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, WaitListEntry);
+ RemoveEntryList(NextEntry);
+ Thread->ProcessReadyQueue = FALSE;
+ KiReadyThread(Thread);
+ NextEntry = Process->ReadyListHead.Flink;
+ }
+
+ NextEntry = KiProcessInSwapListHead.Flink;
+ }
+
+ return;
+}
+
+VOID
+KiOutSwapKernelStacks (
+ IN KIRQL PreviousIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function attempts to out swap the kernel stack for threads whose
+ wait mode is user and which have been waiting longer than the stack
+ protect time.
+
+ N.B. The dispatcher database lock is held on entry to this routine and
+ must be held on exit from this routine.
+
+Arguments:
+
+ PreviousIrql - Supplies the previous IRQL.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG CurrentTick;
+ PLIST_ENTRY NextEntry;
+ ULONG NumberOfThreads;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+ PKTHREAD ThreadObjects[MAXIMUM_THREAD_STACKS];
+ ULONG WaitTime;
+
+ //
+ // Scan the waiting in list and check if the wait time exceeds the
+ // stack protect time. If the protect time is exceeded, then make
+ // the kernel stack of the waiting thread nonresident. If the count
+ // of the number of stacks that are resident for the process reaches
+ // zero, then insert the process in the outswap list and set its state
+ // to transition.
+ //
+
+ CurrentTick = KiQueryLowTickCount();
+ OldIrql = PreviousIrql;
+ NextEntry = KiWaitInListHead.Flink;
+ NumberOfThreads = 0;
+ while ((NextEntry != &KiWaitInListHead) &&
+ (NumberOfThreads < MAXIMUM_THREAD_STACKS)) {
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, WaitListEntry);
+
+ ASSERT(Thread->WaitMode == UserMode);
+
+ NextEntry = NextEntry->Flink;
+ WaitTime = CurrentTick - Thread->WaitTime;
+ if ((WaitTime >= KiStackProtectTime) &&
+ KiIsThreadNumericStateSaved(Thread)) {
+ Thread->KernelStackResident = FALSE;
+ ThreadObjects[NumberOfThreads] = Thread;
+ NumberOfThreads += 1;
+ RemoveEntryList(&Thread->WaitListEntry);
+ InsertTailList(&KiWaitOutListHead, &Thread->WaitListEntry);
+ Process = Thread->ApcState.Process;
+ Process->StackCount -= 1;
+ if (Process->StackCount == 0) {
+ Process->State = ProcessInTransition;
+ InsertTailList(&KiProcessOutSwapListHead,
+ &Process->SwapListEntry);
+ }
+ }
+ }
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Out swap the kernel stacks for the selected set of threads.
+ //
+
+ while (NumberOfThreads > 0) {
+ NumberOfThreads -= 1;
+ Thread = ThreadObjects[NumberOfThreads];
+ MmOutPageKernelStack(Thread);
+ }
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ return;
+}
+
+VOID
+KiOutSwapProcesses (
+ IN KIRQL PreviousIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function out swaps processes.
+
+ N.B. The dispatcher database lock is held on entry to this routine and
+ must be held on exit from this routine.
+
+Arguments:
+
+ PreviousIrql - Supplies the previous IRQL.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+
+ //
+ // Process the process out swap list and for each process removed from
+ // the list, make the process nonresident, and process its ready list.
+ //
+
+ OldIrql = PreviousIrql;
+ NextEntry = KiProcessOutSwapListHead.Flink;
+ while (NextEntry != &KiProcessOutSwapListHead) {
+ Process = CONTAINING_RECORD(NextEntry, KPROCESS, SwapListEntry);
+ RemoveEntryList(NextEntry);
+ NextEntry = Process->ReadyListHead.Flink;
+ if (NextEntry != &Process->ReadyListHead) {
+ Process->State = ProcessInMemory;
+ while (NextEntry != &Process->ReadyListHead) {
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, WaitListEntry);
+ RemoveEntryList(NextEntry);
+ Thread->ProcessReadyQueue = FALSE;
+ KiReadyThread(Thread);
+ NextEntry = Process->ReadyListHead.Flink;
+ }
+
+ } else {
+ Process->State = ProcessOutOfMemory;
+ KiUnlockDispatcherDatabase(OldIrql);
+ MmOutSwapProcess(Process);
+ KiLockDispatcherDatabase(&OldIrql);
+ }
+
+ NextEntry = KiProcessOutSwapListHead.Flink;
+ }
+
+ return;
+}
+
+VOID
+KiScanReadyQueues (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function scans a section of the ready queues and attempts to
+ boost the priority of threads that run at variable priority levels.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Count;
+ ULONG CurrentTick;
+ PLIST_ENTRY Entry;
+ ULONG Index;
+ PLIST_ENTRY ListHead;
+ ULONG Number;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ ULONG Summary;
+ PKTHREAD Thread;
+ ULONG WaitTime;
+
+ //
+ // Lock the dispatcher database and check if there are any ready threads
+ // queued at the scannable priority levels.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ Summary = KiReadySummary & ((1 << THREAD_BOOST_PRIORITY) - 2);
+ if (Summary != 0) {
+ Count = THREAD_READY_COUNT;
+ CurrentTick = KiQueryLowTickCount();
+ Index = KiReadyQueueIndex;
+ Number = THREAD_SCAN_COUNT;
+ do {
+
+ //
+ // If the current ready queue index is beyond the end of the range
+ // of priorities that are scanned, then wrap back to the beginning
+ // priority.
+ //
+
+ if (Index > THREAD_SCAN_PRIORITY) {
+ Index = 1;
+ }
+
+ //
+ // If there are any ready threads queued at the current priority
+ // level, then attempt to boost the thread priority.
+ //
+
+ if (((Summary >> Index) & 1) != 0) {
+ Summary ^= (1 << Index);
+ ListHead = &KiDispatcherReadyListHead[Index];
+ Entry = ListHead->Flink;
+
+ ASSERT(Entry != ListHead);
+
+ do {
+ Thread = CONTAINING_RECORD(Entry, KTHREAD, WaitListEntry);
+
+ //
+ // If the thread has been waiting for an extended period
+ // and is not currently running with a boost, then boost
+ // the priority of the current thread.
+ //
+
+ WaitTime = CurrentTick - Thread->WaitTime;
+ if ((WaitTime >= READY_WITHOUT_RUNNING) &&
+ (Thread->PriorityDecrement == 0)) {
+
+ //
+ // Remove the thread from the respective ready queue.
+ //
+
+ Entry = Entry->Blink;
+ RemoveEntryList(Entry->Flink);
+ if (IsListEmpty(ListHead) != FALSE) {
+ ClearMember(Index, KiReadySummary);
+ }
+
+ //
+ // Compute the priority decrement value, set the new
+ // thread priority, set the decrement count, set the
+ // thread quantum, and ready the thread for execution.
+ //
+
+ Thread->PriorityDecrement =
+ THREAD_BOOST_PRIORITY - Thread->Priority;
+
+ Thread->DecrementCount = ROUND_TRIP_DECREMENT_COUNT;
+ Thread->Priority = THREAD_BOOST_PRIORITY;
+ Process = Thread->ApcState.Process;
+ Thread->Quantum = Process->ThreadQuantum * 2;
+ KiReadyThread(Thread);
+ Count -= 1;
+ }
+
+ Entry = Entry->Flink;
+ Number -= 1;
+ } while ((Entry != ListHead) & (Number != 0) & (Count != 0));
+ }
+
+ Index += 1;
+ } while ((Summary != 0) & (Number != 0) & (Count != 0));
+ }
+
+ //
+ // Unlock the dispatcher database and save the last ready queue index
+ // for the next scan.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ if ((Count != 0) && (Number != 0)) {
+ KiReadyQueueIndex = 1;
+
+ } else {
+ KiReadyQueueIndex = Index;
+ }
+
+ return;
+}
diff --git a/private/ntos/ke/bugcheck.c b/private/ntos/ke/bugcheck.c
new file mode 100644
index 000000000..b820f75e0
--- /dev/null
+++ b/private/ntos/ke/bugcheck.c
@@ -0,0 +1,728 @@
+/*++
+
+Copyright (c) 1989-1993 Microsoft Corporation
+
+Module Name:
+
+ bugcheck.c
+
+Abstract:
+
+ This module implements bug check and system shutdown code.
+
+Author:
+
+ Mark Lucovsky (markl) 30-Aug-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiScanBugCheckCallbackList (
+ VOID
+ );
+
+//
+// Define bug check recursion counter and a context buffer.
+//
+
+ULONG KeBugCheckCount = 1;
+
+
+VOID
+KeBugCheck (
+ IN ULONG BugCheckCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function crashes the system in a controlled manner.
+
+Arguments:
+
+ BugCheckCode - Supplies the reason for the bug check.
+
+Return Value:
+
+ None.
+
+--*/
+{
+ KeBugCheckEx(BugCheckCode,0,0,0,0);
+}
+
+ULONG KiBugCheckData[5];
+
+BOOLEAN
+KeGetBugMessageText(
+ IN ULONG MessageId,
+ IN PANSI_STRING ReturnedString OPTIONAL
+ )
+{
+ ULONG i;
+ PUCHAR s;
+ PMESSAGE_RESOURCE_BLOCK MessageBlock;
+ PUCHAR Buffer;
+ BOOLEAN Result;
+
+ Result = FALSE;
+ try {
+ if (KiBugCodeMessages != NULL) {
+ MessageBlock = &KiBugCodeMessages->Blocks[0];
+ for (i = KiBugCodeMessages->NumberOfBlocks; i; i--) {
+ if (MessageId >= MessageBlock->LowId &&
+ MessageId <= MessageBlock->HighId) {
+
+ s = (PCHAR)KiBugCodeMessages + MessageBlock->OffsetToEntries;
+ for (i = MessageId - MessageBlock->LowId; i; i--) {
+ s += ((PMESSAGE_RESOURCE_ENTRY)s)->Length;
+ }
+
+ Buffer = ((PMESSAGE_RESOURCE_ENTRY)s)->Text;
+
+ i = strlen(Buffer) - 1;
+ while (i > 0 && (Buffer[i] == '\n' ||
+ Buffer[i] == '\r' ||
+ Buffer[i] == 0
+ )
+ ) {
+ if (!ARGUMENT_PRESENT( ReturnedString )) {
+ Buffer[i] = 0;
+ }
+ i -= 1;
+ }
+
+ if (!ARGUMENT_PRESENT( ReturnedString )) {
+ HalDisplayString(Buffer);
+ }
+ else {
+ ReturnedString->Buffer = Buffer;
+ ReturnedString->Length = (USHORT)(i+1);
+ ReturnedString->MaximumLength = (USHORT)(i+1);
+ }
+ Result = TRUE;
+ break;
+ }
+ MessageBlock++;
+ }
+ }
+ } except ( EXCEPTION_EXECUTE_HANDLER ) {
+ ;
+ }
+
+ return Result;
+}
+
+
+
+PCHAR
+KeBugCheckUnicodeToAnsi(
+ IN PUNICODE_STRING UnicodeString,
+ OUT PCHAR AnsiBuffer,
+ IN ULONG MaxAnsiLength
+ )
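+
+/*++
+
+Routine Description:
+
+    This function converts a Unicode string to an ANSI string by discarding
+    the high byte of each character. The output is truncated, if necessary,
+    so that the converted text and its terminating nul fit in the specified
+    buffer.
+
+Arguments:
+
+    UnicodeString - Supplies a pointer to the Unicode string to convert.
+
+    AnsiBuffer - Supplies a pointer to the buffer that receives the converted
+        string.
+
+    MaxAnsiLength - Supplies the maximum length of the ANSI buffer in bytes.
+
+Return Value:
+
+    The address of the ANSI buffer is returned as the function value.
+
+--*/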
+{
+ PCHAR Dst;
+ PWSTR Src;
+ ULONG Length;
+
+ Length = UnicodeString->Length / sizeof( WCHAR );
+ if (Length >= MaxAnsiLength) {
+ Length = MaxAnsiLength - 1;
+ }
+ Src = UnicodeString->Buffer;
+ Dst = AnsiBuffer;
+ while (Length--) {
+ *Dst++ = (UCHAR)*Src++;
+ }
+ *Dst = '\0';
+ return AnsiBuffer;
+}
+
+
+VOID
+KeBugCheckEx (
+ IN ULONG BugCheckCode,
+ IN ULONG BugCheckParameter1,
+ IN ULONG BugCheckParameter2,
+ IN ULONG BugCheckParameter3,
+ IN ULONG BugCheckParameter4
+ )
+
+/*++
+
+Routine Description:
+
+ This function crashes the system in a controlled manner.
+
+Arguments:
+
+ BugCheckCode - Supplies the reason for the bug check.
+
+ BugCheckParameter1-4 - Supplies additional bug check information
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ UCHAR Buffer[100];
+ ULONG BugCheckParameters[4];
+ CONTEXT ContextSave;
+#if !defined(i386)
+ KIRQL OldIrql;
+#endif
+
+#if !defined(NT_UP)
+
+ ULONG TargetSet;
+
+#endif
+ BOOLEAN hardErrorCalled;
+
+ //
+    // Capture the caller's context as closely as possible into the debugger's
+ // processor state area of the Prcb
+ //
+ // N.B. There may be some prologue code that shuffles registers such that
+ // they get destroyed.
+ //
+
+#if defined(i386)
+ KiSetHardwareTrigger();
+#else
+ KiHardwareTrigger = 1;
+#endif
+
+ RtlCaptureContext(&KeGetCurrentPrcb()->ProcessorState.ContextFrame);
+ KiSaveProcessorControlState(&KeGetCurrentPrcb()->ProcessorState);
+
+ //
+ // this is necessary on machines where the
+ // virtual unwind that happens during KeDumpMachineState()
+ // destroys the context record
+ //
+
+ ContextSave = KeGetCurrentPrcb()->ProcessorState.ContextFrame;
+
+ //
+ // if we are called by hard error then we don't want to dump the
+ // processor state on the machine.
+ //
+ // We know that we are called by hard error because the bug check
+ // code will be FATAL_UNHANDLED_HARD_ERROR. If this is so then the
+ // error status passed to harderr is the second parameter, and a pointer
+ // to the parameter array from hard error is passed as the third
+ // argument.
+ //
+
+ if (BugCheckCode == FATAL_UNHANDLED_HARD_ERROR) {
+
+ PULONG parameterArray;
+
+ hardErrorCalled = TRUE;
+
+ parameterArray = (PULONG)BugCheckParameter2;
+ BugCheckCode = BugCheckParameter1;
+ BugCheckParameter1 = parameterArray[0];
+ BugCheckParameter2 = parameterArray[1];
+ BugCheckParameter3 = parameterArray[2];
+ BugCheckParameter4 = parameterArray[3];
+
+
+ } else {
+
+ hardErrorCalled = FALSE;
+
+ }
+
+ KiBugCheckData[0] = BugCheckCode;
+ KiBugCheckData[1] = BugCheckParameter1;
+ KiBugCheckData[2] = BugCheckParameter2;
+ KiBugCheckData[3] = BugCheckParameter3;
+ KiBugCheckData[4] = BugCheckParameter4;
+
+ BugCheckParameters[0] = BugCheckParameter1;
+ BugCheckParameters[1] = BugCheckParameter2;
+ BugCheckParameters[2] = BugCheckParameter3;
+ BugCheckParameters[3] = BugCheckParameter4;
+
+#if DBG
+
+ //
+ // Don't clear screen if debugger is available.
+ //
+
+ if (KdDebuggerEnabled != FALSE) {
+ try {
+ DbgPrint("\n*** Fatal System Error: 0x%08lX (0x%08lX,0x%08lX,0x%08lX,0x%08lX)\n\n",
+ BugCheckCode,
+ BugCheckParameter1,
+ BugCheckParameter2,
+ BugCheckParameter3,
+ BugCheckParameter4
+ );
+ DbgBreakPointWithStatus(DBG_STATUS_BUGCHECK_FIRST);
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ for (;;) {
+ }
+ }
+ }
+
+#endif //DBG
+
+ //
+ // Freeze execution of the system by disabling interrupts and looping
+ //
+
+ KiDisableInterrupts();
+
+#if !defined(i386)
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+#endif
+
+ //
+ // Don't attempt to display message more than once.
+ //
+
+ if (InterlockedDecrement (&KeBugCheckCount) == 0) {
+
+#if !defined(NT_UP)
+
+ //
+ // Attempt to get the other processors frozen now, but don't wait
+ // for them to freeze (in case someone is stuck)
+ //
+
+ TargetSet = KeActiveProcessors & ~KeGetCurrentPrcb()->SetMember;
+ if (TargetSet != 0) {
+ KiIpiSend((KAFFINITY) TargetSet, IPI_FREEZE);
+
+ //
+ // Give the other processors one second to flush their data caches.
+ //
+        // N.B. This cannot be synchronized since the reason for the bug
+        //      check may be that one of the other processors failed.
+ //
+
+ KeStallExecutionProcessor(1000 * 1000);
+ }
+
+#endif
+
+ if (!hardErrorCalled) {
+ sprintf((char *)Buffer,
+ "\n*** STOP: 0x%08lX (0x%08lX,0x%08lX,0x%08lX,0x%08lX)\n",
+ BugCheckCode,
+ BugCheckParameter1,
+ BugCheckParameter2,
+ BugCheckParameter3,
+ BugCheckParameter4
+ );
+
+ HalDisplayString((char *)Buffer);
+ KeGetBugMessageText(BugCheckCode, NULL);
+ }
+
+ //
+ // Process the bug check callback list.
+ //
+
+ KiScanBugCheckCallbackList();
+
+ //
+ // If the debugger is not enabled, then dump the machine state and
+    // attempt to enable the debugger.
+ //
+
+ if (!hardErrorCalled) {
+
+ KeDumpMachineState(
+ &KeGetCurrentPrcb()->ProcessorState,
+ (char *)Buffer,
+ BugCheckParameters,
+ 4,
+ KeBugCheckUnicodeToAnsi);
+
+ }
+
+ if (KdDebuggerEnabled == FALSE && KdPitchDebugger == FALSE ) {
+ KdInitSystem(NULL, FALSE);
+
+ } else {
+ HalDisplayString("\n");
+ }
+
+ //
+ // Write a crash dump and optionally reboot if the system has been
+ // so configured.
+ //
+
+ KeGetCurrentPrcb()->ProcessorState.ContextFrame = ContextSave;
+ if (!IoWriteCrashDump(BugCheckCode,
+ BugCheckParameter1,
+ BugCheckParameter2,
+ BugCheckParameter3,
+ BugCheckParameter4,
+ &ContextSave
+ )) {
+ //
+            // If no crash dump was taken, display the PSS message.
+ //
+
+ KeGetBugMessageText(BUGCODE_PSS_MESSAGE, NULL);
+ }
+ }
+
+ //
+ // Attempt to enter the kernel debugger.
+ //
+
+ while(TRUE) {
+ try {
+ DbgBreakPointWithStatus(DBG_STATUS_BUGCHECK_SECOND);
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ for (;;) {
+ }
+ }
+ };
+
+ return;
+}
+
+VOID
+KeEnterKernelDebugger (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function crashes the system in a controlled manner attempting
+ to invoke the kernel debugger.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+#if !defined(i386)
+ KIRQL OldIrql;
+#endif
+
+ //
+ // Freeze execution of the system by disabling interrupts and looping
+ //
+
+ KiHardwareTrigger = 1;
+ KiDisableInterrupts();
+#if !defined(i386)
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+#endif
+ if (InterlockedDecrement (&KeBugCheckCount) == 0) {
+ if (KdDebuggerEnabled == FALSE) {
+ if ( KdPitchDebugger == FALSE ) {
+ KdInitSystem(NULL, FALSE);
+ }
+ }
+ }
+
+ while(TRUE) {
+ try {
+ DbgBreakPointWithStatus(DBG_STATUS_FATAL);
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ for (;;) {
+ }
+ }
+ };
+}
+
+NTKERNELAPI
+BOOLEAN
+KeDeregisterBugCheckCallback (
+ IN PKBUGCHECK_CALLBACK_RECORD CallbackRecord
+ )
+
+/*++
+
+Routine Description:
+
+ This function deregisters a bug check callback record.
+
+Arguments:
+
+ CallbackRecord - Supplies a pointer to a bug check callback record.
+
+Return Value:
+
+ If the specified bug check callback record is successfully deregistered,
+ then a value of TRUE is returned. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Deregister;
+ KIRQL OldIrql;
+
+ //
+ // Raise IRQL to HIGH_LEVEL and acquire the bug check callback list
+ // spinlock.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+ KiAcquireSpinLock(&KeBugCheckCallbackLock);
+
+ //
+ // If the specified callback record is currently registered, then
+ // deregister the callback record.
+ //
+
+ Deregister = FALSE;
+ if (CallbackRecord->State == BufferInserted) {
+ CallbackRecord->State = BufferEmpty;
+ RemoveEntryList(&CallbackRecord->Entry);
+ Deregister = TRUE;
+ }
+
+ //
+ // Release the bug check callback spinlock, lower IRQL to its previous
+ // value, and return whether the callback record was successfully
+ // deregistered.
+ //
+
+ KiReleaseSpinLock(&KeBugCheckCallbackLock);
+ KeLowerIrql(OldIrql);
+ return Deregister;
+}
+
+NTKERNELAPI
+BOOLEAN
+KeRegisterBugCheckCallback (
+ IN PKBUGCHECK_CALLBACK_RECORD CallbackRecord,
+ IN PKBUGCHECK_CALLBACK_ROUTINE CallbackRoutine,
+ IN PVOID Buffer,
+ IN ULONG Length,
+ IN PUCHAR Component
+ )
+
+/*++
+
+Routine Description:
+
+ This function registers a bug check callback record. If the system
+ crashes, then the specified function will be called during bug check
+ processing so it may dump additional state in the specified bug check
+ buffer.
+
+ N.B. Bug check callback routines are called in reverse order of
+ registration, i.e., in LIFO order.
+
+Arguments:
+
+ CallbackRecord - Supplies a pointer to a callback record.
+
+ CallbackRoutine - Supplies a pointer to the callback routine.
+
+ Buffer - Supplies a pointer to the bug check buffer.
+
+ Length - Supplies the length of the bug check buffer in bytes.
+
+ Component - Supplies a pointer to a zero terminated component
+ identifier.
+
+Return Value:
+
+ If the specified bug check callback record is successfully registered,
+ then a value of TRUE is returned. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Inserted;
+ KIRQL OldIrql;
+
+ //
+ // Raise IRQL to HIGH_LEVEL and acquire the bug check callback list
+ // spinlock.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+ KiAcquireSpinLock(&KeBugCheckCallbackLock);
+
+ //
+ // If the specified callback record is currently not registered, then
+ // register the callback record.
+ //
+
+ Inserted = FALSE;
+ if (CallbackRecord->State == BufferEmpty) {
+ CallbackRecord->CallbackRoutine = CallbackRoutine;
+ CallbackRecord->Buffer = Buffer;
+ CallbackRecord->Length = Length;
+ CallbackRecord->Component = Component;
+ CallbackRecord->Checksum =
+ (ULONG)CallbackRoutine + (ULONG)Buffer + Length + (ULONG)Component;
+
+ CallbackRecord->State = BufferInserted;
+ InsertHeadList(&KeBugCheckCallbackListHead, &CallbackRecord->Entry);
+ Inserted = TRUE;
+ }
+
+ //
+ // Release the bug check callback spinlock, lower IRQL to its previous
+ // value, and return whether the callback record was successfully
+ // registered.
+ //
+
+ KiReleaseSpinLock(&KeBugCheckCallbackLock);
+ KeLowerIrql(OldIrql);
+ return Inserted;
+}
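+
+//
+// Usage sketch (editorial, inferred from the interface above): a component
+// might register a bug check callback as shown below. The callback record
+// and dump buffer must remain valid for the life of the registration, and
+// the callback is invoked at HIGH_LEVEL during a bug check, so it may not
+// acquire any resources. KeInitializeCallbackRecord, which sets the record
+// state to BufferEmpty, is assumed from the DDK interface rather than
+// defined in this module.
+//
+//      KBUGCHECK_CALLBACK_RECORD ExampleCallbackRecord;
+//      UCHAR ExampleDumpBuffer[256];
+//
+//      VOID
+//      ExampleBugCheckCallback (
+//          IN PVOID Buffer,
+//          IN ULONG Length
+//          )
+//      {
+//          // Copy at most Length bytes of component state into Buffer.
+//      }
+//
+//      KeInitializeCallbackRecord(&ExampleCallbackRecord);
+//      KeRegisterBugCheckCallback(&ExampleCallbackRecord,
+//                                 ExampleBugCheckCallback,
+//                                 ExampleDumpBuffer,
+//                                 sizeof(ExampleDumpBuffer),
+//                                 (PUCHAR)"Example");
+//
+//      ...
+//
+//      KeDeregisterBugCheckCallback(&ExampleCallbackRecord);
+//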
+
+VOID
+KiScanBugCheckCallbackList (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function scans the bug check callback list and calls each bug
+ check callback routine so it can dump component specific information
+ that may identify the cause of the bug check.
+
+ N.B. The scan of the bug check callback list is performed VERY
+ carefully. Bug check callback routines are called at HIGH_LEVEL
+ and may not acquire ANY resources.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKBUGCHECK_CALLBACK_RECORD CallbackRecord;
+ ULONG Checksum;
+ ULONG Index;
+ PLIST_ENTRY LastEntry;
+ PLIST_ENTRY ListHead;
+ PLIST_ENTRY NextEntry;
+ PUCHAR Source;
+
+ //
+ // If the bug check callback listhead is not initialized, then the
+    // bug check has occurred before the system has gotten far enough
+ // in the initialization code to enable anyone to register a callback.
+ //
+
+ ListHead = &KeBugCheckCallbackListHead;
+ if ((ListHead->Flink != NULL) && (ListHead->Blink != NULL)) {
+
+ //
+ // Scan the bug check callback list.
+ //
+
+ LastEntry = ListHead;
+ NextEntry = ListHead->Flink;
+ while (NextEntry != ListHead) {
+
+ //
+ // The next entry address must be aligned properly, the
+ // callback record must be readable, and the callback record
+            // must have a back link to the last entry.
+ //
+
+ if (((ULONG)NextEntry & (sizeof(ULONG) - 1)) != 0) {
+ return;
+
+ } else {
+ CallbackRecord = CONTAINING_RECORD(NextEntry,
+ KBUGCHECK_CALLBACK_RECORD,
+ Entry);
+
+ Source = (PUCHAR)CallbackRecord;
+ for (Index = 0; Index < sizeof(KBUGCHECK_CALLBACK_RECORD); Index += 1) {
+ if (MmDbgReadCheck((PVOID)Source) == NULL) {
+ return;
+ }
+
+ Source += 1;
+ }
+
+ if (CallbackRecord->Entry.Blink != LastEntry) {
+ return;
+ }
+
+ //
+ // If the callback record has a state of inserted and the
+ // computed checksum matches the callback record checksum,
+ // then call the specified bug check callback routine.
+ //
+
+ Checksum = (ULONG)CallbackRecord->CallbackRoutine;
+ Checksum += (ULONG)CallbackRecord->Buffer;
+ Checksum += CallbackRecord->Length;
+ Checksum += (ULONG)CallbackRecord->Component;
+ if ((CallbackRecord->State == BufferInserted) &&
+ (CallbackRecord->Checksum == Checksum)) {
+
+ //
+ // Call the specified bug check callback routine and
+ // handle any exceptions that occur.
+ //
+
+ CallbackRecord->State = BufferStarted;
+ try {
+ (CallbackRecord->CallbackRoutine)(CallbackRecord->Buffer,
+ CallbackRecord->Length);
+
+ CallbackRecord->State = BufferFinished;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ CallbackRecord->State = BufferIncomplete;
+ }
+ }
+ }
+
+ LastEntry = NextEntry;
+ NextEntry = NextEntry->Flink;
+ }
+ }
+
+ return;
+}
diff --git a/private/ntos/ke/channel.c b/private/ntos/ke/channel.c
new file mode 100644
index 000000000..f671d2cdf
--- /dev/null
+++ b/private/ntos/ke/channel.c
@@ -0,0 +1,1868 @@
+/*++
+
+Copyright (c) 1995 Microsoft Corporation
+
+Module Name:
+
+ channel.c
+
+Abstract:
+
+    This module implements the executive channel object. Channel objects
+ provide a very high speed local interprocess communication mechanism.
+
+Author:
+
+ David N. Cutler (davec) 26-Mar-1995
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
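+
+//
+// Overview (editorial summary inferred from the code in this module, which
+// is currently compiled out and whose services return STATUS_NOT_IMPLEMENTED):
+//
+// A server creates a listen channel with NtCreateChannel and waits for client
+// messages with NtListenChannel. A client connects by creating a message
+// channel with NtOpenChannel and sends a message with NtSendWaitReplyChannel,
+// which blocks until the server replies with NtReplyWaitSendChannel. Message
+// text is limited to one page less the size of the message header, and each
+// participating thread owns a single page section, mapped into both its
+// process and the system address space, that serves as its receive buffer.
+//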
+
+//
+// Define local function prototypes.
+//
+
+VOID
+KiAllocateReceiveBufferChannel (
+ VOID
+ );
+
+VOID
+KiCloseChannel (
+ IN PEPROCESS Process,
+ IN PVOID Object,
+ IN ACCESS_MASK GrantedAccess,
+ IN ULONG ProcessHandleCount,
+ IN ULONG SystemHandleCount
+ );
+
+VOID
+KiDeleteChannel (
+ IN PVOID Object
+ );
+
+NTSTATUS
+KiListenChannel (
+ IN PRECHANNEL ServerChannel,
+ IN KPROCESSOR_MODE WaitMode,
+ OUT PCHANNEL_MESSAGE *Message
+ );
+
+PKTHREAD
+KiRendezvousWithThread (
+ IN PRECHANNEL WaitChannel,
+ IN ULONG WaitMode
+ );
+
+//
+// Address of channel object type descriptor.
+//
+
+POBJECT_TYPE KeChannelType;
+
+//
+// Structure that describes the mapping of generic access rights to object
+// specific access rights for event objects.
+//
+
+GENERIC_MAPPING KiChannelMapping = {
+ STANDARD_RIGHTS_READ |
+ CHANNEL_READ_MESSAGE,
+ STANDARD_RIGHTS_WRITE |
+ CHANNEL_WRITE_MESSAGE,
+ STANDARD_RIGHTS_EXECUTE |
+ SYNCHRONIZE,
+ CHANNEL_ALL_ACCESS
+};
+
+//
+// Define function sections.
+//
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE, KiAllocateReceiveBufferChannel)
+#pragma alloc_text(INIT, KiChannelInitialization)
+#pragma alloc_text(PAGE, KiDeleteChannel)
+#pragma alloc_text(PAGE, KiRundownChannel)
+#pragma alloc_text(PAGE, NtCreateChannel)
+#pragma alloc_text(PAGE, NtListenChannel)
+#pragma alloc_text(PAGE, NtOpenChannel)
+#pragma alloc_text(PAGE, NtSetContextChannel)
+#endif
+
+NTSTATUS
+NtCreateChannel (
+ OUT PHANDLE ChannelHandle,
+ IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates a server listen channel object and opens a handle
+ to the object with the specified desired access.
+
+Arguments:
+
+ ChannelHandle - Supplies a pointer to a variable that will receive the
+ channel object handle.
+
+ ObjectAttributes - Supplies a pointer to an object attributes structure.
+
+Return Value:
+
+ If the channel object is created, then a success status is returned.
+ Otherwise, a failure status is returned.
+
+--*/
+
+{
+
+#if 0
+
+ PVOID ChannelObject;
+ KPROCESSOR_MODE PreviousMode;
+ PRECHANNEL ServerChannel;
+ HANDLE ServerHandle;
+ NTSTATUS Status;
+
+ //
+ // Establish an exception handler, probe and zero the output handle
+ // address, and attempt to create a channel object. If the probe fails
+ // or access to the object attributes fails, then return the exception
+ // code as the service status.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ try {
+
+ //
+ // Get previous processor mode and probe output handle address if
+ // necessary.
+ //
+
+ if (PreviousMode != KernelMode) {
+ ProbeAndZeroHandle(ChannelHandle);
+ }
+
+ //
+ // Allocate channel object.
+ //
+
+ Status = ObCreateObject(PreviousMode,
+ KeChannelType,
+ ObjectAttributes,
+ PreviousMode,
+ NULL,
+ sizeof(ECHANNEL),
+ 0,
+ 0,
+ &ChannelObject);
+
+ } except(ExSystemExceptionFilter()) {
+ return GetExceptionCode();
+ }
+
+ //
+ // If the channel object was successfully created, then initialize the
+ // channel object and insert the channel object in the process handle
+ // table.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ ServerChannel = (PRECHANNEL)ChannelObject;
+ ServerChannel->Type = LISTEN_CHANNEL;
+ ServerChannel->State = ServerIdle;
+ ServerChannel->OwnerProcess = &PsGetCurrentProcess()->Pcb;
+ ServerChannel->ClientThread = NULL;
+ ServerChannel->ServerThread = NULL;
+ ServerChannel->ServerContext = NULL;
+ ServerChannel->ServerChannel = NULL;
+ KeInitializeEvent(&ServerChannel->ReceiveEvent,
+ SynchronizationEvent,
+ FALSE);
+
+ KeInitializeEvent(&ServerChannel->ClearToSendEvent,
+ SynchronizationEvent,
+ FALSE);
+
+ Status = ObInsertObject(ServerChannel,
+ NULL,
+ CHANNEL_ALL_ACCESS,
+ 0,
+ NULL,
+ &ServerHandle);
+
+ //
+ // If the channel object was successfully inserted in the process
+ // handle table, then attempt to write the channel object handle
+ // value. If the write attempt fails, then do not report an error.
+ // When the caller attempts to access the handle value, an access
+ // violation will occur.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *ChannelHandle = ServerHandle;
+
+ } except(ExSystemExceptionFilter()) {
+ }
+ }
+ }
+
+ //
+ // Return service status.
+ //
+
+ return Status;
+
+#else
+
+ return STATUS_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+NTSTATUS
+NtListenChannel (
+ IN HANDLE ChannelHandle,
+ OUT PCHANNEL_MESSAGE *Message
+ )
+
+/*++
+
+Routine Description:
+
+ This function listens for a client message.
+
+ N.B. This function can only be executed from a server thread.
+
+Arguments:
+
+ ChannelHandle - Supplies a handle to a listen channel on which the
+ server thread listens.
+
+ Message - Supplies a pointer to a variable that receives a pointer
+ to the client message header.
+
+Return Value:
+
+ If the function is successfully completed, then a success status is
+ returned. Otherwise, a failure status is returned.
+
+--*/
+
+{
+
+#if 0
+
+ KPROCESSOR_MODE PreviousMode;
+ PRECHANNEL ServerChannel;
+ PRKTHREAD ServerThread;
+ NTSTATUS Status;
+
+ //
+ // Establish an exception handler, probe the output message address,
+ // and allocate a receive buffer if necessary. If the probe fails or
+ // the receive buffer allocation is not successful, then return the
+ // exception code as the service status.
+ //
+
+ ServerThread = KeGetCurrentThread();
+ try {
+
+ //
+ // Get previous processor mode and probe output message address.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ ProbeAndNullPointer(Message);
+ }
+
+ //
+ // If the current thread does not have an associated receive buffer,
+ // then attempt to allocate one now. If the allocation fails, then
+ // an exception is raised.
+ //
+
+ if (ServerThread->Section == NULL) {
+ KiAllocateReceiveBufferChannel();
+ }
+
+ } except(ExSystemExceptionFilter()) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Reference channel object by handle.
+ //
+
+ Status = ObReferenceObjectByHandle(ChannelHandle,
+ CHANNEL_ALL_ACCESS,
+ KeChannelType,
+ PreviousMode,
+ &ServerChannel,
+ NULL);
+
+ //
+ // If the reference was successful and the channel is a listen channel,
+ // then wait for a client message to arrive.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ if (ServerChannel->ServerChannel != NULL) {
+ Status = STATUS_INVALID_PARAMETER; // **** fix ****
+
+ } else {
+ Status = KiListenChannel(ServerChannel, PreviousMode, Message);
+ }
+
+ ObDereferenceObject(ServerChannel);
+ }
+
+ //
+ // Return service status.
+ //
+
+ return Status;
+
+#else
+
+ return STATUS_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+NTSTATUS
+NtOpenChannel (
+ OUT PHANDLE ChannelHandle,
+ IN POBJECT_ATTRIBUTES ObjectAttributes
+ )
+
+/*++
+
+Routine Description:
+
+ This function opens a handle to a server channel by creating a message
+ channel that is connected to the specified server channel.
+
+Arguments:
+
+ ChannelHandle - Supplies a pointer to a variable that will receive the
+ channel object handle.
+
+ ObjectAttributes - Supplies a pointer to an object attributes structure.
+
+Return Value:
+
+ If the channel object is opened, then a success status is returned.
+ Otherwise, a failure status is returned.
+
+--*/
+
+{
+
+#if 0
+
+ PRECHANNEL ClientChannel;
+ HANDLE ClientHandle;
+ PKTHREAD ClientThread;
+ KPROCESSOR_MODE PreviousMode;
+ PRECHANNEL ServerChannel;
+ PVOID ServerObject;
+ NTSTATUS Status;
+
+ //
+ // Establish an exception handler, probe and zero the output handle
+ // address, and attempt to open the server channel object. If the
+ // probe fails, then return the exception code as the service status.
+ //
+
+ try {
+
+ //
+ // Get previous processor mode and probe output handle address
+ // if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ ProbeAndZeroHandle(ChannelHandle);
+ }
+
+ //
+ // Reference the server channel object with the specified desired
+ // access.
+ //
+
+ Status = ObReferenceObjectByName(ObjectAttributes->ObjectName,
+ ObjectAttributes->Attributes,
+ NULL,
+ CHANNEL_ALL_ACCESS,
+ KeChannelType,
+ PreviousMode,
+ NULL,
+ (PVOID *)&ServerObject);
+
+ } except(ExSystemExceptionFilter()) {
+ return GetExceptionCode();
+ }
+
+ //
+ // If the reference was successful, then attempt to create a client
+ // channel object.
+ //
+
+ if (NT_SUCCESS(Status)) {
+
+ //
+ // If the owner process of the server channel is the same as
+ // the current process, then a server thread is attempting to
+ // open a client handle. This is not allowed since it would
+        // not be possible to distinguish the server from the client.
+ //
+
+ ClientThread = KeGetCurrentThread();
+ ServerChannel = (PECHANNEL)ServerObject;
+ if (ServerChannel->OwnerProcess == ClientThread->ApcState.Process) {
+ Status = STATUS_INVALID_PARAMETER; // **** fix ***
+
+ } else {
+ Status = ObCreateObject(PreviousMode,
+ KeChannelType,
+ NULL,
+ PreviousMode,
+ NULL,
+ sizeof(ECHANNEL),
+ 0,
+ 0,
+ (PVOID *)&ClientChannel);
+
+ //
+ // If the channel object was successfully created, then
+ // initialize the channel object and attempt to insert the
+ // channel object in the server process channel table.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ ClientChannel->Type = MESSAGE_CHANNEL;
+ ClientChannel->State = ClientIdle;
+ ClientChannel->OwnerProcess = &PsGetCurrentProcess()->Pcb;
+ ClientChannel->ClientThread = NULL;
+ ClientChannel->ServerThread = NULL;
+ ClientChannel->ServerContext = NULL;
+ ClientChannel->ServerChannel = ServerChannel;
+ KeInitializeEvent(&ClientChannel->ReceiveEvent,
+ SynchronizationEvent,
+ FALSE);
+
+ KeInitializeEvent(&ClientChannel->ClearToSendEvent,
+ SynchronizationEvent,
+ FALSE);
+
+ //
+ // Create a handle to the message channel object.
+ //
+
+ Status = ObInsertObject(ClientChannel,
+ NULL,
+ CHANNEL_ALL_ACCESS,
+ 0,
+ NULL,
+ &ClientHandle);
+
+ //
+ // If the channel object was successfully inserted in the
+ // client process handle table, then attempt to write the
+ // client channel object handle value. If the write attempt
+ // fails, then do not report an error. When the caller
+ // attempts to access the handle value, an access violation
+ // will occur.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *ChannelHandle = ClientHandle;
+
+ } except(ExSystemExceptionFilter()) {
+ }
+
+ }
+
+ return Status;
+ }
+ }
+
+ ObDereferenceObject(ServerChannel);
+ }
+
+ //
+ // Return service status.
+ //
+
+ return Status;
+
+#else
+
+ return STATUS_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+NTSTATUS
+NtReplyWaitSendChannel (
+ IN PVOID Text,
+ IN ULONG Length,
+ OUT PCHANNEL_MESSAGE *Message
+ )
+
+/*++
+
+Routine Description:
+
+ This function sends a reply message to a client and waits for a send.
+
+ N.B. This function can only be executed from a server thread that
+ has an assoicated message channel.
+        has an associated message channel.
+Arguments:
+
+ Text - Supplies a pointer to the message text.
+
+ Length - Supplies the length of the message text.
+
+ Message - Supplies a pointer to a variable that receives the send
+ message header.
+
+Return Value:
+
+    If the function is successfully completed, then a success status is
+ returned. Otherwise, a failure status is returned.
+
+--*/
+
+{
+
+#if 0
+
+ PKTHREAD ClientThread;
+ PCHANNEL_MESSAGE ClientView;
+ PRECHANNEL MessageChannel;
+ KPROCESSOR_MODE PreviousMode;
+ PECHANNEL ServerChannel;
+ PKTHREAD ServerThread;
+ NTSTATUS Status;
+
+ //
+ // Establish an exception handler, probe the output message address,
+ // probe the message text, and allocate a receive buffer if necessary.
+ // If either of the probes fail or the receive buffer allocation is
+ // not successful, then return the exception code as the service
+ // status.
+ //
+
+ ServerThread = KeGetCurrentThread();
+ try {
+
+ //
+ // Get previous processor mode and probe output message address and
+ // the message text if necessary.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ ProbeForRead(Text, Length, sizeof(CHAR));
+ ProbeAndNullPointer(Message);
+ }
+
+ //
+ // If the current thread does not have an associated receive buffer,
+ // then attempt to allocate one now. If the allocation fails, then
+ // an exception is raised.
+ //
+
+ if (ServerThread->Section == NULL) {
+ KiAllocateReceiveBufferChannel();
+ }
+
+ } except(ExSystemExceptionFilter()) {
+ return GetExceptionCode();
+ }
+
+ //
+    // If the message length is greater than or equal to the host page size
+    // minus the message header length, then return an error.
+ //
+
+ if (Length >= (PAGE_SIZE - sizeof(CHANNEL_MESSAGE))) {
+ return STATUS_BUFFER_OVERFLOW;
+ }
+
+ //
+ // If the server thread has an associated message channel, the channel
+ // is in server receive message state, and the channel server thread
+ // matches the current thread.
+ //
+ // This implies that:
+ //
+ // 1. The channel is a message channel.
+ //
+ // 2. The channel is being accessed by the server thread.
+ //
+ // 3. The channel is associated with a listen channel.
+ //
+ // 4. There is currently a server channel owner.
+ //
+ // 5. There is currently a client channel owner.
+ //
+
+ KiLockDispatcherDatabase(&ServerThread->WaitIrql);
+ MessageChannel = ServerThread->Channel;
+ if ((MessageChannel == NULL) ||
+ (MessageChannel->State != ServerReceiveMessage) ||
+ (MessageChannel->ServerThread != ServerThread)) {
+
+ //
+ // A message is not associated with the current thread,
+ // the message channel is in the wrong state, or the
+ // current thread is not the owner of the channel.
+ //
+
+ KiUnlockDispatcherDatabase(ServerThread->WaitIrql);
+ Status = STATUS_INVALID_PARAMETER; // **** fix ****
+
+ } else {
+
+ //
+ // Rendezvous with the client thread so a transfer of the
+ // reply text to the client thread can occur.
+ //
+
+ ClientThread = KiRendezvousWithThread(MessageChannel, PreviousMode);
+
+ //
+ // Control is returned when:
+ //
+ // 1. The server thread is being terminated (USER_APC).
+ //
+        //    2. A rendezvous with a client thread has occurred.
+ //
+ // N.B. If the status value is less than zero, then it
+ // is the address of the client thread.
+ //
+
+ if ((LONG)ClientThread < 0) {
+
+ //
+ // The client thread is returned as the rendezvous status
+ // with the thread in the transition state. Get the address
+ // of the client thread system view, establish an exception
+ // handler, and transfer the message text from the server's
+ // buffer to the client's receive buffer. If an exception
+ // occurs during the copy, then return the exception code
+ // as the service status.
+ //
+
+ ClientView = ClientThread->SystemView;
+ Status = STATUS_SUCCESS;
+ if (Length != 0) {
+ try {
+ RtlCopyMemory(ClientView + 1, Text, Length);
+
+ } except (ExSystemExceptionFilter()) {
+ Status = GetExceptionCode();
+ }
+ }
+
+ //
+ // Set the channel message parameters.
+ //
+
+ ClientView->Text = (PVOID)(ClientThread->ThreadView + 1);
+ ClientView->Length = Length;
+ ClientView->Context = NULL;
+ ClientView->Base = Text;
+ ClientView->Close = FALSE;
+
+ //
+ // Raise IRQL to dispatch level, lock the dispatcher
+ // database, and check if the message was successfully
+            // transferred to the client's receive buffer. If the
+            // message was successfully transferred to the client's
+            // receive buffer, then reset the channel state, fill
+ // in the message parameters, ready the client thread,
+ // and listen for the next message. Otherwise, set the
+ // client wait status and ready the client thread for
+ // execution.
+ //
+
+ KiLockDispatcherDatabase(&ServerThread->WaitIrql);
+ if (NT_SUCCESS(Status)) {
+ MessageChannel->State = ClientIdle;
+ MessageChannel->ClientThread = NULL;
+ MessageChannel->ServerThread = NULL;
+ ClientThread->WaitStatus = STATUS_SUCCESS;
+
+ //
+ // Reference the server channel and dereference the
+ // message channel.
+ //
+
+ ServerChannel = MessageChannel->ServerChannel;
+ ObReferenceObject(ServerChannel);
+ ObDereferenceObject(MessageChannel);
+
+ //
+ // If there are no clients waiting to send to the server,
+ // then switch directly to the client thread. Otherwise,
+ // ready the client thread, then listen for the next
+ // message.
+ //
+
+ if (IsListEmpty(&ServerChannel->ClearToSendEvent.Header.WaitListHead) == FALSE) {
+ KiReadyThread(ClientThread);
+ KiUnlockDispatcherDatabase(ServerThread->WaitIrql);
+ Status = KiListenChannel(ServerChannel,
+ PreviousMode,
+ Message);
+
+ } else {
+ Status = KiSwitchToThread(ClientThread,
+ WrRendezvous,
+ PreviousMode,
+ &ServerChannel->ReceiveEvent);
+
+ //
+ // If a client message was successfully received, then
+ // attempt to write the address of the send message
+ // address. If the write attempt fails, then do not
+ // report an error. When the caller attempts to access
+ // the message address, an access violation will occur.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *Message = ServerThread->ThreadView;
+
+ } except(ExSystemExceptionFilter()) {
+ }
+ }
+ }
+
+ ObDereferenceObject(ServerChannel);
+
+ } else {
+
+ //
+            // The reply message was not successfully transferred
+            // to the client receive buffer because of an access
+            // violation encountered during the access to the
+ // server buffer.
+ //
+
+ ClientThread->WaitStatus = STATUS_KERNEL_APC;
+ KiReadyThread(ClientThread);
+ KiUnlockDispatcherDatabase(ServerThread->WaitIrql);
+ }
+
+ } else {
+
+ //
+ // The server thread is terminating and the channel
+        // structures will be cleaned up by the termination
+ // code.
+ //
+
+ Status = (NTSTATUS)ClientThread;
+ }
+ }
+
+ //
+ // Return service status.
+ //
+
+ return Status;
+
+#else
+
+ return STATUS_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+NTSTATUS
+NtSendWaitReplyChannel (
+ IN HANDLE ChannelHandle,
+ IN PVOID Text,
+ IN ULONG Length,
+ OUT PCHANNEL_MESSAGE *Message
+ )
+
+/*++
+
+Routine Description:
+
+ This function sends a message to a server and waits for a reply.
+
+ N.B. This function can only be executed from a client thread.
+
+Arguments:
+
+ ChannelHandle - Supplies a handle to a message channel over which the
+ specified message text is sent.
+
+ Text - Supplies a pointer to the message text.
+
+ Length - Supplies the length of the message text.
+
+ Message - Supplies a pointer to a variable that receives a pointer
+ to the reply message header.
+
+Return Value:
+
+ If the function is successfully completed, then a success status is
+ returned. Otherwise, a failure status is returned.
+
+--*/
+
+{
+
+#if 0
+
+ PKTHREAD ClientThread;
+ PRECHANNEL MessageChannel;
+ KPROCESSOR_MODE PreviousMode;
+ PRECHANNEL ServerChannel;
+ PKTHREAD ServerThread;
+ PCHANNEL_MESSAGE ServerView;
+ NTSTATUS Status;
+
+ //
+ // Establish an exception handler, probe the output message address,
+ // probe the message text, and allocate a receive buffer if necessary.
+ // If either of the probes fail or the receive buffer allocation is
+ // not successful, then return the exception code as the service
+ // status.
+ //
+
+ ClientThread = KeGetCurrentThread();
+ try {
+
+ //
+ // Get previous processor mode and probe output message address
+ // and the message text.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ ProbeForRead(Text, Length, sizeof(UCHAR));
+ ProbeAndNullPointer(Message);
+ }
+
+ //
+ // If the current thread does not have an associated receive buffer,
+ // then attempt to allocate one now. If the allocation fails, then
+ // an exception is raised.
+ //
+
+ if (ClientThread->Section == NULL) {
+ KiAllocateReceiveBufferChannel();
+ }
+
+ } except(ExSystemExceptionFilter()) {
+ return GetExceptionCode();
+ }
+
+ //
+    // If the message length is greater than or equal to the host page size
+    // minus the message header length, then return an error.
+ //
+
+ if (Length >= (PAGE_SIZE - sizeof(CHANNEL_MESSAGE))) {
+ return STATUS_BUFFER_OVERFLOW;
+ }
+
+ //
+ // Reference channel object by handle.
+ //
+
+ Status = ObReferenceObjectByHandle(ChannelHandle,
+ CHANNEL_ALL_ACCESS,
+ KeChannelType,
+ PreviousMode,
+ (PVOID *)&MessageChannel,
+ NULL);
+
+ //
+ // If the reference was successful, then check if the channel is in
+ // the client idle state.
+ //
+ // This implies that:
+ //
+ // 1. The channel is a message channel.
+ //
+ // 2. The channel is being accessed by a client thread.
+ //
+ // 3. The channel is connected to a listen channel.
+ //
+ // 4. There is currently no client thread channel owner.
+ //
+ // 5. There is currently no server thread channel owner.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ KiLockDispatcherDatabase(&ClientThread->WaitIrql);
+ if (MessageChannel->State != ClientIdle) {
+
+ //
+ // The message channel is in the wrong state.
+ //
+
+ KiUnlockDispatcherDatabase(ClientThread->WaitIrql);
+ Status = STATUS_INVALID_PARAMETER; // **** fix ****
+
+ } else {
+
+ //
+ // Set the channel state, set the client owner thread, and
+ // rendezvous with a server thread.
+ //
+
+ MessageChannel->State = ClientSendWaitReply;
+ MessageChannel->ClientThread = ClientThread;
+ ClientThread->Channel = MessageChannel;
+ ServerChannel = MessageChannel->ServerChannel;
+ ServerThread = KiRendezvousWithThread(ServerChannel, PreviousMode);
+
+ //
+ // Control is returned when:
+ //
+ // 1. The client thread is being terminated (USER_APC).
+ //
+            //    2. A rendezvous with a server thread has occurred.
+ //
+ // N.B. If the status value is less than zero, then it
+ // is the address of the server thread.
+ //
+
+ if ((LONG)ServerThread < 0) {
+
+ //
+ // The server thread is returned as the rendezvous status
+ // with the thread in the transition state. Get the address
+ // of the server thread system view, establish an exception
+ // handler, and transfer the message text from the client's
+ // buffer to the server's receive buffer. If an exception
+ // occurs during the copy, then return the exception code
+ // as the service status.
+ //
+
+ ServerView = ServerThread->SystemView;
+ if (Length != 0) {
+ try {
+ RtlCopyMemory(ServerView + 1, Text, Length);
+
+ } except (ExSystemExceptionFilter()) {
+ Status = GetExceptionCode();
+ }
+ }
+
+ //
+ // Set the channel message parameters.
+ //
+
+ ServerView->Text = (PVOID)(ServerThread->ThreadView + 1);
+ ServerView->Length = Length;
+ ServerView->Context = MessageChannel->ServerContext;
+ ServerView->Base = Text;
+ ServerView->Close = FALSE;
+
+ //
+ // Raise IRQL to dispatch level, lock the dispatcher
+ // database and check if the message was successfully
+                // transferred to the server's receive buffer. If the
+                // message was successfully transferred, then set the
+ // channel state, set the server thread address, set
+ // the address of the message channel in the server
+ // thread, increment the message channel reference
+ // count, fill in the message parameters, and switch
+ // directly to the server thread. Otherwise, set the
+ // channel state, and reready the server thread for
+ // execution.
+ //
+
+ KiLockDispatcherDatabase(&ClientThread->WaitIrql);
+ if (NT_SUCCESS(Status)) {
+ MessageChannel->State = ServerReceiveMessage;
+ MessageChannel->ServerThread = ServerThread;
+ ObReferenceObject(MessageChannel);
+ ServerThread->Channel = MessageChannel;
+ Status = KiSwitchToThread(ServerThread,
+ WrRendezvous,
+ PreviousMode,
+ &MessageChannel->ReceiveEvent);
+
+ //
+ // If the send and subsequent reply from the server
+ // thread is successful, then attempt to write the
+ // address of the reply message address. If the write
+ // attempt fails, then do not report an error. When
+ // the caller attempts to access the message address,
+ // an access violation will occur.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *Message = ClientThread->ThreadView;
+
+ } except(ExSystemExceptionFilter()) {
+ }
+ }
+
+ } else {
+
+ //
+                // The send message was not successfully transferred
+                // to the server receive buffer because of an access
+                // violation encountered during the access to the
+ // client buffer.
+ //
+
+ MessageChannel->State = ClientIdle;
+ MessageChannel->ClientThread = NULL;
+ ClientThread->Channel = NULL;
+ ServerThread->WaitStatus = STATUS_KERNEL_APC;
+ KiReadyThread(ServerThread);
+ KiUnlockDispatcherDatabase(ClientThread->WaitIrql);
+ }
+
+ } else {
+
+ //
+ // The client thread is terminating and the channel
+ // structures will be cleaned up by the termination
+ // code.
+ //
+
+ Status = (NTSTATUS)ServerThread;
+ }
+ }
+
+ ObDereferenceObject(MessageChannel);
+ }
+
+ //
+ // Return service status.
+ //
+
+ return Status;
+
+#else
+
+ return STATUS_NOT_IMPLEMENTED;
+
+#endif
+
+}
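+
+//
+// Client side sketch (editorial, using only the services declared above; the
+// handle and buffer names are illustrative):
+//
+//      HANDLE Channel;
+//      PCHANNEL_MESSAGE Reply;
+//
+//      Status = NtOpenChannel(&Channel, &ObjectAttributes);
+//      Status = NtSendWaitReplyChannel(Channel, Text, Length, &Reply);
+//
+// On success, Reply points to the message header in the caller's mapped
+// receive buffer and Reply->Text addresses the reply text that follows it.
+//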
+
+NTSTATUS
+NtSetContextChannel (
+ IN PVOID Context
+ )
+
+/*++
+
+Routine Description:
+
+ This function stores a context value for the current associated
+ message channel.
+
+ N.B. This function can only be executed from a server thread that
+ has an associated message channel.
+
+Arguments:
+
+ Context - Supplies a context value that is to be stored in the
+ associated message channel.
+
+Return Value:
+
+ If the channel information is set, then a success status is returned.
+ Otherwise, a failure status is returned.
+
+--*/
+
+{
+
+#if 0
+
+ PRECHANNEL MessageChannel;
+ PKTHREAD CurrentThread;
+ NTSTATUS Status;
+
+ //
+    // If the thread has an associated channel and the server thread for
+ // the channel is the current thread, then store the channel context.
+ //
+
+ CurrentThread = KeGetCurrentThread();
+ MessageChannel = CurrentThread->Channel;
+ if ((MessageChannel == NULL) ||
+ (CurrentThread != MessageChannel->ServerThread)) {
+ Status = STATUS_INVALID_PARAMETER; // ****** FIX *****
+
+ } else {
+ MessageChannel->ServerContext = Context;
+ Status = STATUS_SUCCESS;
+ }
+
+ //
+ // Return service status.
+ //
+
+ return Status;
+
+#else
+
+ return STATUS_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+#if 0
+
+
+VOID
+KiAllocateReceiveBufferChannel (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates an unnamed section with a single page, maps
+ a view of the section into the current process and into the system
+ address space, and associates the view with the current thread.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ If a channel receive buffer is not allocated, then raise an exception.
+
+--*/
+
+{
+
+ LARGE_INTEGER MaximumSize;
+ PEPROCESS Process;
+ NTSTATUS Status;
+ PKTHREAD Thread;
+ LARGE_INTEGER ViewOffset;
+ ULONG ViewSize;
+
+ //
+ // Create an unnamed section object.
+ //
+
+ Thread = KeGetCurrentThread();
+
+ ASSERT(Thread->Section == NULL);
+
+ MaximumSize.QuadPart = PAGE_SIZE;
+ Status = MmCreateSection(&Thread->Section,
+ 0,
+ NULL,
+ &MaximumSize,
+ PAGE_READWRITE,
+ SEC_COMMIT,
+ NULL,
+ NULL);
+
+ if (NT_SUCCESS(Status)) {
+
+ //
+ // Map a view of the section into the current process.
+ //
+
+ Process = PsGetCurrentProcess();
+ ViewOffset.QuadPart = 0;
+ ViewSize = PAGE_SIZE;
+ Status = MmMapViewOfSection(Thread->Section,
+ Process,
+ &Thread->ThreadView,
+ 0,
+ ViewSize,
+ &ViewOffset,
+ &ViewSize,
+ ViewUnmap,
+ 0,
+ PAGE_READWRITE);
+
+ if (NT_SUCCESS(Status)) {
+
+ //
+ // Map a view of the section into the system address
+ // space.
+ //
+
+ Status = MmMapViewInSystemSpace(Thread->Section,
+ &Thread->SystemView,
+ &ViewSize);
+
+ if (NT_SUCCESS(Status)) {
+ return;
+ }
+
+ MmUnmapViewOfSection(Process, Thread->ThreadView);
+ }
+
+ ObDereferenceObject(Thread->Section);
+ }
+
+ //
+ // The allocation of a receive buffer was not successful. Raise an
+ // exception that will be caught by the caller.
+ //
+
+ ExRaiseStatus(Status);
+ return;
+}
+
+BOOLEAN
+KiChannelInitialization (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function creates the channel object type descriptor at system
+ initialization and stores the address of the object type descriptor
+ in global storage.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ A value of TRUE is returned if the channel object type descriptor is
+ successfully initialized. Otherwise a value of FALSE is returned.
+
+--*/
+
+{
+
+ OBJECT_TYPE_INITIALIZER ObjectTypeInitializer;
+ NTSTATUS Status;
+ UNICODE_STRING TypeName;
+
+ //
+ // Initialize string descriptor.
+ //
+
+ RtlInitUnicodeString(&TypeName, L"Channel");
+
+ //
+ // Create channel object type descriptor.
+ //
+
+ RtlZeroMemory(&ObjectTypeInitializer, sizeof(ObjectTypeInitializer));
+ ObjectTypeInitializer.Length = sizeof(ObjectTypeInitializer);
+ ObjectTypeInitializer.GenericMapping = KiChannelMapping;
+ ObjectTypeInitializer.PoolType = NonPagedPool;
+ ObjectTypeInitializer.DefaultNonPagedPoolCharge = sizeof(ECHANNEL);
+ ObjectTypeInitializer.ValidAccessMask = CHANNEL_ALL_ACCESS;
+ ObjectTypeInitializer.InvalidAttributes = OBJ_EXCLUSIVE | OBJ_INHERIT | OBJ_PERMANENT;
+ ObjectTypeInitializer.CloseProcedure = KiCloseChannel;
+ ObjectTypeInitializer.DeleteProcedure = KiDeleteChannel;
+ Status = ObCreateObjectType(&TypeName,
+ &ObjectTypeInitializer,
+ NULL,
+ &KeChannelType);
+
+ //
+ // If the channel object type descriptor was successfully created, then
+ // return a value of TRUE. Otherwise return a value of FALSE.
+ //
+
+ return (BOOLEAN)(NT_SUCCESS(Status));
+}
+
+VOID
+KiCloseChannel (
+ IN PEPROCESS Process,
+ IN PVOID Object,
+ IN ACCESS_MASK GrantedAccess,
+ IN ULONG ProcessHandleCount,
+ IN ULONG SystemHandleCount
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called when a handle to a channel is closed. If the
+ hanlde is the last handle in the process to the channel object and
+    handle is the last handle in the process to the channel object and
+ server indicating that the client handle is being closed.
+
+Arguments:
+
+ Object - Supplies a pointer to an executive channel.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PECHANNEL MessageChannel = (PECHANNEL)Object;
+
+ //
+    // If the object is a message channel and the last handle is being
+ // closed, then send a message to the server indicating that the
+ // channel is being closed.
+ //
+
+ if ((MessageChannel->ServerChannel != NULL) &&
+ (ProcessHandleCount == 1)) {
+ }
+
+ return;
+}
+
+VOID
+KiDeleteChannel (
+ IN PVOID Object
+ )
+
+/*++
+
+Routine Description:
+
+ This function is the delete routine for channel objects. Its function
+ is to ...
+
+Arguments:
+
+ Object - Supplies a pointer to an executive channel.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRECHANNEL ChannelObject = (PECHANNEL)Object;
+
+ //
+    // If the channel is a message channel, then dereference the connected
+ // server channel.
+ //
+
+ if (ChannelObject->ServerChannel != NULL) {
+ ObDereferenceObject(ChannelObject->ServerChannel);
+ }
+
+ return;
+}
+
+VOID
+KiRundownChannel (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    This function runs down the associated channel object and receive buffers
+    when a thread is terminated.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKTHREAD Thread;
+
+ //
+ // If the current thread has an associated receive buffer, then unmap
+ // the receive buffer and dereference the underlying section.
+ //
+
+ Thread = KeGetCurrentThread();
+ if (Thread->Section != NULL) {
+ MmUnmapViewOfSection(PsGetCurrentProcess(), Thread->ThreadView);
+ MmUnmapViewInSystemSpace(Thread->SystemView);
+ ObDereferenceObject(Thread->Section);
+ Thread->Section = NULL;
+ }
+
+ //
+ // If the current thread has an associated channel, then ...
+ //
+
+ return;
+}
+
+NTSTATUS
+KiListenChannel (
+ IN PRECHANNEL ServerChannel,
+ IN KPROCESSOR_MODE WaitMode,
+ OUT PCHANNEL_MESSAGE *Message
+ )
+
+/*++
+
+Routine Description:
+
+ This function listens for a client message to arrive.
+
+ N.B. This function can only be executed from a server thread.
+
+Arguments:
+
+    ServerChannel - Supplies a pointer to a listen channel on which the
+ server thread listens.
+
+ WaitMode - Supplies the processor wait mode.
+
+ Message - Supplies a pointer to a variable that receives a pointer
+ to the client message header.
+
+Return Value:
+
+ If the function is successfully completed, then a success status is
+ returned. Otherwise, a failure status is returned.
+
+--*/
+
+{
+
+ PKEVENT ClearToSendEvent;
+ PKTHREAD ClientThread;
+ PKQUEUE Queue;
+ PKTHREAD ServerThread;
+ PKWAIT_BLOCK WaitBlock;
+ PLIST_ENTRY WaitEntry;
+ NTSTATUS WaitStatus;
+
+ //
+ // Raise IRQL to dispatch level and lock the dispatcher database.
+ //
+
+ ServerThread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&ServerThread->WaitIrql);
+
+ //
+ // Start of wait loop.
+ //
+ // Note this loop is repeated if a kernel APC is delivered in the
+ // middle of the wait or a kernel APC is pending on the first attempt
+ // through the loop.
+ //
+
+ do {
+
+ //
+ // Check if there is a thread waiting on the clear to send event.
+ //
+
+ ClearToSendEvent = &ServerChannel->ClearToSendEvent;
+ WaitEntry = ClearToSendEvent->Header.WaitListHead.Flink;
+ if (WaitEntry != &ClearToSendEvent->Header.WaitListHead) {
+ WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
+ ClientThread = WaitBlock->Thread;
+
+ //
+ // Remove the wait block from the wait list of the receive event,
+ // and remove the client thread from the wait list.
+ //
+
+ RemoveEntryList(&WaitBlock->WaitListEntry);
+ RemoveEntryList(&ClientThread->WaitListEntry);
+
+ //
+ // If the client thread is processing a queue entry, then increment the
+ // count of currently active threads.
+ //
+
+ Queue = ClientThread->Queue;
+ if (Queue != NULL) {
+ Queue->CurrentCount += 1;
+ }
+
+ //
+ // Set the wait completion status to kernel APC so the client
+ // will attempt another rendezvous and ready the client thread
+ // for execution.
+ //
+
+ ClientThread->WaitStatus = STATUS_KERNEL_APC;
+ KiReadyThread(ClientThread);
+ }
+
+ //
+ // Test to determine if a kernel APC is pending.
+ //
+ // If a kernel APC is pending and the previous IRQL was less than
+ // APC_LEVEL, then a kernel APC was queued by another processor
+ // just after IRQL was raised to DISPATCH_LEVEL, but before the
+ // dispatcher database was locked.
+ //
+ // N.B. that this can only happen in a multiprocessor system.
+ //
+
+ if (ServerThread->ApcState.KernelApcPending &&
+ (ServerThread->WaitIrql < APC_LEVEL)) {
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its
+ // previous value. An APC interrupt will immediately occur
+ // which will result in the delivery of the kernel APC if
+ // possible.
+ //
+
+ KiUnlockDispatcherDatabase(ServerThread->WaitIrql);
+
+ } else {
+
+ //
+ // Test if a user APC is pending.
+ //
+
+ if ((WaitMode != KernelMode) &&
+ (ServerThread->ApcState.UserApcPending)) {
+ WaitStatus = STATUS_USER_APC;
+ break;
+ }
+
+ //
+ // Construct a wait block for the clear to send event object.
+ //
+
+ WaitBlock = &ServerThread->WaitBlock[0];
+ ServerThread->WaitBlockList = WaitBlock;
+ ServerThread->WaitStatus = (NTSTATUS)0;
+ WaitBlock->Object = (PVOID)&ServerChannel->ReceiveEvent;
+ WaitBlock->NextWaitBlock = WaitBlock;
+ WaitBlock->WaitKey = (CSHORT)STATUS_SUCCESS;
+ WaitBlock->WaitType = WaitAny;
+ InsertTailList(&ServerChannel->ReceiveEvent.Header.WaitListHead,
+ &WaitBlock->WaitListEntry);
+
+ //
+ // If the current thread is processing a queue entry, then
+ // attempt to activate another thread that is blocked on the
+ // queue object.
+ //
+
+ Queue = ServerThread->Queue;
+ if (Queue != NULL) {
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the thread wait parameters, set the thread dispatcher
+ // state to Waiting, and insert the thread in the wait list.
+ //
+
+ ServerThread->Alertable = FALSE;
+ ServerThread->WaitMode = WaitMode;
+ ServerThread->WaitReason = WrRendezvous;
+ ServerThread->WaitTime = KiQueryLowTickCount();
+ ServerThread->State = Waiting;
+ KiInsertWaitList(WaitMode, ServerThread);
+
+ //
+ // Switch context to selected thread.
+ //
+ // Control is returned at the original IRQL.
+ //
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+ ASSERT(ServerThread->WaitIrql <= DISPATCH_LEVEL);
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return wait status.
+ //
+
+ if (WaitStatus != STATUS_KERNEL_APC) {
+
+ //
+ // If a client message was successfully received, then
+ // attempt to write the address of the send message
+ // address. If the write attempt fails, then do not
+ // report an error. When the caller attempts to access
+ // the message address, an access violation will occur.
+ //
+
+ if (NT_SUCCESS(WaitStatus)) {
+ try {
+ *Message = ServerThread->ThreadView;
+
+ } except(ExSystemExceptionFilter()) {
+ }
+ }
+
+ return WaitStatus;
+ }
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&ServerThread->WaitIrql);
+ } while (TRUE);
+
+ //
+ // Unlock the dispatcher database and return the target thread.
+ //
+
+ KiUnlockDispatcherDatabase(ServerThread->WaitIrql);
+ return WaitStatus;
+}
+
+PKTHREAD
+KiRendezvousWithThread (
+ IN PRECHANNEL WaitChannel,
+ IN ULONG WaitMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function performs a rendezvous with a thread waiting on the
+ channel receive event.
+
+ N.B. This routine is called with the dispatcher database locked.
+
+ N.B. The wait IRQL is assumed to be set for the current thread.
+
+ N.B. Control is returned from this function with the dispatcher
+ database unlocked.
+
+Arguments:
+
+ WaitChannel - Supplies a pointer to a channel whose receive event
+ is the target of the rendezvous operation.
+
+ WaitMode - Supplies the processor wait mode.
+
+Return Value:
+
+ If a thread rendezvous is successfully performed, then the address
+ of the thread object is returned as the completion status. Otherwise,
+ if the wait completes because of a timeout or because the thread is
+ being terminated, then the appropriate status is returned.
+
+--*/
+
+{
+
+ PKTHREAD CurrentThread;
+ PKQUEUE Queue;
+ PKTHREAD TargetThread;
+ PKWAIT_BLOCK WaitBlock;
+ PLIST_ENTRY WaitEntry;
+ NTSTATUS WaitStatus;
+
+ //
+ // Start of wait loop.
+ //
+ // Note this loop is repeated if a kernel APC is delivered in the
+ // middle of the wait or a kernel APC is pending on the first attempt
+ // through the loop.
+ //
+ // If the rendezvous event wait list is not empty, then remove the first
+ // entry from the list, compute the address of the respective thread,
+    // cancel the thread timer if appropriate, and return the thread address.
+ // Otherwise, wait for a thread to rendezvous with.
+ //
+
+ CurrentThread = KeGetCurrentThread();
+ do {
+
+ //
+ // Check if there is a thread waiting on the rendezvous event.
+ //
+
+ WaitEntry = WaitChannel->ReceiveEvent.Header.WaitListHead.Flink;
+ if (WaitEntry != &WaitChannel->ReceiveEvent.Header.WaitListHead) {
+ WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
+ TargetThread = WaitBlock->Thread;
+
+ //
+ // Remove the wait block from the wait list of the receive event,
+ // and remove the target thread from the wait list.
+ //
+
+ RemoveEntryList(&WaitBlock->WaitListEntry);
+ RemoveEntryList(&TargetThread->WaitListEntry);
+
+ //
+ // If the target thread is processing a queue entry, then increment the
+ // count of currently active threads.
+ //
+
+ Queue = TargetThread->Queue;
+ if (Queue != NULL) {
+ Queue->CurrentCount += 1;
+ }
+
+ //
+            // Set the thread state to transition.
+ //
+
+ TargetThread->State = Transition;
+ break;
+
+ } else {
+
+ //
+ // Test to determine if a kernel APC is pending.
+ //
+ // If a kernel APC is pending and the previous IRQL was less than
+ // APC_LEVEL, then a kernel APC was queued by another processor
+ // just after IRQL was raised to DISPATCH_LEVEL, but before the
+ // dispatcher database was locked.
+ //
+ // N.B. that this can only happen in a multiprocessor system.
+ //
+
+ if (CurrentThread->ApcState.KernelApcPending &&
+ (CurrentThread->WaitIrql < APC_LEVEL)) {
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its
+ // previous value. An APC interrupt will immediately occur
+ // which will result in the delivery of the kernel APC if
+ // possible.
+ //
+
+ KiUnlockDispatcherDatabase(CurrentThread->WaitIrql);
+
+ } else {
+
+ //
+ // Test if a user APC is pending.
+ //
+
+ if ((WaitMode != KernelMode) &&
+ (CurrentThread->ApcState.UserApcPending)) {
+ TargetThread = (PKTHREAD)STATUS_USER_APC;
+ break;
+ }
+
+ //
+ // Construct a wait block for the clear to send event object.
+ //
+
+ WaitBlock = &CurrentThread->WaitBlock[0];
+ CurrentThread->WaitBlockList = WaitBlock;
+ CurrentThread->WaitStatus = (NTSTATUS)0;
+ WaitBlock->Object = (PVOID)&WaitChannel->ClearToSendEvent;
+ WaitBlock->NextWaitBlock = WaitBlock;
+ WaitBlock->WaitKey = (CSHORT)STATUS_SUCCESS;
+ WaitBlock->WaitType = WaitAny;
+ InsertTailList(&WaitChannel->ClearToSendEvent.Header.WaitListHead,
+ &WaitBlock->WaitListEntry);
+
+ //
+ // If the current thread is processing a queue entry, then
+ // attempt to activate another thread that is blocked on the
+ // queue object.
+ //
+
+ Queue = CurrentThread->Queue;
+ if (Queue != NULL) {
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the thread wait parameters, set the thread dispatcher
+ // state to Waiting, and insert the thread in the wait list.
+ //
+
+ CurrentThread->Alertable = FALSE;
+ CurrentThread->WaitMode = (KPROCESSOR_MODE)WaitMode;
+ CurrentThread->WaitReason = WrRendezvous;
+ CurrentThread->WaitTime = KiQueryLowTickCount();
+ CurrentThread->State = Waiting;
+ KiInsertWaitList(WaitMode, CurrentThread);
+
+ //
+ // Switch context to selected thread.
+ //
+ // Control is returned at the original IRQL.
+ //
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+ ASSERT(CurrentThread->WaitIrql <= DISPATCH_LEVEL);
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return wait status.
+ //
+
+ if (WaitStatus != STATUS_KERNEL_APC) {
+ return (PKTHREAD)WaitStatus;
+ }
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&CurrentThread->WaitIrql);
+ }
+
+ } while (TRUE);
+
+ //
+ // Unlock the dispatcher database and return the target thread.
+ //
+
+ KiUnlockDispatcherDatabase(CurrentThread->WaitIrql);
+ return TargetThread;
+}
+
+#endif
diff --git a/private/ntos/ke/config.c b/private/ntos/ke/config.c
new file mode 100644
index 000000000..524bcf1e6
--- /dev/null
+++ b/private/ntos/ke/config.c
@@ -0,0 +1,207 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ config.c
+
+Abstract:
+
+ This module implements the code to find an ARC configuration tree
+ entry as constructed by the OS Loader.
+
+Author:
+
+ David N. Cutler (davec) 9-Sep-1991
+
+Environment:
+
+    Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,KeFindConfigurationEntry)
+#pragma alloc_text(INIT,KeFindConfigurationNextEntry)
+#endif
+
+PCONFIGURATION_COMPONENT_DATA
+KeFindConfigurationEntry (
+ IN PCONFIGURATION_COMPONENT_DATA Child,
+ IN CONFIGURATION_CLASS Class,
+ IN CONFIGURATION_TYPE Type,
+ IN PULONG Key OPTIONAL
+ )
+/*++
+
+Routine Description:
+
+ This function searches the specified configuration tree and returns a
+ pointer to an entry that matches the specified class, type, and key
+ parameters.
+
+ This routine is the same as KeFindConfigurationNextEntry except
+ that the search is always performed from the first entry.
+
+ N.B. This routine can only be called during system initialization.
+
+--*/
+{
+ PCONFIGURATION_COMPONENT_DATA Resume;
+
+ Resume = NULL;
+ return KeFindConfigurationNextEntry (Child, Class, Type, Key, &Resume);
+}
+
+PCONFIGURATION_COMPONENT_DATA
+KeFindConfigurationNextEntry (
+ IN PCONFIGURATION_COMPONENT_DATA Child,
+ IN CONFIGURATION_CLASS Class,
+ IN CONFIGURATION_TYPE Type,
+ IN PULONG Key OPTIONAL,
+ IN PCONFIGURATION_COMPONENT_DATA *Resume
+ )
+
+/*++
+
+Routine Description:
+
+ This function searches the specified configuration tree and returns a
+ pointer to an entry that matches the specified class, type, and key
+ parameters.
+
+ N.B. This routine can only be called during system initialization.
+
+Arguments:
+
+ Child - Supplies an optional pointer to an NT configuration component.
+
+ Class - Supplies the configuration class of the entry to locate.
+
+ Type - Supplies the configuration type of the entry to locate.
+
+ Key - Supplies a pointer to an optional key value to use in locating
+ the specified entry.
+
+ Resume - Supplies the last returned entry from which the search
+ should resume.
+
+Return Value:
+
+ If the specified entry is located, then a pointer to the configuration
+ entry is returned as the function value. Otherwise, NULL is returned.
+
+--*/
+
+{
+
+ PCONFIGURATION_COMPONENT_DATA Entry;
+ ULONG MatchKey;
+ ULONG MatchMask;
+ PCONFIGURATION_COMPONENT_DATA Sibling;
+
+ //
+ // Initialize the match key and mask based on whether the optional key
+ // value is specified.
+ //
+
+ if (ARGUMENT_PRESENT(Key)) {
+ MatchMask = 0xffffffff;
+ MatchKey = *Key;
+
+ } else {
+ MatchMask = 0;
+ MatchKey = 0;
+ }
+
+ //
+ // Search specified configuration tree for an entry that matches the
+ // specified class, type, and key.
+ //
+
+ while (Child != NULL) {
+ if (*Resume) {
+ //
+ // If the resume location is found, clear it and continue the
+ // search with the next entry.
+ //
+
+ if (Child == *Resume) {
+ *Resume = NULL;
+ }
+ } else {
+
+ //
+ // If the class, type, and key match, then return a pointer to
+ // the child entry.
+ //
+
+ if ((Child->ComponentEntry.Class == Class) &&
+ (Child->ComponentEntry.Type == Type) &&
+ ((Child->ComponentEntry.Key & MatchMask) == MatchKey)) {
+ return Child;
+ }
+ }
+
+ //
+ // If the child has a sibling list, then search the sibling list
+ // for an entry that matches the specified class, type, and key.
+ //
+
+ Sibling = Child->Sibling;
+ while (Sibling != NULL) {
+ if (*Resume) {
+ //
+ // If the resume location is found, clear it and continue the
+ // search with the next entry.
+ //
+
+ if (Sibling == *Resume) {
+ *Resume = NULL;
+ }
+ } else {
+
+ //
+ // If the class, type, and key match, then return a pointer to
+ // the child entry.
+ //
+
+ if ((Sibling->ComponentEntry.Class == Class) &&
+ (Sibling->ComponentEntry.Type == Type) &&
+ ((Sibling->ComponentEntry.Key & MatchMask) == MatchKey)) {
+ return Sibling;
+ }
+ }
+
+ //
+ // If the sibling has a child tree, then search the child tree
+ // for an entry that matches the specified class, type, and key.
+ //
+
+ if (Sibling->Child != NULL) {
+ Entry = KeFindConfigurationNextEntry (
+ Sibling->Child,
+ Class,
+ Type,
+ Key,
+ Resume
+ );
+
+ if (Entry != NULL) {
+ return Entry;
+ }
+ }
+
+ Sibling = Sibling->Sibling;
+ }
+
+ Child = Child->Child;
+ }
+
+ return NULL;
+}
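A brief usage sketch of the resumable search (initialization time only; the configuration-tree root shown here as LoaderBlock->ConfigurationRoot and the class/type values are illustrative assumptions, not taken from this module):

    PCONFIGURATION_COMPONENT_DATA Entry;
    PCONFIGURATION_COMPONENT_DATA Resume = NULL;

    //
    // Enumerate every serial controller entry in the loader-built tree.
    // Passing NULL for the key matches any key value.
    //

    for (;;) {
        Entry = KeFindConfigurationNextEntry(LoaderBlock->ConfigurationRoot,
                                             ControllerClass,
                                             SerialController,
                                             NULL,
                                             &Resume);

        if (Entry == NULL) {
            break;
        }

        //
        // Entry->ComponentEntry describes one matching controller; pass
        // the entry back in as the resume point to continue past it.
        //

        Resume = Entry;
    }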
diff --git a/private/ntos/ke/debug.c b/private/ntos/ke/debug.c
new file mode 100644
index 000000000..d6ace3680
--- /dev/null
+++ b/private/ntos/ke/debug.c
@@ -0,0 +1,531 @@
+/*++
+
+Copyright (c) 1989-1993 Microsoft Corporation
+
+Module Name:
+
+ debug.c
+
+Abstract:
+
+ This module implements kernel debugger synchronization routines.
+
+Author:
+
+ Ken Reneris (kenr) 30-Aug-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+#define IDBG 1
+
+
+#define FrozenState(a) (a & 0xF)
+
+// state
+#define RUNNING 0x00
+#define TARGET_FROZEN 0x02
+#define TARGET_THAW 0x03
+#define FREEZE_OWNER 0x04
+
+// flags (bits)
+#define FREEZE_ACTIVE 0x20
+
+
+//
+// Define local storage to save the old IRQL.
+//
+
+KIRQL KiOldIrql;
+
+#ifndef NT_UP
+PKPRCB KiFreezeOwner;
+#endif
+
+
+
+BOOLEAN
+KeFreezeExecution (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function freezes the execution of all other processors in the host
+ configuration and then returns to the caller.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame that describes the
+ trap.
+
+ ExceptionFrame - Supplies a pointer to an exception frame that
+ describes the trap.
+
+Return Value:
+
+ Previous interrupt enable.
+
+--*/
+
+{
+
+ BOOLEAN Enable;
+
+#if !defined(NT_UP)
+
+ BOOLEAN Flag;
+ PKPRCB Prcb;
+ ULONG TargetSet;
+ ULONG BitNumber;
+ KIRQL OldIrql;
+
+#if IDBG
+
+ ULONG Count = 30000;
+
+#endif
+#endif
+
+ //
+ // Disable interrupts.
+ //
+
+ Enable = KiDisableInterrupts();
+ KiFreezeFlag = FREEZE_FROZEN;
+
+#if !defined(NT_UP)
+ //
+ // Raise IRQL to HIGH_LEVEL.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ if (FrozenState(KeGetCurrentPrcb()->IpiFrozen) == FREEZE_OWNER) {
+ //
+ // This processor already owns the freeze lock.
+ // Return without trying to re-acquire the lock and without
+ // trying to IPI the other processors again.
+ //
+
+ return Enable;
+ }
+
+
+ //
+ // Try to acquire the KiFreezeExecutionLock before sending the request.
+ // To prevent deadlock from occurring, we need to accept and process
+ // incoming FreezeExecution requests while we are waiting to acquire
+ // the FreezeExecutionFlag.
+ //
+
+ while (KiTryToAcquireSpinLock (&KiFreezeExecutionLock) == FALSE) {
+
+ //
+ // FreezeExecutionLock is busy. Another processor may be trying
+ // to IPI us - go service any IPI.
+ //
+
+ KiRestoreInterrupts(Enable);
+ Flag = KiIpiServiceRoutine((PVOID)TrapFrame, (PVOID)ExceptionFrame);
+ KiDisableInterrupts();
+
+#if IDBG
+
+ if (Flag != FALSE) {
+ Count = 30000;
+ continue;
+ }
+
+ KeStallExecutionProcessor (100);
+ if (!Count--) {
+ Count = 30000;
+ if (KiTryToAcquireSpinLock (&KiFreezeLockBackup) == TRUE) {
+ KiFreezeFlag |= FREEZE_BACKUP;
+ break;
+ }
+ }
+
+#endif
+
+ }
+
+ //
+ // After acquiring the lock, we send a freeze request to each processor
+ // in the system (other than ourselves) and wait for each to become frozen.
+ //
+
+ Prcb = KeGetCurrentPrcb(); // Do this after spinlock is acquired.
+ TargetSet = KeActiveProcessors & ~(1 << Prcb->Number);
+ if (TargetSet) {
+
+#if IDBG
+ Count = 400;
+#endif
+
+ KiFreezeOwner = Prcb;
+ Prcb->IpiFrozen = FREEZE_OWNER | FREEZE_ACTIVE;
+ Prcb->SkipTick = TRUE;
+ KiIpiSend((KAFFINITY) TargetSet, IPI_FREEZE);
+
+ while (TargetSet != 0) {
+ BitNumber = KiFindFirstSetRightMember(TargetSet);
+ ClearMember(BitNumber, TargetSet);
+ Prcb = KiProcessorBlock[BitNumber];
+
+#if IDBG
+
+ while (Prcb->IpiFrozen != TARGET_FROZEN) {
+ if (Count == 0) {
+ KiFreezeFlag |= FREEZE_SKIPPED_PROCESSOR;
+ break;
+ }
+
+ KeStallExecutionProcessor (10000);
+ Count--;
+ }
+
+#else
+
+ while (Prcb->IpiFrozen != TARGET_FROZEN)
+ { }
+
+#endif
+
+ }
+ }
+
+ //
+ // Save the old IRQL and return whether interrupts were previously enabled.
+ //
+
+ KiOldIrql = OldIrql;
+
+#endif // !defined(NT_UP)
+
+ return Enable;
+}
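For reference, a small sketch of how the IpiFrozen field is decomposed by the macros defined at the top of this module (illustrative only): the low nibble holds the freeze state and FREEZE_ACTIVE is an independent flag bit marking the processor the debugger is currently focused on.

    UCHAR IpiFrozen;

    //
    // The freeze owner marks itself both as owner and as the initially
    // active (debugger-focused) processor, exactly as above.
    //

    IpiFrozen = FREEZE_OWNER | FREEZE_ACTIVE;

    ASSERT(FrozenState(IpiFrozen) == FREEZE_OWNER);     // state nibble
    ASSERT((IpiFrozen & FREEZE_ACTIVE) != 0);           // focus flag bit

    //
    // Handing focus to another processor clears only the flag bit; the
    // state nibble is unchanged.
    //

    IpiFrozen &= ~FREEZE_ACTIVE;
    ASSERT(FrozenState(IpiFrozen) == FREEZE_OWNER);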
+
+VOID
+KiFreezeTargetExecution (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function freezes the execution of the currently running processor.
+ If a trap frame is supplied, the current state is saved into the PRCB
+ for use by the debugger.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to the trap frame that describes the
+ trap.
+
+ ExceptionFrame - Supplies a pointer to the exception frame that
+ describes the trap.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+#if !defined(NT_UP)
+
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ BOOLEAN Enable;
+ KCONTINUE_STATUS Status;
+ EXCEPTION_RECORD ExceptionRecord;
+
+ Enable = KiDisableInterrupts();
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ Prcb = KeGetCurrentPrcb();
+ Prcb->IpiFrozen = TARGET_FROZEN;
+ Prcb->SkipTick = TRUE;
+
+ if (TrapFrame != NULL) {
+ KiSaveProcessorState(TrapFrame, ExceptionFrame);
+ }
+
+ //
+ // Sweep the data cache in case this is a system crash and the bug
+ // check code is attempting to write a crash dump file.
+ //
+
+ KeSweepCurrentDcache();
+
+ //
+ // Wait for the processor that requested the freeze to
+ // clear our frozen flag.
+ //
+
+ while (FrozenState(Prcb->IpiFrozen) == TARGET_FROZEN) {
+ if (Prcb->IpiFrozen & FREEZE_ACTIVE) {
+
+ //
+ // This processor has been made the active processor
+ //
+ if (TrapFrame) {
+ RtlZeroMemory (&ExceptionRecord, sizeof ExceptionRecord);
+ ExceptionRecord.ExceptionCode = STATUS_WAKE_SYSTEM_DEBUGGER;
+ ExceptionRecord.ExceptionRecord = &ExceptionRecord;
+ ExceptionRecord.ExceptionAddress =
+ (PVOID)CONTEXT_TO_PROGRAM_COUNTER (&Prcb->ProcessorState.ContextFrame);
+
+ Status = (KiDebugSwitchRoutine) (
+ &ExceptionRecord,
+ &Prcb->ProcessorState.ContextFrame,
+ FALSE
+ );
+
+ } else {
+ Status = ContinueError;
+ }
+
+ //
+ // If the status is anything other than continue-with-next-processor,
+ // then return the active state to the freeze owner (the master).
+ //
+
+ if (Status != ContinueNextProcessor) {
+ Prcb->IpiFrozen &= ~FREEZE_ACTIVE;
+ KiFreezeOwner->IpiFrozen |= FREEZE_ACTIVE;
+ }
+ }
+ }
+
+ if (TrapFrame != NULL) {
+ KiRestoreProcessorState(TrapFrame, ExceptionFrame);
+ }
+
+ Prcb->IpiFrozen = RUNNING;
+
+ KeFlushCurrentTb();
+ KeSweepCurrentIcache();
+
+ KeLowerIrql(OldIrql);
+ KiRestoreInterrupts(Enable);
+#endif // !defined(NT_UP)
+
+ return;
+}
+
+
+KCONTINUE_STATUS
+KeSwitchFrozenProcessor (
+ IN ULONG ProcessorNumber
+ )
+{
+#if !defined(NT_UP)
+ PKPRCB TargetPrcb, CurrentPrcb;
+
+ //
+ // If Processor number is out of range, reselect current processor
+ //
+
+ if (ProcessorNumber >= (ULONG) KeNumberProcessors) {
+ return ContinueProcessorReselected;
+ }
+
+ TargetPrcb = KiProcessorBlock[ProcessorNumber];
+ CurrentPrcb = KeGetCurrentPrcb();
+
+ //
+ // Move active flag to correct processor.
+ //
+
+ CurrentPrcb->IpiFrozen &= ~FREEZE_ACTIVE;
+ TargetPrcb->IpiFrozen |= FREEZE_ACTIVE;
+
+ //
+ // If this processor is frozen in KiFreezeTargetExecution, return to it
+ //
+
+ if (FrozenState(CurrentPrcb->IpiFrozen) == TARGET_FROZEN) {
+ return ContinueNextProcessor;
+ }
+
+ //
+ // This processor must be FREEZE_OWNER, wait to be reselected as the
+ // active processor
+ //
+
+ if (FrozenState(CurrentPrcb->IpiFrozen) != FREEZE_OWNER) {
+ return ContinueError;
+ }
+
+ while (!(CurrentPrcb->IpiFrozen & FREEZE_ACTIVE)) ;
+
+#endif // !defined(NT_UP)
+
+ //
+ // Reselect this processor
+ //
+
+ return ContinueProcessorReselected;
+}
+
+
+VOID
+KeThawExecution (
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function thaws the execution of all other processors in the host
+ configuration and then returns to the caller. It is intended for use by
+ the kernel debugger.
+
+Arguments:
+
+ Enable - Supplies the previous interrupt enable that is to be restored
+ after having thawed the execution of all other processors.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+#if !defined(NT_UP)
+
+ KIRQL OldIrql;
+ ULONG TargetSet;
+ ULONG BitNumber;
+ ULONG Flag;
+ PKPRCB Prcb;
+
+ //
+ // Before releasing the FreezeExecutionLock, clear every target's IpiFrozen
+ // flag.
+ //
+
+ KeGetCurrentPrcb()->IpiFrozen = RUNNING;
+
+ TargetSet = KeActiveProcessors & ~(1 << KeGetCurrentPrcb()->Number);
+ while (TargetSet != 0) {
+ BitNumber = KiFindFirstSetRightMember(TargetSet);
+ ClearMember(BitNumber, TargetSet);
+ Prcb = KiProcessorBlock[BitNumber];
+#if IDBG
+ //
+ // If the target processor was not frozen, then don't wait
+ // for the target to unfreeze.
+ //
+
+ if (FrozenState(Prcb->IpiFrozen) != TARGET_FROZEN) {
+ Prcb->IpiFrozen = RUNNING;
+ continue;
+ }
+#endif
+
+ Prcb->IpiFrozen = TARGET_THAW;
+ while (Prcb->IpiFrozen == TARGET_THAW)
+ { }
+ }
+
+ //
+ // Capture the previous IRQL before releasing the freeze lock.
+ //
+
+ OldIrql = KiOldIrql;
+
+#if IDBG
+
+ Flag = KiFreezeFlag;
+ KiFreezeFlag = 0;
+
+ if ((Flag & FREEZE_BACKUP) != 0) {
+ KiReleaseSpinLock(&KiFreezeLockBackup);
+ } else {
+ KiReleaseSpinLock(&KiFreezeExecutionLock);
+ }
+
+#else
+
+ KiFreezeFlag = 0;
+ KiReleaseSpinLock(&KiFreezeExecutionLock);
+
+#endif
+#endif // !defined (NT_UP)
+
+
+ //
+ // Flush the current TB, instruction cache, and data cache.
+ //
+
+ KeFlushCurrentTb();
+ KeSweepCurrentIcache();
+ KeSweepCurrentDcache();
+
+ //
+ // Lower IRQL and restore interrupt enable
+ //
+
+#if !defined(NT_UP)
+ KeLowerIrql(OldIrql);
+#endif
+ KiRestoreInterrupts(Enable);
+ return;
+}
+
+VOID
+KeReturnToFirmware (
+ IN FIRMWARE_REENTRY Routine
+ )
+
+/*++
+
+Routine Description:
+
+ This routine will thaw all other processors in an MP environment and cause
+ them to do a return to firmware with the supplied parameter.
+
+ It will then call HalReturnToFirmware itself.
+
+ N.B. It is assumed that we are in the environment of the kernel debugger
+ or a crash dump.
+
+
+Arguments:
+
+ Routine - What to invoke on return to firmware.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Just get the interface in now. When intel and kenr come up with the
+ // right stuff we can fill this in.
+ //
+
+ HalReturnToFirmware(Routine);
+
+}
diff --git a/private/ntos/ke/devquobj.c b/private/ntos/ke/devquobj.c
new file mode 100644
index 000000000..4e3ee38fc
--- /dev/null
+++ b/private/ntos/ke/devquobj.c
@@ -0,0 +1,445 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ devquobj.c
+
+Abstract:
+
+ This module implements the kernel device queue object. Functions are
+ provided to initialize a device queue object and to insert and remove
+ device queue entries in a device queue object.
+
+Author:
+
+ David N. Cutler (davec) 1-Apr-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input device queue
+// is really a kdevice_queue and not something else, like deallocated pool.
+//
+
+#define ASSERT_DEVICE_QUEUE(E) { \
+ ASSERT((E)->Type == DeviceQueueObject); \
+}
+
+
+VOID
+KeInitializeDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel device queue object.
+
+Arguments:
+
+ DeviceQueue - Supplies a pointer to a control object of type device
+ queue.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize standard control object header.
+ //
+
+ DeviceQueue->Type = DeviceQueueObject;
+ DeviceQueue->Size = sizeof(KDEVICE_QUEUE);
+
+ //
+ // Initialize the device queue list head, spin lock, and busy indicator.
+ //
+
+ InitializeListHead(&DeviceQueue->DeviceListHead);
+ KeInitializeSpinLock(&DeviceQueue->Lock);
+ DeviceQueue->Busy = FALSE;
+ return;
+}
+
+BOOLEAN
+KeInsertDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue,
+ IN PKDEVICE_QUEUE_ENTRY DeviceQueueEntry
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a device queue entry at the tail of the specified
+ device queue. If the device is not busy, then it is set busy and the entry
+ is not placed in the device queue. Otherwise the specified entry is placed
+ at the end of the device queue.
+
+ N.B. This function can only be called from DISPATCH_LEVEL.
+
+Arguments:
+
+ DeviceQueue - Supplies a pointer to a control object of type device queue.
+
+ DeviceQueueEntry - Supplies a pointer to a device queue entry.
+
+Return Value:
+
+ If the device is not busy, then a value of FALSE is returned. Otherwise a
+ value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Inserted;
+
+ ASSERT_DEVICE_QUEUE(DeviceQueue);
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Lock specified device queue.
+ //
+
+ KiAcquireSpinLock(&DeviceQueue->Lock);
+
+ //
+ // Insert the specified device queue entry at the end of the device queue
+ // if the device queue is busy. Otherwise set the device queue busy and
+ // don't insert the device queue entry.
+ //
+
+ if (DeviceQueue->Busy == TRUE) {
+ Inserted = TRUE;
+ InsertTailList(&DeviceQueue->DeviceListHead,
+ &DeviceQueueEntry->DeviceListEntry);
+ } else {
+ DeviceQueue->Busy = TRUE;
+ Inserted = FALSE;
+ }
+ DeviceQueueEntry->Inserted = Inserted;
+ KiReleaseSpinLock(&DeviceQueue->Lock);
+ return Inserted;
+}
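A typical caller-side sketch (the IRP-based start routine and names are illustrative assumptions, not taken from this module); the caller must already be at DISPATCH_LEVEL:

    VOID
    QueueOrStartRequest (
        IN PKDEVICE_QUEUE DeviceQueue,
        IN PIRP Irp
        )
    {
        //
        // If the device queue was not busy the entry is not queued and
        // the request must be started here; otherwise it waits its turn.
        //

        if (KeInsertDeviceQueue(DeviceQueue,
                                &Irp->Tail.Overlay.DeviceQueueEntry) == FALSE) {
            StartHardwareRequest(Irp);      // hypothetical start routine
        }
    }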
+
+BOOLEAN
+KeInsertByKeyDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue,
+ IN PKDEVICE_QUEUE_ENTRY DeviceQueueEntry,
+ IN ULONG SortKey
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a device queue entry into the specified device
+ queue according to a sort key. If the device is not busy, then it is
+ set busy and the entry is not placed in the device queue. Otherwise
+ the specified entry is placed in the device queue at a position such
+ that the specified sort key is greater than or equal to its predecessor
+ and less than its successor.
+
+ N.B. This function can only be called from DISPATCH_LEVEL.
+
+Arguments:
+
+ DeviceQueue - Supplies a pointer to a control object of type device queue.
+
+ DeviceQueueEntry - Supplies a pointer to a device queue entry.
+
+ SortKey - Supplies the sort key by which the position to insert the device
+ queue entry is to be determined.
+
+Return Value:
+
+ If the device is not busy, then a value of FALSE is returned. Otherwise a
+ value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Inserted;
+ PLIST_ENTRY NextEntry;
+ PKDEVICE_QUEUE_ENTRY QueueEntry;
+
+ ASSERT_DEVICE_QUEUE(DeviceQueue);
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Lock specified device queue.
+ //
+
+ KiAcquireSpinLock(&DeviceQueue->Lock);
+
+ //
+ // Insert the specified device queue entry in the device queue at the
+ // position specified by the sort key if the device queue is busy.
+ // Otherwise set the device queue busy and don't insert the device queue
+ // entry.
+ //
+
+ DeviceQueueEntry->SortKey = SortKey;
+ if (DeviceQueue->Busy == TRUE) {
+ Inserted = TRUE;
+ NextEntry = DeviceQueue->DeviceListHead.Flink;
+ while (NextEntry != &DeviceQueue->DeviceListHead) {
+ QueueEntry = CONTAINING_RECORD(NextEntry, KDEVICE_QUEUE_ENTRY,
+ DeviceListEntry);
+ if (SortKey < QueueEntry->SortKey) {
+ break;
+ }
+ NextEntry = NextEntry->Flink;
+ }
+ NextEntry = NextEntry->Blink;
+ InsertHeadList(NextEntry, &DeviceQueueEntry->DeviceListEntry);
+ } else {
+ DeviceQueue->Busy = TRUE;
+ Inserted = FALSE;
+ }
+ DeviceQueueEntry->Inserted = Inserted;
+ KiReleaseSpinLock(&DeviceQueue->Lock);
+ return Inserted;
+}
+
+PKDEVICE_QUEUE_ENTRY
+KeRemoveDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes an entry from the head of the specified device
+ queue. If the device queue is empty, then the device is set Not-Busy
+ and a NULL pointer is returned. Otherwise the next entry is removed
+ from the head of the device queue and the address of device queue entry
+ is returned.
+
+ N.B. This function can only be called from DISPATCH_LEVEL.
+
+Arguments:
+
+ DeviceQueue - Supplies a pointer to a control object of type device queue.
+
+Return Value:
+
+ A NULL pointer is returned if the device queue is empty. Otherwise a
+ pointer to a device queue entry is returned.
+
+--*/
+
+{
+
+ PKDEVICE_QUEUE_ENTRY DeviceQueueEntry;
+ PLIST_ENTRY NextEntry;
+
+ ASSERT_DEVICE_QUEUE(DeviceQueue);
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Lock specified device queue.
+ //
+
+ KiAcquireSpinLock(&DeviceQueue->Lock);
+
+ ASSERT(DeviceQueue->Busy == TRUE);
+
+ //
+ // If the device queue is not empty, then remove the first entry from
+ // the queue. Otherwise set the device queue not busy.
+ //
+
+ if (IsListEmpty(&DeviceQueue->DeviceListHead) == TRUE) {
+ DeviceQueue->Busy = FALSE;
+ DeviceQueueEntry = (PKDEVICE_QUEUE_ENTRY)NULL;
+ } else {
+ NextEntry = RemoveHeadList(&DeviceQueue->DeviceListHead);
+ DeviceQueueEntry = CONTAINING_RECORD(NextEntry, KDEVICE_QUEUE_ENTRY,
+ DeviceListEntry);
+ DeviceQueueEntry->Inserted = FALSE;
+ }
+
+ //
+ // Release device queue spin lock and return address of device queue
+ // entry.
+ //
+
+ KiReleaseSpinLock(&DeviceQueue->Lock);
+ return DeviceQueueEntry;
+}
+
+PKDEVICE_QUEUE_ENTRY
+KeRemoveByKeyDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue,
+ IN ULONG SortKey
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes an entry from the specified device
+ queue. If the device queue is empty, then the device is set Not-Busy
+ and a NULL pointer is returned. Otherwise an entry is removed
+ from the device queue and the address of the device queue entry
+ is returned. The queue is searched for the first entry that has a value
+ greater than or equal to the SortKey. If no such entry is found, then the
+ first entry of the queue is returned.
+
+ N.B. This function can only be called from DISPATCH_LEVEL.
+
+Arguments:
+
+ DeviceQueue - Supplies a pointer to a control object of type device queue.
+
+ SortKey - Supplies the sort key by which the position to remove the device
+ queue entry is to be determined.
+
+Return Value:
+
+ A NULL pointer is returned if the device queue is empty. Otherwise a
+ pointer to a device queue entry is returned.
+
+--*/
+
+{
+
+ PKDEVICE_QUEUE_ENTRY DeviceQueueEntry;
+ PLIST_ENTRY NextEntry;
+
+ ASSERT_DEVICE_QUEUE(DeviceQueue);
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Lock specified device queue.
+ //
+
+ KiAcquireSpinLock(&DeviceQueue->Lock);
+
+ ASSERT(DeviceQueue->Busy == TRUE);
+
+ //
+ // If the device queue is not empty, then remove the first entry from
+ // the queue. Otherwise set the device queue not busy.
+ //
+
+ if (IsListEmpty(&DeviceQueue->DeviceListHead) == TRUE) {
+ DeviceQueue->Busy = FALSE;
+ DeviceQueueEntry = (PKDEVICE_QUEUE_ENTRY)NULL;
+ } else {
+ NextEntry = DeviceQueue->DeviceListHead.Flink;
+ while (NextEntry != &DeviceQueue->DeviceListHead) {
+ DeviceQueueEntry = CONTAINING_RECORD(NextEntry, KDEVICE_QUEUE_ENTRY,
+ DeviceListEntry);
+ if (SortKey <= DeviceQueueEntry->SortKey) {
+ break;
+ }
+ NextEntry = NextEntry->Flink;
+ }
+
+ if (NextEntry != &DeviceQueue->DeviceListHead) {
+ RemoveEntryList(&DeviceQueueEntry->DeviceListEntry);
+
+ } else {
+ NextEntry = RemoveHeadList(&DeviceQueue->DeviceListHead);
+ DeviceQueueEntry = CONTAINING_RECORD(NextEntry, KDEVICE_QUEUE_ENTRY,
+ DeviceListEntry);
+ }
+
+ DeviceQueueEntry->Inserted = FALSE;
+ }
+
+ //
+ // Release device queue spin lock and return address of device queue
+ // entry.
+ //
+
+ KiReleaseSpinLock(&DeviceQueue->Lock);
+ return DeviceQueueEntry;
+}
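The by-key insert and remove routines above are what enable elevator-style ordering (this is essentially the mechanism used by IoStartPacket and IoStartNextPacketByKey). A sketch of the completion side, with the key and start routine as illustrative assumptions:

    VOID
    StartNextRequestByKey (
        IN PKDEVICE_QUEUE DeviceQueue,
        IN ULONG LastKey                     // e.g. the last starting sector
        )
    {
        PKDEVICE_QUEUE_ENTRY Entry;
        PIRP Irp;

        //
        // Continue the scan at or beyond the last position; per the
        // routine above, the search wraps to the head of the queue if
        // nothing lies at or beyond LastKey.
        //

        Entry = KeRemoveByKeyDeviceQueue(DeviceQueue, LastKey);
        if (Entry != NULL) {
            Irp = CONTAINING_RECORD(Entry, IRP, Tail.Overlay.DeviceQueueEntry);
            StartHardwareRequest(Irp);       // hypothetical start routine
        }
    }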
+
+BOOLEAN
+KeRemoveEntryDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue,
+ IN PKDEVICE_QUEUE_ENTRY DeviceQueueEntry
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes a specified entry from the specified device
+ queue. If the device queue entry is not in the device queue, then no
+ operation is performed. Otherwise the specified device queue entry is
+ removed from the device queue and its inserted status is set to FALSE.
+
+Arguments:
+
+ DeviceQueue - Supplies a pointer to a control object of type device queue.
+
+ DeviceQueueEntry - Supplies a pointer to a device queue entry which is to
+ be removed from its device queue.
+
+Return Value:
+
+ A value of TRUE is returned if the device queue entry is removed from its
+ device queue. Otherwise a value of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Removed;
+
+ ASSERT_DEVICE_QUEUE(DeviceQueue);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock specified device queue.
+ //
+
+ ExAcquireSpinLock(&DeviceQueue->Lock, &OldIrql);
+
+ //
+ // If the device queue entry is not in a device queue, then no operation
+ // is performed. Otherwise remove the specified device queue entry from its
+ // device queue.
+ //
+
+ Removed = DeviceQueueEntry->Inserted;
+ if (Removed == TRUE) {
+ DeviceQueueEntry->Inserted = FALSE;
+ RemoveEntryList(&DeviceQueueEntry->DeviceListEntry);
+ }
+
+ //
+ // Unlock specified device queue, lower IRQL to its previous level, and
+ // return whether the device queue entry was removed from its queue.
+ //
+
+ ExReleaseSpinLock(&DeviceQueue->Lock, OldIrql);
+ return Removed;
+}
diff --git a/private/ntos/ke/dirs b/private/ntos/ke/dirs
new file mode 100644
index 000000000..a2a38f0fd
--- /dev/null
+++ b/private/ntos/ke/dirs
@@ -0,0 +1,24 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ dirs.
+
+Abstract:
+
+ This file specifies the subdirectories of the current directory that
+ contain component makefiles.
+
+
+Author:
+
+
+NOTE: Commented description of this file is in \nt\bak\bin\dirs.tpl
+
+!ENDIF
+
+DIRS=up
+
+OPTIONAL_DIRS=mp
diff --git a/private/ntos/ke/dpcobj.c b/private/ntos/ke/dpcobj.c
new file mode 100644
index 000000000..345e72226
--- /dev/null
+++ b/private/ntos/ke/dpcobj.c
@@ -0,0 +1,436 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ dpcobj.c
+
+Abstract:
+
+ This module implements the kernel DPC object. Functions are provided
+ to initialize, insert, and remove DPC objects.
+
+Author:
+
+ David N. Cutler (davec) 6-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input dpc is
+// really a kdpc and not something else, like deallocated pool.
+//
+
+#define ASSERT_DPC(E) { \
+ ASSERT((E)->Type == DpcObject); \
+}
+
+VOID
+KeInitializeDpc (
+ IN PRKDPC Dpc,
+ IN PKDEFERRED_ROUTINE DeferredRoutine,
+ IN PVOID DeferredContext
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel DPC object. The deferred routine
+ and context parameter are stored in the DPC object.
+
+Arguments:
+
+ Dpc - Supplies a pointer to a control object of type DPC.
+
+ DeferredRoutine - Supplies a pointer to a function that is called when
+ the DPC object is removed from the current processor's DPC queue.
+
+ DeferredContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the DeferredRoutine parameter.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize standard control object header.
+ //
+
+ Dpc->Type = DpcObject;
+ Dpc->Number = 0;
+ Dpc->Importance = MediumImportance;
+
+ //
+ // Initialize deferred routine address and deferred context parameter.
+ //
+
+ Dpc->DeferredRoutine = DeferredRoutine;
+ Dpc->DeferredContext = DeferredContext;
+ Dpc->Lock = NULL;
+ return;
+}
+
+BOOLEAN
+KeInsertQueueDpc (
+ IN PRKDPC Dpc,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a DPC object into the DPC queue. If the DPC object
+ is already in the DPC queue, then no operation is performed. Otherwise,
+ the DPC object is inserted in the DPC queue and a dispatch interrupt is
+ requested.
+
+Arguments:
+
+ Dpc - Supplies a pointer to a control object of type DPC.
+
+ SystemArgument1, SystemArgument2 - Supply a set of two arguments that
+ contain untyped data provided by the executive.
+
+Return Value:
+
+ If the DPC object is already in a DPC queue, then a value of FALSE is
+ returned. Otherwise a value of TRUE is returned.
+
+--*/
+
+{
+
+ ULONG Index;
+ PKSPIN_LOCK Lock;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ ULONG Processor;
+
+ ASSERT_DPC(Dpc);
+
+ //
+ // Disable interrupts.
+ //
+
+#if defined(_MIPS_)
+
+ _disable();
+
+#else
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+#endif
+
+ //
+ // Acquire the DPC queue lock for the specified target processor.
+ //
+
+#if !defined(NT_UP)
+
+ if (Dpc->Number >= MAXIMUM_PROCESSORS) {
+ Processor = Dpc->Number - MAXIMUM_PROCESSORS;
+ Prcb = KiProcessorBlock[Processor];
+
+ } else {
+ Prcb = KeGetCurrentPrcb();
+ }
+
+ KiAcquireSpinLock(&Prcb->DpcLock);
+
+#else
+
+ Prcb = KeGetCurrentPrcb();
+
+#endif
+
+ //
+ // If the DPC object is not in a DPC queue, then store the system
+ // arguments, insert the DPC object in the DPC queue, increment the
+ // number of DPCs queued to the target processor, increment the DPC
+ // queue depth, set the address of the DPC target DPC spinlock, and
+ // request a dispatch interrupt if appropriate.
+ //
+ // N.B. The following test will be changed to a compare and swap
+ // when 386 support is dropped from the system.
+ //
+
+ Lock = Dpc->Lock;
+ if (Lock == NULL) {
+ Prcb->DpcCount += 1;
+ Prcb->DpcQueueDepth += 1;
+ Dpc->Lock = &Prcb->DpcLock;
+ Dpc->SystemArgument1 = SystemArgument1;
+ Dpc->SystemArgument2 = SystemArgument2;
+
+ //
+ // If the DPC is of high importance, then insert the DPC at the
+ // head of the DPC queue. Otherwise, insert the DPC at the end
+ // of the DPC queue.
+ //
+
+ if (Dpc->Importance == HighImportance) {
+ InsertHeadList(&Prcb->DpcListHead, &Dpc->DpcListEntry);
+
+ } else {
+ InsertTailList(&Prcb->DpcListHead, &Dpc->DpcListEntry);
+ }
+#if defined(_ALPHA_) && !defined(NT_UP)
+ //
+ // A memory barrier is required here to synchronize with
+ // KiRetireDpcList, which clears DpcRoutineActive and
+ // DpcInterruptRequested without the dispatcher lock.
+ //
+ __MB();
+#endif
+
+ //
+ // If a DPC routine is not active on the target processor, then
+ // request a dispatch interrupt if appropriate.
+ //
+
+ if ((Prcb->DpcRoutineActive == FALSE) &&
+ (Prcb->DpcInterruptRequested == FALSE)) {
+
+#if defined(NT_UP)
+
+ //
+ // Request a dispatch interrupt on the current processor if
+ // the DPC is not of low importance, the length of the DPC
+ // queue has exceeded the maximum threshold, or if the DPC
+ // request rate is below the minimum threshold.
+ //
+
+ if ((Dpc->Importance != LowImportance) ||
+ (Prcb->DpcQueueDepth >= Prcb->MaximumDpcQueueDepth) ||
+ (Prcb->DpcRequestRate < Prcb->MinimumDpcRate)) {
+ Prcb->DpcInterruptRequested = TRUE;
+ KiRequestSoftwareInterrupt(DISPATCH_LEVEL);
+ }
+
+#else
+
+ //
+ // If the DPC is being queued to another processor and the
+ // DPC is of high importance, or the length of the other
+ // processor's DPC queue has exceeded the maximum threshold,
+ // then request a dispatch interrupt.
+ //
+
+ if (Prcb != KeGetCurrentPrcb()) {
+ if (((Dpc->Importance == HighImportance) ||
+ (Prcb->DpcQueueDepth >= Prcb->MaximumDpcQueueDepth))) {
+ Prcb->DpcInterruptRequested = TRUE;
+ KiIpiSend((KAFFINITY)(1 << Processor), IPI_DPC);
+ }
+
+ } else {
+
+ //
+ // Request a dispatch interrupt on the current processor if
+ // the DPC is not of low importance, the length of the DPC
+ // queue has exceeded the maximum threshold, or if the DPC
+ // request rate is below the minimum threshold.
+ //
+
+ if ((Dpc->Importance != LowImportance) ||
+ (Prcb->DpcQueueDepth >= Prcb->MaximumDpcQueueDepth) ||
+ (Prcb->DpcRequestRate < Prcb->MinimumDpcRate)) {
+ Prcb->DpcInterruptRequested = TRUE;
+ KiRequestSoftwareInterrupt(DISPATCH_LEVEL);
+ }
+ }
+
+#endif
+
+ }
+ }
+
+ //
+ // Release the DPC lock, enable interrupts, and return whether the
+ // DPC was queued or not.
+ //
+
+#if !defined(NT_UP)
+
+ KiReleaseSpinLock(&Prcb->DpcLock);
+
+#endif
+
+#if defined(_MIPS_)
+
+ _enable();
+
+#else
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return (Lock == NULL);
+}
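A typical usage sketch (device, routine, and context names are illustrative): the DPC is initialized once and queued from the interrupt service routine; a FALSE return from KeInsertQueueDpc simply means a DPC is already pending and will cover this interrupt as well.

    KDPC InterruptDpc;

    VOID
    InterruptDpcRoutine (
        IN PKDPC Dpc,
        IN PVOID DeferredContext,
        IN PVOID SystemArgument1,
        IN PVOID SystemArgument2
        );

    VOID
    InitializeDeviceDpc (
        IN PVOID DeviceContext
        )
    {
        KeInitializeDpc(&InterruptDpc, InterruptDpcRoutine, DeviceContext);
    }

    BOOLEAN
    DeviceInterruptService (
        IN PKINTERRUPT Interrupt,
        IN PVOID ServiceContext
        )
    {
        //
        // Defer the bulk of the work to DISPATCH_LEVEL; duplicate queue
        // attempts are coalesced by KeInsertQueueDpc.
        //

        KeInsertQueueDpc(&InterruptDpc, NULL, NULL);
        return TRUE;
    }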
+
+BOOLEAN
+KeRemoveQueueDpc (
+ IN PRKDPC Dpc
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes a DPC object from the DPC queue. If the DPC object
+ is not in the DPC queue, then no operation is performed. Otherwise, the
+ DPC object is removed from the DPC queue and its inserted state is set
+ FALSE.
+
+Arguments:
+
+ Dpc - Supplies a pointer to a control object of type DPC.
+
+Return Value:
+
+ If the DPC object is not in the DPC queue, then a value of FALSE is
+ returned. Otherwise a value of TRUE is returned.
+
+--*/
+
+{
+
+ PKSPIN_LOCK Lock;
+ PKPRCB Prcb;
+
+ ASSERT_DPC(Dpc);
+
+ //
+ // If the DPC object is in the DPC queue, then remove it from the queue
+ // and set its inserted state to FALSE.
+ //
+
+ _disable();
+ Lock = Dpc->Lock;
+ if (Lock != NULL) {
+
+#if !defined(NT_UP)
+
+ //
+ // Acquire the DPC lock of the target processor.
+ //
+
+ KiAcquireSpinLock(Lock);
+
+#endif
+
+ Prcb = CONTAINING_RECORD(Lock, KPRCB, DpcLock);
+ Prcb->DpcQueueDepth -= 1;
+ Dpc->Lock = NULL;
+ RemoveEntryList(&Dpc->DpcListEntry);
+
+#if !defined(NT_UP)
+
+ //
+ // Release the DPC lock of the target processor.
+ //
+
+ KiReleaseSpinLock(Lock);
+
+#endif
+
+ }
+
+ //
+ // Enable interrupts and return whether the DPC was removed from a DPC
+ // queue.
+ //
+
+ _enable();
+ return (Lock != NULL);
+}
+
+VOID
+KeSetImportanceDpc (
+ IN PRKDPC Dpc,
+ IN KDPC_IMPORTANCE Importance
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the importance of a DPC.
+
+Arguments:
+
+ Dpc - Supplies a pointer to a control object of type DPC.
+
+ Importance - Supplies the importance of the DPC.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set the importance of the DPC.
+ //
+
+ Dpc->Importance = (UCHAR)Importance;
+ return;
+}
+
+VOID
+KeSetTargetProcessorDpc (
+ IN PRKDPC Dpc,
+ IN CCHAR Number
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the processor number to which the DPC is targeted.
+
+Arguments:
+
+ Dpc - Supplies a pointer to a control object of type DPC.
+
+ Number - Supplies the target processor number.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set target processor number.
+ //
+ // The target processor number is biased by the maximum number of
+ // processors that are supported.
+ //
+
+ Dpc->Number = MAXIMUM_PROCESSORS + Number;
+ return;
+}
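A short sketch combining the two routines above (names are illustrative): binding a DPC to processor 0 and marking it high importance so it is queued at the head of that processor's DPC list.

    KDPC WatchdogDpc;

    VOID
    InitializeWatchdogDpc (
        VOID
        )
    {
        KeInitializeDpc(&WatchdogDpc, WatchdogDpcRoutine, NULL);   // hypothetical deferred routine
        KeSetTargetProcessorDpc(&WatchdogDpc, 0);                  // target processor 0 (biased internally)
        KeSetImportanceDpc(&WatchdogDpc, HighImportance);          // queue at the list head
    }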
diff --git a/private/ntos/ke/dpcsup.c b/private/ntos/ke/dpcsup.c
new file mode 100644
index 000000000..13c059b05
--- /dev/null
+++ b/private/ntos/ke/dpcsup.c
@@ -0,0 +1,427 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ dpcsup.c
+
+Abstract:
+
+ This module contains the support routines for the system DPC objects.
+ Functions are provided to process quantum end, the power notification
+ queue, and timer expiration.
+
+Author:
+
+ David N. Cutler (davec) 22-Apr-1989
+
+Environment:
+
+ Kernel mode only, IRQL DISPATCH_LEVEL.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define DPC entry structure and maximum DPC List size.
+//
+
+#define MAXIMUM_DPC_LIST_SIZE 16
+
+typedef struct _DPC_ENTRY {
+ PRKDPC Dpc;
+ PKDEFERRED_ROUTINE Routine;
+ PVOID Context;
+} DPC_ENTRY, *PDPC_ENTRY;
+
+PRKTHREAD
+KiQuantumEnd (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called when a quantum end event occurs on the current
+ processor. Its function is to determine whether the thread priority should
+ be decremented and whether a redispatch of the processor should occur.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ The next thread to be scheduled on the current processor is returned as
+ the function value. If this value is not NULL, then the return is with
+ the dispatcher database locked. Otherwise, the dispatcher database is
+ unlocked.
+
+--*/
+
+{
+
+ KPRIORITY NewPriority;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ KPRIORITY Priority;
+ PKPROCESS Process;
+ PRKTHREAD Thread;
+ PRKTHREAD NextThread;
+
+ //
+ // Acquire the dispatcher database lock.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ Thread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the quantum has expired for the current thread, then update its
+ // quantum and priority.
+ //
+
+ if (Thread->Quantum <= 0) {
+ Process = Thread->ApcState.Process;
+ Thread->Quantum = Process->ThreadQuantum;
+
+ //
+ // Decrement the thread's current priority if the thread is not
+ // running in a realtime priority class and check to determine
+ // if the processor should be redispatched.
+ //
+
+ Priority = Thread->Priority;
+ if (Priority < LOW_REALTIME_PRIORITY) {
+ NewPriority = Priority - Thread->PriorityDecrement - 1;
+ if (NewPriority < Thread->BasePriority) {
+ NewPriority = Thread->BasePriority;
+ }
+
+ Thread->PriorityDecrement = 0;
+
+ } else {
+ NewPriority = Priority;
+ }
+
+ //
+ // If the new thread priority is different than the current thread
+ // priority, then the thread does not run at a realtime level and
+ // its priority should be set. Otherwise, attempt to round robin
+ // at the current level.
+ //
+
+ if (Priority != NewPriority) {
+ KiSetPriorityThread(Thread, NewPriority);
+
+ } else {
+ if (Prcb->NextThread == NULL) {
+ NextThread = KiFindReadyThread(Thread->NextProcessor, Priority);
+
+ if (NextThread != NULL) {
+ NextThread->State = Standby;
+ Prcb->NextThread = NextThread;
+ }
+
+ } else {
+ Thread->Preempted = FALSE;
+ }
+ }
+ }
+
+ //
+ // If a thread was scheduled for execution on the current processor,
+ // then return the address of the thread with the dispatcher database
+ // locked. Otherwise, return NULL with the dispatcher database unlocked.
+ //
+
+ NextThread = Prcb->NextThread;
+ if (NextThread == NULL) {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+
+ return NextThread;
+}
+
+#if DBG
+
+
+VOID
+KiCheckTimerTable (
+ IN ULARGE_INTEGER CurrentTime
+ )
+
+{
+
+ ULONG Index;
+ PLIST_ENTRY ListHead;
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+ PKTIMER Timer;
+
+ //
+ // Raise IRQL to highest level and scan timer table for timers that
+ // have expired.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+ Index = 0;
+ do {
+ ListHead = &KiTimerTableListHead[Index];
+ NextEntry = ListHead->Flink;
+ while (NextEntry != ListHead) {
+ Timer = CONTAINING_RECORD(NextEntry, KTIMER, TimerListEntry);
+ NextEntry = NextEntry->Flink;
+ if (Timer->DueTime.QuadPart <= CurrentTime.QuadPart) {
+ DbgBreakPoint();
+ }
+ }
+
+ Index += 1;
+ } while(Index < TIMER_TABLE_SIZE);
+
+ //
+ // Lower IRQL to the previous level.
+ //
+
+ KeLowerIrql(OldIrql);
+ return;
+}
+
+#endif
+
+
+VOID
+KiTimerExpiration (
+ IN PKDPC TimerDpc,
+ IN PVOID DeferredContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called when the clock interrupt routine discovers that
+ a timer has expired.
+
+Arguments:
+
+ TimerDpc - Supplies a pointer to a control object of type DPC.
+
+ DeferredContext - Not used.
+
+ SystemArgument1 - Supplies the starting timer table index value to
+ use for the timer table scan.
+
+ SystemArgument2 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ ULARGE_INTEGER CurrentTime;
+ LIST_ENTRY ExpiredListHead;
+ LONG HandLimit;
+ LONG Index;
+ PLIST_ENTRY ListHead;
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+ PKTIMER Timer;
+
+ //
+ // Acquire the dispatcher database lock and read the current interrupt
+ // time to determine which timers have expired.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ KiQueryInterruptTime((PLARGE_INTEGER)&CurrentTime);
+
+ //
+ // If the timer table has not wrapped, then start with the specified
+ // timer table index value, and scan for timer entries that have expired.
+ // Otherwise, start with the first entry in the timer table and scan the
+ // entire table for timer entries that have expired.
+ //
+ // N.B. This latter condition exists when DPC processing is blocked for a
+ // period longer than one round trip through the timer table.
+ //
+
+ HandLimit = (LONG)KiQueryLowTickCount();
+ if (((ULONG)(HandLimit - (LONG)SystemArgument1)) >= TIMER_TABLE_SIZE) {
+ Index = - 1;
+ HandLimit = TIMER_TABLE_SIZE - 1;
+
+ } else {
+ Index = ((LONG)SystemArgument1 - 1) & (TIMER_TABLE_SIZE - 1);
+ HandLimit &= (TIMER_TABLE_SIZE - 1);
+ }
+
+ InitializeListHead(&ExpiredListHead);
+ do {
+ Index = (Index + 1) & (TIMER_TABLE_SIZE - 1);
+ ListHead = &KiTimerTableListHead[Index];
+ NextEntry = ListHead->Flink;
+ while (NextEntry != ListHead) {
+ Timer = CONTAINING_RECORD(NextEntry, KTIMER, TimerListEntry);
+ if (Timer->DueTime.QuadPart <= CurrentTime.QuadPart) {
+
+ //
+ // The next timer in the current timer list has expired.
+ // Remove the entry from the timer list and insert the
+ // timer in the expired list.
+ //
+
+ RemoveEntryList(&Timer->TimerListEntry);
+ InsertTailList(&ExpiredListHead, &Timer->TimerListEntry);
+ NextEntry = ListHead->Flink;
+
+ } else {
+ break;
+ }
+ }
+
+ } while(Index != HandLimit);
+
+#if DBG
+
+ if (((ULONG)SystemArgument2 == 0) && (KeNumberProcessors == 1)) {
+ KiCheckTimerTable(CurrentTime);
+ }
+
+#endif
+
+ //
+ // Process the expired timer list.
+ //
+ // N.B. The following function returns with the dispatcher database
+ // unlocked.
+ //
+
+ KiTimerListExpire(&ExpiredListHead, OldIrql);
+ return;
+}
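A worked example of the wrap test above, assuming a TIMER_TABLE_SIZE of 128 (the actual constant comes from ki.h; the tick values are made up):

    #define ASSUMED_TIMER_TABLE_SIZE 128

    LONG LastHand  = 1000;                  // SystemArgument1 passed to the DPC
    LONG HandLimit = 1005;                  // KiQueryLowTickCount()
    LONG Index;

    if (((ULONG)(HandLimit - LastHand)) >= ASSUMED_TIMER_TABLE_SIZE) {

        //
        // DPC processing fell a full trip behind: rescan every bucket.
        //

        Index = -1;
        HandLimit = ASSUMED_TIMER_TABLE_SIZE - 1;

    } else {

        //
        // Normal case: Index starts at 103 and the scan loop visits
        // buckets 104 through 109, i.e. the buckets for ticks 1000..1005.
        //

        Index = (LastHand - 1) & (ASSUMED_TIMER_TABLE_SIZE - 1);    // 103
        HandLimit &= (ASSUMED_TIMER_TABLE_SIZE - 1);                // 109
    }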
+
+VOID
+FASTCALL
+KiTimerListExpire (
+ IN PLIST_ENTRY ExpiredListHead,
+ IN KIRQL OldIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to process a list of timers that have expired.
+
+ N.B. This function is called with the dispatcher database locked and
+ returns with the dispatcher database unlocked.
+
+Arguments:
+
+ ExpiredListHead - Supplies a pointer to a list of timers that have
+ expired.
+
+ OldIrql - Supplies the previous IRQL.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Count;
+ PKDPC Dpc;
+ DPC_ENTRY DpcList[MAXIMUM_DPC_LIST_SIZE];
+ LONG Index;
+ LARGE_INTEGER Interval;
+ KIRQL OldIrql1;
+ LARGE_INTEGER SystemTime;
+ PKTIMER Timer;
+
+ //
+ // Capture the timer expiration time.
+ //
+
+ KiQuerySystemTime(&SystemTime);
+
+ //
+ // Remove the next timer from the expired timer list, set the state of
+ // the timer to signaled, reinsert the timer in the timer tree if it is
+ // periodic, and optionally call the DPC routine if one is specified.
+ //
+
+RestartScan:
+ Index = 0;
+ while (ExpiredListHead->Flink != ExpiredListHead) {
+ Timer = CONTAINING_RECORD(ExpiredListHead->Flink, KTIMER, TimerListEntry);
+ KiRemoveTreeTimer(Timer);
+ Timer->Header.SignalState = 1;
+ if (IsListEmpty(&Timer->Header.WaitListHead) == FALSE) {
+ KiWaitTest(Timer, TIMER_EXPIRE_INCREMENT);
+ }
+
+ if (Timer->Period != 0) {
+ Interval.QuadPart = Int32x32To64(Timer->Period, - 10 * 1000);
+ KiInsertTreeTimer(Timer, Interval);
+ }
+
+ if (Timer->Dpc != NULL) {
+ Dpc = Timer->Dpc;
+ DpcList[Index].Dpc = Dpc;
+ DpcList[Index].Routine = Dpc->DeferredRoutine;
+ DpcList[Index].Context = Dpc->DeferredContext;
+ Index += 1;
+ if (Index == MAXIMUM_DPC_LIST_SIZE) {
+ break;
+ }
+ }
+ }
+
+ //
+ // Unlock the dispatcher database and process DPC list entries.
+ //
+
+ if (Index != 0) {
+ KiUnlockDispatcherDatabase(DISPATCH_LEVEL);
+ Count = Index;
+ do {
+ Index -= 1;
+ (DpcList[Index].Routine)(DpcList[Index].Dpc,
+ DpcList[Index].Context,
+ (PVOID)SystemTime.LowPart,
+ (PVOID)SystemTime.HighPart);
+
+ } while (Index > 0);
+
+ //
+ // If processing of the expired timer list was terminated because
+ // the DPC List was full, then process any remaining entries.
+ //
+
+ if (Count == MAXIMUM_DPC_LIST_SIZE) {
+ KiLockDispatcherDatabase(&OldIrql1);
+ goto RestartScan;
+ }
+
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+
+ return;
+}
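The bounded-batch structure above generalizes to any case where callbacks must run without holding the protecting lock. A generic sketch of the idiom (the helper names are placeholders, not kernel APIs):

    #define BATCH_SIZE 16

    VOID
    DrainListInBatches (
        VOID
        )
    {
        PVOID Batch[BATCH_SIZE];
        ULONG Count;
        ULONG Index;

        do {

            //
            // Collect up to BATCH_SIZE items while holding the lock.
            //

            Count = 0;
            AcquireListLock();
            while ((Count < BATCH_SIZE) && (ListIsNotEmpty() != FALSE)) {
                Batch[Count] = RemoveListHead();
                Count += 1;
            }

            ReleaseListLock();

            //
            // Invoke the callbacks with the lock dropped; rescan only if
            // the batch filled, since more items may remain.
            //

            for (Index = 0; Index < Count; Index += 1) {
                InvokeCallback(Batch[Index]);
            }

        } while (Count == BATCH_SIZE);
    }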
diff --git a/private/ntos/ke/eventobj.c b/private/ntos/ke/eventobj.c
new file mode 100644
index 000000000..7bfc8ea59
--- /dev/null
+++ b/private/ntos/ke/eventobj.c
@@ -0,0 +1,532 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ eventobj.c
+
+Abstract:
+
+ This module implements the kernel event objects. Functions are
+ provided to initialize, pulse, read, reset, and set event objects.
+
+Author:
+
+ David N. Cutler (davec) 27-Feb-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#undef KeClearEvent
+
+//
+// The following assert macro is used to check that an input event is
+// really a kernel event and not something else, like deallocated pool.
+//
+
+#define ASSERT_EVENT(E) { \
+ ASSERT((E)->Header.Type == NotificationEvent || \
+ (E)->Header.Type == SynchronizationEvent); \
+}
+
+//
+// The following assert macro is used to check that an input event is
+// really a kernel event pair and not something else, like deallocated
+// pool.
+//
+
+#define ASSERT_EVENT_PAIR(E) { \
+ ASSERT((E)->Type == EventPairObject); \
+}
+
+
+#undef KeInitializeEvent
+
+VOID
+KeInitializeEvent (
+ IN PRKEVENT Event,
+ IN EVENT_TYPE Type,
+ IN BOOLEAN State
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel event object. The initial signal
+ state of the object is set to the specified value.
+
+Arguments:
+
+ Event - Supplies a pointer to a dispatcher object of type event.
+
+ Type - Supplies the type of event; NotificationEvent or
+ SynchronizationEvent.
+
+ State - Supplies the initial signal state of the event object.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize standard dispatcher object header, set initial signal
+ // state of event object, and set the type of event object.
+ //
+
+ Event->Header.Type = (UCHAR)Type;
+ Event->Header.Size = sizeof(KEVENT) / sizeof(LONG);
+ Event->Header.SignalState = State;
+ InitializeListHead(&Event->Header.WaitListHead);
+ return;
+}
+
+VOID
+KeInitializeEventPair (
+ IN PKEVENT_PAIR EventPair
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel event pair object. A kernel event
+ pair object contains two separate synchronization event objects that
+ are used to provide a fast interprocess synchronization capability.
+
+Arguments:
+
+ EventPair - Supplies a pointer to a control object of type event pair.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize the type and size of the event pair object and initialize
+ // the two event object as synchronization events with an initial state
+ // of FALSE.
+ //
+
+ EventPair->Type = (USHORT)EventPairObject;
+ EventPair->Size = sizeof(KEVENT_PAIR);
+ KeInitializeEvent(&EventPair->EventLow, SynchronizationEvent, FALSE);
+ KeInitializeEvent(&EventPair->EventHigh, SynchronizationEvent, FALSE);
+ return;
+}
+
+VOID
+KeClearEvent (
+ IN PRKEVENT Event
+ )
+
+/*++
+
+Routine Description:
+
+ This function clears the signal state of an event object.
+
+Arguments:
+
+ Event - Supplies a pointer to a dispatcher object of type event.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ASSERT_EVENT(Event);
+
+ //
+ // Clear signal state of event object.
+ //
+
+ Event->Header.SignalState = 0;
+ return;
+}
+
+LONG
+KePulseEvent (
+ IN PRKEVENT Event,
+ IN KPRIORITY Increment,
+ IN BOOLEAN Wait
+ )
+
+/*++
+
+Routine Description:
+
+ This function atomically sets the signal state of an event object to
+ Signaled, attempts to satisfy as many Waits as possible, and then resets
+ the signal state of the event object to Not-Signaled. The previous signal
+ state of the event object is returned as the function value.
+
+Arguments:
+
+ Event - Supplies a pointer to a dispatcher object of type event.
+
+ Increment - Supplies the priority increment that is to be applied
+ if setting the event causes a Wait to be satisfied.
+
+ Wait - Supplies a boolean value that signifies whether the call to
+ KePulseEvent will be immediately followed by a call to one of the
+ kernel Wait functions.
+
+Return Value:
+
+ The previous signal state of the event object.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG OldState;
+ PRKTHREAD Thread;
+
+ ASSERT_EVENT(Event);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the current state of the event object is Not-Signaled and
+ // the wait queue is not empty, then set the state of the event
+ // to Signaled, satisfy as many Waits as possible, and then reset
+ // the state of the event to Not-Signaled.
+ //
+
+ OldState = Event->Header.SignalState;
+ if ((OldState == 0) && (IsListEmpty(&Event->Header.WaitListHead) == FALSE)) {
+ Event->Header.SignalState = 1;
+ KiWaitTest(Event, Increment);
+ }
+
+ Event->Header.SignalState = 0;
+
+ //
+ // If the value of the Wait argument is TRUE, then return to the
+ // caller with IRQL raised and the dispatcher database locked. Else
+ // release the dispatcher database lock and lower IRQL to the
+ // previous value.
+ //
+
+ if (Wait != FALSE) {
+ Thread = KeGetCurrentThread();
+ Thread->WaitIrql = OldIrql;
+ Thread->WaitNext = Wait;
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+
+ //
+ // Return previous signal state of event object.
+ //
+
+ return OldState;
+}
+
+LONG
+KeReadStateEvent (
+ IN PRKEVENT Event
+ )
+
+/*++
+
+Routine Description:
+
+ This function reads the current signal state of an event object.
+
+Arguments:
+
+ Event - Supplies a pointer to a dispatcher object of type event.
+
+Return Value:
+
+ The current signal state of the event object.
+
+--*/
+
+{
+
+ ASSERT_EVENT(Event);
+
+ //
+ // Return current signal state of event object.
+ //
+
+ return Event->Header.SignalState;
+}
+
+LONG
+KeResetEvent (
+ IN PRKEVENT Event
+ )
+
+/*++
+
+Routine Description:
+
+ This function resets the signal state of an event object to
+ Not-Signaled. The previous state of the event object is returned
+ as the function value.
+
+Arguments:
+
+ Event - Supplies a pointer to a dispatcher object of type event.
+
+Return Value:
+
+ The previous signal state of the event object.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG OldState;
+
+ ASSERT_EVENT(Event);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current signal state of event object and then reset
+ // the state of the event object to Not-Signaled.
+ //
+
+ OldState = Event->Header.SignalState;
+ Event->Header.SignalState = 0;
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return previous signal state of event object.
+ //
+
+ return OldState;
+}
+
+LONG
+KeSetEvent (
+ IN PRKEVENT Event,
+ IN KPRIORITY Increment,
+ IN BOOLEAN Wait
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the signal state of an event object to Signaled
+ and attempts to satisfy as many Waits as possible. The previous
+ signal state of the event object is returned as the function value.
+
+Arguments:
+
+ Event - Supplies a pointer to a dispatcher object of type event.
+
+ Increment - Supplies the priority increment that is to be applied
+ if setting the event causes a Wait to be satisfied.
+
+ Wait - Supplies a boolean value that signifies whether the call to
+ KeSetEvent will be immediately followed by a call to one of the
+ kernel Wait functions.
+
+Return Value:
+
+ The previous signal state of the event object.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG OldState;
+ PRKTHREAD Thread;
+
+ ASSERT_EVENT(Event);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Collect call data.
+ //
+
+#if defined(_COLLECT_SET_EVENT_CALLDATA_)
+
+ RECORD_CALL_DATA(&KiSetEventCallData);
+
+#endif
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the current state of the event object is not Signaled, then set the
+ // state of the event object to Signaled, and check for waiters.
+ //
+
+ OldState = Event->Header.SignalState;
+ Event->Header.SignalState = 1;
+ if ((OldState == 0) && (IsListEmpty(&Event->Header.WaitListHead) == FALSE)) {
+ KiWaitTest(Event, Increment);
+ }
+
+ //
+ // If the value of the Wait argument is TRUE, then return to the
+ // caller with IRQL raised and the dispatcher database locked. Else
+ // release the dispatcher database lock and lower IRQL to its
+ // previous value.
+ //
+
+ if (Wait != FALSE) {
+ Thread = KeGetCurrentThread();
+ Thread->WaitNext = Wait;
+ Thread->WaitIrql = OldIrql;
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+
+ //
+ // Return previous signal state of event object.
+ //
+
+ return OldState;
+}
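A typical driver-side sketch using these routines (the event placement, names, and worker loop are illustrative assumptions):

    KEVENT WorkReadyEvent;

    VOID
    InitializeWorker (
        VOID
        )
    {
        //
        // A synchronization event auto-resets, so each KeSetEvent
        // releases exactly one waiter.
        //

        KeInitializeEvent(&WorkReadyEvent, SynchronizationEvent, FALSE);
    }

    VOID
    SignalWork (
        VOID
        )
    {
        //
        // Callable at or below DISPATCH_LEVEL, e.g. from a DPC routine.
        //

        KeSetEvent(&WorkReadyEvent, IO_NO_INCREMENT, FALSE);
    }

    VOID
    WorkerThread (
        IN PVOID Context
        )
    {
        for (;;) {
            KeWaitForSingleObject(&WorkReadyEvent, Executive, KernelMode,
                                  FALSE, NULL);

            //
            // Process the queued work here.
            //
        }
    }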
+
+VOID
+KeSetEventBoostPriority (
+ IN PRKEVENT Event,
+ IN PRKTHREAD *Thread OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function conditionally sets the signal state of an event object
+ to Signaled, attempts to unwait the first waiter, and optionally
+ returns the thread address of the unwaited thread.
+
+ N.B. This function can only be called with synchronization events
+ and is primarily for the purpose of implementing fast mutexes.
+ It is assumed that the waiter is NEVER waiting on multiple
+ objects.
+
+Arguments:
+
+ Event - Supplies a pointer to a dispatcher object of type event.
+
+ Thread - Supplies an optional pointer to a variable that receives
+ the address of the thread that is awakened.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KPRIORITY Increment;
+ KIRQL OldIrql;
+ PRKTHREAD WaitThread;
+
+ ASSERT(Event->Header.Type == SynchronizationEvent);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the wait list is not empty, then satisfy the wait of the
+ // first thread in the wait list. Otherwise, set the signal state
+ // of the event object.
+ //
+ // N.B. This function is only called for fast mutexes and exclusive
+ // access to resources. All waits MUST be wait for single object.
+ //
+
+ Event->Header.SignalState = 1;
+ if (IsListEmpty(&Event->Header.WaitListHead) == FALSE) {
+
+ //
+ // Get the address of the waiting thread.
+ //
+
+ WaitThread = CONTAINING_RECORD(Event->Header.WaitListHead.Flink,
+ KWAIT_BLOCK,
+ WaitListEntry)->Thread;
+
+ //
+ // If specified, return the address of the thread that is awakened.
+ //
+
+ if (ARGUMENT_PRESENT(Thread)) {
+ *Thread = WaitThread;
+ }
+
+ //
+ // Clear the signal state of the event, give the new owner of the
+ // resource/fast mutex (the only callers) a full quantum, and unwait
+ // the thread with a standard event increment unless the system is
+ // a server system, in which case no boost is given.
+ //
+
+ Event->Header.SignalState = 0;
+ WaitThread->Quantum = WaitThread->ApcState.Process->ThreadQuantum;
+ Increment = 0;
+ if (MmProductType == 0) {
+ Increment = EVENT_INCREMENT;
+ }
+
+ KiUnwaitThread(WaitThread, STATUS_SUCCESS, Increment);
+ }
+
+ //
+ // Unlock dispatcher database lock and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
diff --git a/private/ntos/ke/genxx.inc b/private/ntos/ke/genxx.inc
new file mode 100644
index 000000000..ba0cb1184
--- /dev/null
+++ b/private/ntos/ke/genxx.inc
@@ -0,0 +1,807 @@
+/*++
+
+Copyright (c) 1995 Microsoft Corporation
+
+Module Name:
+
+ genxx.inc
+
+Abstract:
+
+ This file contains common code to generate assembler definitions.
+
+Author:
+
+ David N. Cutler (davec) 9-Aug-1995
+
+Revision History:
+
+--*/
+
+ //
+ // Process state enumerated type definitions.
+ //
+
+ genCom("Process State Enumerated Type Values");
+
+ genVal(ProcessInMemory, ProcessInMemory);
+ genVal(ProcessOutOfMemory, ProcessOutOfMemory);
+ genVal(ProcessInTransition, ProcessInTransition);
+
+ //
+ // Thread state enumerated type definitions.
+ //
+
+ genCom("Thread State Enumerated Type Values");
+
+ genVal(Initialized, Initialized);
+ genVal(Ready, Ready);
+ genVal(Running, Running);
+ genVal(Standby, Standby);
+ genVal(Terminated, Terminated);
+ genVal(Waiting, Waiting);
+
+ //
+ // Wait reason and wait type enumerated type definitions.
+ //
+
+ EnableInc(HAL);
+
+ genCom("Wait Reason and Wait Type Enumerated Type Values");
+
+ genVal(WrExecutive, Executive);
+
+ DisableInc(HAL);
+
+ genVal(WrEventPair, WrEventPair);
+ genVal(WaitAny, WaitAny);
+ genVal(WaitAll, WaitAll);
+
+ //
+ // APC state structure offset definitions.
+ //
+
+ genCom("Apc State Structure Offset Definitions");
+
+ genDef(As, KAPC_STATE, ApcListHead);
+ genDef(As, KAPC_STATE, Process);
+ genDef(As, KAPC_STATE, KernelApcInProgress);
+ genDef(As, KAPC_STATE, KernelApcPending);
+ genDef(As, KAPC_STATE, UserApcPending);
+
+ //
+ // Bug check code definitions
+ //
+
+ EnableInc(HAL);
+
+ genCom("Bug Check Code Definitions");
+
+ genVal(APC_INDEX_MISMATCH, APC_INDEX_MISMATCH);
+ genVal(DATA_BUS_ERROR, DATA_BUS_ERROR);
+ genVal(DATA_COHERENCY_EXCEPTION, DATA_COHERENCY_EXCEPTION);
+ genVal(HAL1_INITIALIZATION_FAILED, HAL1_INITIALIZATION_FAILED);
+ genVal(INSTRUCTION_BUS_ERROR, INSTRUCTION_BUS_ERROR);
+ genVal(INSTRUCTION_COHERENCY_EXCEPTION, INSTRUCTION_COHERENCY_EXCEPTION);
+ genVal(INTERRUPT_EXCEPTION_NOT_HANDLED, INTERRUPT_EXCEPTION_NOT_HANDLED);
+ genVal(INTERRUPT_UNWIND_ATTEMPTED, INTERRUPT_UNWIND_ATTEMPTED);
+ genVal(INVALID_AFFINITY_SET, INVALID_AFFINITY_SET);
+ genVal(INVALID_DATA_ACCESS_TRAP, INVALID_DATA_ACCESS_TRAP);
+ genVal(IRQL_GT_ZERO_AT_SYSTEM_SERVICE, IRQL_GT_ZERO_AT_SYSTEM_SERVICE);
+ genVal(IRQL_NOT_LESS_OR_EQUAL, IRQL_NOT_LESS_OR_EQUAL);
+ genVal(KMODE_EXCEPTION_NOT_HANDLED, KMODE_EXCEPTION_NOT_HANDLED);
+ genVal(NMI_HARDWARE_FAILURE, NMI_HARDWARE_FAILURE);
+ genVal(NO_USER_MODE_CONTEXT, NO_USER_MODE_CONTEXT);
+ genVal(PAGE_FAULT_WITH_INTERRUPTS_OFF, PAGE_FAULT_WITH_INTERRUPTS_OFF);
+ genVal(PANIC_STACK_SWITCH, PANIC_STACK_SWITCH);
+ genVal(SPIN_LOCK_INIT_FAILURE, SPIN_LOCK_INIT_FAILURE);
+ genVal(SYSTEM_EXIT_OWNED_MUTEX, SYSTEM_EXIT_OWNED_MUTEX);
+ genVal(SYSTEM_SERVICE_EXCEPTION, SYSTEM_SERVICE_EXCEPTION);
+ genVal(SYSTEM_UNWIND_PREVIOUS_USER, SYSTEM_UNWIND_PREVIOUS_USER);
+ genVal(TRAP_CAUSE_UNKNOWN, TRAP_CAUSE_UNKNOWN);
+ genVal(UNEXPECTED_KERNEL_MODE_TRAP, UNEXPECTED_KERNEL_MODE_TRAP);
+
+ DisableInc(HAL);
+
+ //
+ // Breakpoint types
+ //
+
+ EnableInc(HAL);
+
+ genCom("Breakpoint type definitions");
+
+ genVal(DBG_STATUS_CONTROL_C, DBG_STATUS_CONTROL_C);
+
+ DisableInc(HAL);
+
+ //
+ // Client Id structure offset definitions.
+ //
+
+ genCom("Client Id Structure Offset Definitions");
+
+ genDef(Cid, CLIENT_ID, UniqueProcess);
+ genDef(Cid, CLIENT_ID, UniqueThread);
+
+ //
+ // Critical section structure offset definitions.
+ //
+
+ genCom("Critical Section Structure Offset Definitions");
+
+ genDef(Cs, RTL_CRITICAL_SECTION, DebugInfo);
+ genDef(Cs, RTL_CRITICAL_SECTION, LockCount);
+ genDef(Cs, RTL_CRITICAL_SECTION, RecursionCount);
+ genDef(Cs, RTL_CRITICAL_SECTION, OwningThread);
+ genDef(Cs, RTL_CRITICAL_SECTION, LockSemaphore);
+
+ //
+ // Critical section debug information structure offset definitions.
+ //
+
+ genCom("Critical Section Debug Information Structure Offset Definitions");
+
+ genDef(Cs, RTL_CRITICAL_SECTION_DEBUG, Type);
+ genDef(Cs, RTL_CRITICAL_SECTION_DEBUG, CreatorBackTraceIndex);
+ genDef(Cs, RTL_CRITICAL_SECTION_DEBUG, CriticalSection);
+ genDef(Cs, RTL_CRITICAL_SECTION_DEBUG, ProcessLocksList);
+ genDef(Cs, RTL_CRITICAL_SECTION_DEBUG, EntryCount);
+ genDef(Cs, RTL_CRITICAL_SECTION_DEBUG, ContentionCount);
+
+ //
+ // Exception dispatcher context structure offset definitions.
+ //
+
+#if defined(_ALPHA_) || defined(_MIPS_) || defined(_PPC_)
+
+ genCom("Dispatcher Context Structure Offset Definitions");
+
+ genDef(Dc, DISPATCHER_CONTEXT, ControlPc);
+ genDef(Dc, DISPATCHER_CONTEXT, FunctionEntry);
+ genDef(Dc, DISPATCHER_CONTEXT, EstablisherFrame);
+ genDef(Dc, DISPATCHER_CONTEXT, ContextRecord);
+
+#endif
+
+ //
+ // Exception record offset, flag, and enumerated type definitions.
+ //
+
+ EnableInc(HAL);
+
+ genCom("Exception Record Offset, Flag, and Enumerated Type Definitions");
+
+ genVal(EXCEPTION_NONCONTINUABLE, EXCEPTION_NONCONTINUABLE);
+ genVal(EXCEPTION_UNWINDING, EXCEPTION_UNWINDING);
+ genVal(EXCEPTION_EXIT_UNWIND, EXCEPTION_EXIT_UNWIND);
+ genVal(EXCEPTION_STACK_INVALID, EXCEPTION_STACK_INVALID);
+ genVal(EXCEPTION_NESTED_CALL, EXCEPTION_NESTED_CALL);
+ genVal(EXCEPTION_TARGET_UNWIND, EXCEPTION_TARGET_UNWIND);
+ genVal(EXCEPTION_COLLIDED_UNWIND, EXCEPTION_COLLIDED_UNWIND);
+ genVal(EXCEPTION_UNWIND, EXCEPTION_UNWIND);
+ genVal(EXCEPTION_EXECUTE_HANDLER, EXCEPTION_EXECUTE_HANDLER);
+ genVal(EXCEPTION_CONTINUE_SEARCH, EXCEPTION_CONTINUE_SEARCH);
+ genVal(EXCEPTION_CONTINUE_EXECUTION, EXCEPTION_CONTINUE_EXECUTION);
+
+#if defined(_X86_)
+
+ genVal(EXCEPTION_CHAIN_END, (ULONG)EXCEPTION_CHAIN_END);
+
+#endif
+
+ genSpc();
+
+ genVal(ExceptionContinueExecution, ExceptionContinueExecution);
+ genVal(ExceptionContinueSearch, ExceptionContinueSearch);
+ genVal(ExceptionNestedException, ExceptionNestedException);
+ genVal(ExceptionCollidedUnwind, ExceptionCollidedUnwind);
+
+ genSpc();
+
+ genDef(Er, EXCEPTION_RECORD, ExceptionCode);
+ genDef(Er, EXCEPTION_RECORD, ExceptionFlags);
+ genDef(Er, EXCEPTION_RECORD, ExceptionRecord);
+ genDef(Er, EXCEPTION_RECORD, ExceptionAddress);
+ genDef(Er, EXCEPTION_RECORD, NumberParameters);
+ genDef(Er, EXCEPTION_RECORD, ExceptionInformation);
+ genVal(ExceptionRecordLength, (sizeof(EXCEPTION_RECORD) + 15) & (~15));
+
+ DisableInc(HAL);
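+
+ //
+ // N.B. ExceptionRecordLength above (and the object lengths further below,
+ // such as ThreadObjectLength and ProcessObjectLength) round a structure
+ // size up to the next multiple of 16 with the ((x + 15) & ~15) idiom. As
+ // a worked illustration with made-up sizes: (80 + 15) & ~15 == 80, while
+ // (84 + 15) & ~15 == 96.
+ //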
+
+ //
+ // Fast Mutex structure offset definitions.
+ //
+
+ EnableInc(HAL);
+
+ genCom("Fast Mutex Structure Offset Definitions");
+
+ genDef(Fm, FAST_MUTEX, Count);
+ genDef(Fm, FAST_MUTEX, Owner);
+ genDef(Fm, FAST_MUTEX, Contention);
+ genDef(Fm, FAST_MUTEX, Event);
+ genDef(Fm, FAST_MUTEX, OldIrql);
+
+ //
+ // Interrupt priority request level definitions
+ //
+
+ genCom("Interrupt Priority Request Level Definitions");
+
+ genVal(APC_LEVEL, APC_LEVEL);
+ genVal(DISPATCH_LEVEL, DISPATCH_LEVEL);
+
+#if defined(_X86_)
+
+ genVal(CLOCK1_LEVEL, CLOCK1_LEVEL);
+ genVal(CLOCK2_LEVEL, CLOCK2_LEVEL);
+
+#endif
+
+ genVal(IPI_LEVEL, IPI_LEVEL);
+ genVal(POWER_LEVEL, POWER_LEVEL);
+ genVal(PROFILE_LEVEL, PROFILE_LEVEL);
+ genVal(HIGH_LEVEL, HIGH_LEVEL);
+ genVal(SYNCH_LEVEL, SYNCH_LEVEL);
+
+ //
+ // Large integer structure offset definitions.
+ //
+
+ genCom("Large Integer Structure Offset Definitions");
+
+ genDef(Li, LARGE_INTEGER, LowPart);
+ genDef(Li, LARGE_INTEGER, HighPart);
+
+ //
+ // List entry structure offset definitions.
+ //
+
+ genCom("List Entry Structure Offset Definitions");
+
+ genDef(Ls, LIST_ENTRY, Flink);
+ genDef(Ls, LIST_ENTRY, Blink);
+
+ //
+ // String structure offset definitions.
+ //
+
+ genCom("String Structure Offset Definitions");
+
+ genDef(Str, STRING, Length);
+ genDef(Str, STRING, MaximumLength);
+ genDef(Str, STRING, Buffer);
+
+ //
+ // System time structure offset definitions.
+ //
+
+#if defined(_MIPS_) || defined(_PPC_) || defined(_X86_)
+
+ genCom("System Time Structure Offset Definitions");
+
+ genAlt(StLowTime, KSYSTEM_TIME, LowPart);
+ genDef(St, KSYSTEM_TIME, High1Time);
+ genDef(St, KSYSTEM_TIME, High2Time);
+
+#endif
+
+ //
+ // Time structure offset definitions.
+ //
+
+ genCom("Time Structure Offset Definitions");
+
+ genAlt(TmLowTime, LARGE_INTEGER, LowPart);
+ genAlt(TmHighTime, LARGE_INTEGER , HighPart);
+
+ DisableInc(HAL);
+
+ //
+ // Thread switch counter structure offset definitions.
+ //
+
+ genCom("Thread Switch Counter Offset Definitions");
+
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, FindAny);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, FindIdeal);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, FindLast);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, IdleAny);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, IdleCurrent);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, IdleIdeal);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, IdleLast);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, PreemptAny);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, PreemptCurrent);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, PreemptLast);
+ genDef(Tw, KTHREAD_SWITCH_COUNTERS, SwitchToIdle);
+
+ //
+ // Status code definitions
+ //
+
+ genCom("Status Code Definitions");
+
+#if defined(_ALPHA_)
+
+ genVal(STATUS_ALPHA_ARITHMETIC_EXCEPTION, STATUS_ALPHA_ARITHMETIC_EXCEPTION);
+ genVal(STATUS_ALPHA_BAD_VIRTUAL_ADDRESS, STATUS_ALPHA_BAD_VIRTUAL_ADDRESS);
+ genVal(STATUS_ALPHA_FLOATING_NOT_IMPLEMENTED, STATUS_ALPHA_FLOATING_NOT_IMPLEMENTED);
+ genVal(STATUS_ALPHA_GENTRAP, STATUS_ALPHA_GENTRAP);
+ genVal(STATUS_ALPHA_MACHINE_CHECK, (DATA_BUS_ERROR | 0xdfff0000));
+
+#endif
+
+ genVal(STATUS_ACCESS_VIOLATION, STATUS_ACCESS_VIOLATION);
+ genVal(STATUS_ARRAY_BOUNDS_EXCEEDED, STATUS_ARRAY_BOUNDS_EXCEEDED);
+ genVal(STATUS_BAD_COMPRESSION_BUFFER, STATUS_BAD_COMPRESSION_BUFFER);
+ genVal(STATUS_BREAKPOINT, STATUS_BREAKPOINT);
+ genVal(STATUS_DATATYPE_MISALIGNMENT, STATUS_DATATYPE_MISALIGNMENT);
+ genVal(STATUS_FLOAT_DENORMAL_OPERAND, STATUS_FLOAT_DENORMAL_OPERAND);
+ genVal(STATUS_FLOAT_DIVIDE_BY_ZERO, STATUS_FLOAT_DIVIDE_BY_ZERO);
+ genVal(STATUS_FLOAT_INEXACT_RESULT, STATUS_FLOAT_INEXACT_RESULT);
+ genVal(STATUS_FLOAT_INVALID_OPERATION, STATUS_FLOAT_INVALID_OPERATION);
+ genVal(STATUS_FLOAT_OVERFLOW, STATUS_FLOAT_OVERFLOW);
+ genVal(STATUS_FLOAT_STACK_CHECK, STATUS_FLOAT_STACK_CHECK);
+ genVal(STATUS_FLOAT_UNDERFLOW, STATUS_FLOAT_UNDERFLOW);
+ genVal(STATUS_GUARD_PAGE_VIOLATION, STATUS_GUARD_PAGE_VIOLATION);
+ genVal(STATUS_ILLEGAL_FLOAT_CONTEXT, STATUS_ILLEGAL_FLOAT_CONTEXT);
+ genVal(STATUS_ILLEGAL_INSTRUCTION, STATUS_ILLEGAL_INSTRUCTION);
+ genVal(STATUS_INSTRUCTION_MISALIGNMENT, STATUS_INSTRUCTION_MISALIGNMENT);
+ genVal(STATUS_INVALID_HANDLE, STATUS_INVALID_HANDLE);
+ genVal(STATUS_INVALID_LOCK_SEQUENCE, STATUS_INVALID_LOCK_SEQUENCE);
+ genVal(STATUS_INVALID_OWNER, STATUS_INVALID_OWNER);
+ genVal(STATUS_INVALID_PARAMETER_1, STATUS_INVALID_PARAMETER_1);
+ genVal(STATUS_INVALID_SYSTEM_SERVICE, STATUS_INVALID_SYSTEM_SERVICE);
+ genVal(STATUS_INTEGER_DIVIDE_BY_ZERO, STATUS_INTEGER_DIVIDE_BY_ZERO);
+ genVal(STATUS_INTEGER_OVERFLOW, STATUS_INTEGER_OVERFLOW);
+ genVal(STATUS_IN_PAGE_ERROR, STATUS_IN_PAGE_ERROR);
+ genVal(STATUS_KERNEL_APC, STATUS_KERNEL_APC);
+ genVal(STATUS_LONGJUMP, STATUS_LONGJUMP);
+ genVal(STATUS_NO_CALLBACK_ACTIVE, STATUS_NO_CALLBACK_ACTIVE);
+ genVal(STATUS_NO_EVENT_PAIR, STATUS_NO_EVENT_PAIR);
+ genVal(STATUS_PRIVILEGED_INSTRUCTION, STATUS_PRIVILEGED_INSTRUCTION);
+ genVal(STATUS_SINGLE_STEP, STATUS_SINGLE_STEP);
+ genVal(STATUS_STACK_OVERFLOW, STATUS_STACK_OVERFLOW);
+ genVal(STATUS_SUCCESS, STATUS_SUCCESS);
+ genVal(STATUS_THREAD_IS_TERMINATING, STATUS_THREAD_IS_TERMINATING);
+ genVal(STATUS_TIMEOUT, STATUS_TIMEOUT);
+ genVal(STATUS_UNWIND, STATUS_UNWIND);
+ genVal(STATUS_WAKE_SYSTEM_DEBUGGER, STATUS_WAKE_SYSTEM_DEBUGGER);
+
+ //
+ // Define kernel object structure definitions.
+ //
+ // APC object structure definitions.
+ //
+
+ genCom("APC Object Structure Offset Definitions");
+
+ genDef(Ap, KAPC, Type);
+ genDef(Ap, KAPC, Size);
+ genDef(Ap, KAPC, Thread);
+ genDef(Ap, KAPC, ApcListEntry);
+ genDef(Ap, KAPC, KernelRoutine);
+ genDef(Ap, KAPC, RundownRoutine);
+ genDef(Ap, KAPC, NormalRoutine);
+ genDef(Ap, KAPC, NormalContext);
+ genDef(Ap, KAPC, SystemArgument1);
+ genDef(Ap, KAPC, SystemArgument2);
+ genDef(Ap, KAPC, ApcStateIndex);
+ genDef(Ap, KAPC, ApcMode);
+ genDef(Ap, KAPC, Inserted);
+ genVal(ApcObjectLength, sizeof(KAPC));
+
+ //
+ // DPC Object structure definitions.
+ //
+
+ EnableInc(HAL);
+
+ genCom("DPC object Structure Offset Definitions");
+
+ genDef(Dp, KDPC, Type);
+ genDef(Dp, KDPC, Number);
+ genDef(Dp, KDPC, Importance);
+ genDef(Dp, KDPC, DpcListEntry);
+ genDef(Dp, KDPC, DeferredRoutine);
+ genDef(Dp, KDPC, DeferredContext);
+ genDef(Dp, KDPC, SystemArgument1);
+ genDef(Dp, KDPC, SystemArgument2);
+ genDef(Dp, KDPC, Lock);
+ genVal(DpcObjectLength, sizeof(KDPC));
+
+ DisableInc(HAL);
+
+ //
+ // Device queue object structure definitions.
+ //
+
+ genCom("Device Queue Object Structure Offset Definitions");
+
+ genDef(Dv, KDEVICE_QUEUE, Type);
+ genDef(Dv, KDEVICE_QUEUE, Size);
+ genDef(Dv, KDEVICE_QUEUE, DeviceListHead);
+ genAlt(DvSpinLock, KDEVICE_QUEUE, Lock);
+ genDef(Dv, KDEVICE_QUEUE, Busy);
+ genVal(DeviceQueueObjectLength, sizeof(KDEVICE_QUEUE));
+
+ //
+ // Device object entry structure definitions.
+ //
+
+ genCom("Device Queue Entry Structure Offset Definitions");
+
+ genDef(De, KDEVICE_QUEUE_ENTRY, DeviceListEntry);
+ genDef(De, KDEVICE_QUEUE_ENTRY, SortKey);
+ genDef(De, KDEVICE_QUEUE_ENTRY, Inserted);
+ genVal(DeviceQueueEntryLength, sizeof(KDEVICE_QUEUE_ENTRY));
+
+ //
+ // Event object structure definitions.
+ //
+
+ genCom("Event Object Structure Offset Definitions");
+
+ genDef(Ev, DISPATCHER_HEADER, Type);
+ genDef(Ev, DISPATCHER_HEADER, Size);
+ genDef(Ev, DISPATCHER_HEADER, SignalState);
+ genAlt(EvWaitListHead, KEVENT, Header.WaitListHead);
+ genVal(EventObjectLength, sizeof(KEVENT));
+
+ //
+ // Event pair object structure definitions.
+ //
+
+ genCom("Event Pair Object Structure Offset Definitions");
+
+ genDef(Ep, KEVENT_PAIR, Type);
+ genDef(Ep, KEVENT_PAIR, Size);
+ genDef(Ep, KEVENT_PAIR, EventLow);
+ genDef(Ep, KEVENT_PAIR, EventHigh);
+
+#if defined(_MIPS_) || defined(_PPC_)
+
+ EventOffset = OFFSET(KEVENT_PAIR, EventHigh) - OFFSET(KEVENT_PAIR, EventLow);
+ if ((EventOffset & (EventOffset - 1)) != 0) {
+ fprintf(stderr, "GENXX: Event offset not log2N\n");
+ }
+
+ genVal(SET_LOW_WAIT_HIGH, - (EventOffset * 2));
+ genVal(SET_HIGH_WAIT_LOW, - EventOffset);
+ genVal(SET_EVENT_PAIR_MASK, EventOffset);
+
+#endif
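+
+ //
+ // N.B. The (EventOffset & (EventOffset - 1)) test above is the usual
+ // power-of-two check: it yields zero only when at most one bit is set.
+ // For example (illustrative values only), 0x10 & 0x0f == 0 passes the
+ // check, while 0x18 & 0x17 == 0x10 triggers the error message.
+ //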
+
+ //
+ // Interrupt object structure definitions.
+ //
+
+#if defined(_ALPHA_) || defined(_MIPS_) || defined(_PPC_)
+
+ EnableInc(HAL);
+
+#endif
+
+ genCom("Interrupt Object Structure Offset Definitions");
+
+ genVal(InLevelSensitive, LevelSensitive);
+ genVal(InLatched, Latched);
+
+ genSpc();
+
+ genDef(In, KINTERRUPT, Type);
+ genDef(In, KINTERRUPT, Size);
+ genDef(In, KINTERRUPT, InterruptListEntry);
+ genDef(In, KINTERRUPT, ServiceRoutine);
+ genDef(In, KINTERRUPT, ServiceContext);
+ genDef(In, KINTERRUPT, SpinLock);
+ genDef(In, KINTERRUPT, ActualLock);
+ genDef(In, KINTERRUPT, DispatchAddress);
+ genDef(In, KINTERRUPT, Vector);
+ genDef(In, KINTERRUPT, Irql);
+ genDef(In, KINTERRUPT, SynchronizeIrql);
+ genDef(In, KINTERRUPT, FloatingSave);
+ genDef(In, KINTERRUPT, Connected);
+ genDef(In, KINTERRUPT, Number);
+ genDef(In, KINTERRUPT, Mode);
+ genDef(In, KINTERRUPT, ShareVector);
+ genDef(In, KINTERRUPT, DispatchCode);
+ genVal(InterruptObjectLength, sizeof(KINTERRUPT));
+
+#if defined(_X86_)
+
+ genSpc();
+
+ genVal(NORMAL_DISPATCH_LENGTH, NORMAL_DISPATCH_LENGTH * sizeof(ULONG));
+ genVal(DISPATCH_LENGTH, DISPATCH_LENGTH * sizeof(ULONG));
+
+#endif
+
+#if defined(_ALPHA_) || defined(_MIPS_) || defined(_PPC_)
+
+ DisableInc(HAL);
+
+#endif
+
+ //
+ // Process object structure offset definitions.
+ //
+
+ genCom("Process Object Structure Offset Definitions");
+
+ genDef(Pr, DISPATCHER_HEADER, Type);
+ genDef(Pr, DISPATCHER_HEADER, Size);
+ genDef(Pr, DISPATCHER_HEADER, SignalState);
+ genDef(Pr, KPROCESS, ProfileListHead);
+ genDef(Pr, KPROCESS, DirectoryTableBase);
+
+#if defined(_X86_)
+
+ genDef(Pr, KPROCESS, LdtDescriptor);
+ genDef(Pr, KPROCESS, Int21Descriptor);
+ genDef(Pr, KPROCESS, IopmOffset);
+ genDef(Pr, KPROCESS, Iopl);
+ genDef(Pr, KPROCESS, VdmFlag);
+
+#endif
+
+#if defined(_PPC_)
+
+ genDef(Pr, KPROCESS, ProcessPid);
+ genDef(Pr, KPROCESS, ProcessSequence);
+
+#endif
+
+ genDef(Pr, KPROCESS, ActiveProcessors);
+ genDef(Pr, KPROCESS, KernelTime);
+ genDef(Pr, KPROCESS, UserTime);
+ genDef(Pr, KPROCESS, ReadyListHead);
+ genDef(Pr, KPROCESS, SwapListEntry);
+ genDef(Pr, KPROCESS, ThreadListHead);
+ genDef(Pr, KPROCESS, ProcessLock);
+ genDef(Pr, KPROCESS, Affinity);
+ genDef(Pr, KPROCESS, StackCount);
+ genDef(Pr, KPROCESS, BasePriority);
+ genDef(Pr, KPROCESS, ThreadQuantum);
+ genDef(Pr, KPROCESS, AutoAlignment);
+ genDef(Pr, KPROCESS, State);
+ genVal(ProcessObjectLength, ((sizeof(KPROCESS) + 15) & ~15));
+ genVal(ExtendedProcessObjectLength, ((sizeof(EPROCESS) + 15) & ~15));
+
+ //
+ // Profile object structure offset definitions.
+ //
+
+ genCom("Profile Object Structure Offset Definitions");
+
+ genDef(Pf, KPROFILE, Type);
+ genDef(Pf, KPROFILE, Size);
+ genDef(Pf, KPROFILE, ProfileListEntry);
+ genDef(Pf, KPROFILE, Process);
+ genDef(Pf, KPROFILE, RangeBase);
+ genDef(Pf, KPROFILE, RangeLimit);
+ genDef(Pf, KPROFILE, BucketShift);
+ genDef(Pf, KPROFILE, Buffer);
+ genDef(Pf, KPROFILE, Segment);
+ genDef(Pf, KPROFILE, Affinity);
+ genDef(Pf, KPROFILE, Source);
+ genDef(Pf, KPROFILE, Started);
+ genVal(ProfileObjectLength, sizeof(KPROFILE));
+
+ //
+ // Queue object structure offset definitions.
+ //
+
+ genCom("Queue Object Structure Offset Definitions");
+
+ genDef(Qu, DISPATCHER_HEADER, Type);
+ genDef(Qu, DISPATCHER_HEADER, Size);
+ genDef(Qu, DISPATCHER_HEADER, SignalState);
+ genDef(Qu, KQUEUE, EntryListHead);
+ genDef(Qu, KQUEUE, CurrentCount);
+ genDef(Qu, KQUEUE, MaximumCount);
+ genDef(Qu, KQUEUE, ThreadListHead);
+ genVal(QueueObjectLength, sizeof(KQUEUE));
+
+ //
+ // Thread object structure offset definitions
+ //
+
+ genCom("Thread Object Structure Offset Definitions");
+
+ genDef(Ee, EEVENT_PAIR, KernelEventPair);
+ genDef(Et, ETHREAD, Cid);
+ genDef(Et, ETHREAD, EventPair);
+ genDef(Et, ETHREAD, PerformanceCountLow);
+ genDef(Et, ETHREAD, PerformanceCountHigh);
+ genVal(EtEthreadLength, ((sizeof(ETHREAD) + 15) & ~15));
+
+ genSpc();
+
+ genDef(Th, DISPATCHER_HEADER, Type);
+ genDef(Th, DISPATCHER_HEADER, Size);
+ genDef(Th, DISPATCHER_HEADER, SignalState);
+ genDef(Th, KTHREAD, MutantListHead);
+ genDef(Th, KTHREAD, InitialStack);
+ genDef(Th, KTHREAD, StackLimit);
+ genDef(Th, KTHREAD, Teb);
+ genDef(Th, KTHREAD, TlsArray);
+ genDef(Th, KTHREAD, KernelStack);
+ genDef(Th, KTHREAD, DebugActive);
+ genDef(Th, KTHREAD, State);
+ genDef(Th, KTHREAD, Alerted);
+ genDef(Th, KTHREAD, Iopl);
+ genDef(Th, KTHREAD, NpxState);
+ genDef(Th, KTHREAD, Saturation);
+ genDef(Th, KTHREAD, Priority);
+ genDef(Th, KTHREAD, ApcState);
+ genDef(Th, KTHREAD, ContextSwitches);
+ genDef(Th, KTHREAD, WaitStatus);
+ genDef(Th, KTHREAD, WaitIrql);
+ genDef(Th, KTHREAD, WaitMode);
+ genDef(Th, KTHREAD, WaitNext);
+ genDef(Th, KTHREAD, WaitReason);
+ genDef(Th, KTHREAD, WaitBlockList);
+ genDef(Th, KTHREAD, WaitListEntry);
+ genDef(Th, KTHREAD, WaitTime);
+ genDef(Th, KTHREAD, BasePriority);
+ genDef(Th, KTHREAD, DecrementCount);
+ genDef(Th, KTHREAD, PriorityDecrement);
+ genDef(Th, KTHREAD, Quantum);
+ genDef(Th, KTHREAD, WaitBlock);
+ genDef(Th, KTHREAD, KernelApcDisable);
+ genDef(Th, KTHREAD, UserAffinity);
+ genDef(Th, KTHREAD, SystemAffinityActive);
+ genDef(Th, KTHREAD, ServiceTable);
+// genDef(Th, KTHREAD, Channel);
+// genDef(Th, KTHREAD, Section);
+// genDef(Th, KTHREAD, SystemView);
+// genDef(Th, KTHREAD, ThreadView);
+ genDef(Th, KTHREAD, Queue);
+ genDef(Th, KTHREAD, ApcQueueLock);
+ genDef(Th, KTHREAD, Timer);
+ genDef(Th, KTHREAD, QueueListEntry);
+ genDef(Th, KTHREAD, Affinity);
+ genDef(Th, KTHREAD, Preempted);
+ genDef(Th, KTHREAD, ProcessReadyQueue);
+ genDef(Th, KTHREAD, KernelStackResident);
+ genDef(Th, KTHREAD, NextProcessor);
+ genDef(Th, KTHREAD, CallbackStack);
+ genDef(Th, KTHREAD, Win32Thread);
+ genDef(Th, KTHREAD, TrapFrame);
+ genDef(Th, KTHREAD, ApcStatePointer);
+ genDef(Th, KTHREAD, PreviousMode);
+ genDef(Th, KTHREAD, EnableStackSwap);
+ genDef(Th, KTHREAD, LargeStack);
+ genDef(Th, KTHREAD, KernelTime);
+ genDef(Th, KTHREAD, UserTime);
+ genDef(Th, KTHREAD, SavedApcState);
+ genDef(Th, KTHREAD, Alertable);
+ genDef(Th, KTHREAD, ApcStateIndex);
+ genDef(Th, KTHREAD, ApcQueueable);
+ genDef(Th, KTHREAD, AutoAlignment);
+ genDef(Th, KTHREAD, StackBase);
+ genDef(Th, KTHREAD, SuspendApc);
+ genDef(Th, KTHREAD, SuspendSemaphore);
+ genDef(Th, KTHREAD, ThreadListEntry);
+ genDef(Th, KTHREAD, FreezeCount);
+ genDef(Th, KTHREAD, SuspendCount);
+ genDef(Th, KTHREAD, IdealProcessor);
+ genDef(Th, KTHREAD, DisableBoost);
+ genVal(ThreadObjectLength, ((sizeof(KTHREAD) + 15) & ~15));
+ genVal(ExtendedThreadObjectLength, ((sizeof(ETHREAD) + 15) & ~15));
+
+ genSpc();
+
+ genVal(EVENT_WAIT_BLOCK_OFFSET, OFFSET(KTHREAD, WaitBlock) + (sizeof(KWAIT_BLOCK) * EVENT_WAIT_BLOCK));
+
+#if defined(_X86_)
+
+ genVal(NPX_STATE_NOT_LOADED, NPX_STATE_NOT_LOADED);
+ genVal(NPX_STATE_LOADED, NPX_STATE_LOADED);
+
+#endif
+
+ //
+ // Timer object structure offset definitions
+ //
+
+ genCom("Timer object Structure Offset Definitions");
+
+ genDef(Ti, DISPATCHER_HEADER, Type);
+ genDef(Ti, DISPATCHER_HEADER, Size);
+ genDef(Ti, DISPATCHER_HEADER, Inserted);
+ genDef(Ti, DISPATCHER_HEADER, SignalState);
+ genDef(Ti, KTIMER, DueTime);
+ genDef(Ti, KTIMER, TimerListEntry);
+ genDef(Ti, KTIMER, Dpc);
+ genDef(Ti, KTIMER, Period);
+ genVal(TimerObjectLength, sizeof(KTIMER));
+
+ genSpc();
+
+ genVal(TIMER_TABLE_SIZE, TIMER_TABLE_SIZE);
+
+ //
+ // Wait block structure offset definitions
+ //
+
+ genCom("Wait Block Structure Offset Definitions");
+
+ genDef(Wb, KWAIT_BLOCK, WaitListEntry);
+ genDef(Wb, KWAIT_BLOCK, Thread);
+ genDef(Wb, KWAIT_BLOCK, Object);
+ genDef(Wb, KWAIT_BLOCK, NextWaitBlock);
+ genDef(Wb, KWAIT_BLOCK, WaitKey);
+ genDef(Wb, KWAIT_BLOCK, WaitType);
+
+ //
+ // Fiber structure offset definitions.
+ //
+
+ genCom("Fiber Structure Offset Definitions");
+
+ genDef(Fb, FIBER, FiberData);
+ genDef(Fb, FIBER, ExceptionList);
+ genDef(Fb, FIBER, StackBase);
+ genDef(Fb, FIBER, StackLimit);
+ genDef(Fb, FIBER, DeallocationStack);
+ genDef(Fb, FIBER, FiberContext);
+
+ //
+ // Process environment block structure offset definitions.
+ //
+
+ genCom("Process Environment Block Structure Offset Definitions");
+
+ genDef(Pe, PEB, KernelCallbackTable);
+
+ //
+ // Define System Service Descriptor Table structures.
+ //
+
+ genCom("System Service Descriptor Table Structure Definitions");
+
+ genVal(NUMBER_SERVICE_TABLES, NUMBER_SERVICE_TABLES);
+ genVal(SERVICE_NUMBER_MASK, SERVICE_NUMBER_MASK);
+ genVal(SERVICE_TABLE_SHIFT, SERVICE_TABLE_SHIFT);
+ genVal(SERVICE_TABLE_MASK, SERVICE_TABLE_MASK);
+ genVal(SERVICE_TABLE_TEST, SERVICE_TABLE_TEST);
+
+ genSpc();
+
+ genDef(Sd, KSERVICE_TABLE_DESCRIPTOR, Base);
+ genDef(Sd, KSERVICE_TABLE_DESCRIPTOR, Count);
+ genDef(Sd, KSERVICE_TABLE_DESCRIPTOR, Limit);
+ genDef(Sd, KSERVICE_TABLE_DESCRIPTOR, Number);
+
+ //
+ // Common TEB structure offset definitions
+ //
+
+ genCom("Thread Environment Block Structure Offset Definitions");
+
+ genDef(Te, NT_TIB, StackBase);
+ genDef(Te, NT_TIB, StackLimit);
+ genDef(Te, NT_TIB, FiberData);
+ genDef(Te, TEB, EnvironmentPointer);
+ genDef(Te, TEB, ClientId);
+ genDef(Te, TEB, ActiveRpcHandle);
+ genDef(Te, TEB, ThreadLocalStoragePointer);
+ genAlt(TePeb, TEB, ProcessEnvironmentBlock);
+ genDef(Te, TEB, CsrClientThread);
+ genAlt(TeSoftFpcr, TEB, FpSoftwareStatusRegister);
+ genDef(Te, TEB, GdiClientPID);
+ genDef(Te, TEB, GdiClientTID);
+ genDef(Te, TEB, GdiThreadLocalInfo);
+ genDef(Te, TEB, glDispatchTable);
+ genDef(Te, TEB, glSectionInfo);
+ genDef(Te, TEB, glSection);
+ genDef(Te, TEB, glTable);
+ genDef(Te, TEB, glCurrentRC);
+ genDef(Te, TEB, glContext);
+ genDef(Te, TEB, DeallocationStack);
+ genDef(Te, TEB, GdiBatchCount);
+ genDef(Te, TEB, Instrumentation);
+
diff --git a/private/ntos/ke/i386/abios.h b/private/ntos/ke/i386/abios.h
new file mode 100644
index 000000000..98177131f
--- /dev/null
+++ b/private/ntos/ke/i386/abios.h
@@ -0,0 +1,147 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ abios.h
+
+Abstract:
+
+ This module contains the i386 kernel ABIOS specific header file.
+
+Author:
+
+ Shie-Lin Tzong (shielint) 22-May-1991
+
+Revision History:
+
+--*/
+
+//
+// Define public portion of the ABIOS Device Block
+//
+
+typedef struct _KDEVICE_BLOCK {
+ USHORT Length;
+ UCHAR Revision;
+ UCHAR SecondDeviceId;
+ USHORT LogicalId;
+ USHORT DeviceId;
+ USHORT NumberExclusivePortPairs;
+ USHORT NumberCommonPortPairs;
+} KDEVICE_BLOCK, *PKDEVICE_BLOCK;
+
+
+typedef struct _KABIOS_POINTER {
+ USHORT Offset;
+ USHORT Selector;
+} KABIOS_POINTER, *PKABIOS_POINTER;
+
+#pragma pack(1)
+
+//
+// ABIOS Function Transfer Table definition
+//
+
+typedef struct _KFUNCTION_TRANSFER_TABLE {
+ KABIOS_POINTER CommonRoutine[3];
+ USHORT FunctionCount;
+ USHORT Reserved;
+ KABIOS_POINTER SpecificRoutine;
+} KFUNCTION_TRANSFER_TABLE, *PKFUNCTION_TRANSFER_TABLE;
+
+
+//
+// ABIOS Common Data Area definitions
+//
+
+typedef struct _KDB_FTT_SECTION {
+ KABIOS_POINTER DeviceBlock;
+ KABIOS_POINTER FunctionTransferTable;
+} KDB_FTT_SECTION, *PKDB_FTT_SECTION;
+
+typedef struct _KCOMMON_DATA_AREA {
+ USHORT DataPointer0Offset;
+ USHORT NumberLids;
+ ULONG Reserved;
+ PKDB_FTT_SECTION DbFttPointer;
+} KCOMMON_DATA_AREA, *PKCOMMON_DATA_AREA;
+
+#pragma pack()
+
+//
+// Available GDT Entry
+//
+
+typedef struct _KFREE_GDT_ENTRY {
+ struct _KFREE_GDT_ENTRY *Flink;
+ ULONG BaseMid : 8;
+ ULONG Type : 5;
+ ULONG Dpl : 2;
+ ULONG Present : 1;
+ ULONG LimitHi : 4;
+ ULONG Sys : 1;
+ ULONG Reserved_0 : 1;
+ ULONG Default_Big : 1;
+ ULONG Granularity : 1;
+ ULONG BaseHi : 8;
+} KFREE_GDT_ENTRY, *PKFREE_GDT_ENTRY;
+
+//
+// Logical Id table entry
+//
+
+typedef struct _KLID_TABLE_ENTRY {
+ ULONG Owner;
+ ULONG OwnerCount;
+} KLID_TABLE_ENTRY, *PKLID_TABLE_ENTRY;
+
+#define LID_NO_SPECIFIC_OWNER 0xffffffff
+#define NUMBER_LID_TABLE_ENTRIES 1024
+
+//
+// Macro to extract the high byte of a short offset
+//
+
+#define HIGHBYTE(l) ((UCHAR)(((USHORT)(l)>>8) & 0xff))
+
+//
+// Macro to extract the low byte of a short offset
+//
+
+#define LOWBYTE(l) ((UCHAR)(l))
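+
+//
+// For example (illustrative only), for an offset of 0x1234,
+// HIGHBYTE(0x1234) == 0x12 and LOWBYTE(0x1234) == 0x34.
+//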
+
+//
+// The following selectors are reserved for 16 bit stack, code and
+// ABIOS Common Data Area.
+//
+
+#define KGDT_STACK16 0xf8
+#define KGDT_CODE16 0xf0
+#define KGDT_CDA16 0xe8
+#define KGDT_GDT_ALIAS 0x70
+
+//
+// Misc. definitions
+//
+
+#define RESERVED_GDT_ENTRIES 28
+
+//
+// External references
+//
+
+extern PKFREE_GDT_ENTRY KiAbiosGdtStart;
+extern PKFREE_GDT_ENTRY KiAbiosGdtEnd;
+extern PUCHAR KiEndOfCode16;
+
+extern
+VOID
+KiI386CallAbios(
+ IN KABIOS_POINTER AbiosFunction,
+ IN KABIOS_POINTER DeviceBlockPointer,
+ IN KABIOS_POINTER FunctionTransferTable,
+ IN KABIOS_POINTER RequestBlock
+ );
+
diff --git a/private/ntos/ke/i386/abiosa.asm b/private/ntos/ke/i386/abiosa.asm
new file mode 100644
index 000000000..68dca5c4f
--- /dev/null
+++ b/private/ntos/ke/i386/abiosa.asm
@@ -0,0 +1,615 @@
+ title "Abios Support Assembly Routines"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; abiosa.asm
+;
+; Abstract:
+;
+; This module implements assembly code for ABIOS support.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 25-May-1991
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+.386p
+ .xlist
+include ks386.inc
+include callconv.inc ; calling convention macros
+include i386\kimacro.inc
+ .list
+
+EXTRNP _KeRaiseIrql,2,IMPORT
+EXTRNP _KeLowerIrql,1,IMPORT
+EXTRNP _KeGetCurrentIrql,0,IMPORT
+extrn _KiStack16GdtEntry:DWORD
+
+OPERAND_OVERRIDE equ 66h
+ADDRESS_OVERRIDE equ 67h
+KGDT_CDA16 equ 0E8h
+
+;++
+;
+; STACK32_TO_STACK16
+;
+; Macro Description:
+;
+; This macro remaps current 32bit stack to 16bit stack.
+;
+; Arguments:
+;
+; None.
+;
+;--
+
+STACK32_TO_STACK16 macro
+
+ mov eax, fs:PcStackLimit ; [eax] = 16-bit stack selector base
+ mov edx, eax
+ mov ecx, _KiStack16GdtEntry
+ mov word ptr [ecx].KgdtBaseLow, ax
+ shr eax, 16
+ mov byte ptr [ecx].KgdtBaseMid, al
+ mov byte ptr [ecx].KgdtBaseHi, ah
+ mov eax, esp
+ sub eax, edx
+ cli
+ mov esp, eax
+ mov eax, KGDT_STACK16
+ mov ss, ax
+
+;
+; NOTE that we MUST leave interrupts off.
+; We'll turn them back on after we switch to 16 bit code.
+;
+
+endm
+
+;++
+;
+; STACK16_TO_STACK32
+;
+; Macro Description:
+;
+; This macro remaps the current 16bit stack back to the 32bit stack.
+;
+; Arguments:
+;
+; None.
+;
+;--
+
+STACK16_TO_STACK32 macro Stack32
+
+ db OPERAND_OVERRIDE
+ mov eax, esp
+ db OPERAND_OVERRIDE
+ db ADDRESS_OVERRIDE
+ add eax, fs:PcStackLimit
+ cli
+ db OPERAND_OVERRIDE
+ mov esp, eax
+ db OPERAND_OVERRIDE
+ mov eax, KGDT_R0_DATA
+ mov ss, ax
+ sti
+endm
+
+ page ,132
+ subttl "Abios Support Code"
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+;++
+; ULONG
+; KiAbiosGetGdt (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine returns the starting address of GDT of current processor.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; return Pcr->GDT
+;
+;--
+
+cPublicProc _KiAbiosGetGdt,0
+
+ mov eax, fs:PcGdt
+ stdRET _KiAbiosGetGdt
+
+stdENDP _KiAbiosGetGdt
+
+;++
+; VOID
+; KiI386CallAbios(
+; IN KABIOS_POINTER AbiosFunction,
+; IN KABIOS_POINTER DeviceBlockPointer,
+; IN KABIOS_POINTER FunctionTransferTable,
+; IN KABIOS_POINTER RequestBlock
+; )
+;
+; Routine Description:
+;
+; This function invokes an ABIOS service function for a device driver. This
+; routine executes at DISPATCH_LEVEL to prevent context swapping.
+;
+; N.B. We arrive here from KeI386AbiosCall with a 32bit CS. That is,
+; we're executing the code with cs:eip where cs contains a selector for a
+; 32bit flat segment. We want to get to a 16bit cs. That is, cs:ip.
+; The reason is that ABIOS runs in a 16 bit segment.
+; Before we can call an ABIOS service we must load the ss and cs segment
+; registers with selectors for 16bit segments. We start by pushing a far
+; pointer to a label in the macro and then doing a retf. This allows us
+; to fall through to the next instruction, but we're now executing
+; through cs:ip with a 16bit CS. Then, we remap our 32-bit stack to 16-bit
+; stack.
+;
+; Arguments:
+;
+; AbiosFunction - a 16:16 pointer to the abios service function.
+;
+; DeviceBlockPointer - a 16:16 pointer to Device Block.
+;
+; FunctionTransferTable - a 16:16 pointer to Function Transfer Table.
+;
+; RequestBlock - a 16:16 pointer to device driver's request block.
+;
+; Return Value:
+;
+; None.
+;--
+
+KacAbiosFunction equ [ebp + 8]
+KacDeviceBlock equ [ebp + 12]
+KacFunctionTable equ [ebp + 16]
+KacRequestBlock equ [ebp + 20]
+
+cPublicProc _KiI386CallAbios,4
+
+;
+; We're using a 32bit CS:EIP - go to a 16bit CS:IP
+; Note the base of the KGDT_CODE16 code selector is the flat address of the
+; _KiI386CallAbios routine.
+;
+
+ push ebp
+ mov ebp, esp
+ push ebx
+
+ stdCall _KeGetCurrentIrql
+ push eax ; Local Variable
+
+ cmp al, DISPATCH_LEVEL ; Is irql >= DISPATCH_LEVEL?
+ jae short Kac00
+
+; Raise to Dispatch Level
+ mov eax, esp
+ stdCall _KeRaiseIrql, <DISPATCH_LEVEL,eax>
+
+Kac00:
+
+;
+; Set up parameters on stack before remapping stack.
+;
+
+ push word ptr KGDT_CDA16 ; CDA anchor selector
+ push KacRequestBlock ; Request Block
+ push KacFunctionTable ; Func transfer table
+ push KacDeviceBlock ; Device Block
+ mov ebx, KacAbiosFunction ; (ebx)-> Abios Entry
+
+;
+; Remap current stack to 16:16 stack. The base of the 16bit stack selector is
+; the base of current kernel stack.
+;
+
+ STACK32_TO_STACK16 ; Switch to 16bit stack
+ push word ptr KGDT_CODE16
+IFDEF STD_CALL
+ push word ptr (offset FLAT:Kac40 - offset FLAT:_KiI386CallAbios@16)
+ push KGDT_CODE16
+ push offset FLAT:Kac30 - offset FLAT:_KiI386CallAbios@16
+ELSE
+ push word ptr (offset FLAT:Kac40 - offset FLAT:_KiI386CallAbios)
+ push KGDT_CODE16
+ push offset FLAT:Kac30 - offset FLAT:_KiI386CallAbios
+ENDIF
+ retf
+
+Kac30:
+
+;
+; Stack switching (from 32 to 16) turns interrupt off. We must turn it
+; back on.
+;
+
+ sti
+ push bx ; Yes, BX not EBX!
+ retf
+Kac40:
+ add esp, 14 ; pop out all the parameters
+
+ STACK16_TO_STACK32 ; switch back to 32 bit stack
+
+;
+; Pull callers flat return address off stack and push the
+; flat code selector followed by the return offset, then
+; execute a far return and we'll be back in the 32-bit code space.
+;
+
+ db OPERAND_OVERRIDE
+ push KGDT_R0_CODE
+ db OPERAND_OVERRIDE
+ push offset FLAT:Kac50
+ db OPERAND_OVERRIDE
+ retf
+Kac50:
+ pop eax ; [eax] = OldIrql
+ pop ebx ; restore ebx
+ cmp al, DISPATCH_LEVEL
+ jae short Kac60
+
+ stdCall _KeLowerIrql, <eax> ; Lower irql to original level
+Kac60:
+ pop ebp
+ stdRET _KiI386CallAbios
+
+stdENDP _KiI386CallAbios
+
+
+;; ********************************************************
+;;
+;; BEGIN - power_management
+;;
+;;
+
+;++
+; VOID
+; KeI386Call16BitFunction (
+; IN OUT PCONTEXT Regs
+; )
+;
+; Routine Description:
+;
+; This function calls the 16 bit function specified in the Regs.
+;
+; Parameters:
+;
+; Regs - supplies a pointer to the register context used to call the 16 bit function.
+;
+; NOTE: Caller must be at DPC_LEVEL
+;
+;--
+
+cPublicProc _KeI386Call16BitFunction,1
+
+ ; verify CurrentIrql
+ ; verify context flags
+
+ push ebp ; save nonvolatile registers
+ push ebx
+ push esi
+ push edi
+
+ mov ebx, dword ptr [esp + 20] ; (ebx)-> Context
+
+;
+; We're using a 32bit CS:EIP - go to a 16bit CS:IP
+; Note the base of the KGDT_CODE16 code selector is the flat address of the
+; _KiI386CallAbios routine.
+;
+
+;
+; Remap current stack to 16:16 stack. The base of the 16bit stack selector is
+; the base of current kernel stack.
+;
+
+ STACK32_TO_STACK16 ; Switch to 16bit stack
+ ;
+ ; Push return address from 16 bit function call to kernel
+ ;
+
+ push word ptr KGDT_CODE16
+ push word ptr (offset FLAT:Kbf40 - offset FLAT:_KiI386CallAbios@16)
+
+ ;
+ ; Load context to call with
+ ;
+
+ push word ptr [ebx].CsEFlags
+ push word ptr [ebx].CsSegCs
+ push word ptr [ebx].CsEip
+
+ mov eax, [ebx].CsEax
+ mov ecx, [ebx].CsEcx
+ mov edx, [ebx].CsEdx
+ mov edi, [ebx].CsEdi
+ mov esi, [ebx].CsEsi
+ mov ebp, [ebx].CsEbp
+ push [ebx].CsSegGs
+ push [ebx].CsSegFs
+ push [ebx].CsSegEs
+ push [ebx].CsSegDs
+ mov ebx, [ebx].CsEbx
+ pop ds
+ pop es
+ pop fs
+ pop gs
+
+ ;
+ ; Switch to 16bit CS
+ ;
+ push KGDT_CODE16
+ push offset FLAT:Kbf30 - offset FLAT:_KiI386CallAbios@16
+ retf
+
+Kbf30:
+ ;
+ ; "call" to 16 bit function
+ ;
+ iretd
+
+Kbf40:
+ ;
+ ; Push some of the returned context which will be needed to
+ ; switch back to the 32 bit SS & CS.
+ ;
+ db OPERAND_OVERRIDE
+ push ds
+
+ db OPERAND_OVERRIDE
+ push es
+
+ db OPERAND_OVERRIDE
+ push fs
+
+ db OPERAND_OVERRIDE
+ push gs
+
+ db OPERAND_OVERRIDE
+ push eax
+
+ db OPERAND_OVERRIDE
+ pushfd
+
+ db OPERAND_OVERRIDE
+ mov eax, KGDT_R0_PCR
+ mov fs, ax
+
+ db OPERAND_OVERRIDE
+ mov eax, KGDT_R3_DATA OR RPL_MASK
+ mov ds, ax
+ mov es, ax
+
+ xor eax, eax
+
+ ;
+ ; Switch back to 32 bit stack
+ ;
+
+ STACK16_TO_STACK32
+
+;
+; Push the flat code selector followed by the return offset, then
+; execute a far return and we'll be back in the 32-bit code space.
+;
+
+
+ db OPERAND_OVERRIDE
+ push KGDT_R0_CODE
+ db OPERAND_OVERRIDE
+ push offset FLAT:Kbf50
+ db OPERAND_OVERRIDE
+ retf
+
+Kbf50:
+ ;
+ ; Return resulting context
+ ;
+
+ mov eax, dword ptr [esp+44] ; (eax) = Context Record
+ pop [eax].CsEflags
+ pop [eax].CsEax
+ pop [eax].CsSegGs
+ pop [eax].CsSegFs
+ pop [eax].CsSegEs
+ pop [eax].CsSegDs
+
+ mov [eax].CsEbx, ebx
+ mov [eax].CsEcx, ecx
+ mov [eax].CsEdx, edx
+ mov [eax].CsEdi, edi
+ mov [eax].CsEsi, esi
+ mov [eax].CsEbp, ebp
+
+;
+; Restore regs & return
+;
+
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ stdRET _KeI386Call16BitFunction
+
+stdENDP _KeI386Call16BitFunction
+
+;++
+; USHORT
+; KeI386Call16BitCStyleFunction (
+; IN ULONG EntryOffset,
+; IN ULONG EntrySelector,
+; IN PUCHAR Parameters,
+; IN ULONG Size
+; )
+;
+; Routine Description:
+;
+; This function calls the 16 bit function which supports the C style calling convention.
+;
+; Parameters:
+;
+; EntryOffset and EntrySelector - specify the entry point of the 16 bit function.
+;
+; Parameters - supplies a pointer to a parameter block which will be
+; passed to 16 bit function as parameters.
+;
+; Size - supplies the size of the parameter block.
+;
+; NOTE: Caller must be at DPC_LEVEL
+;
+; Return Value:
+;
+; AX returned by 16 bit function.
+;
+;--
+
+cPublicProc _KeI386Call16BitCStyleFunction,4
+
+;
+; verify CurrentIrql
+; verify context flags
+;
+
+ push ebp ; save nonvolatile registers
+ push ebx
+ push esi
+ push edi
+
+ mov edi, esp
+ mov esi, dword ptr [esp + 28] ; (esi)->BiosParameters
+ or esi, esi
+ jz short @f
+
+ mov ecx, [esp + 32] ; (ecx) = parameter size
+ sub esp, ecx ; allocate space on TOS to copy parameters
+ mov edi, esp
+ rep movsb ; (edi)-> Top of nonvolatile reg save area
+
+@@:
+
+;
+; We're using a 32bit CS:EIP - go to a 16bit CS:IP
+; Note the base of the KGDT_CODE16 code selector is the flat address of the
+; _KiI386CallAbios routine.
+;
+
+;
+; Remap current stack to 16:16 stack. The base of the 16bit stack selector is
+; the base of current kernel stack.
+;
+
+ STACK32_TO_STACK16 ; Switch to 16bit stack
+
+;
+; Push return address from 16 bit function call to kernel
+;
+
+ push word ptr KGDT_CODE16
+ push word ptr (offset FLAT:Kbfex40 - offset FLAT:_KiI386CallAbios@16)
+
+ push word ptr 0200h ; flags
+ push word ptr [edi + 24] ; entry selector
+ push word ptr [edi + 20] ; entry offset
+
+;
+; Switch to 16bit CS
+;
+ push KGDT_CODE16
+ push offset FLAT:Kbfex30 - offset FLAT:_KiI386CallAbios@16
+ retf
+
+Kbfex30:
+;
+; "call" to 16 bit function
+;
+ iretd
+
+Kbfex40:
+;
+; Save return value.
+;
+
+ db OPERAND_OVERRIDE
+ push eax
+
+;
+; Restore Flat mode segment registers.
+;
+
+ db OPERAND_OVERRIDE
+ mov eax, KGDT_R0_PCR
+ mov fs, ax
+
+ db OPERAND_OVERRIDE
+ mov eax, KGDT_R3_DATA OR RPL_MASK
+ mov ds, ax
+ mov es, ax
+
+ xor eax, eax
+
+;
+; Switch back to 32 bit stack
+;
+
+ STACK16_TO_STACK32
+
+;
+; Push the flat code selector followed by the return offset, then
+; execute a far return and we'll be back in the 32-bit code space.
+;
+
+
+ db OPERAND_OVERRIDE
+ push KGDT_R0_CODE
+ db OPERAND_OVERRIDE
+ push offset FLAT:Kbfex50
+ db OPERAND_OVERRIDE
+ retf
+
+Kbfex50:
+ pop eax
+
+;
+; Restore regs & return
+;
+ mov esp, edi
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ stdRET _KeI386Call16BitCStyleFunction
+
+stdENDP _KeI386Call16BitCStyleFunction
+
+;;
+;; END - power_management
+;;
+;; ********************************************************
+
+
+ public _KiEndOfCode16
+_KiEndOfCode16 equ $
+
+
+
+_TEXT ends
+ end
diff --git a/private/ntos/ke/i386/abiosc.c b/private/ntos/ke/i386/abiosc.c
new file mode 100644
index 000000000..7927d3b11
--- /dev/null
+++ b/private/ntos/ke/i386/abiosc.c
@@ -0,0 +1,767 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ abiosc.c
+
+Abstract:
+
+ This module implements ABIOS support C routines for i386 NT.
+
+Author:
+
+ Shie-Lin Tzong (shielint) 20-May-1991
+
+Environment:
+
+ Boot loader privileged, FLAT mode.
+
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#include "abios.h"
+
+extern PKCOMMON_DATA_AREA KiCommonDataArea;
+extern BOOLEAN KiAbiosPresent;
+
+extern
+ULONG
+KiAbiosGetGdt (
+ VOID
+ );
+
+//
+// The reason for defining these variables here is to isolate ABIOS
+// from the rest of the system.
+//
+
+//
+// KiNumberFreeSelectors defines the number of available selectors for
+// ABIOS specific drivers. This number should be the same across all
+// the processors.
+//
+
+static USHORT KiNumberFreeSelectors = 0;
+
+//
+// KiFreeGdtListHead points to the head of free GDT list on the processor 0.
+//
+
+static PKFREE_GDT_ENTRY KiFreeGdtListHead = 0L;
+
+//
+// Logica Id Table to control the ownership of logical Id.
+//
+
+PKLID_TABLE_ENTRY KiLogicalIdTable;
+
+//
+// KiAbiosGdt[] defines the Starting address of GDT for each processor.
+//
+
+ULONG KiAbiosGdt[MAXIMUM_PROCESSORS];
+
+//
+// SpinLock for accessing GDTs
+//
+
+KSPIN_LOCK KiAbiosGdtLock;
+
+//
+// Spinlock for accessing Logical Id Table
+//
+
+KSPIN_LOCK KiAbiosLidTableLock;
+
+//
+// KiStack16GdtEntry defines the address of the gdt entry for 16 bit stack.
+//
+
+ULONG KiStack16GdtEntry;
+
+VOID
+KiInitializeAbiosGdtEntry (
+ OUT PKGDTENTRY GdtEntry,
+ IN ULONG Base,
+ IN ULONG Limit,
+ IN USHORT Type
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a GDT entry for abios specific code. Base,
+ Limit, and Type (code, data) are set according to parameters. All other
+ fields of the entry are set to match standard system values.
+
+ N.B. The BIG and GRANULARITY are always set to 0.
+
+Arguments:
+
+ GdtEntry - GDT descriptor to be filled in.
+
+ Base - Linear address of the first byte mapped by the selector.
+
+ Limit - Size of the segment, in bytes.
+
+ Type - Code or Data. All code selectors are marked readable,
+ all data selectors are marked writeable.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ GdtEntry->LimitLow = (USHORT)(Limit & 0xffff);
+ GdtEntry->BaseLow = (USHORT)(Base & 0xffff);
+ GdtEntry->HighWord.Bytes.BaseMid = (UCHAR)((Base & 0xff0000) >> 16);
+ GdtEntry->HighWord.Bits.Type = Type;
+ GdtEntry->HighWord.Bits.Dpl = 0;
+ GdtEntry->HighWord.Bits.Pres = 1;
+ GdtEntry->HighWord.Bits.LimitHi = (Limit & 0xf0000) >> 16;
+ GdtEntry->HighWord.Bits.Sys = 0;
+ GdtEntry->HighWord.Bits.Reserved_0 = 0;
+ GdtEntry->HighWord.Bits.Default_Big = 0;
+ GdtEntry->HighWord.Bits.Granularity = 0;
+ GdtEntry->HighWord.Bytes.BaseHi = (UCHAR)((Base & 0xff000000) >> 24);
+}
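+
+//
+// For example (illustrative values only), a call with Base == 0x0012F000 and
+// Limit == 0xFFFF yields BaseLow == 0xF000, BaseMid == 0x12, BaseHi == 0x00,
+// LimitLow == 0xFFFF, and LimitHi == 0x0.
+//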
+
+ULONG
+KiI386SelectorBase (
+ IN USHORT Selector
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the base address of the specified GDT selector.
+
+Arguments:
+
+ Selector - Supplies the desired selector.
+
+Return Value:
+
+ SelectorBase - The base address of the specified selector, or -1L if the
+ selector is not present.
+
+
+--*/
+
+{
+ PKGDTENTRY GdtEntry;
+
+
+ GdtEntry = (PKGDTENTRY)(KiAbiosGetGdt() + Selector);
+ if (GdtEntry->HighWord.Bits.Pres) {
+ return ((ULONG)GdtEntry->BaseLow |
+ (ULONG)GdtEntry->HighWord.Bytes.BaseMid << 16 |
+ (ULONG)GdtEntry->HighWord.Bytes.BaseHi << 24);
+ } else {
+ return (ULONG)(-1L);
+ }
+}
+
+NTSTATUS
+KeI386GetLid(
+ IN USHORT DeviceId,
+ IN USHORT RelativeLid,
+ IN BOOLEAN SharedLid,
+ IN PDRIVER_OBJECT DriverObject,
+ OUT PUSHORT LogicalId
+ )
+
+/*++
+
+Routine Description:
+
+ This function searches Device Blocks and Common Data Area for the
+ Logical Id matching the specified Device Id.
+
+ N.B. (WARNING shielint) To speed the search, this routine ASSUMES that
+ the LIDs with the same Device ID always appear consecutively in the
+ Common Data Area. IBM ABIOS doc does not explicitly specify this.
+ But from the way ABIOS initializes Device Block and Function Transfer
+ Table, I think the assumption is true.
+
+Arguments:
+
+ DeviceId - Desired Device Id.
+
+ RelativeLid - Specifies the Nth logical Id for this device Id. A value
+ of 0 indicates the first available Lid.
+
+ SharedLid - A boolean value indicates if it is a shared or exclusively
+ owned logical Id.
+
+ DriverObject - Supplies a 32-bit flat pointer of the requesting device
+ driver's driver object. The DriverObject is used to establish
+ the ownership of the desired LID.
+
+ LogicalId - A pointer to a variable which will receive the Lid.
+
+Return Value:
+
+ STATUS_SUCCESS - If the requested LID is available.
+
+ STATUS_ABIOS_NOT_PRESENT - If there is no ABIOS support in the system.
+
+ STATUS_ABIOS_LID_NOT_EXIST - If the specified LID does not exist.
+
+ STATUS_ABIOS_LID_ALREADY_OWNED - If the caller requests exclusive ownership
+ of a LID that is already owned.
+
+--*/
+
+{
+ PKDB_FTT_SECTION CdaPointer;
+ PKDEVICE_BLOCK DeviceBlock;
+ USHORT Lid, RelativeLidCount = 1;
+ ULONG Owner;
+ USHORT Increment;
+ KIRQL OldIrql;
+ NTSTATUS Status;
+
+ if (!KiAbiosPresent) {
+ return STATUS_ABIOS_NOT_PRESENT;
+ }
+
+ if (SharedLid) {
+ Owner = LID_NO_SPECIFIC_OWNER;
+ Increment = 1;
+ } else {
+ Owner = (ULONG)DriverObject;
+ Increment = 0;
+ }
+
+ //
+ // If the Logical Id Table hasn't been created yet, create it now.
+ //
+ if (KiLogicalIdTable==NULL) {
+ KiLogicalIdTable = ExAllocatePool(NonPagedPool,
+ NUMBER_LID_TABLE_ENTRIES *
+ sizeof(KLID_TABLE_ENTRY));
+ if (KiLogicalIdTable == NULL) {
+ return(STATUS_NO_MEMORY);
+ }
+ RtlZeroMemory(KiLogicalIdTable, NUMBER_LID_TABLE_ENTRIES*sizeof(KLID_TABLE_ENTRY));
+ }
+
+ //
+ // For each Lid defined in the Common Data Area, we check whether it has a
+ // non-empty device block and function transfer table. If yes, we proceed
+ // to check the device id. Otherwise, we skip the Lid.
+ //
+
+ CdaPointer = (PKDB_FTT_SECTION)KiCommonDataArea + 2;
+ Status = STATUS_ABIOS_LID_NOT_EXIST;
+
+ ExAcquireSpinLock(&KiAbiosLidTableLock, &OldIrql);
+
+ for (Lid = 2; Lid < KiCommonDataArea->NumberLids; Lid++) {
+ if (CdaPointer->DeviceBlock.Selector != 0 &&
+ CdaPointer->FunctionTransferTable.Selector != 0) {
+
+ DeviceBlock = (PKDEVICE_BLOCK)(KiI386SelectorBase(
+ CdaPointer->DeviceBlock.Selector)
+ + (CdaPointer->DeviceBlock.Offset));
+ if (DeviceBlock->DeviceId == DeviceId) {
+ if (RelativeLid == RelativeLidCount || RelativeLid == 0) {
+ if (KiLogicalIdTable[Lid].Owner == 0L) {
+ KiLogicalIdTable[Lid].Owner = Owner;
+ KiLogicalIdTable[Lid].OwnerCount += Increment;
+ *LogicalId = Lid;
+ Status = STATUS_SUCCESS;
+ } else if (KiLogicalIdTable[Lid].Owner == LID_NO_SPECIFIC_OWNER) {
+ if (SharedLid) {
+ *LogicalId = Lid;
+ KiLogicalIdTable[Lid].OwnerCount += Increment;
+ Status = STATUS_SUCCESS;
+ } else {
+ Status = STATUS_ABIOS_LID_ALREADY_OWNED;
+ }
+ } else if (KiLogicalIdTable[Lid].Owner == (ULONG)DriverObject) {
+ *LogicalId = Lid;
+ Status = STATUS_SUCCESS;
+ } else if (RelativeLid != 0) {
+ Status = STATUS_ABIOS_LID_ALREADY_OWNED;
+ }
+ break;
+ } else {
+ RelativeLidCount++;
+ }
+ }
+ }
+ CdaPointer++;
+ }
+
+ ExReleaseSpinLock(&KiAbiosLidTableLock, OldIrql);
+ return Status;
+}
+
+NTSTATUS
+KeI386ReleaseLid(
+ IN USHORT LogicalId,
+ IN PDRIVER_OBJECT DriverObject
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases a logical Id. This routine is called at ABIOS
+ device driver deinstallation or termination.
+
+Arguments:
+
+ LogicalId - Logical Id to be released.
+
+ DriverObject - Supplies a 32-bit flat pointer of the requesting device
+ driver's driver object. The DriverObject is used to check
+ the ownership of the specified LID.
+
+Return Value:
+
+ STATUS_SUCCESS - If the requested LID is released.
+
+ STATUS_ABIOS_NOT_PRESENT - If there is no ABIOS support in the system.
+
+ STATUS_ABIOS_NOT_LID_OWNER - If the caller does not own the LID.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ NTSTATUS Status;
+
+ if (!KiAbiosPresent) {
+ return STATUS_ABIOS_NOT_PRESENT;
+ }
+
+ ExAcquireSpinLock(&KiAbiosLidTableLock, &OldIrql);
+
+ if (KiLogicalIdTable[LogicalId].Owner == (ULONG)DriverObject) {
+ KiLogicalIdTable[LogicalId].Owner = 0L;
+ Status = STATUS_SUCCESS;
+ } else if (KiLogicalIdTable[LogicalId].Owner == LID_NO_SPECIFIC_OWNER) {
+ KiLogicalIdTable[LogicalId].OwnerCount--;
+ if (KiLogicalIdTable[LogicalId].OwnerCount == 0L) {
+ KiLogicalIdTable[LogicalId].Owner = 0L;
+ }
+ Status = STATUS_SUCCESS;
+ } else {
+ Status = STATUS_ABIOS_NOT_LID_OWNER;
+ }
+
+ ExReleaseSpinLock(&KiAbiosLidTableLock, OldIrql);
+
+ return Status;
+}
+
+NTSTATUS
+KeI386AbiosCall(
+ IN USHORT LogicalId,
+ IN PDRIVER_OBJECT DriverObject,
+ IN PUCHAR RequestBlock,
+ IN USHORT EntryPoint
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls an ABIOS service routine on behalf of a device driver
+ using the Operating System Transfer Convention.
+
+Arguments:
+
+ LogicalId - Logical Id for the call.
+
+ DriverObject - Supplies a 32-bit flat pointer of the requesting device
+ driver's driver object. The DriverObject is used to verify
+ the ownership of the desired LID.
+
+ RequestBlock - A 16:16 (selector:offset) pointer to the request block.
+
+ EntryPoint - Specifies which ABIOS entry point:
+
+ 0 - Start Routine
+ 1 - Interrupt Routine
+ 2 - Timeout Routine
+
+Return Value:
+
+ STATUS_SUCCESS - If no error.
+
+ STATUS_ABIOS_NOT_PRESENT - If there is no ABIOS support in the system.
+
+ STATUS_ABIOS_INVALID_COMMAND - if the specified entry point is not supported.
+
+ STATUS_ABIOS_INVALID_LID - If the Lid specified is invalid.
+
+ STATUS_ABIOS_NOT_LID_OWNER - If the caller does not own this Lid.
+
+ (Note that the request specific ABIOS returned code is in RequestBlock.)
+
+--*/
+
+{
+
+ KABIOS_POINTER FuncTransferTable;
+ KABIOS_POINTER DeviceBlock;
+ KABIOS_POINTER AbiosFunction;
+ PKFUNCTION_TRANSFER_TABLE FttPointer;
+
+ if (!KiAbiosPresent) {
+ return STATUS_ABIOS_NOT_PRESENT;
+ }
+
+ if (LogicalId >= KiCommonDataArea->NumberLids) {
+ return STATUS_ABIOS_INVALID_LID;
+ } else if (KiLogicalIdTable[LogicalId].Owner != (ULONG)DriverObject &&
+ KiLogicalIdTable[LogicalId].Owner != LID_NO_SPECIFIC_OWNER) {
+ return STATUS_ABIOS_NOT_LID_OWNER;
+ } else if (EntryPoint > 2) {
+ return STATUS_ABIOS_INVALID_COMMAND;
+ }
+
+ FuncTransferTable = ((PKDB_FTT_SECTION)KiCommonDataArea + LogicalId)->
+ FunctionTransferTable;
+ DeviceBlock = ((PKDB_FTT_SECTION)KiCommonDataArea + LogicalId)->DeviceBlock;
+ FttPointer = (PKFUNCTION_TRANSFER_TABLE)(KiI386SelectorBase(FuncTransferTable.Selector) +
+ (ULONG)FuncTransferTable.Offset);
+ AbiosFunction = FttPointer->CommonRoutine[EntryPoint];
+ KiI386CallAbios(AbiosFunction,
+ DeviceBlock,
+ FuncTransferTable,
+ *(PKABIOS_POINTER)&RequestBlock
+ );
+
+ return STATUS_SUCCESS;
+}
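+
+//
+// The fragment below is a hedged, illustrative sketch (kept in a comment and
+// not part of this module) of how an ABIOS-aware driver might pair
+// KeI386GetLid, KeI386AbiosCall, and KeI386ReleaseLid. The device id, the
+// request block, and the routine name are hypothetical.
+//
+// NTSTATUS
+// AbpStartRequest (
+//     IN PDRIVER_OBJECT DriverObject,
+//     IN PUCHAR RequestBlock          // 16:16 selector:offset pointer
+//     )
+// {
+//     USHORT LogicalId;
+//     NTSTATUS Status;
+//
+//     Status = KeI386GetLid(0x1234, 0, TRUE, DriverObject, &LogicalId);
+//     if (!NT_SUCCESS(Status)) {
+//         return Status;
+//     }
+//
+//     Status = KeI386AbiosCall(LogicalId, DriverObject, RequestBlock, 0);
+//     KeI386ReleaseLid(LogicalId, DriverObject);
+//     return Status;
+// }
+//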
+
+NTSTATUS
+KeI386AllocateGdtSelectors(
+ OUT PUSHORT SelectorArray,
+ IN USHORT NumberOfSelectors
+ )
+
+/*++
+
+Routine Description:
+
+ This function allocates a set of GDT selectors for a device driver to use.
+ Usually this allocation is performed at device driver initialization time
+ to reserve the selectors for later use.
+
+Arguments:
+
+ SelectorArray - Supplies a pointer to an array of USHORT to be filled
+ in with the GDT selectors allocated.
+
+ NumberOfSelectors - Specifies the number of selectors to be allocated.
+
+Return Value:
+
+ STATUS_SUCCESS - If the requested selectors are allocated.
+
+ STATUS_ABIOS_SELECTOR_NOT_AVAILABLE - If the system cannot allocate the number
+ of selectors requested.
+
+--*/
+
+{
+ PKFREE_GDT_ENTRY GdtEntry;
+ KIRQL OldIrql;
+
+ if (KiNumberFreeSelectors >= NumberOfSelectors) {
+ ExAcquireSpinLock(&KiAbiosGdtLock, &OldIrql);
+
+ //
+ // The free GDT linked list is maintained on processor 0's GDT ONLY,
+ // because a 'selector' is an offset from the beginning of the GDT and
+ // should be the same across all the processors.
+ //
+
+ KiNumberFreeSelectors -= NumberOfSelectors;
+ GdtEntry = KiFreeGdtListHead;
+ while (NumberOfSelectors != 0) {
+ *SelectorArray++ = (USHORT)((ULONG)GdtEntry - KiAbiosGdt[0]);
+ GdtEntry = GdtEntry->Flink;
+ NumberOfSelectors--;
+ }
+ KiFreeGdtListHead = GdtEntry;
+ ExReleaseSpinLock(&KiAbiosGdtLock, OldIrql);
+ return STATUS_SUCCESS;
+ } else {
+ return STATUS_ABIOS_SELECTOR_NOT_AVAILABLE;
+ }
+}
+
+NTSTATUS
+KeI386ReleaseGdtSelectors(
+ OUT PUSHORT SelectorArray,
+ IN USHORT NumberOfSelectors
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases a set of GDT selectors for a device driver.
+ Usually this function is called at device driver termination or
+ deinstallation time.
+
+Arguments:
+
+ SelectorArray - Supplies a pointer to an array of USHORT selectors
+ to be freed.
+
+ NumberOfSelectors - Specifies the number of selectors to be released.
+
+Return Value:
+
+ STATUS_SUCCESS - If the requested selectors are released.
+
+--*/
+{
+ PKFREE_GDT_ENTRY GdtEntry;
+ KIRQL OldIrql;
+ ULONG Gdt;
+
+ ExAcquireSpinLock(&KiAbiosGdtLock, &OldIrql);
+
+ //
+ // The free GDT linked list is maintained on processor 0's GDT ONLY,
+ // because a 'selector' is an offset from the beginning of the GDT and
+ // should be the same across all the processors.
+ //
+
+ KiNumberFreeSelectors += NumberOfSelectors;
+ Gdt = KiAbiosGdt[0];
+ while (NumberOfSelectors != 0) {
+ GdtEntry = (PKFREE_GDT_ENTRY)(Gdt + *SelectorArray++);
+ GdtEntry->Flink = KiFreeGdtListHead;
+ KiFreeGdtListHead = GdtEntry;
+ NumberOfSelectors--;
+ }
+ ExReleaseSpinLock(&KiAbiosGdtLock, OldIrql);
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+KeI386FlatToGdtSelector(
+ IN ULONG SelectorBase,
+ IN USHORT Length,
+ IN USHORT Selector
+ )
+
+/*++
+
+Routine Description:
+
+ This function converts a 32-bit flat address to a GDT selector-offset
+ pair. The segment set up is always a 16-bit ring 0 data segment.
+
+Arguments:
+
+ SelectorBase - Supplies 32 bit flat address to be set as the base address
+ of the desired selector.
+
+ Length - Supplies the Length of the segment. The Length is a 16 bit value
+ and zero means 64KB.
+
+ Selector - Supplies the selector to be set up.
+
+Return Value:
+
+ STATUS_SUCCESS - If the selector is set up successfully.
+
+ STATUS_ABIOS_NOT_PRESENT - If there is no ABIOS support in the system.
+
+ STATUS_ABIOS_INVALID_SELECTOR - If the selector supplied is invalid.
+
+
+--*/
+
+{
+ PKGDTENTRY GdtEntry, GdtEntry1;
+ KIRQL OldIrql;
+ ULONG i;
+
+ if (!KiAbiosPresent) {
+ return STATUS_ABIOS_NOT_PRESENT;
+ }
+ if (Selector < RESERVED_GDT_ENTRIES * sizeof(KGDTENTRY)) {
+ return STATUS_ABIOS_INVALID_SELECTOR;
+ } else {
+ ExAcquireSpinLock(&KiAbiosGdtLock, &OldIrql);
+ GdtEntry = (PKGDTENTRY)(KiAbiosGdt[0] + Selector);
+ GdtEntry->LimitLow = (USHORT)(Length - 1);
+ GdtEntry->BaseLow = LOWWORD(SelectorBase);
+ GdtEntry->HighWord.Bytes.BaseMid = LOWBYTE(HIGHWORD(SelectorBase));
+ GdtEntry->HighWord.Bytes.BaseHi = HIGHBYTE(HIGHWORD(SelectorBase));
+ GdtEntry->HighWord.Bits.Pres = 1;
+ GdtEntry->HighWord.Bits.Type = TYPE_DATA;
+ GdtEntry->HighWord.Bits.Dpl = DPL_SYSTEM;
+ for (i = 1; i < (ULONG)KeNumberProcessors; i++) {
+ GdtEntry1 = (PKGDTENTRY)(KiAbiosGdt[i] + Selector);
+ *GdtEntry1 = *GdtEntry;
+ }
+ ExReleaseSpinLock(&KiAbiosGdtLock, OldIrql);
+ return STATUS_SUCCESS;
+ }
+}
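+
+//
+// A hedged, illustrative sketch (kept in a comment and not part of this
+// module) of how a driver might pair KeI386AllocateGdtSelectors,
+// KeI386FlatToGdtSelector, and KeI386ReleaseGdtSelectors to map a flat
+// buffer for 16-bit ABIOS code. The routine name and parameters are
+// hypothetical.
+//
+// NTSTATUS
+// AbpMapBuffer (
+//     IN PVOID Buffer,                // flat address of a buffer <= 64KB
+//     IN USHORT Length,               // length in bytes (0 means 64KB)
+//     OUT PUSHORT Selector
+//     )
+// {
+//     NTSTATUS Status;
+//
+//     Status = KeI386AllocateGdtSelectors(Selector, 1);
+//     if (!NT_SUCCESS(Status)) {
+//         return Status;
+//     }
+//
+//     Status = KeI386FlatToGdtSelector((ULONG)Buffer, Length, *Selector);
+//     if (!NT_SUCCESS(Status)) {
+//         KeI386ReleaseGdtSelectors(Selector, 1);
+//     }
+//     return Status;
+// }
+//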
+
+VOID
+Ki386InitializeGdtFreeList (
+ PKFREE_GDT_ENTRY EndOfGdt
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the GDT free list by linking all the unused GDT
+ entries onto a free list.
+
+Arguments:
+
+ EndOfGdt - Supplies the ending address of desired GDT.
+
+Return Value:
+
+ None.
+
+--*/
+{
+ PKFREE_GDT_ENTRY GdtEntry;
+
+ GdtEntry = EndOfGdt - 1;
+ KiFreeGdtListHead = (PKFREE_GDT_ENTRY)0;
+ while (GdtEntry != (PKFREE_GDT_ENTRY)KiAbiosGetGdt() +
+ RESERVED_GDT_ENTRIES - 1) {
+ if (GdtEntry->Present == 0) {
+ GdtEntry->Flink = KiFreeGdtListHead;
+ KiFreeGdtListHead = GdtEntry;
+ KiNumberFreeSelectors++;
+ }
+ GdtEntry--;
+ }
+}
+
+VOID
+KiInitializeAbios (
+ IN UCHAR Processor
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the GDT free list and sets up the selectors used
+ by KiI386CallAbios (16-bit code).
+
+Arguments:
+
+ Processor - the processor that performs the initialization.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG GdtLength;
+ PKGDTENTRY AliasGdtSelectorEntry;
+ PKFREE_GDT_ENTRY EndOfGdt;
+
+ //
+ // First check if abios is recognized by osloader.
+ //
+
+ KiCommonDataArea = KeLoaderBlock->u.I386.CommonDataArea;
+
+ //
+ // NOTE For now we want to disable ABIOS support on MP.
+ //
+
+ if (KiCommonDataArea == NULL || Processor != 0) {
+ KiAbiosPresent = FALSE;
+ } else {
+ KiAbiosPresent = TRUE;
+ }
+
+ //
+ // Initialize the spinlocks for accessing GDTs and Lid Table.
+ //
+
+ KeInitializeSpinLock( &KiAbiosGdtLock );
+ KeInitializeSpinLock( &KiAbiosLidTableLock );
+
+ //
+ // Determine the starting and ending addresses of GDT.
+ //
+
+ KiAbiosGdt[Processor] = KiAbiosGetGdt();
+
+ AliasGdtSelectorEntry = (PKGDTENTRY)(KiAbiosGetGdt() + KGDT_GDT_ALIAS);
+ GdtLength = 1 + (ULONG)(AliasGdtSelectorEntry->LimitLow) +
+ (ULONG)(AliasGdtSelectorEntry->HighWord.Bits.LimitHi << 16);
+ EndOfGdt = (PKFREE_GDT_ENTRY)(KiAbiosGetGdt() + GdtLength);
+
+ //
+ // Prepare selector for 16 bit stack segment
+ //
+
+ KiStack16GdtEntry = KiAbiosGetGdt() + KGDT_STACK16;
+
+ KiInitializeAbiosGdtEntry(
+ (PKGDTENTRY)KiStack16GdtEntry,
+ 0L,
+ 0xffff,
+ TYPE_DATA
+ );
+
+ //
+ // Establish the addressability of Common Data Area selector.
+ //
+
+ KiInitializeAbiosGdtEntry(
+ (PKGDTENTRY)(KiAbiosGetGdt() + KGDT_CDA16),
+ (ULONG)KiCommonDataArea,
+ 0xffff,
+ TYPE_DATA
+ );
+
+ //
+ // Set up 16-bit code selector for KiI386CallAbios
+ //
+
+ KiInitializeAbiosGdtEntry(
+ (PKGDTENTRY)(KiAbiosGetGdt() + KGDT_CODE16),
+ (ULONG)&KiI386CallAbios,
+ (ULONG)&KiEndOfCode16 - (ULONG)&KiI386CallAbios - 1,
+ 0x18 // TYPE_CODE
+ );
+
+ //
+ // Link all the unused GDT entries to our GDT free list.
+ //
+
+ if (Processor == 0) {
+ Ki386InitializeGdtFreeList(EndOfGdt);
+ }
+}
diff --git a/private/ntos/ke/i386/allproc.c b/private/ntos/ke/i386/allproc.c
new file mode 100644
index 000000000..410c58b58
--- /dev/null
+++ b/private/ntos/ke/i386/allproc.c
@@ -0,0 +1,397 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ allproc.c
+
+Abstract:
+
+ This module allocates and intializes kernel resources required
+ to start a new processor, and passes a complete process_state
+ structre to the hal to obtain a new processor. This is done
+ for every processor.
+
+Author:
+
+ Ken Reneris (kenr) 22-Jan-92
+
+Environment:
+
+ Kernel mode only.
+ Phase 1 of bootup
+
+Revision History:
+
+--*/
+
+
+#include "ki.h"
+
+#ifdef NT_UP
+
+VOID
+KeStartAllProcessors (
+ VOID
+ )
+{
+ // UP Build - this function is a nop
+}
+
+#else
+
+extern ULONG KeRegisteredProcessors;
+
+static VOID
+KiCloneDescriptor (
+ IN PKDESCRIPTOR pSrcDescriptorInfo,
+ IN PKDESCRIPTOR pDestDescriptorInfo
+ );
+
+static VOID
+KiCloneSelector (
+ IN ULONG SrcSelector,
+ IN PKGDTENTRY pNGDT,
+ IN PKDESCRIPTOR pDestDescriptor
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,KeStartAllProcessors)
+#pragma alloc_text(INIT,KiCloneDescriptor)
+#pragma alloc_text(INIT,KiCloneSelector)
+#endif
+
+#if !defined(NT_UP)
+
+ULONG KiBarrierWait = 0;
+
+#endif
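+
+//
+// N.B. KiBarrierWait is set to 1 by KeStartAllProcessors below and is
+// presumably cleared once every processor has been started; the other
+// processors would then spin on it before entering the idle loop, roughly
+// along the lines of the (hypothetical) fragment below:
+//
+//     while (KiBarrierWait != 0) {
+//         ;                           // spin until processor 0 releases the barrier
+//     }
+//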
+
+
+
+VOID
+KeStartAllProcessors (
+ VOID
+ )
+/*++
+
+Routine Description:
+
+ Called by p0 during phase 1 of bootup. This function implements
+ the x86 specific code to contact the hal for each system processor.
+
+Arguments:
+
+Return Value:
+
+ All available processors are sent to KiSystemStartup.
+
+--*/
+{
+ KPROCESSOR_STATE ProcessorState;
+ KDESCRIPTOR Descriptor;
+ KDESCRIPTOR TSSDesc, DFTSSDesc, NMITSSDesc, PCRDesc;
+ PKGDTENTRY pGDT;
+ PUCHAR pStack;
+ ULONG DFStack;
+ PUCHAR pThreadObject;
+ PULONG pTopOfStack;
+ ULONG NewProcessorNumber;
+ BOOLEAN NewProcessor;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+ PKTSS pTSS;
+ PLIST_ENTRY NextEntry;
+ LONG NumberProcessors;
+
+ //
+ // If the registered number of processors is greater than the maximum
+ // number of processors supported, then only allow the maximum number
+ // of supported processors.
+ //
+
+ if (KeRegisteredProcessors > MAXIMUM_PROCESSORS) {
+ KeRegisteredProcessors = MAXIMUM_PROCESSORS;
+ }
+
+ //
+ // Set barrier that will prevent any other processor from entering the
+ // idle loop until all processors have been started.
+ //
+
+ KiBarrierWait = 1;
+
+
+ while ((ULONG)KeNumberProcessors < KeRegisteredProcessors) {
+ //
+ // Build up a processor state for new processor
+ //
+
+ RtlZeroMemory ((PVOID) &ProcessorState, sizeof ProcessorState);
+
+
+ //
+ // Give the new processor its own GDT
+ //
+
+ _asm {
+ sgdt Descriptor.Limit
+ }
+
+ KiCloneDescriptor (&Descriptor,
+ &ProcessorState.SpecialRegisters.Gdtr);
+
+ pGDT = (PKGDTENTRY) ProcessorState.SpecialRegisters.Gdtr.Base;
+
+
+ //
+ // Give new processor its own IDT
+ //
+
+ _asm {
+ sidt Descriptor.Limit
+ }
+ KiCloneDescriptor (&Descriptor,
+ &ProcessorState.SpecialRegisters.Idtr);
+
+
+ //
+ // Give new processor its own TSS and PCR
+ //
+
+ KiCloneSelector (KGDT_TSS, pGDT, &TSSDesc);
+ KiCloneSelector (KGDT_R0_PCR, pGDT, &PCRDesc);
+
+ //
+ // Allocate double-fault TSS & stack, and NMI TSS
+ //
+
+ KiCloneSelector (KGDT_DF_TSS, pGDT, &DFTSSDesc);
+ DFStack = (ULONG)ExAllocatePool(NonPagedPool, DOUBLE_FAULT_STACK_SIZE);
+ pTSS = (PKTSS)DFTSSDesc.Base;
+ pTSS->Esp0 = DFStack + DOUBLE_FAULT_STACK_SIZE;
+ pTSS->NotUsed2[5] = DFStack + DOUBLE_FAULT_STACK_SIZE;
+
+ KiCloneSelector (KGDT_NMI_TSS, pGDT, &NMITSSDesc);
+ pTSS = (PKTSS)NMITSSDesc.Base;
+ pTSS->Esp0 = DFStack + DOUBLE_FAULT_STACK_SIZE;
+ pTSS->NotUsed2[5] = DFStack + DOUBLE_FAULT_STACK_SIZE;
+
+
+ //
+ // Set other SpecialRegisters in processor state
+ //
+
+ _asm {
+ mov eax, cr0
+ and eax, NOT (CR0_AM or CR0_WP)
+ mov ProcessorState.SpecialRegisters.Cr0, eax
+ mov eax, cr3
+ mov ProcessorState.SpecialRegisters.Cr3, eax
+
+ pushfd
+ pop ProcessorState.ContextFrame.EFlags
+ and ProcessorState.ContextFrame.EFlags, NOT EFLAGS_INTERRUPT_MASK
+ }
+
+ ProcessorState.SpecialRegisters.Tr = KGDT_TSS;
+ pGDT[KGDT_TSS>>3].HighWord.Bytes.Flags1 = 0x89;
+
+
+ //
+ // Allocate a kernel stack and ThreadObject for the new processor
+ //
+
+ pStack = (PUCHAR)MmCreateKernelStack (FALSE);
+ pThreadObject = (PUCHAR)ExAllocatePool (NonPagedPool, sizeof(ETHREAD));
+
+ //
+ // Zero initialize these...
+ //
+
+ RtlZeroMemory ((PVOID) PCRDesc.Base, sizeof (KPCR));
+ RtlZeroMemory ((PVOID) pThreadObject, sizeof (KTHREAD));
+
+
+ //
+ // Setup context
+ // Push variables onto new stack
+ //
+
+ pTopOfStack = (PULONG) pStack;
+ pTopOfStack[-1] = (ULONG) KeLoaderBlock;
+ ProcessorState.ContextFrame.Esp = (ULONG) (pTopOfStack-2);
+ ProcessorState.ContextFrame.Eip = (ULONG) KiSystemStartup;
+
+ ProcessorState.ContextFrame.SegCs = KGDT_R0_CODE;
+ ProcessorState.ContextFrame.SegDs = KGDT_R3_DATA;
+ ProcessorState.ContextFrame.SegEs = KGDT_R3_DATA;
+ ProcessorState.ContextFrame.SegFs = KGDT_R0_PCR;
+ ProcessorState.ContextFrame.SegSs = KGDT_R0_DATA;
+
+
+ //
+ // Initialize new processor's PCR & Prcb
+ //
+
+ NewProcessorNumber = KeNumberProcessors;
+ KiInitializePcr (
+ (ULONG) NewProcessorNumber,
+ (PKPCR) PCRDesc.Base,
+ (PKIDTENTRY) ProcessorState.SpecialRegisters.Idtr.Base,
+ (PKGDTENTRY) ProcessorState.SpecialRegisters.Gdtr.Base,
+ (PKTSS) TSSDesc.Base,
+ (PKTHREAD) pThreadObject
+ );
+
+
+ //
+ // Adjust LoaderBlock so it has the next processor's state
+ //
+
+ KeLoaderBlock->KernelStack = (ULONG) pTopOfStack;
+ KeLoaderBlock->Thread = (ULONG) pThreadObject;
+ KeLoaderBlock->Prcb = (ULONG) ((PKPCR) PCRDesc.Base)->Prcb;
+
+
+ //
+ // Contact hal to start new processor
+ //
+
+ NewProcessor = HalStartNextProcessor (KeLoaderBlock, &ProcessorState);
+
+
+ if (!NewProcessor) {
+ //
+ // There wasn't another processor, so free resources and
+ // break
+ //
+
+ ExFreePool ((PVOID) ProcessorState.SpecialRegisters.Gdtr.Base);
+ ExFreePool ((PVOID) ProcessorState.SpecialRegisters.Idtr.Base);
+ ExFreePool ((PVOID) TSSDesc.Base);
+ ExFreePool ((PVOID) DFTSSDesc.Base);
+ ExFreePool ((PVOID) NMITSSDesc.Base);
+ ExFreePool ((PVOID) PCRDesc.Base);
+ ExFreePool ((PVOID) pThreadObject);
+ ExFreePool ((PVOID) DFStack);
+ MmDeleteKernelStack ((PVOID) pStack, FALSE);
+ break;
+ }
+
+
+ //
+ // Wait for processor to initialize in kernel, then loop for another
+ //
+
+ while (*((volatile ULONG *) &KeLoaderBlock->Prcb) != 0)
+ { }
+ }
+
+ //
+ // Reset and synchronize the performance counters of all processors.
+ //
+
+ NumberProcessors = KeNumberProcessors;
+ KiIpiGenericCall (
+ (PKIPI_BROADCAST_WORKER) HalCalibratePerformanceCounter,
+ (ULONG)(&NumberProcessors)
+ );
+
+ //
+ // Allow all processors that were started to enter the idle loop and
+ // begin execution.
+ //
+
+ KiBarrierWait = 0;
+}
+
+
+
+static VOID
+KiCloneSelector (
+ IN ULONG SrcSelector,
+ IN PKGDTENTRY pNGDT,
+ IN PKDESCRIPTOR pDestDescriptor
+/*++
+
+Routine Description:
+
+ Makes a copy of the current selector's data, and updates the new
+ gdt's linear address to point to the new copy.
+
+Arguments:
+ SrcSelector - Selector value to clone
+ pNGDT - New gdt table which is being built
+ pDestDescriptor - descriptor structure to fill in with resulting memory
+
+Return Value:
+
+--*/
+ )
+{
+ KDESCRIPTOR Descriptor;
+ PKGDTENTRY pGDT;
+ ULONG CurrentBase;
+ ULONG NewBase;
+
+ _asm {
+ sgdt fword ptr [Descriptor.Limit] ; Get GDTs addr
+ }
+
+ pGDT = (PKGDTENTRY) Descriptor.Base;
+ pGDT += SrcSelector >> 3;
+ pNGDT += SrcSelector >> 3;
+
+ CurrentBase = pGDT->BaseLow | (pGDT->HighWord.Bits.BaseMid << 16) |
+ (pGDT->HighWord.Bits.BaseHi << 24);
+
+ Descriptor.Base = CurrentBase;
+ Descriptor.Limit = pGDT->LimitLow;
+ if (pGDT->HighWord.Bits.Granularity & GRAN_PAGE)
+ Descriptor.Limit = (Descriptor.Limit << PAGE_SHIFT) -1;
+
+ KiCloneDescriptor (&Descriptor, pDestDescriptor);
+ NewBase = pDestDescriptor->Base;
+
+ pNGDT->BaseLow = (USHORT) NewBase & 0xffff;
+ pNGDT->HighWord.Bits.BaseMid = (UCHAR) (NewBase >> 16) & 0xff;
+ pNGDT->HighWord.Bits.BaseHi = (UCHAR) (NewBase >> 24) & 0xff;
+}
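+
+//
+// Illustrative sketch, not part of the original module: the three GDT base
+// fields written above carry one 32-bit linear address split exactly as the
+// reconstruction of CurrentBase at the top of this routine expects. The
+// example value below is hypothetical.
+//
+
+#if 0
+    ULONG Base = 0x80231000;                                    // hypothetical linear base
+    KGDTENTRY Entry;
+
+    Entry.BaseLow = (USHORT)(Base & 0xffff);                    // 0x1000
+    Entry.HighWord.Bits.BaseMid = (UCHAR)((Base >> 16) & 0xff); // 0x23
+    Entry.HighWord.Bits.BaseHi = (UCHAR)((Base >> 24) & 0xff);  // 0x80
+
+    //
+    // Recombining the fields yields the original base again.
+    //
+
+    Base = Entry.BaseLow | (Entry.HighWord.Bits.BaseMid << 16) |
+           (Entry.HighWord.Bits.BaseHi << 24);
+#endif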
+
+
+
+static VOID
+KiCloneDescriptor (
+ IN PKDESCRIPTOR pSrcDescriptor,
+ IN PKDESCRIPTOR pDestDescriptor
+ )
+/*++
+
+Routine Description:
+
+ Makes a copy of the specified descriptor, and supplies a return
+ descriptor for the new copy
+
+Arguments:
+ pSrcDescriptor - descriptor to clone
+ pDestDescriptor - the cloned descriptor
+
+Return Value:
+
+--*/
+{
+ ULONG Size;
+
+ Size = pSrcDescriptor->Limit + 1;
+ pDestDescriptor->Limit = (USHORT) Size -1;
+ pDestDescriptor->Base = (ULONG) ExAllocatePool (NonPagedPool, Size);
+
+ RtlMoveMemory ((PVOID) pDestDescriptor->Base,
+ (PVOID) pSrcDescriptor->Base, Size);
+}
+
+
+#endif // !NT_UP
diff --git a/private/ntos/ke/i386/alr.inc b/private/ntos/ke/i386/alr.inc
new file mode 100644
index 000000000..8491a3aee
--- /dev/null
+++ b/private/ntos/ke/i386/alr.inc
@@ -0,0 +1,87 @@
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; alr.inc
+;
+; Abstract:
+;
+; This include file defines all the equates and macros specifically
+; used for ALR Multiprocessor system implementation.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 29-Oct-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+;
+; Virtual address map for ALR CBUS IO and address space
+; CBUS memory address space starting from 64MB to (128MB - 1)
+; CBUS IO space starting from 128MB to (192MB - 1). We are interested
+; in the first 4MB only (at least for now).
+;
+
+CBUS_ADDR_START_PHYS equ 4000000h
+
+CBUS_IO_SPACE_START equ 90000000h
+CBUS_IO_SPACE_START_PHYS equ 8000000h
+CBUS_IO_SPACE_SIZE equ 400000h
+
+;
+;CPU ID for CBUS PEs and common functions for PEs
+;
+
+ALLCPUID equ 0Fh ; ID to address all the slaves
+BASECPUID equ 0Eh ; Base CPU ID
+
+PE_CRESET equ 0 ; Clear Reset
+PE_SRESET equ 1 ; Set Reset
+PE_CONTEND equ 2 ; Contend (Place slot number on ARB0-3
+ ; lines)
+PE_SETIDA equ 3 ; Set ID value on winning processor
+PE_CSWI equ 4 ; Clear software interrupt
+PE_SSWI equ 5 ; Set software interrupt
+PE_CNMI equ 6 ; Clear NMI
+PE_SNMI equ 7 ; Set NMI
+PE_SLED equ 8 ; Set LED
+PE_CLED equ 9 ; Clear LED
+
+;
+; Miscs CBUS definitions
+;
+
+ArbitrateRegister equ 0F1h
+ArbitrateMask equ 0Fh ; Lower 4 bits of Arbitrate Register
+
+
+;
+; Macros to access CBUS I/O space
+;
+; CBUS_IO_ACCESS func, cpuid
+; func - the function which will be applied to PEs
+; cpuid - the desired PE. If not specified, the cpuid is in AL register.
+;
+
+CBUS_IO_ACCESS macro func, cpuid
+
+ifnb <cpuid>
+ mov eax, (cpuid SHL 18) + CBUS_IO_SPACE_START + (func SHL 4)
+else
+ movzx eax, al
+ shl eax, 18
+ add eax, CBUS_IO_SPACE_START + (func SHL 4)
+endif
+ or byte ptr [eax], 0ffh
+
+ endm
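+
+;
+; Illustrative usage sketch, not part of the original include file: setting
+; the software interrupt on the base CPU with an immediate cpuid expands to
+; a single store into CBUS I/O space at
+; CBUS_IO_SPACE_START + (cpuid SHL 18) + (func SHL 4).
+;
+
+if 0
+        CBUS_IO_ACCESS PE_SSWI, BASECPUID   ; mov eax, 90380050h
+                                            ; or byte ptr [eax], 0ffh
+endif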
+
+
diff --git a/private/ntos/ke/i386/apcuser.c b/private/ntos/ke/i386/apcuser.c
new file mode 100644
index 000000000..14d327ec0
--- /dev/null
+++ b/private/ntos/ke/i386/apcuser.c
@@ -0,0 +1,169 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ apcuser.c
+
+Abstract:
+
+ This module implements the machine dependent code necessary to initialize
+ a user mode APC.
+
+Author:
+
+ David N. Cutler (davec) 23-Apr-1990
+
+Environment:
+
+ Kernel mode only, IRQL APC_LEVEL.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiInitializeUserApc (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKNORMAL_ROUTINE NormalRoutine,
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize the context for a user mode APC.
+
+Arguments:
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ NormalRoutine - Supplies a pointer to the user mode APC routine.
+
+ NormalContext - Supplies a pointer to the user context for the APC
+ routine.
+
+ SystemArgument1 - Supplies the first system supplied value.
+
+ SystemArgument2 - Supplies the second system supplied value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ EXCEPTION_RECORD ExceptionRecord;
+ CONTEXT ContextFrame;
+ LONG Length;
+ ULONG UserStack;
+
+
+ //
+ // APCs are not defined for V86 mode; however, it is possible a
+ // thread is trying to set its context to V86 mode - this isn't
+ // going to work, but we don't want to crash the system so we
+ // check for the possibility before hand.
+ //
+
+ if (TrapFrame->EFlags & EFLAGS_V86_MASK) {
+ return ;
+ }
+
+ //
+ // Move machine state from trap and exception frames to the context frame.
+ //
+
+ ContextFrame.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextFrame);
+
+ //
+ // Transfer the context information to the user stack, initialize the
+ // APC routine parameters, and modify the trap frame so execution will
+ // continue in user mode at the user mode APC dispatch routine.
+ //
+
+
+ try {
+ ASSERT((TrapFrame->SegCs & MODE_MASK) != KernelMode); // Assert usermode frame
+
+ //
+ // Compute length of context record and new aligned user stack pointer.
+ //
+
+ Length = ((sizeof(CONTEXT) + CONTEXT_ROUND) &
+ ~CONTEXT_ROUND) + sizeof(KAPC_RECORD);
+ UserStack = (ContextFrame.Esp & ~CONTEXT_ROUND) - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack, Length, CONTEXT_ALIGN);
+ RtlMoveMemory((PULONG)(UserStack + (sizeof(KAPC_RECORD))),
+ &ContextFrame, sizeof(CONTEXT));
+
+ //
+ // Force correct R3 selectors into TrapFrame.
+ //
+
+ TrapFrame->SegCs = SANITIZE_SEG(KGDT_R3_CODE, UserMode);
+ TrapFrame->HardwareSegSs = SANITIZE_SEG(KGDT_R3_DATA, UserMode);
+ TrapFrame->SegDs = SANITIZE_SEG(KGDT_R3_DATA, UserMode);
+ TrapFrame->SegEs = SANITIZE_SEG(KGDT_R3_DATA, UserMode);
+ TrapFrame->SegFs = SANITIZE_SEG(KGDT_R3_TEB, UserMode);
+ TrapFrame->SegGs = 0;
+ TrapFrame->EFlags = SANITIZE_FLAGS( ContextFrame.EFlags, UserMode );
+
+ //
+ // If thread is supposed to have IOPL, then force it on in eflags
+ //
+
+ if (KeGetCurrentThread()->Iopl) {
+ TrapFrame->EFlags |= (EFLAGS_IOPL_MASK & -1); // IOPL = 3
+ }
+
+ //
+ // Set the address of the user APC routine, the APC parameters, the
+ // new frame pointer, and the new stack pointer in the current trap
+ // frame. Set the continuation address so control will be transferred
+ // to the user APC dispatcher.
+ //
+
+ TrapFrame->HardwareEsp = UserStack;
+ TrapFrame->Eip = (ULONG)KeUserApcDispatcher;
+ TrapFrame->ErrCode = 0;
+ *((PULONG)UserStack)++ = (ULONG)NormalRoutine;
+ *((PULONG)UserStack)++ = (ULONG)NormalContext;
+ *((PULONG)UserStack)++ = (ULONG)SystemArgument1;
+ *((PULONG)UserStack)++ = (ULONG)SystemArgument2;
+ } except (KiCopyInformation(&ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Set the address of the exception to the current program address
+ // and raise the exception by calling the exception dispatcher.
+ //
+
+ ExceptionRecord.ExceptionAddress = (PVOID)(TrapFrame->Eip);
+ KiDispatchException(&ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ UserMode,
+ TRUE);
+ }
+ return;
+}
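+
+//
+// Illustrative sketch, not part of the original module: assuming KAPC_RECORD
+// is four ULONG slots, the user stack handed to KeUserApcDispatcher (that is,
+// the value written to TrapFrame->HardwareEsp above) is laid out as follows:
+//
+//      UserStack + 0x00    NormalRoutine
+//      UserStack + 0x04    NormalContext
+//      UserStack + 0x08    SystemArgument1
+//      UserStack + 0x0c    SystemArgument2
+//      UserStack + 0x10    CONTEXT record (size rounded up by CONTEXT_ROUND)
+//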
+
diff --git a/private/ntos/ke/i386/biosa.asm b/private/ntos/ke/i386/biosa.asm
new file mode 100644
index 000000000..39116271c
--- /dev/null
+++ b/private/ntos/ke/i386/biosa.asm
@@ -0,0 +1,273 @@
+ TITLE "Call Bios support"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; biosa.asm
+;
+; Abstract:
+;
+; This module implements the support routines for executing int bios
+; call in v86 mode.
+;
+; Author:
+;
+; Shie-Lint Tzong (shielint) Sept 10, 1992
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+.386p
+
+include ks386.inc
+include callconv.inc ; calling convention macros
+include i386\kimacro.inc
+
+VdmStartExecution EQU 0
+V86_STACK_POINTER equ 11ffeh ; see BIOSC.C
+
+ EXTRNP _NtVdmControl,2
+ extrn _KiExceptionExit:PROC
+
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ PAGE
+ SUBTTL "Switch to V86 mode"
+;++
+;
+; VOID
+; Ki386SetupAndExitToV86Code (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This function sets up the return trap frame, switches the stack, and
+; calls the VdmStartExecution routine to copy the vdm context into the
+; base trap frame, then causes the system to execute in v86 mode by
+; doing a KiExceptionExit.
+;
+; Arguments:
+;
+; ExecutionAddress - Supplies a pointer to the fake TEB which is switched
+; to while the v86 int call executes (see Ke386CallBios in biosc.c).
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _Ki386SetupAndExitToV86Code,1
+
+NewTEB equ [ecx+32] ; location of the parameter based on
+ ; the ecx stack pointer.
+KsaeInitialStack equ [ecx]
+OriginalThTeb equ [ecx+4]
+OriginalPcTeb equ [ecx+8]
+
+;
+; Allocate TRAP FRAME at the bottom of the stack.
+;
+
+ push ebp
+ push ebx
+ push esi
+ push edi
+ sub esp, 12 ; 12 bytes for local variable
+ mov ecx, esp ; (ecx) = saved esp
+
+ sub esp, KTRAP_FRAME_LENGTH + NPX_FRAME_LENGTH
+ ; (esp)-> new trap frame
+ mov eax, esp ; (eax)->New base trap frame
+
+;
+; Initialize newly allocated trap frame to caller's nonvolatile context.
+; Note that it is very important that the trap frame we are going to create
+; is a USER mode frame. The system expects the top trap frame for user
+; mode thread is a user mode frame. (Get/SetContext enforce the rule.)
+;
+; (eax)-> Base of trap frame.
+;
+
+ mov dword ptr [eax].TsSegCs, KGDT_R0_CODE OR RPL_MASK
+ ; an invalid cs to trap it back to kernel
+ mov dword ptr [eax].TsSegEs, 0
+ mov dword ptr [eax].TsSegDs, 0
+ mov dword ptr [eax].TsSegFs, 0
+ mov dword ptr [eax].TsSegGs, 0
+ mov dword ptr [eax].TsErrCode, 0
+ mov ebx, fs:PcSelfPcr ; (ebx)->Pcr
+ mov edx, [ebx].PcInitialStack
+ mov KsaeInitialStack, edx ; (edx)->Pcr InitialStack
+
+ mov edi, [ebx]+PcPrcbData+PbCurrentThread ; (edi)->CurrentThread
+ mov edx, [edi].ThTeb
+ mov OriginalThTeb, edx
+
+ mov edx, fs:[PcTeb]
+ mov OriginalPcTeb, edx
+
+ mov edi, offset Ki386BiosCallReturnAddress
+ mov [eax].TsEsi, ecx ; Saved esp
+ mov [eax].TsEip, edi ; set up return address
+ pushfd
+ pop edi
+ and edi, 60dd7h
+ or edi, 200h ; sanitize EFLAGS
+ mov dword ptr [eax].TsHardwareSegSs, KGDT_R3_DATA OR RPL_MASK
+ mov dword ptr [eax].TsHardwareEsp, V86_STACK_POINTER
+ mov [eax].TsEflags, edi
+ mov [eax].TsExceptionList, EXCEPTION_CHAIN_END
+ mov [eax].TsPreviousPreviousMode, 0ffffffffh ; No previous mode
+if DBG
+ mov [eax].TsDbgArgMark, 0BADB0D00h ; set trap frame mark
+endif
+
+;
+; Initialize NpxState of NPX save area to zero value
+;
+
+ add eax, KTRAP_FRAME_LENGTH
+ mov dword ptr [eax]+FpCr0NpxState, 0
+
+;
+; Disable interrupt and change the stack pointer to make the new
+; trap frame be the current thread's base trap frame.
+;
+; (eax)->Npx save area
+;
+
+ mov edi, [ebx]+PcPrcbData+PbCurrentThread ; (edi)->CurrentThread
+ cli
+
+;
+; Set up various stack pointers
+;
+; Low | |
+; |-----------| <- New esp
+; | New Base |
+; |Trap Frame |
+; |-----------| <- Tss.Esp0
+; |V86 segs |
+; |-----------| <- Pcr.InitialStack
+; |Npx Area |
+; |-----------| <- Old Esp = Thread.InitialStack
+; | |
+; High | |
+;
+
+ mov [ebx].PcInitialStack, eax
+ mov esi,[ebx]+PcTss ; (esi)->TSS
+ sub eax,TsV86Gs - TsHardwareSegSs ; bias for missing fields
+ mov [ebx].PcExceptionList, EXCEPTION_CHAIN_END
+ mov [esi]+TssEsp0,eax
+ add eax, NPX_FRAME_LENGTH + (TsV86Gs - TsHardwareSegSs)
+ mov [edi].ThInitialStack, eax
+
+;
+; Set up the pointers to the fake TEB so we can execute the int10
+; call
+;
+ mov eax, NewTeb
+ mov fs:[PcTeb], eax
+ mov [edi].ThTeb, eax
+
+ mov ebx, PCR[PcGdt]
+ mov [ebx]+(KGDT_R3_TEB+KgdtBaseLow), ax
+ shr eax, 16
+ mov [ebx]+(KGDT_R3_TEB+KgdtBaseMid), al
+ mov [ebx]+(KGDT_R3_TEB+KgdtBaseHi), ah
+
+ sti
+
+; Now call VdmControl to save return 32bit frame and put vdm context
+; to new base trap frame
+
+ stdCall _NtVdmControl, <VdmStartExecution, 0>
+
+if 0
+;
+; Now call _VdmpStartExecution to save return 32bit frame and put vdm context
+; to new base trap frame
+;
+
+ mov eax, ExecAddr
+ stdCall _VdmpStartExecution, <eax>
+endif
+
+;
+; Call KiExceptionExit to 'exit' to v86 code.
+;
+
+ mov ebp, esp ; (ebp)->Exit trap frame
+ jmp _KiExceptionExit ; go execute int 10
+
+ public Ki386BiosCallReturnAddress
+Ki386BiosCallReturnAddress:
+
+;
+; After ROM BIOS int completes, the bop instruction gets executed.
+; This results in a trap to kernel mode bop handler where the
+; 16 bit Vdm context will be saved to VdmTib->VdmContext, and
+; the faked 32 bit user mode context (i.e. the one we created earlier)
+; will be restored. Since the faked user mode context does NOT have a valid
+; iret address, the 'iret' instruction of the EXIT_ALL will be trapped to
+; our GP fault handler which recognizes this and transfers control back to
+; here.
+;
+; when we come back here, all the segment registers are set up properly
+; and esp is restored. Interrupts are disabled.
+;
+
+;
+; restore all the pointers.
+;
+
+ mov eax, fs:PcSelfPcr ; (eax)->Pcr
+ pop edx ; (edx) = Pcr InitialStack
+ mov [eax].PcInitialStack, edx ; Restore Pcr InitialStack
+ mov ecx, [eax]+PcPrcbData+PbCurrentThread ; (ecx)->CurrentThread
+ add edx, NPX_FRAME_LENGTH
+ mov [ecx].ThInitialStack, edx ; Restore Thread.InitialStack
+
+ mov eax,[eax]+PcTss ; (eax)->TSS
+ sub edx, (TsV86Gs - TsHardwareSegSs) + NPX_FRAME_LENGTH
+ mov [eax]+TssEsp0,edx
+
+;
+; restore pointers to the original TEB
+;
+ pop edx ; (edx) = OriginalThTeb
+ mov [ecx].ThTeb, edx
+ pop edx ; (edx) = OriginalPcTeb
+ mov fs:[PcTeb], edx
+
+ mov ebx, PCR[PcGdt]
+ mov [ebx]+(KGDT_R3_TEB+KgdtBaseLow), dx
+ shr edx, 16
+ mov [ebx]+(KGDT_R3_TEB+KgdtBaseMid), dl
+ mov [ebx]+(KGDT_R3_TEB+KgdtBaseHi), dh
+
+
+ sti
+
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ stdRET _Ki386SetupAndExitToV86Code
+
+stdENDP _Ki386SetupAndExitToV86Code
+
+_TEXT ends
+
+ end
diff --git a/private/ntos/ke/i386/biosc.c b/private/ntos/ke/i386/biosc.c
new file mode 100644
index 000000000..51434f220
--- /dev/null
+++ b/private/ntos/ke/i386/biosc.c
@@ -0,0 +1,269 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ biosc.c
+
+Abstract:
+
+ This module implements ROM BIOS support C routines for i386 NT.
+
+Author:
+
+ Shie-Lin Tzong (shielint) 10-Sept-1992
+
+Environment:
+
+ Kernel mode.
+
+
+Revision History:
+
+--*/
+#include "ki.h"
+#pragma hdrstop
+#include "vdmntos.h"
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,Ke386CallBios)
+#endif
+
+
+//
+// Never change these equates without checking biosa.asm
+//
+
+#define V86_CODE_ADDRESS 0x10000
+#define INT_OPCODE 0xcd
+#define V86_BOP_OPCODE 0xfec4c4
+#define V86_STACK_POINTER 0x1ffe
+#define IOPM_OFFSET FIELD_OFFSET(KTSS, IoMaps[0].IoMap)
+#define VDM_TIB_ADDRESS 0x12000
+#define INT_10_TEB 0x13000
+
+//
+// External References
+//
+
+PVOID Ki386IopmSaveArea;
+BOOLEAN BiosInitialized = FALSE;
+VOID
+Ki386SetupAndExitToV86Code (
+ PVOID ExecutionAddress
+ );
+
+
+NTSTATUS
+Ke386CallBios (
+ IN ULONG BiosCommand,
+ IN OUT PCONTEXT BiosArguments
+ )
+
+/*++
+
+Routine Description:
+
+ This function invokes specified ROM BIOS code by executing
+ "INT BiosCommand." Before executing the BIOS code, this function
+ will setup VDM context, change stack pointer ...etc. If for some reason
+ the operation fails, a status code will be returned. Otherwise, this
+ function always returns success reguardless of the result of the BIOS
+ call.
+
+ N.B. This implementation relies on the fact that the direct
+ I/O access operations between apps are serialized by win user.
+
+Arguments:
+
+ BiosCommand - specifies which ROM BIOS function to invoke.
+
+ BiosArguments - specifies a pointer to the context which will be used
+ to invoke ROM BIOS.
+
+Return Value:
+
+ NTSTATUS code to specify the failure.
+
+--*/
+
+{
+
+ NTSTATUS Status = STATUS_SUCCESS;
+ PVDM_TIB VdmTib;
+ PUCHAR BaseAddress = (PUCHAR)V86_CODE_ADDRESS;
+ PTEB UserInt10Teb = (PTEB)INT_10_TEB;
+ PKTSS Tss;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+ USHORT OldIopmOffset, OldIoMapBase;
+// KIRQL OldIrql;
+//#if DBG
+// PULONG IdtAddress;
+// ULONG RegionSize;
+// ULONG OldProtect;
+//#endif
+
+ //
+ // Map in ROM BIOS area to perform the int 10 code
+ //
+
+ if (!BiosInitialized) {
+ RtlZeroMemory(UserInt10Teb, sizeof(TEB));
+ }
+
+//#if DBG
+// IdtAddress = 0;
+// RegionSize = 0x1000;
+// ZwProtectVirtualMemory ( NtCurrentProcess(),
+// &IdtAddress,
+// &RegionSize,
+// PAGE_READWRITE,
+// &OldProtect
+// );
+//#endif
+
+ try {
+
+ //
+ // Write "Int BiosCommand; bop" to reserved user space (0x1000).
+ // Later control will transfer to the user space to execute
+ // these two instructions.
+ //
+
+ *BaseAddress++ = INT_OPCODE;
+ *BaseAddress++ = (UCHAR)BiosCommand;
+ *(PULONG)BaseAddress = V86_BOP_OPCODE;
+
+ //
+ // Set up Vdm(v86) context to execute the int BiosCommand
+ // instruction by copying user supplied context to VdmContext
+ // and updating the control registers to predefined values.
+ //
+
+ //
+ // We want to use a constant number for the int10.
+ //
+ //
+ // Create a fake TEB so we can switch the thread to it while we
+ // do an int10
+ //
+
+ UserInt10Teb->Vdm = (PVOID)VDM_TIB_ADDRESS;
+ VdmTib = (PVDM_TIB)VDM_TIB_ADDRESS;
+ RtlZeroMemory(VdmTib, sizeof(VDM_TIB));
+ VdmTib->Size = sizeof(VDM_TIB);
+ *pNtVDMState = 0;
+
+ VdmTib->VdmContext = *BiosArguments;
+ VdmTib->VdmContext.SegCs = (ULONG)BaseAddress >> 4;
+ VdmTib->VdmContext.SegSs = (ULONG)BaseAddress >> 4;
+ VdmTib->VdmContext.Eip = 0;
+ VdmTib->VdmContext.Esp = 2 * PAGE_SIZE - sizeof(ULONG);
+ VdmTib->VdmContext.EFlags |= EFLAGS_V86_MASK | EFLAGS_INTERRUPT_MASK;
+ VdmTib->VdmContext.ContextFlags = CONTEXT_FULL;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+
+ Status = GetExceptionCode();
+ }
+
+ if (Status == STATUS_SUCCESS) {
+
+ //
+ // Since we are going to v86 mode and accessing some I/O ports, we
+ // need to make sure the IopmOffset is set correctly across context
+ // swap and the I/O bit map has all the bits cleared.
+ // N.B. This implementation assumes that there is only one full
+ // screen DOS app and the io access between full screen DOS
+ // app and the server code is serialized by win user. That
+ // means even if we change the IOPM, the full screen DOS app won't
+ // be able to run on this IOPM.
+ // * In other words, IF THERE IS MORE THAN ONE FULL SCREEN
+ // * DOS APP, THIS CODE IS BROKEN. *
+ //
+ // NOTE This code works on the assumption that winuser serializes
+ // direct I/O access operations.
+ //
+
+ //
+ // Call the bios from the processor which booted the machine.
+ //
+
+ Thread = KeGetCurrentThread();
+ KeSetSystemAffinityThread(1);
+ Tss = KeGetPcr()->TSS;
+
+ //
+ // Save away the original IOPM bit map and clear all the IOPM bits
+ // to allow v86 int 10 code to access ALL the io ports.
+ //
+
+ //
+ // Make sure there are at least 2 IOPM maps.
+ //
+
+ ASSERT(KeGetPcr()->GDT[KGDT_TSS / 8].LimitLow >= (0x2000 + IOPM_OFFSET - 1));
+ RtlMoveMemory (Ki386IopmSaveArea,
+ (PVOID)&Tss->IoMaps[0].IoMap,
+ PAGE_SIZE * 2
+ );
+ RtlZeroMemory ((PVOID)&Tss->IoMaps[0].IoMap, PAGE_SIZE * 2);
+
+ Process = Thread->ApcState.Process;
+ OldIopmOffset = Process->IopmOffset;
+ OldIoMapBase = Tss->IoMapBase;
+ Process->IopmOffset = (USHORT)(IOPM_OFFSET); // Set Process IoPmOffset before
+ Tss->IoMapBase = (USHORT)(IOPM_OFFSET); // updating Tss IoMapBase
+
+ //
+ // Call ASM routine to switch stack to exit to v86 mode to
+ // run Int BiosCommand.
+ //
+
+ Ki386SetupAndExitToV86Code(UserInt10Teb);
+
+ //
+ // After we return from v86 mode, the control comes here.
+ //
+ // Restore old IOPM
+ //
+
+ RtlMoveMemory ((PVOID)&Tss->IoMaps[0].IoMap,
+ Ki386IopmSaveArea,
+ PAGE_SIZE * 2
+ );
+
+ Process->IopmOffset = OldIopmOffset;
+ Tss->IoMapBase = OldIoMapBase;
+
+ //
+ // Restore old affinity for current thread.
+ //
+
+ KeRevertToUserAffinityThread();
+
+ //
+ // Copy 16 bit vdm context back to caller.
+ //
+
+ *BiosArguments = VdmTib->VdmContext;
+ BiosArguments->ContextFlags = CONTEXT_FULL;
+
+ }
+
+//#if DBG
+// IdtAddress = 0;
+// RegionSize = 0x1000;
+// ZwProtectVirtualMemory ( NtCurrentProcess(),
+// &IdtAddress,
+// &RegionSize,
+// PAGE_NOACCESS,
+// &OldProtect
+// );
+//#endif
+
+ return(Status);
+}
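+
+//
+// Illustrative usage sketch, not part of the original module: a hypothetical
+// caller asking the ROM BIOS for the current video mode (int 10h, AH = 0Fh).
+// Only the registers the BIOS function actually reads need to be set up.
+//
+
+#if 0
+    CONTEXT BiosArguments;
+    NTSTATUS Status;
+
+    RtlZeroMemory(&BiosArguments, sizeof(CONTEXT));
+    BiosArguments.Eax = 0x0f00;                     // AH = 0Fh, get video mode
+    Status = Ke386CallBios(0x10, &BiosArguments);
+    if (NT_SUCCESS(Status)) {
+
+        //
+        // The low byte of BiosArguments.Eax now holds the mode the BIOS
+        // returned while running in v86 mode.
+        //
+    }
+#endif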
diff --git a/private/ntos/ke/i386/callback.c b/private/ntos/ke/i386/callback.c
new file mode 100644
index 000000000..d796b041f
--- /dev/null
+++ b/private/ntos/ke/i386/callback.c
@@ -0,0 +1,252 @@
+/*++
+
+Copyright (c) 1994 Microsoft Corporation
+
+Module Name:
+
+ callback.c
+
+Abstract:
+
+ This module implements user mode call back services.
+
+Author:
+
+ David N. Cutler (davec) 29-Oct-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+KeUserModeCallback (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ IN PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls out from kernel mode to a user mode function.
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied
+ to the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ Outputbuffer - Supplies a pointer to a variable that receives
+ the address of the output buffer.
+
+ Outputlength - Supplies a pointer to a variable that receives
+ the length of the output buffer.
+
+Return Value:
+
+ If the callout cannot be executed, then an error status is
+ returned. Otherwise, the status returned by the callback function
+ is returned.
+
+--*/
+
+{
+
+ ULONG Length;
+ ULONG NewStack;
+ ULONG OldStack;
+ NTSTATUS Status;
+ PULONG UserStack;
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+ PEXCEPTION_REGISTRATION_RECORD ExceptionList;
+ PTEB Teb;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // Get the user mode stack pointer and attempt to copy input buffer
+ // to the user stack.
+ //
+
+ UserStack = KiGetUserModeStackAddress();
+ OldStack = *UserStack;
+ try {
+
+ //
+ // Compute new user mode stack address, probe for writability,
+ // and copy the input buffer to the user stack.
+ //
+
+ Length = (InputLength + sizeof(CHAR) - 1) & ~(sizeof(CHAR) - 1);
+ NewStack = OldStack - Length;
+ ProbeForWrite((PCHAR)(NewStack - 16), Length + 16, sizeof(CHAR));
+ RtlCopyMemory((PVOID)NewStack, InputBuffer, Length);
+
+ //
+ // Push arguments onto user stack.
+ //
+
+ *(PULONG)(NewStack - 4) = (ULONG)InputLength;
+ *(PULONG)(NewStack - 8) = (ULONG)NewStack;
+ *(PULONG)(NewStack - 12) = ApiNumber;
+ *(PULONG)(NewStack - 16) = 0;
+ NewStack -= 16;
+
+ //
+ // Save the thread's exception list to prevent total disaster
+ // if the thread returns from a callback after registering
+ // another exception handler.
+ //
+ Teb = (PTEB)KeGetCurrentThread()->Teb;
+ ExceptionList = Teb->NtTib.ExceptionList;
+
+ //
+ // Call user mode.
+ //
+
+ *UserStack = NewStack;
+ Status = KiCallUserMode(OutputBuffer, OutputLength);
+ *UserStack = OldStack;
+
+ //
+ // Restore exception list.
+ //
+ Teb->NtTib.ExceptionList = ExceptionList;
+
+ //
+ // If an exception occurs during the probe of the user stack, then
+ // always handle the exception and return the exception code as the
+ // status value.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // When returning from user mode, any drawing done to the GDI TEB
+ // batch must be flushed.
+ //
+
+ if (Teb->GdiBatchCount > 0) {
+
+ //
+ // call GDI batch flush routine
+ //
+
+ KeGdiFlushUserBatch();
+ }
+
+ return Status;
+}
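+
+//
+// Illustrative sketch, not part of the original module: the user stack built
+// above before KiCallUserMode exits to KeUserCallbackDispatcher, where
+// NewStack is the value stored back through the trap frame stack address:
+//
+//      NewStack + 0x00     0 (dummy return address slot)
+//      NewStack + 0x04     ApiNumber
+//      NewStack + 0x08     address of the copied input data
+//      NewStack + 0x0c     InputLength
+//      NewStack + 0x10     copied input data (Length bytes)
+//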
+
+NTSTATUS
+NtW32Call (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ OUT PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls a W32 function.
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied to
+ the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ OutputBuffer - Supplies a pointer to a variable that receives the
+ output buffer address.
+
+ OutputLength - Supplies a pointer to a variable that receives the
+ output buffer length.
+
+Return Value:
+
+ TBS.
+
+--*/
+
+{
+
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+ NTSTATUS Status;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // If the current thread is not a GUI thread, then fail the service
+ // since the thread does not have a large stack.
+ //
+
+ if (KeGetCurrentThread()->Win32Thread == (PVOID)&KeServiceDescriptorTable[0]) {
+ return STATUS_NOT_IMPLEMENTED;
+ }
+
+ //
+ // Probe the output buffer address and length for writeability.
+ //
+
+ try {
+ ProbeForWriteUlong((PULONG)OutputBuffer);
+ ProbeForWriteUlong(OutputLength);
+
+ //
+ // If an exception occurs during the probe of the output buffer or
+ // length, then always handle the exception and return the exception
+ // code as the status value.
+ //
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call out to user mode specifying the input buffer and API number.
+ //
+
+ Status = KeUserModeCallback(ApiNumber,
+ InputBuffer,
+ InputLength,
+ &ValueBuffer,
+ &ValueLength);
+
+ //
+ // If the callout is successful, then return the output buffer address
+ // and length to the caller.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *OutputBuffer = ValueBuffer;
+ *OutputLength = ValueLength;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ }
+ }
+
+ return Status;
+}
diff --git a/private/ntos/ke/i386/callout.asm b/private/ntos/ke/i386/callout.asm
new file mode 100644
index 000000000..69e484f17
--- /dev/null
+++ b/private/ntos/ke/i386/callout.asm
@@ -0,0 +1,432 @@
+ title "Call Out to User Mode"
+;++
+;
+; Copyright (c) 1994 Microsoft Corporation
+;
+; Module Name:
+;
+; callout.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to call out from kernel
+; mode to user mode.
+;
+; Author:
+;
+; David N. Cutler (davec) 1-Nov-1994
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+.386p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include callconv.inc
+ .list
+
+ extrn _KiServiceExit:PROC
+ extrn _KeUserCallbackDispatcher:DWORD
+
+ EXTRNP _MmGrowKernelStack,1
+
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:FLAT, FS:NOTHING, GS:NOTHING
+
+ page ,132
+ subttl "Call User Mode Function"
+;++
+;
+; NTSTATUS
+; KiCallUserMode (
+; IN PVOID *Outputbuffer,
+; IN PULONG OutputLength
+; )
+;
+; Routine Description:
+;
+; This function calls a user mode function from kernel mode.
+;
+; N.B. This function calls out to user mode and the NtCallbackReturn
+; function returns back to the caller of this function. Therefore,
+; the stack layout must be consistent between the two routines.
+;
+; Arguments:
+;
+; OutputBuffer - Supplies a pointer to the variable that receives
+; the address of the output buffer.
+;
+; OutputLength - Supplies a pointer to a variable that receives
+; the length of the output buffer.
+;
+; Return Value:
+;
+; The final status of the call out function is returned as the status
+; of the function.
+;
+; N.B. This function does not return to its caller. A return to the
+; caller is executed when a NtCallbackReturn system service is
+; executed.
+;
+; N.B. This function does return to its caller if a kernel stack
+; expansion is required and the attempted expansion fails.
+;
+;--
+
+;
+; To support the debugger, the callback stack frame is now defined in i386.h.
+; If the stack frame is changed, i386.h must be updated and geni386
+; rebuilt and run, then rebuild this file and ntos\kd.
+;
+; The FPO record below must also be updated to correctly represent
+; the stack frame.
+;
+
+cPublicProc _KiCallUserMode, 2
+
+.FPO (3, 2, 4, 4, 0, 0)
+
+;
+; Save nonvolatile registers.
+;
+
+ push ebp ; save nonvolatile registers
+ push ebx ;
+ push esi ;
+ push edi ;
+
+;
+; Check if sufficient room is available on the kernel stack for another
+; system call.
+;
+
+ mov ebx,PCR[PcPrcbData + PbCurrentThread] ; get current thread address
+ lea eax,[esp]-KERNEL_LARGE_STACK_COMMIT ; compute bottom address
+ cmp eax,[ebx]+ThStackLimit ; check if limit exceeded
+ jae short Kcb10 ; if ae, limit not exceeded
+ stdCall _MmGrowKernelStack,<esp> ; attempt to grow kernel stack
+ or eax, eax ; check for successful completion
+ jne Kcb20 ; if ne, attempt to grow failed
+ mov eax, [ebx].ThStackLimit ; get new stack limit
+ mov PCR[PcStackLimit], eax ; set new stack limit
+
+;
+; Get the address of the current thread and save the previous trap frame
+; and callback stack addresses in the current frame. Also save the new
+; callback stack address in the thread object.
+;
+
+Kcb10: push [ebx].ThCallbackStack ; save callback stack address
+ mov edx,[ebx].ThTrapFrame ; get current trap frame address
+ push edx ; save trap frame address
+ mov esi,[ebx].ThInitialStack ; get initial stack address
+ push esi ; save initial stack address
+ mov [ebx].ThCallbackStack,esp ; save callback stack address
+
+KcbPrologEnd: ; help for the debugger
+
+;
+; Copy the numeric save area from the previous save area to the new save
+; area and establish a new initial kernel stack.
+;
+
+ mov edi,esp ; set new initial stack address
+ sub esp,NPX_FRAME_LENGTH ; compute destination NPX save area
+ sub esi,NPX_FRAME_LENGTH ; compute source NPX save area
+ cli ; disable interrupts
+ mov ecx,[esi].FpControlWord ; copy NPX state to new frame
+ mov [esp].FpControlWord,ecx ;
+ mov ecx,[esi].FpStatusWord ;
+ mov [esp].FpStatusWord,ecx ;
+ mov ecx,[esi].FpTagWord ;
+ mov [esp].FpTagWord,ecx ;
+ mov ecx,[esi].FpCr0NpxState ;
+ mov [esp].FpCr0NpxState,ecx ;
+ mov esi,PCR[PcTss] ; get address of task switch segment
+ mov [ebx].ThInitialStack,edi ; reset initial stack address
+ mov PCR[PcInitialStack],esp ; set stack check base address
+ sub esp,TsV86Gs - TsHardwareSegSs ; bias for missing V86 fields
+ mov [esi].TssEsp0,esp ; set kernel entry stack address
+
+;
+; Construct a trap frame to facilitate the transfer into user mode via
+; the standard system call exit.
+;
+
+ sub esp,TsHardwareSegSs + 4 ; allocate trap frame
+ mov ebp,esp ; set address of trap frame
+ mov ecx,(TsHardwareSegSs - TsSegFs + 4) / 4; set repeat count
+ lea edi,[esp].TsSegFs ; set destination address
+ lea esi,[edx].TsSegFs ; set source address
+ rep movsd ; copy trap information
+
+ test byte ptr [ebx]+ThDebugActive, -1 ; Do we need to restore Debug reg?
+ jnz short Kcb18 ; Yes, go save them.
+
+Kcb15: mov eax,_KeUserCallbackDispatcher ; set address of callback dispatcher
+ mov [esp].TsEip,eax ;
+ mov eax,PCR[PcExceptionList] ; get current exception list
+ mov [esp].TsExceptionList,eax ; set previous exception list
+ mov eax,[edx].TsPreviousPreviousMode ; get previous mode
+ mov [esp].TsPreviousPreviousMode,eax ; set previous mode
+ sti ; enable interrupts
+
+ SET_DEBUG_DATA ; set system call debug data for exit
+
+ jmp _KiServiceExit ; exit through service dispatch
+
+Kcb18:
+ mov ecx,(TsDr7 - TsDr0 + 4) / 4; set repeat count
+ lea edi,[esp].TsDr0 ; set destination address
+ lea esi,[edx].TsDr0 ; set source address
+ rep movsd ; copy trap information
+ jmp short Kcb15
+
+;
+; An attempt to grow the kernel stack failed.
+;
+
+Kcb20: pop edi ; restore nonvolatile registers
+ pop esi ;
+ pop ebx ;
+ pop ebp ;
+ stdRET _KiCallUserMode
+
+stdENDP _KiCallUserMode
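+
+;
+; Illustrative sketch, not part of the original module: assuming the callback
+; frame layout in i386.h matches the pushes at Kcb10 above, the frame recorded
+; in ThCallbackStack, and later unwound by NtCallbackReturn, looks like this
+; (offsets relative to the saved callback stack address):
+;
+;   +0      previous ThInitialStack     <- ThCallbackStack
+;   +4      previous ThTrapFrame
+;   +8      previous ThCallbackStack
+;   +12     saved edi
+;   +16     saved esi
+;   +20     saved ebx
+;   +24     saved ebp
+;   +28     return address into KeUserModeCallback
+;   +32     OutputBuffer argument (CuOutBf)
+;   +36     OutputLength argument (CuOutLn)
+;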
+
+ page ,132
+ subttl "Switch Kernel Stack"
+;++
+;
+; PVOID
+; KeSwitchKernelStack (
+; IN PVOID StackBase,
+; IN PVOID StackLimit
+; )
+;
+; Routine Description:
+;
+; This function switches to the specified large kernel stack.
+;
+; N.B. This function can ONLY be called when there are no variables
+; in the stack that refer to other variables in the stack, i.e.,
+; there are no pointers into the stack.
+;
+; Arguments:
+;
+; StackBase (esp + 4) - Supplies a pointer to the base of the new kernel
+; stack.
+;
+; StackLimit (esp + 8) - Supplies a pointer to the limit of the new kernel
+; stack.
+;
+; Return Value:
+;
+; The old kernel stack is returned as the function value.
+;
+;--
+
+SsStkBs equ 4 ; new kernel stack base address
+SsStkLm equ 8 ; new kernel stack limit address
+
+cPublicProc _KeSwitchKernelStack, 2
+
+;
+; Save the address of the new stack and copy the old stack to the new
+; stack.
+;
+
+ push esi ; save string move registers
+ push edi ;
+ mov edx,PCR[PcPrcbData + PbCurrentThread] ; get current thread address
+ mov edi,[esp]+SsStkBs + 8 ; get new kernel stack base address
+ mov ecx,[edx].ThStackBase ; get current stack base address
+ sub ebp,ecx ; relocate the caller's frame pointer
+ add ebp,edi ;
+ mov eax,[edx].ThTrapFrame ; relocate the current trap frame address
+ sub eax,ecx ;
+ add eax,edi ;
+ mov [edx].ThTrapFrame,eax ;
+ sub ecx,esp ; compute length of copy
+ sub edi,ecx ; set destination address of copy
+ mov esi,esp ; set source address of copy
+ push edi ; save new stack pointer address
+ rep movsb ; copy old stack to new stack
+ pop edi ; restore new stack pointer address
+
+;
+; Switch to the new kernel stack and return the address of the old kernel
+; stack.
+;
+
+ mov eax,[edx].ThStackBase ; get old kernel stack base address
+ mov ecx,[esp]+SsStkBs + 8 ; get new kernel stack base address
+ mov esi,[esp]+SsStkLm + 8 ; get new kernel stack limit address
+ cli ; disable interrupts
+ mov [edx].ThStackBase,ecx ; set new kernel stack base address
+ mov [edx].ThStackLimit,esi ; set new kernel stack limit address
+ mov byte ptr [edx].ThLargeStack, 1 ; set large stack TRUE
+ mov [edx].ThInitialStack,ecx ; set new initial stack address
+ sub ecx,NPX_FRAME_LENGTH ; compute NPX save area address
+ mov PCR[PcInitialStack],ecx ; set stack check base address
+ mov PCR[PcStackLimit],esi ; set stack check limit address
+ mov edx,PCR[PcTss] ; get address of task switch segment
+ sub ecx,TsV86Gs - TsHardwareSegSs ; bias for missing V86 fields
+ mov [edx].TssEsp0,ecx ; set kernel entry stack address
+ mov esp,edi ; set new stack pointer address
+ sti ;
+ pop edi ; restore string move registers
+ pop esi ;
+ stdRET _KeSwitchKernelStack
+
+stdENDP _KeSwitchKernelStack
+
+ page ,132
+ subttl "Get User Mode Stack Address"
+;++
+;
+; PULONG
+; KiGetUserModeStackAddress (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This function returns the address of the user stack address in the
+; current trap frame.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; The address of the user stack address.
+;
+;--
+
+cPublicProc _KiGetUserModeStackAddress, 0
+
+ mov eax,PCR[PcPrcbData + PbCurrentThread] ; get current thread address
+ mov eax,[eax].ThTrapFrame ; get current trap frame address
+ lea eax,[eax].TsHardwareEsp ; get address of stack address
+ stdRET _KiGetUserModeStackAddress
+
+stdENDP _KiGetUserModeStackAddress
+
+ page ,132
+ subttl "Return from User Mode Callback"
+;++
+;
+; NTSTATUS
+; NtCallbackReturn (
+; IN PVOID OutputBuffer OPTIONAL,
+; IN ULONG OutputLength,
+; IN NTSTATUS Status
+; )
+;
+; Routine Description:
+;
+; This function returns from a user mode callout to the kernel
+; mode caller of the user mode callback function.
+;
+; N.B. This function returns to the function that called out to user
+; mode and the KiCallUserMode function calls out to user mode.
+; Therefore, the stack layout must be consistent between the
+; two routines.
+;
+; Arguments:
+;
+; OutputBuffer - Supplies an optional pointer to an output buffer.
+;
+; OutputLength - Supplies the length of the output buffer.
+;
+; Status - Supplies the status value returned to the caller of the
+; callback function.
+;
+; Return Value:
+;
+; If the callback return cannot be executed, then an error status is
+; returned. Otherwise, the specified callback status is returned to
+; the caller of the callback function.
+;
+; N.B. This function returns to the function that called out to user
+; mode if a callout is currently active.
+;
+;--
+
+cPublicProc _NtCallbackReturn, 3
+
+ mov eax,PCR[PcPrcbData + PbCurrentThread] ; get current thread address
+ mov ecx,[eax].ThCallbackStack ; get callback stack address
+ jecxz short CbExit ; if zero, no callback stack present
+
+;
+; Restore the current exception list from the saved exception list in the
+; current trap frame, restore the trap frame and callback stack addresses,
+; store the output buffer address and length, and set the service status.
+;
+
+ mov ebx,[eax].ThTrapFrame ; get current trap frame address
+ mov edx,[ebx].TsExceptionList ; get saved exception list address
+ mov PCR[PcExceptionList],edx ; restore exception list address
+ mov edi,[esp] + 4 ; get output buffer address
+ mov esi,[esp] + 8 ; get output buffer length
+ mov ebp,[esp] + 12 ; get callout service status
+ mov ebx,[ecx].CuOutBf ; get address to store output buffer
+ mov [ebx],edi ; store output buffer address
+ mov ebx,[ecx].CuOutLn ; get address to store output length
+ mov [ebx],esi ; store output buffer length
+ cli ; disable interrupt
+ mov esi,PCR[PcInitialStack] ; get source NPX save area address
+ mov esp,ecx ; trim stack back to callback frame
+ pop ecx ; get previous initial stack address
+ mov [eax].ThInitialStack,ecx ; restore initial stack address
+ sub ecx,NPX_FRAME_LENGTH ; compute destination NPX save area
+ mov edx,[esi].FpControlWord ; copy NPX state to previous frame
+ mov [ecx].FpControlWord,edx ;
+ mov edx,[esi].FpStatusWord ;
+ mov [ecx].FpStatusWord,edx ;
+ mov edx,[esi].FpTagWord ;
+ mov [ecx].FpTagWord,edx ;
+ mov edx,[esi].FpCr0NpxState ;
+ mov [ecx].FpCr0NpxState,edx ;
+ mov edx,PCR[PcTss] ; get address of task switch segment
+ mov PCR[PcInitialStack],ecx ; restore stack check base address
+ sub ecx,TsV86Gs - TsHardwareSegSs ; bias for missing V86 fields
+ mov [edx].TssEsp0,ecx ; restore kernel entry stack address
+ sti ; enable interrupts
+ pop [eax].ThTrapFrame ; restore current trap frame address
+ pop [eax].ThCallbackStack ; restore callback stack address
+ mov eax,ebp ; set callback service status
+
+;
+; Restore nonvolatile registers, clean call parameters from stack, and
+; return to callback caller.
+;
+
+ pop edi ; restore nonvolatile registers
+ pop esi ;
+ pop ebx ;
+ pop ebp ;
+ pop edx ; save return address
+ add esp,8 ; remove parameters from stack
+ jmp edx ; return to callback caller
+
+;
+; No callback is currently active.
+;
+
+CbExit: mov eax,STATUS_NO_CALLBACK_ACTIVE ; set service status
+ stdRET _NtCallbackReturn
+
+stdENDP _NtCallbackReturn
+
+_TEXT ends
+ end
diff --git a/private/ntos/ke/i386/clockint.asm b/private/ntos/ke/i386/clockint.asm
new file mode 100644
index 000000000..4784f4dd9
--- /dev/null
+++ b/private/ntos/ke/i386/clockint.asm
@@ -0,0 +1,881 @@
+ title "Interval Clock Interrupt"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; clockint.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to field and process the
+; interval clock interrupt.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 12-Jan-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+; bryanwi 20-Sep-90
+;
+; Add KiSetProfileInterval, KiStartProfileInterrupt,
+; KiStopProfileInterrupt procedures.
+; KiProfileInterrupt ISR.
+; KiProfileList, KiProfileLock are declared here.
+;
+; shielint 10-Dec-90
+; Add performance counter support.
+; Move system clock to irq8, ie we now use RTC to generate system
+; clock. Performance count and Profile use timer 1 counter 0.
+; The interval of the irq0 interrupt can be changed by
+; KiSetProfileInterval. Performance counter does not care about the
+; interval of the interrupt as long as it knows the rollover count.
+; Note: Currently I implemented 1 performance counter for the whole
+; i386 NT. It works on UP and SystemPro.
+;
+;--
+
+.386p
+ .xlist
+KERNELONLY equ 1
+include ks386.inc
+include callconv.inc ; calling convention macros
+include i386\kimacro.inc
+include mac386.inc
+ .list
+
+ EXTRNP Kei386EoiHelper
+ EXTRNP HalRequestSoftwareInterrupt,1,IMPORT,FASTCALL
+ EXTRNP _HalEndSystemInterrupt,2,IMPORT
+ extrn _KeTimeIncrement:DWORD
+ extrn _KeMaximumIncrement:DWORD
+ extrn _KeTickCount:DWORD
+ extrn _KeTimeAdjustment:DWORD
+ extrn _KiAdjustDpcThreshold:DWORD
+ extrn _KiIdealDpcRate:DWORD
+ extrn _KiMaximumDpcQueueDepth:DWORD
+ extrn _KiTickOffset:DWORD
+ extrn _KiTimerTableListHead:DWORD
+ extrn _KiTimerExpireDpc:DWORD
+ extrn _KiTimeUpdateNotifyRoutine:DWORD
+ extrn _KiProfileListHead:DWORD
+ extrn _KiProfileLock:DWORD
+ extrn _KiProfileInterval:DWORD
+ extrn _KdDebuggerEnabled:BYTE
+ EXTRNP _DbgBreakPoint
+ EXTRNP _DbgBreakPointWithStatus,1
+ EXTRNP _KdPollBreakIn
+ EXTRNP _KiDeliverApc,3
+ extrn _KeI386MachineType:DWORD
+
+ifdef NT_UP
+ LOCK_INC equ inc
+else
+ LOCK_INC equ lock inc
+endif
+
+
+_DATA SEGMENT DWORD PUBLIC 'DATA'
+public ProfileCount
+ProfileCount DD 0
+
+_DATA ends
+
+ page ,132
+ subttl "Update System Time"
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+;++
+;
+; VOID
+; KeUpdateSystemTime (
+; IN KIRQL PreviousIrql,
+; IN KTRAP_FRAME TrapFrame
+; )
+;
+; Routine Description:
+;
+; This routine is entered as the result of an interrupt generated by CLOCK2.
+; Its function is to update the system time and check to determine if a timer
+; has expired.
+;
+; N.B. This routine is executed on a single processor in a multiprocessor
+; system. The remainder of the processors only execute the quantum end
+; and runtime update code.
+;
+; N.B. This routine is not called, but directly jumped to. Thus, there
+; is no return address. It returns via the INTERRUPT_EXIT macro.
+;
+; Arguments:
+;
+; PreviousIrql (esp) - supplies previous irql of system
+;
+; HardwareVector (esp+4) - supplies hardware vector for EndSystemInterrupt
+;
+; TrapFrame (esp+8) - supplies base of trap frame
+;
+; EAX is the TimeIncrement value
+;
+; EBP is a pointer to the trap frame
+;
+;
+; Environment:
+;
+; IRQL = CLOCK_LEVEL
+;
+; Return Value:
+;
+; None.
+;
+;--
+cPublicProc _KeUpdateSystemTime ,0
+
+.FPO (2, 0, 0, 0, 0, 1) ; treat params as locals since function is JMPed to
+
+if DBG
+ cmp byte ptr PCR[PcPrcbData+PbSkipTick], 0
+ jnz kust_skiptick
+endif
+
+;
+; Update interrupt time.
+;
+; N.B. The interrupt time is updated in a very strict manner so that an
+; interlock does not have to be used in an MP system to read time.
+;
+
+ mov ecx,USER_SHARED_DATA ; set address of user shared data
+ mov edi,[ecx].UsInterruptTime+0 ; get low interrupt time
+ mov esi,[ecx].UsInterruptTime+4 ; get high interrupt time
+ add edi,eax ; add time increment
+ adc esi,0 ; propagate carry
+ mov [ecx].UsInterruptTime+8,esi ; store high 2 interrupt time
+ mov [ecx].UsInterruptTime+0,edi ; store low interrupt time
+ mov [ecx].UsInterruptTime+4,esi ; store high 1 interrupt time
+
+ sub _KiTickOffset,eax ; subtract time increment
+ mov eax,_KeTickCount+0 ; get low tick count
+ mov ebx,eax ; copy low tick count
+ jg kust10 ; if greater, not complete tick
+
+;
+; Update system time.
+;
+; N.B. The system time is updated in a very strict manner so that an
+; interlock does not have to be used in an MP system to read time.
+;
+
+ mov ebx,USER_SHARED_DATA ; set address of user shared data
+ mov ecx,[ebx].UsSystemTime+0 ; get low system time
+ mov edx,[ebx].UsSystemTime+4 ; get high system time
+ add ecx,_KeTimeAdjustment ; add time adjustment
+ adc edx,0 ; propagate carry
+ mov [ebx].UsSystemTime+8,edx ; store high 2 system time
+ mov [ebx].UsSystemTime+0,ecx ; store low system time
+ mov [ebx].UsSystemTime+4,edx ; store high 1 system time
+ mov ebx,eax ; restore low tick count
+
+;
+; Update tick count.
+;
+; N.B. The tick count is updated in a very strict manner so that an
+; interlock does not have to be used in an MP system to read count.
+;
+
+ mov ecx,eax ; copy low tick count
+ mov edx,_KeTickCount+4 ; get high tick count
+ add ecx,1 ; increment tick count
+ adc edx,0 ; propagate carry
+ mov _KeTickCount+8,edx ; store high 2 tick count
+ mov _KeTickCount+0,ecx ; store low tick count
+ mov _KeTickCount+4,edx ; store high 1 tick count
+ mov USERDATA[UsTickCountLow], ecx
+
+if 0
+ ; debug code
+ push eax
+ mov edx, esi
+ mov eax, edi ; (eax:edx) = InterruptTime
+ mov ecx, _KeMaximumIncrement
+ div ecx
+ cmp al, bl ; same bucket?
+ je short @f
+ int 3 ; no - stop
+@@:
+ pop eax
+endif
+
+;
+; Check to determine if a timer has expired.
+; (edi:esi) = KiInterruptTime
+; (eax) = KeTickCount.LowPart
+; (ebx) = KeTickCount.LowPart
+;
+
+ and eax,TIMER_TABLE_SIZE-1 ; isolate current hand value
+ lea ecx,_KiTimerTableListHead[eax*8] ; get listhead addrees
+ mov edx,[ecx] ; get first entry address
+ cmp ecx,edx ; check if list is empty
+ je short kust5 ; if equal, list is empty
+ cmp esi,[edx].TiDueTime.TmHighTime-TiTimerListEntry ; compare high
+ jb short kust5 ; if below, timer has not expired
+ ja short kust15 ; if above, timer has expired
+ cmp edi,[edx].TiDueTime.TmLowTime-TiTimerListEntry ; compare low
+ jae short kust15 ; if above or equal, time has expired
+kust5: inc eax ; advance hand value to next entry
+ inc ebx
+
+;
+; Check to determine if a timer has expired.
+; (edi:esi) = KiInterruptTime
+; (eax) = bucket
+; (ebx) = KeTickCount.LowPart
+;
+
+kust10: and eax,TIMER_TABLE_SIZE-1 ; isolate current hand value
+ lea ecx,_KiTimerTableListHead[eax*8] ; get listhead address
+ mov edx,[ecx] ; get first entry address
+ cmp ecx,edx ; check if list is empty
+ je kustxx ; if equal, list is empty
+ cmp esi,[edx].TiDueTime.TmHighTime-TiTimerListEntry ; compare high
+ jb kustxx ; if below, timer has not expired
+ ja short kust15 ; if above, timer has expired
+ cmp edi,[edx].TiDueTime.TmLowTime-TiTimerListEntry ; compare low
+ jb kustxx ; if below, timer has not expired
+
+kust15:
+;
+; Timer has expired, put timer expiration DPC in the current processor's DPC
+; queue.
+;
+; (ebx) = KeTickCount.LowPart
+;
+
+ mov ecx,PCR[PcPrcb] ; get processor control block address
+ lea eax,_KiTimerExpireDpc+DpDpcListEntry ; get list entry address
+ lea edx,[ecx]+PbDpcLock ; get DPC lock address
+ cmp dword ptr [eax]+(DpLock-DpDpcListEntry), 0H ; check if inserted
+ jnz kustxx ; if nz, DPC already inserted
+
+kust20: cli
+ ACQUIRE_SPINLOCK edx, kust60
+
+ inc dword ptr [ecx].PbDpcQueueDepth ; increment DPC queue depth
+ mov dword ptr [eax]+(DpLock-DpDpcListEntry), edx ; set lock address
+ mov [eax]+(DpSystemArgument1-DpDpcListEntry),ebx ; pass tick count
+ add ecx,PbDpcListHead ; compute DPC listhead address
+ mov ebx,[ecx]+LsBlink ; get address of last entry in list
+ mov [ecx]+LsBlink, eax ; set new address of last entry
+ mov [ebx]+LsFlink, eax ; set forward link in old last entry
+ mov [eax]+LsFlink, ecx ; set forward link in new last entry
+ mov [eax]+LsBlink, ebx ; set backward link in new last entry
+
+ RELEASE_SPINLOCK edx
+ sti ; enable interrupt
+
+; request dispatch interrupt
+
+ mov ecx, DISPATCH_LEVEL
+ fstCall HalRequestSoftwareInterrupt
+
+kustxx:
+if DEVL
+ cmp _KdDebuggerEnabled, 0
+ jnz short kust45
+kust30:
+endif
+ cmp _KiTickOffset,0 ; check if full tick
+ jg short Kust40 ; if not less, not a full tick
+
+ mov eax,_KeMaximumIncrement ; get maximum time increment
+ add _KiTickOffset,eax ; add maximum time to residue
+
+;
+; call KeUpdateRunTime to do the actual work
+;
+
+; TOS const PreviousIrql
+ push [esp]
+ call _KeUpdateRunTime@4
+
+;
+; Do interrupt exit processing
+;
+
+ INTERRUPT_EXIT
+
+kust40:
+ inc dword ptr PCR[PcPrcbData+PbInterruptCount]
+ INTERRUPT_EXIT
+
+if DEVL
+kust45:
+ stdCall _KdPollBreakIn
+ or al,al
+ jz short kust30
+ stdCall _DbgBreakPointWithStatus,<DBG_STATUS_CONTROL_C>
+ jmp short kust30
+endif
+
+if DBG
+kust_skiptick:
+ mov byte ptr PCR[PcPrcbData+PbSkipTick], 0
+ jmp short kust40
+endif
+
+;
+; Lock is currently owned; spin until free and then attempt to acquire
+; lock again.
+;
+
+ALIGN 4
+kust60: sti ; spin with interrupts enabled
+ SPIN_ON_SPINLOCK edx, kust20,,DbgMp
+
+stdENDP _KeUpdateSystemTime
+
+
+ page ,132
+ subttl "Update Thread and Process Runtime"
+;++
+;
+; Routine Description:
+;
+; This routine does the actual work to update the runtime of the current
+; thread, update the runtime of the current thread's process, and
+; decrement the current thread's quantum.
+;
+; It also updates the system global counters for user and kernel mode time.
+;
+; It increments InterruptCount so that clock ticks get counted as
+; interrupts.
+;
+; Arguments:
+;
+; esp+4 constant PreviousIrql
+;
+; ebp MUST point to the machine state frame.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KeUpdateRunTime ,1
+cPublicFpo 1, 1
+
+ mov eax, PCR[PcSelfPcr]
+if DBG
+ cmp byte ptr [eax]+PcPrcbData+PbSkipTick, 0
+ jnz kutp_skiptick
+endif
+ push ebx ; we will destroy ebx
+ inc dword ptr [eax]+PcPrcbData+PbInterruptCount
+ mov ebx, [eax]+PcPrcbData+PbCurrentThread ; (ebx)->current thread
+ mov ecx, ThApcState+AsProcess[ebx]
+ ; (ecx)->current thread's process
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jne Kutp20 ; if ne, user mode
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; test if prev mode was kernel
+ jne Kutp20 ; if ne, user mode
+
+;
+; Update the total time spent in kernel mode
+;
+
+ mov edx, 0 ; set kernel mode
+ inc dword ptr [eax].PcPrcbData.PbKernelTime
+ cmp byte ptr [esp+8], DISPATCH_LEVEL
+ jc short Kutp4 ; OldIrql<2, then kernel
+ ja short Kutp3 ; OldIrql>2, then interrupt
+
+ cmp dword ptr PCR[PcPrcbData.PbDpcRoutineActive], 0
+ jz short Kutp4 ; Executing Dpc?, no then thread time
+
+ inc dword ptr [eax].PcPrcbData.PbDpcTime
+ jmp Kutp51
+
+ALIGN 4
+Kutp3:
+ inc dword ptr [eax].PcPrcbData.PbInterruptTime
+ jmp Kutp51
+
+ALIGN 4
+Kutp4:
+
+;
+; Update the time spent in kernel mode for the current thread and the current
+; thread's process.
+;
+ inc dword ptr [ebx]+ThKernelTime
+
+ LOCK_INC dword ptr [ecx]+PrKernelTime
+
+ jmp Kutp50
+
+
+;
+; Update total time spent in user mode
+;
+
+ALIGN 4
+Kutp20:
+ mov edx, 1 ; set user mode
+ inc dword ptr [eax].PcPrcbData.PbUserTime
+;
+; Update the time spent in user mode for the current thread and the current
+; thread's process.
+;
+
+ inc dword ptr [ebx]+ThUserTime
+
+ LOCK_INC dword ptr [ecx]+PrUserTime
+
+;
+; Notify registered callout routine of update time.
+;
+; N.B. The register edx contains the processor mode.
+;
+
+ALIGN 4
+Kutp50: ;
+
+ifndef NT_UP
+
+ cmp _KiTimeUpdateNotifyRoutine, 0 ; check for callout routine
+ je short Kutp51 ; if eq, no callout routine registered
+ mov ecx, [ebx].EtCid.CidUniqueThread ; set current thread unique id
+ call [_KiTimeUpdateNotifyRoutine] ; notify callout routine
+ mov eax, PCR[PcSelfPcr] ; restore PCR address
+
+endif
+
+;
+; Update the DPC request rate which is computed as the average between
+; the previous rate and the current rate.
+;
+
+ALIGN 4
+Kutp51: mov ecx, [eax].PcPrcbData.PbDpcCount ; get current DPC count
+ mov edx, [eax].PcPrcbData.PbDpcLastCount ; get last DPC count
+ mov [eax].PcPrcbData.PbDpcLastCount, ecx ; set last DPC count
+ sub ecx, edx ; compute count during interval
+ add ecx, [eax].PcPrcbData.PbDpcRequestRate ; compute sum
+ shr ecx, 1 ; average current and last
+ mov [eax].PcPrcbData.PbDpcRequestRate, ecx ; set new DPC request rate
+
+;
+; If the current DPC queue depth is not zero, a DPC routine is not active,
+; and a DPC interrupt has not been requested, then request a dispatch
+; interrupt, decrement the maximum DPC queue depth, and reset the threshold
+; counter if appropriate.
+;
+
+ cmp dword ptr [eax].PcPrcbData.PbDpcQueueDepth, 0 ; check queue depth
+ je short Kutp53 ; if eq, DPC queue depth is zero
+ cmp dword ptr [eax].PcPrcbData.PbDpcRoutineActive, 0 ; check if DPC active
+ jne short Kutp53 ; if ne, DPC routine active
+ cmp dword ptr [eax].PcPrcbData.PbDpcInterruptRequested, 0 ; check if interrupt
+ jne short Kutp53 ; if ne, DPC interrupt requested
+ mov ecx, DISPATCH_LEVEL ; request a dispatch interrupt
+ fstCall HalRequestSoftwareInterrupt ;
+ mov eax, PCR[PcSelfPcr] ; restore address of current PCR
+ mov ecx, [eax].PcPrcbData.PbDpcRequestRate ; get DPC request rate
+ mov edx, _KiAdjustDpcThreshold ; reset initial threshold counter
+ mov [eax].PcPrcbData.PbAdjustDpcThreshold, edx ;
+ cmp ecx, _KiIdealDpcRate ; test if current rate less than ideal
+ jge short Kutp55 ; if ge, rate greater or equal ideal
+ cmp [eax].PcPrcbData.PbMaximumDpcQueueDepth, 1 ; check if depth one
+ je short Kutp55 ; if eq, maximum depth is one
+ dec dword ptr [eax].PcPrcbData.PbMaximumDpcQueueDepth ; decrement depth
+ jmp short Kutp55 ;
+
+;
+; The DPC queue is empty or a DPC routine is active or a DPC interrupt
+; has been requested. Count down the adjustment threshold and if the
+; count reaches zero, then increment the maximum DPC queue depth, but
+; not above the initial value, and reset the adjustment threshold value.
+;
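+; Roughly, in C (an illustrative sketch only):
+;
+;   if (--Prcb->AdjustDpcThreshold == 0) {
+;       Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+;       if (Prcb->MaximumDpcQueueDepth != KiMaximumDpcQueueDepth) {
+;           Prcb->MaximumDpcQueueDepth += 1;
+;       }
+;   }
+;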
+
+Kutp53: dec dword ptr [eax].PcPrcbData.PbAdjustDpcThreshold ; decrement threshold
+ jnz short Kutp55 ; if nz, threshold not zero
+ mov ecx, _KiAdjustDpcThreshold ; reset initial threshold counter
+ mov [eax].PcPrcbData.PbAdjustDpcThreshold, ecx ;
+ mov ecx, _KiMaximumDpcQueueDepth ; get maximum DPC queue depth
+ cmp ecx, [eax].PcPrcbData.PbMaximumDpcQueueDepth ; check depth
+ je short Kutp55 ; if eq, already at maximum depth
+ inc dword ptr [eax].PcPrcbData.PbMaximumDpcQueueDepth ; increment maximum depth
+
+;
+; Decrement current thread quantum and check to determine if a quantum end
+; has occurred.
+;
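+; In effect (an illustrative C sketch only; CLOCK_QUANTUM_DECREMENT and the
+; field names are those used below):
+;
+;   Thread->Quantum -= CLOCK_QUANTUM_DECREMENT;
+;   if ((Thread->Quantum <= 0) && (Thread != Prcb->IdleThread)) {
+;       Prcb->QuantumEnd = TRUE;
+;       HalRequestSoftwareInterrupt(DISPATCH_LEVEL);
+;   }
+;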
+
+ALIGN 4
+Kutp55: sub byte ptr [ebx]+ThQuantum, CLOCK_QUANTUM_DECREMENT ; decrement quantum
+ jg Kutp75 ; if > 0, time remaining on quantum
+
+;
+; Set quantum end flag and initiate a dispatcher interrupt on the current
+; processor.
+;
+
+ cmp ebx,[eax].PcPrcbData.PbIdleThread ; check if idle thread
+ jz Kutp75 ; if z, then idle thread
+ mov [eax].PcPrcbData.PbQuantumEnd, esp ; set quantum end indicator
+ mov ecx, DISPATCH_LEVEL ; request dispatch interrupt
+ fstCall HalRequestSoftwareInterrupt ;
+Kutp75: ;
+ pop ebx ;
+ stdRET _KeUpdateRunTime ;
+
+if DBG
+kutp_skiptick:
+ mov byte ptr [eax]+PcPrcbData+PbSkipTick, 0
+ stdRET _KeUpdateRunTime
+endif
+
+stdENDP _KeUpdateRunTime
+
+
+;++
+;
+; PROFILING SUPPORT
+;
+;--
+
+
+;++
+;
+; VOID
+; KeProfileInterrupt (
+; IN PKTRAP_FRAME TrapFrame,
+; )
+;
+; Routine Description:
+;
+; This procedure is the ISR for the profile sampling interrupt,
+; which for x86 machines is driven off the 8254 timer1 channel 0.
+;
+; The procedure scans the list of profile objects, looking for those
+; which match the address space and return program counter captured
+; at entry. For each object that does match, the bucket in its
+; profile buffer that the PC falls into is computed, and the counter
+; for that bucket is incremented.
+;
+; N.B. This routine is executed on all processors in a multiprocessor
+; system.
+;
+; Arguments:
+;
+; Return Address (esp)
+;
+; TrapFrame (esp+4) - supplies pointer to profile trap frame
+;
+; Environment:
+;
+; IRQL = KiProfileIrql
+;
+;
+; Return Value:
+;
+; None.
+;
+; WARNING: Uses ALL registers
+;
+;--
+
+cPublicProc _KeProfileInterrupt ,1
+;
+; rearrange arguments to pass a source of 0 to KeProfileInterruptWithSource
+;
+ pop eax ; return code in eax
+ pop ebx ; trap frame in ebx
+ push 0 ; push source of 0 (ProfileTime)
+ push ebx ; push trap frame
+ push eax ; push return address
+ jmp short _KeProfileInterruptWithSource@8
+stdENDP _KeProfileInterrupt
+
+;++
+;
+; VOID
+; KeProfileInterruptWithSource (
+; IN PKTRAP_FRAME TrapFrame,
+; IN KPROFILE_SOURCE ProfileSource
+; )
+;
+; Routine Description:
+;
+; This procedure is the ISR for the multiple-source profile interrupt.
+;
+; Since no x86 HAL currently implements any source other than the
+; clock interrupt, KeProfileInterrupt is just a stub that forwards to
+; this routine with a source of ProfileTime.
+;
+; Arguments:
+;
+; Return Address (esp)
+;
+; TrapFrame (esp+4) - supplies pointer to profile trap frame
+;
+; ProfileSource (esp+8) - supplies source of profile interrupt
+;
+; Environment:
+;
+; IRQL = KiProfileIrql
+;
+;
+; Return Value:
+;
+; None.
+;
+; WARNING: Uses ALL registers
+;
+;--
+cPublicProc _KeProfileInterruptWithSource,2
+
+kipieip equ <dword ptr [ebp+TsEip]>
+kipsegcs equ <word ptr [ebp+TsSegCs]>
+kipeflags equ <dword ptr [ebp+TsEFlags]>
+
+ mov ebp, dword ptr [esp+4] ; (ebp)-> trap frame
+ inc dword ptr PCR[PcPrcbData+PbInterruptCount]
+
+ifndef NT_UP
+ lea eax,_KiProfileLock
+kipi05: ACQUIRE_SPINLOCK eax,kipi96
+endif
+
+;
+; Update profile data
+;
+; NOTE:
+; System and Process update loops are duplicates, to avoid overhead
+; of call instruction in what could be very high freq. interrupt.
+; be sure to update both loops for changes.
+;
+; NOTE:
+; The process loop contains code to update segment profile objects.
+; This code is not present in the system loop, because we do not
+; allow attachment of profile objects for non-flat segments on a
+; system wide basis.
+;
+; NOTE:
+; Profiling in V86 mode is handled by converting the CS:IP value to
+; a linear address (CS<<4 + IP)
+;
+
+ inc ProfileCount ; total number of hits
+
+;
+; Update system profile entries
+;
+
+ mov ebx, kipieip
+ mov edx,offset FLAT:_KiProfileListHead
+ mov esi,[edx].LsFlink ; (esi) -> profile object
+ifndef NT_UP
+ mov edi, PCR[PcSetMember] ; (edi) = current processor
+endif
+ mov ecx, [esp+8] ; (cx) = profile source
+ cmp esi,edx
+ je kipi30 ; end of system list, go do process
+
+;
+; (ebx) = sample program counter
+; (esi) -> profile object
+;
+
+ALIGN 4
+kipi10: cmp ebx,[esi+PfRangeBase-PfProfileListEntry] ; >= base?
+ jb kipi20 ; no, skip entry
+ cmp ebx,[esi+PfRangeLimit-PfProfileListEntry] ; < limit?
+ jae kipi20 ; no, skip entry
+ cmp cx,word ptr [esi+PfSource-PfProfileListEntry] ; == source?
+ jne kipi20 ; no, skip entry
+ifndef NT_UP
+ test edi,[esi+PfAffinity-PfProfileListEntry] ; affinity match?
+ jz kipi20 ; no, skip entry
+endif
+
+;
+; RangeBase <= program counter < RangeLimit, we have a hit
+;
+
+ sub ebx,[esi+PfRangeBase-PfProfileListEntry] ; (ebx) = offset in profile range
+ mov cl,[esi+PfBucketShift-PfProfileListEntry]
+ shr ebx,cl
+ and ebx,NOT 3 ; (ebx) = offset of counter for bucket
+ mov edi,[esi+PfBuffer-PfProfileListEntry] ; (edi) -> buffer
+ inc dword ptr [edi+ebx] ; record hit
+ mov ebx, kipieip ; (ebx) = sample pc
+ mov ecx, [esp+8] ; (cx) = profile source
+ifndef NT_UP
+ mov edi, PCR[PcSetMember] ; (edi) = current processor
+endif
+
+
+;
+; Go to next entry
+;
+
+ALIGN 4
+kipi20: mov esi,[esi].LsFlink ; (esi) -> profile object
+ cmp esi,edx
+ jne kipi10 ; not end of list, repeat
+
+
+;
+; Update process profile entries
+; (ebx) = sample program counter
+;
+
+ALIGN 4
+kipi30: mov eax,PCR[PcPrcbData+PbCurrentThread] ; (eax)-> current thread
+ mov eax,ThApcState+AsProcess[eax] ; (eax)-> current process
+ lea edx,[eax]+PrProfileListHead ; (edx)-> listhead
+ mov esi,[edx].LsFlink ; (esi)-> profile object
+ cmp esi,edx
+ je kipi60 ; process list end, return
+
+;
+; Check for 16 bitness
+;
+ movzx ecx,word ptr kipsegcs
+ test kipeflags,EFLAGS_V86_MASK
+ jnz kipi100 ; convert cs:ip to linear
+
+ cmp cx,KGDT_R0_CODE
+ je short kipi40
+
+ cmp cx,KGDT_R3_CODE or RPL_MASK
+ jne kipi110
+
+;
+; (ebx) = sample program counter
+; (esi) -> profile object
+;
+
+ALIGN 4
+kipi40: cmp [esi+PfSegment-PfProfileListEntry],word ptr 0 ; flat object?
+ jne kipi50 ; no, skip entry
+ cmp ebx,[esi+PfRangeBase-PfProfileListEntry] ; >= base?
+ jb kipi50 ; no, skip entry
+ cmp ebx,[esi+PfRangeLimit-PfProfileListEntry] ; < limit?
+ jae kipi50 ; no, skip entry
+ mov ecx, [esp+8] ; (cx) = profile source
+ cmp cx,word ptr [esi+PfSource-PfProfileListEntry] ; == source?
+ jne kipi50 ; no, skip entry
+ifndef NT_UP
+ mov edi,PCR[PcSetMember] ; (edi) = set member
+ test edi,[esi+PfAffinity-PfProfileListEntry] ; affinity match?
+ jz kipi50 ; no, skip entry
+endif
+
+
+;
+; RangeBase <= program counter < RangeLimit, we have a hit
+;
+
+ sub ebx,[esi+PfRangeBase-PfProfileListEntry] ; (ebx) = offset in profile range
+ mov cl,[esi+PfBucketShift-PfProfileListEntry]
+ shr ebx,cl
+ and ebx,NOT 3 ; (ebx) = offset of counter for bucket
+ mov edi,[esi+PfBuffer-PfProfileListEntry] ; (edi) -> buffer
+ inc dword ptr [edi+ebx] ; record hit
+ mov ebx, kipieip ; (ebx) = sample pc
+ mov ecx, [esp+8] ; (cx) = profile source
+
+;
+; Go to next entry
+;
+
+ALIGN 4
+kipi50: mov esi,[esi].LsFlink ; (esi) -> profile object
+ cmp esi,edx
+ jne kipi40 ; not end of list, repeat
+
+ALIGN 4
+kipi60:
+
+ifndef NT_UP
+ lea eax,_KiProfileLock
+ RELEASE_SPINLOCK eax
+endif
+ stdRet _KeProfileInterruptWithSource
+
+ifndef NT_UP
+ALIGN 4
+kipi96: SPIN_ON_SPINLOCK eax,kipi05,,DbgMp
+endif
+
+ALIGN 4
+kipi100:
+ shl ecx,4 ; segment -> paragraph
+ add ebx,ecx ; paragraph offset -> linear
+ jmp kipi40
+
+;
+; Update segment profile objects
+;
+
+;
+; (ebx) = sample program counter
+; (esi) -> profile object
+;
+
+ALIGN 4
+kipi110:
+ cmp [esi+PfSegment-PfProfileListEntry],ecx ; This segment?
+ jne kipi120 ; no, skip entry
+ cmp ebx,[esi+PfRangeBase-PfProfileListEntry] ; >= base?
+ jb kipi120 ; no, skip entry
+ cmp ebx,[esi+PfRangeLimit-PfProfileListEntry] ; < limit?
+ jae kipi120 ; no, skip entry
+ mov ecx, [esp+8] ; (cx) = profile source
+ cmp cx,word ptr [esi+PfSource-PfProfileListEntry] ; == source?
+ jne kipi120 ; no, skip entry
+ifndef NT_UP
+ mov edi,PCR[PcSetMember] ; (edi) = set member
+ test edi,[esi+PfAffinity-PfProfileListEntry] ; affinity match?
+ jz kipi120 ; no, skip entry
+endif
+
+;
+; RangeBase <= program counter < RangeLimit, we have a hit
+;
+
+ sub ebx,[esi+PfRangeBase-PfProfileListEntry] ; (ebx) = offset in profile range
+ mov cl,[esi+PfBucketShift-PfProfileListEntry]
+ shr ebx,cl
+ and ebx,NOT 3 ; (ebx) = offset of counter for bucket
+ mov edi,[esi+PfBuffer-PfProfileListEntry] ; (edi) -> buffer
+ inc dword ptr [edi+ebx] ; record hit
+ mov ebx, kipieip ; (ebx) = sample pc
+ mov cx,kipsegcs ; ecx = sample cs
+
+;
+; Go to next entry
+;
+
+ALIGN 4
+kipi120:
+ mov esi,[esi].LsFlink ; (esi) -> profile object
+ cmp esi,edx
+ jne kipi110 ; not end of list, repeat
+
+ jmp kipi60
+
+stdENDP _KeProfileInterruptWithSource
+_TEXT$00 ends
+ end
diff --git a/private/ntos/ke/i386/cpu.asm b/private/ntos/ke/i386/cpu.asm
new file mode 100644
index 000000000..64b031484
--- /dev/null
+++ b/private/ntos/ke/i386/cpu.asm
@@ -0,0 +1,1037 @@
+ title "Processor type and stepping detection"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; cpu.asm
+;
+; Abstract:
+;
+; This module implements the assembly code necessary to determine
+; cpu type and stepping information.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 28-Oct-1991.
+; Some of the code is extracted from Cruiser (mainly,
+; the code to determine 386 stepping.)
+;
+; Environment:
+;
+; 80x86
+;
+; Revision History:
+;
+;--
+
+ .xlist
+include i386\cpu.inc
+include ks386.inc
+include callconv.inc
+ .list
+
+;
+; constant for i386 32-bit multiplication test
+;
+
+MULTIPLIER equ 00000081h
+MULTIPLICAND equ 0417a000h
+RESULT_HIGH equ 00000002h
+RESULT_LOW equ 0fe7a000h
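+
+;
+; For reference, 0417a000h * 00000081h = 00000002:0fe7a000h (edx:eax), which
+; is what the RESULT_HIGH and RESULT_LOW values above encode.
+;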
+
+;
+; Constants for Floating Point test
+;
+
+REALLONG_LOW equ 00000000
+REALLONG_HIGH equ 3FE00000h
+PSEUDO_DENORMAL_LOW equ 00000000h
+PSEUDO_DENORMAL_MID equ 80000000h
+PSEUDO_DENORMAL_HIGH equ 0000h
+
+.386p
+
+INIT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+
+;++
+;
+; USHORT
+; KiSetProcessorType (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This function determines the type of processor (80486, 80386)
+; and its corresponding stepping. The results are saved in
+; the current processor's PRCB.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Prcb->CpuType
+; 3, 4, 5, ... 3 = 386, 4 = 486, etc..
+;
+; Prcb->CpuStep is encoded as follows:
+; lower byte as stepping #
+; upper byte as stepping letter (0=a, 1=b, 2=c, ...)
+;
+; (ax) = x86h or 0 if unrecognized processor.
+;
+;--
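+
+;
+; For processors with CPUID support, the type and stepping are derived from
+; the CPUID function 1 signature roughly as follows (an illustrative C
+; sketch only, mirroring the bit manipulation in the code below):
+;
+;   Signature = CpuidFunction1Eax;                  // family/model/stepping
+;   CpuType   = (Signature >> 8) & 0x7;             // 4 = 486, 5 = P5, ...
+;   CpuStep   = ((Signature & 0xF0) << 4) |         // model in upper byte
+;               (Signature & 0x0F);                 // stepping in lower byte
+;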
+cPublicProc _KiSetProcessorType,0
+
+ mov byte ptr fs:PcPrcbData.PbCpuID, 0
+
+ push edi
+ push esi
+ push ebx ; Save C registers
+ mov eax, cr0
+ push eax
+ pushfd ; save Cr0 & flags
+
+ pop ebx ; Get flags into eax
+ push ebx ; Save original flags
+
+ mov ecx, ebx
+ xor ecx, EFLAGS_ID ; flip ID bit
+ push ecx
+ popfd ; load it into flags
+ pushfd ; re-save flags
+ pop ecx ; get flags into eax
+ cmp ebx, ecx ; did bit stay flipped?
+ jne short cpu_has_cpuid ; Yes, go use CPUID
+
+cpuid_unsupported:
+ pop ebx ; Get flags into eax
+ push ebx ; Save original flags
+
+ mov ecx, ebx
+ xor ecx, EFLAGS_AC ; flip AC bit
+ push ecx
+ popfd ; load it into flags
+ pushfd ; re-save flags
+ pop ecx ; get flags into eax
+ cmp ebx, ecx ; did bit stay flipped?
+ je short cpu_is_386 ; No, then this is a 386
+
+cpu_is_486:
+ mov byte ptr fs:PcPrcbData.PbCpuType, 4h ; Save CPU Type
+ call Get486Stepping
+ jmp cpu_save_stepping
+
+cpu_is_386:
+ mov byte ptr fs:PcPrcbData.PbCpuType, 3h ; Save CPU Type
+ call Get386Stepping
+ jmp cpu_save_stepping
+
+cpu_has_cpuid:
+ or ebx, EFLAGS_ID
+ push ebx
+ popfd ; Make sure ID bit is set
+
+ mov ecx, fs:PcIdt ; Address of IDT
+ push dword ptr [ecx+30h] ; Save Trap06 handler in case
+ push dword ptr [ecx+34h] ; the CPUID instruction faults
+
+ mov eax, offset CpuIdTrap6Handler
+ mov word ptr [ecx+30h], ax ; Set LowWord
+ shr eax, 16
+ mov word ptr [ecx+36h], ax ; Set HighWord
+
+ mov eax, 0 ; argument to CPUID
+.586p
+ cpuid ; Uses eax, ebx, ecx, edx
+.386p
+
+ mov ecx, fs:PcIdt ; Address of IDT
+ pop dword ptr [ecx+34h] ; restore trap6 handler
+ pop dword ptr [ecx+30h]
+
+ cmp eax, 3 ; check for A step of P5
+ jg short cpu_is_p5a ; A step returned family&step here
+
+ cmp eax, 1 ; make sure level 1 is supported
+ jc short cpuid_unsupported ; no, then punt
+
+ mov eax, 1 ; get the family and stepping
+ db 0fh, 0a2h
+
+ mov ebx, eax
+
+ and eax, 0F0h ; (eax) = Model
+ shl eax, 4
+ mov al, bl
+ and eax, 0F0Fh ; (eax) = Model[15:8] | Step[7:0]
+
+ and ebx, 0700h ; (bh) = CpuType
+
+ mov byte ptr fs:PcPrcbData.PbCpuID, 1 ; Has ID support
+ mov byte ptr fs:PcPrcbData.PbCpuType, bh ; Save CPU Type
+ jmp short cpu_save_stepping
+
+cpuid_trap:
+ mov ecx, fs:PcIdt ; Address of IDT
+ pop dword ptr [ecx+34h] ; restore trap6 handler
+ pop dword ptr [ecx+30h]
+ jmp cpuid_unsupported ; Go get processor information
+
+cpu_is_p5a:
+ mov byte ptr fs:PcPrcbData.PbCpuType, 5h ; CPU Type = P5
+ xor eax, eax
+
+cpu_save_stepping:
+ mov word ptr fs:PcPrcbData.PbCpuStep, ax ; Save CPU Stepping
+ popfd ; Restore flags
+ pop eax
+ mov cr0, eax
+ pop ebx
+ pop esi
+ pop edi
+ stdRET _KiSetProcessorType
+
+stdENDP _KiSetProcessorType
+
+;++
+;
+; BOOLEAN
+; CpuIdTrap6 (
+; VOID
+; )
+;
+; Routine Description:
+;
+; Temporary int 6 handler - assumes the cause of the exception was the
+; attempted CPUID instruction.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; none.
+;
+;--
+
+CpuIdTrap6Handler proc
+
+ mov [esp].IretEip,offset cpuid_trap
+ iretd
+
+CpuIdTrap6Handler endp
+
+
+;++
+;
+; USHORT
+; Get386Stepping (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This function determines the CPU stepping for the i386 CPU type.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; [ax] - Cpu stepping.
+; 0 = A, 1 = B, 2 = C, ...
+;
+;--
+
+ public Get386Stepping
+Get386Stepping proc
+
+ call MultiplyTest ; Perform multiplication test
+ jnc short G3s00 ; if nc, multiply test is ok
+ mov ax, 0
+ ret
+G3s00:
+ call Check386B0 ; Check for B0 stepping
+ jnc short G3s05 ; if nc, it's B1/later
+ mov ax, 100h ; It is B0/earlier stepping
+ ret
+
+G3s05:
+ call Check386D1 ; Check for D1 stepping
+ jc short G3s10 ; if c, it is NOT D1
+ mov ax, 301h ; It is D1/later stepping
+ ret
+
+G3s10:
+ mov ax, 101h ; assume it is B1 stepping
+ ret
+
+Get386Stepping endp
+
+;++
+;
+; USHORT
+; Get486Stepping (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This function determines cpu stepping for i486 CPU type.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; [ax] - Cpu stepping. For example, [ax] = D0h for D0 stepping.
+;
+;--
+
+ public Get486Stepping
+Get486Stepping proc
+
+ call Check486AStepping ; Check for A stepping
+ jnc short G4s00 ; if nc, it is NOT A stepping
+
+ mov ax, 0 ; set to A stepping
+ ret
+
+G4s00: call Check486BStepping ; Check for B stepping
+ jnc short G4s10 ; if nc, it is NOT a B stepping
+
+ mov ax, 100h ; set to B stepping
+ ret
+
+;
+; Before we test for the 486 C/D step, we need to make sure an NPX is
+; present, because the test uses FP instructions to do the detection.
+;
+G4s10:
+ call _KiIsNpxPresent ; Check if cpu has coprocessor support?
+ or ax, ax
+ jz short G4s15 ; it is actually 486sx
+
+ call Check486CStepping ; Check for C stepping
+ jnc short G4s20 ; if nc, it is NOT a C stepping
+G4s15:
+ mov ax, 200h ; set to C stepping
+ ret
+
+G4s20: mov ax, 300h ; Set to D stepping
+ ret
+
+Get486Stepping endp
+
+;++
+;
+; BOOLEAN
+; Check486AStepping (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine checks for 486 A Stepping.
+;
+; It takes advantage of the fact that on the A-step of the i486
+; processor, the ET bit in CR0 could be set or cleared by software,
+; but was not used by the hardware. On the B or C step, the ET bit in CR0
+; is now hardwired to a "1" to force usage of the 386 math coprocessor
+; protocol.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Carry Flag clear if B or later stepping.
+; Carry Flag set if A or earlier stepping.
+;
+;--
+ public Check486AStepping
+Check486AStepping proc near
+.386p
+ mov eax, cr0 ; reset ET bit in cr0
+ and eax, NOT CR0_ET
+ mov cr0, eax
+
+ mov eax, cr0 ; get cr0 back
+ test eax, CR0_ET ; if ET bit still set?
+ jnz short cas10 ; if nz, yes, still set, it's NOT A step
+ stc
+ ret
+
+cas10: clc
+ ret
+Check486AStepping endp
+
+;++
+;
+; BOOLEAN
+; Check486BStepping (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine checks for 486 B Stepping.
+;
+; On the i486 processor, the "mov to/from DR4/5" instructions were
+; aliased to "mov to/from DR6/7" instructions. However, the i486
+; B or earlier steps generate an Invalid opcode exception when DR4/5
+; are used with "mov to/from special register" instruction.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Carry Flag clear if C or later stepping.
+; Carry Flag set if B stepping.
+;
+;--
+ public Check486BStepping
+Check486BStepping proc
+
+ push ebx
+
+ mov ebx, fs:PcIdt ; Address of IDT
+ push dword ptr [ebx+30h]
+ push dword ptr [ebx+34h] ; Save Trap06 handler
+
+ mov eax, offset Temporary486Int6
+ mov word ptr [ebx+30h], ax ; Set LowWord
+ shr eax, 16
+ mov word ptr [ebx+36h], ax ; Set HighWord
+
+c4bs50: db 0fh, 21h, 0e0h ; mov eax, DR4
+ nop
+ nop
+ nop
+ nop
+ nop
+ clc ; it is C step
+ jmp short c4bs70
+c4bs60: stc ; it's B step
+c4bs70: pop dword ptr [ebx+34h] ; restore old int 6 vector
+ pop dword ptr [ebx+30h]
+
+ pop ebx
+ ret
+
+ ret
+
+Check486BStepping endp
+
+;++
+;
+; BOOLEAN
+; Temporary486Int6 (
+; VOID
+; )
+;
+; Routine Description:
+;
+; Temporary int 6 handler - assumes the cause of the exception was the
+; attempted execution of an mov to/from DR4/5 instruction.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; none.
+;
+;--
+
+Temporary486Int6 proc
+
+ mov [esp].IretEIp,offset c4bs60 ; set EIP to stc instruction
+ iretd
+
+Temporary486Int6 endp
+
+;++
+;
+; BOOLEAN
+; Check486CStepping (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine checks for 486 C Stepping.
+;
+; This routine takes advantage of the fact that FSCALE produces
+; the wrong result with a denormal or pseudo-denormal operand on the
+; 486 C and earlier steps.
+;
+; If the value contained in ST(1), second location in the floating
+; point stack, is between 1 and 11, and the value in ST, top of the
+; floating point stack, is either a pseudo-denormal number or a
+; denormal number with the underflow exception unmasked, the FSCALE
+; instruction produces an incorrect result.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Carry Flag clear if D or later stepping.
+; Carry Flag set if C stepping.
+;
+;--
+
+FpControl equ [ebp - 2]
+RealLongSt1 equ [ebp - 10]
+PseudoDenormal equ [ebp - 20]
+FscaleResult equ [ebp - 30]
+
+ public Check486CStepping
+Check486CStepping proc
+
+ push ebp
+ mov ebp, esp
+ sub esp, 30 ; Allocate space for temp real variables
+
+ mov eax, cr0 ; Don't trap while doing math
+ and eax, NOT (CR0_ET+CR0_MP+CR0_TS+CR0_EM)
+ mov cr0, eax
+
+;
+; Initialize the local FP variables to predefined values.
+; RealLongSt1 = 1.0 * (2 ** -1) = 0.5 in normalized double precision FP form
+; PseudoDenormal = an operand format not supported by IEEE.
+; Sign bit = 0
+; Exponent = 000000000000000B
+; Significand = 100000...0B
+; FscaleResult = The result of FSCALE instruction. Depending on 486 step,
+; the value will be different:
+; Under C and earlier steps, 486 returns the original value
+; in ST as the result. The correct returned value should be
+; original significand and an exponent of 0...01.
+;
+
+ mov dword ptr RealLongSt1, REALLONG_LOW
+ mov dword ptr RealLongSt1 + 4, REALLONG_HIGH
+ mov dword ptr PseudoDenormal, PSEUDO_DENORMAL_LOW
+ mov dword ptr PseudoDenormal + 4, PSEUDO_DENORMAL_MID
+ mov word ptr PseudoDenormal + 8, PSEUDO_DENORMAL_HIGH
+
+.387
+ fnstcw FpControl ; Get FP control word
+ fwait
+ or word ptr FpControl, 0FFh ; Mask all the FP exceptions
+ fldcw FpControl ; Set FP control
+
+ fld qword ptr RealLongSt1 ; 0 < ST(1) = RealLongSt1 < 1
+ fld tbyte ptr PseudoDenormal; Denormalized operand. Note, i486
+ ; won't report denormal exception
+ ; on 'FLD' instruction.
+ ; ST(0) = Extended Denormalized operand
+ fscale ; try to trigger 486Cx errata
+ fstp tbyte ptr FscaleResult ; Store ST(0) in FscaleResult
+ cmp word ptr FscaleResult + 8, PSEUDO_DENORMAL_HIGH
+ ; Is Exponent changed?
+ jz short c4ds00 ; if z, no, it is C step
+ clc
+ jmp short c4ds10
+c4ds00: stc
+c4ds10: mov esp, ebp
+ pop ebp
+ ret
+
+Check486CStepping endp
+
+;++
+;
+; BOOLEAN
+; Check386B0 (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine checks for 386 B0 or earlier stepping.
+;
+; It takes advantage of the fact that the bit INSERT and
+; EXTRACT instructions that existed in B0 and earlier versions of the
+; 386 were removed in the B1 stepping. When executed on the B1, INSERT
+; and EXTRACT cause an int 6 (invalid opcode) exception. This routine
+; can therefore discriminate between B1/later 386s and B0/earlier 386s.
+; It is intended to be used in sequence with other checks to determine
+; processor stepping by exercising specific bugs found in specific
+; steppings of the 386.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Carry Flag clear if B1 or later stepping
+; Carry Flag set if B0 or prior
+;
+;--
+
+Check386B0 proc
+
+ push ebx
+
+ mov ebx, fs:PcIdt ; Address of IDT
+ push dword ptr [ebx+30h]
+ push dword ptr [ebx+34h] ; Save Trap06 handler
+
+ mov eax, offset TemporaryInt6
+ mov word ptr [ebx+30h], ax ; Set LowWord
+ shr eax, 16
+ mov word ptr [ebx+36h], ax ; Set HighWord
+
+
+;
+; Attempt execution of Extract Bit String instruction. Execution on
+; B0 or earlier with length (CL) = 0 will return 0 into the destination
+; (CX in this case). Execution on B1 or later will fail either due to
+; taking the invalid opcode trap, or if the opcode is valid, we don't
+; expect CX will be zeroed by any new instruction supported by newer
+; steppings. The dummy int 6 handler will clears the Carry Flag and
+; returns execution to the appropriate label. If the instruction
+; actually executes, CX will *probably* remain unchanged in any new
+; stepping that uses the opcode for something else. The nops are meant
+; to handle newer steppings with an unknown instruction length.
+;
+
+ xor eax,eax
+ mov edx,eax
+ mov ecx,0ff00h ; Extract length (CL) == 0, (CX) != 0
+
+b1c50: db 0fh, 0a6h, 0cah ; xbts cx,dx,ax,cl
+ nop
+ nop
+ nop
+ nop
+ nop
+ stc ; assume B0
+ jecxz short b1c70 ; jmp if B0
+b1c60: clc
+b1c70: pop dword ptr [ebx+34h] ; restore old int 6 vector
+ pop dword ptr [ebx+30h]
+
+ pop ebx
+ ret
+
+Check386B0 endp
+
+;++
+;
+; BOOLEAN
+; TemporaryInt6 (
+; VOID
+; )
+;
+; Routine Description:
+;
+; Temporary int 6 handler - assumes the cause of the exception was the
+; attempted execution of an XBTS instruction.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; none.
+;
+;--
+
+TemporaryInt6 proc
+
+ mov [esp].IretEip,offset b1c60 ; set IP to clc instruction
+ iretd
+
+TemporaryInt6 endp
+
+;++
+;
+; BOOLEAN
+; Check386D1 (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine checks for 386 D1 Stepping.
+;
+; It takes advantage of the fact that on pre-D1 386, if a REPeated
+; MOVS instruction is executed when single-stepping is enabled,
+; a single step trap is taken every TWO moves steps, but should
+; occuu each move step.
+;
+; NOTE: This routine cannot distinguish between a D0 stepping and a D1
+; stepping. If a need arises to make this distinction, this routine
+; will need modification. D0 steppings will be recognized as D1.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Carry Flag clear if D1 or later stepping
+; Carry Flag set if B1 or prior
+;
+;--
+
+Check386D1 proc
+ push ebx
+
+ mov ebx, fs:PcIdt ; Address of IDT
+ push dword ptr [ebx+08h]
+ push dword ptr [ebx+0ch] ; Save Trap01 handler
+
+ mov eax, offset TemporaryInt1
+ mov word ptr [ebx+08h], ax ; Set LowWord
+ shr eax, 16
+ mov word ptr [ebx+0eh], ax ; Set HighWord
+
+;
+; Attempt execution of rep movsb instruction with the Trace Flag set.
+; Execution on B1 or earlier with length (CX) > 1 will trace over two
+; iterations before accepting the trace trap. Execution on D1 or later
+; will accept the trace trap after a single iteration. The dummy int 1
+; handler will return execution to the instruction following the movsb
+; instruction. Examination of (CX) will reveal the stepping.
+;
+
+ sub esp,4 ; make room for target of movsb
+ mov esi, offset TemporaryInt1 ; (ds:esi) -> some present data
+ mov edi,esp
+ mov ecx,2 ; 2 iterations
+ pushfd
+ or dword ptr [esp], EFLAGS_TF
+ popfd ; cause a single step trap
+ rep movsb
+
+d1c60: add esp,4 ; clean off stack
+ pop dword ptr [ebx+0ch] ; restore old int 1 vector
+ pop dword ptr [ebx+08h]
+ stc ; assume B1
+ jecxz short d1cx ; jmp if <= B1
+ clc ; else clear carry to indicate >= D1
+d1cx:
+ pop ebx
+ ret
+
+Check386D1 endp
+
+;++
+;
+; BOOLEAN
+; TemporaryInt1 (
+; VOID
+; )
+;
+; Routine Description:
+;
+; Temporary int 1 handler - assumes the cause of the exception was
+; trace trap at the above rep movs instruction.
+;
+; Arguments:
+;
+; (esp)->eip of trapped instruction
+; cs of trapped instruction
+; eflags of trapped instruction
+;
+;--
+
+TemporaryInt1 proc
+
+ and [esp].IretEFlags,not EFLAGS_TF ; clear caller's Trace Flag
+ mov [esp].IretEip,offset d1c60 ; set IP to next instruction
+ iretd
+
+TemporaryInt1 endp
+
+;++
+;
+; BOOLEAN
+; MultiplyTest (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine checks the 386 32-bit multiply instruction.
+; The reason for this check is that some early i386 parts fail to
+; perform this instruction correctly.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Carry Flag clear on success
+; Carry Flag set on failure
+;
+;--
+;
+
+MultiplyTest proc
+
+ xor cx,cx ; 64K times is a nice round number
+mlt00: push cx
+ call Multiply ; does this chip's multiply work?
+ pop cx
+ jc short mltx ; if c, No, exit
+ loop mlt00 ; if nc, Yes, loop to try again
+ clc
+mltx:
+ ret
+
+MultiplyTest endp
+
+;++
+;
+; BOOLEAN
+; Multiply (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine performs 32-bit multiplication test which is known to
+; fail on bad 386s.
+;
+; Note, the supplied pattern values must be used for consistent results.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Carry Flag clear on success.
+; Carry Flag set on failure.
+;
+;--
+
+Multiply proc
+
+ mov ecx, MULTIPLIER
+ mov eax, MULTIPLICAND
+ mul ecx
+
+ cmp edx, RESULT_HIGH ; Q: high order answer OK ?
+ stc ; assume failure
+ jnz short mlpx ; N: exit with error
+
+ cmp eax, RESULT_LOW ; Q: low order answer OK ?
+ stc ; assume failure
+ jnz short mlpx ; N: exit with error
+
+ clc ; indicate success
+mlpx:
+ ret
+
+Multiply endp
+
+;++
+;
+; BOOLEAN
+; KiIsNpxPresent(
+; VOID
+; );
+;
+; Routine Description:
+;
+; This routine determines if there is any Numeric coprocessor
+; present.
+;
+; Note that we do NOT determine its type (287, 387).
+; This code is extracted from an Intel manual.
+;
+; Arguments:
+;
+; None.
+;
+; Return:
+;
+; TRUE - If NPX is present. Else a value of FALSE is returned.
+; Sets CR0 NPX bits accordingly.
+;
+;--
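+
+;
+; The detection below is the usual fninit/fnstsw probe; roughly, in C
+; (an illustrative sketch only):
+;
+;   USHORT Status = 0x5A5A;               // preload a nonzero pattern
+;   fninit;                               // initialize any NPX present
+;   fnstsw Status;                        // store status (no-wait form)
+;   NpxPresent = ((Status & 0xFF) == 0);  // a real NPX clears the status
+;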
+
+cPublicProc _KiIsNpxPresent,0
+
+ push ebp ; Save caller's bp
+ mov eax, cr0
+ and eax, NOT (CR0_ET+CR0_MP+CR0_TS+CR0_EM)
+ mov cr0, eax
+ xor edx, edx
+.287
+ fninit ; Initialize NPX
+ mov ecx, 5A5A5A5Ah ; Put non-zero value
+ push ecx ; into the memory we are going to use
+ mov ebp, esp
+ fnstsw word ptr [ebp] ; Retrieve status - must use non-wait
+ cmp byte ptr [ebp], 0 ; All bits cleared by fninit?
+ jne Inp10
+
+ or eax, CR0_ET
+ mov edx, 1
+
+ cmp fs:PcPrcbData.PbCpuType, 3h
+ jbe Inp10
+
+ or eax, CR0_NE
+
+Inp10:
+ or eax, CR0_EM+CR0_TS ; During Kernel Initialization set
+ ; the EM bit
+ mov cr0, eax
+ pop eax ; clear scratch value
+ pop ebp ; Restore caller's bp
+ mov eax, edx
+ stdRet _KiIsNpxPresent
+
+
+stdENDP _KiIsNpxPresent
+
+.586p
+
+;++
+;
+; VOID
+; CPUID (
+; ULONG InEax,
+; PULONG OutEax,
+; PULONG OutEbx,
+; PULONG OutEcx,
+; PULONG OutEdx
+; );
+;
+; Routine Description:
+;
+; Executes the CPUID instruction and returns the registers from it
+;
+; Only available at INIT time
+;
+; Arguments:
+;
+; Return Value:
+;
+;--
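+
+;
+; A caller might use this wrapper as follows (an illustrative sketch only;
+; the prototype is the one shown above):
+;
+;   ULONG Eax, Ebx, Ecx, Edx;
+;
+;   CPUID(0, &Eax, &Ebx, &Ecx, &Edx);   // Eax = max function number,
+;                                       // Ebx:Edx:Ecx = vendor string
+;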
+cPublicProc _CPUID,5
+
+ push ebx
+ push esi
+
+ mov eax, [esp+12]
+
+ cpuid
+
+ mov esi, [esp+16] ; return EAX
+ mov [esi], eax
+
+ mov esi, [esp+20] ; return EBX
+ mov [esi], ebx
+
+ mov esi, [esp+24] ; return ECX
+ mov [esi], ecx
+
+ mov esi, [esp+28] ; return EDX
+ mov [esi], edx
+
+ pop esi
+ pop ebx
+
+ stdRET _CPUID
+
+stdENDP _CPUID
+
+;++
+;
+; LONGLONG
+; RDTSC (
+; VOID
+; );
+;
+; Routine Description:
+;
+; Arguments:
+;
+; Return Value:
+;
+;--
+cPublicProc _RDTSC
+ rdtsc
+ stdRET _RDTSC
+
+stdENDP _RDTSC
+
+INIT ENDS
+
+_TEXT SEGMENT DWORD PUBLIC 'CODE' ; Put IdleLoop in text section
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+;++
+;
+; ULONGLONG
+; FASTCALL
+; RDMSR (
+; IN ULONG MsrRegister
+; );
+;
+; Routine Description:
+;
+; Arguments:
+;
+; Return Value:
+;
+;--
+cPublicFastCall RDMSR, 1
+ rdmsr
+ fstRET RDMSR
+fstENDP RDMSR
+
+
+;++
+;
+; VOID
+; WRMSR (
+; IN ULONG MsrRegister
+; IN LONGLONG MsrValue
+; );
+;
+; Routine Description:
+;
+; Arguments:
+;
+; Return Value:
+;
+;--
+cPublicProc _WRMSR, 3
+ mov ecx, [esp+4]
+ mov eax, [esp+8]
+ mov edx, [esp+12]
+ wrmsr
+ stdRET _WRMSR
+stdENDP _WRMSR
+
+_TEXT ENDS
+ END
diff --git a/private/ntos/ke/i386/cpu.inc b/private/ntos/ke/i386/cpu.inc
new file mode 100644
index 000000000..ca404c6c2
--- /dev/null
+++ b/private/ntos/ke/i386/cpu.inc
@@ -0,0 +1,61 @@
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; cpu.inc
+;
+; Abstract:
+;
+; This module contains the assembly structures and definitions
+; for INTEL 80x86 CPU specifiec information. This include file
+; is mainly used by CPU.ASM to determine CPU type and stepping
+; number.
+;
+; Author:
+;
+; Shie-Lin (shielint) 1-Oct-1991
+;
+; Revision History:
+;
+;--
+
+;
+; The following equates define the control bits of CR0 register
+;
+
+CR0_AM equ 40000h
+CR0_ET equ 00010h
+
+;
+; The following equates define the control bits of the EFLAGS register
+;
+
+EFLAGS_AC equ 40000h
+EFLAGS_VM equ 20000h
+EFLAGS_RF equ 10000h
+EFLAGS_NF equ 4000h
+EFLAGS_IOPL equ 3000h
+EFLAGS_IF equ 200h
+EFLAGS_TF equ 100h
+EFLAGS_ID equ 200000h
+
+;
+; Define the iret frame
+;
+
+IretFrame struc
+
+IretEip dd 0
+IretCs dd 0
+IretEFlags dd 0
+
+IretFrame ends
+
+;
+; Misc. definitions
+;
+
+ADDRESS_OVERRIDE equ 67h
+OPERAND_OVERRIDE equ 66h
diff --git a/private/ntos/ke/i386/ctxswap.asm b/private/ntos/ke/i386/ctxswap.asm
new file mode 100644
index 000000000..1213fcdc1
--- /dev/null
+++ b/private/ntos/ke/i386/ctxswap.asm
@@ -0,0 +1,1923 @@
+ title "Context Swap"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; ctxswap.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to field the dispatch
+; interrupt and to perform kernel initiated context switching.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 14-Jan-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+; 22-feb-90 bryanwi
+; write actual swap context procedure
+;
+;--
+
+.486p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include mac386.inc
+include callconv.inc
+ .list
+
+ EXTRNP HalClearSoftwareInterrupt,1,IMPORT,FASTCALL
+ EXTRNP HalRequestSoftwareInterrupt,1,IMPORT,FASTCALL
+ EXTRNP KiActivateWaiterQueue,1,,FASTCALL
+ EXTRNP KiReadyThread,1,,FASTCALL
+ EXTRNP KiWaitTest,2,,FASTCALL
+ EXTRNP KfLowerIrql,1,IMPORT,FASTCALL
+ EXTRNP KfRaiseIrql,1,IMPORT,FASTCALL
+ EXTRNP _KeGetCurrentIrql,0,IMPORT
+ EXTRNP _KeGetCurrentThread,0
+ EXTRNP _KiContinueClientWait,3
+ EXTRNP _KiDeliverApc,3
+ EXTRNP _KiQuantumEnd,0
+ EXTRNP _KeBugCheckEx,5
+ extrn KiRetireDpcList:PROC
+ extrn _KiContextSwapLock:DWORD
+ extrn _KiDispatcherLock:DWORD
+ extrn _KeFeatureBits:DWORD
+ extrn _KeThreadSwitchCounters:DWORD
+ extrn _KeTickCount:DWORD
+
+ extrn __imp_@KfLowerIrql@4:DWORD
+
+ extrn _KiWaitInListHead:DWORD
+ extrn _KiWaitOutListHead:DWORD
+ extrn _KiDispatcherReadyListHead:DWORD
+ extrn _KiIdleSummary:DWORD
+ extrn _KiReadySummary:DWORD
+ extrn _KiSwapContextNotifyRoutine:DWORD
+ extrn _KiThreadSelectNotifyRoutine:DWORD
+
+if DBG
+ extrn _KdDebuggerEnabled:BYTE
+ EXTRNP _DbgBreakPoint,0
+ extrn _DbgPrint:near
+ extrn _MsgDpcTrashedEsp:BYTE
+ extrn _MsgDpcTimeout:BYTE
+ extrn _KiDPCTimeout:DWORD
+endif
+
+_TEXT$00 SEGMENT PARA PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ page ,132
+ subttl "Unlock Dispatcher Database"
+;++
+;
+; VOID
+; KiUnlockDispatcherDatabase (
+; IN KIRQL OldIrql
+; )
+;
+; Routine Description:
+;
+; This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+; database locked. Its function is to either unlock the dispatcher
+; database and return or initiate a context switch if another thread
+; has been selected for execution.
+;
+; Arguments:
+;
+; (TOS) Return address
+;
+; (ecx) OldIrql - Supplies the IRQL when the dispatcher database
+; lock was acquired.
+;
+; Return Value:
+;
+; None.
+;
+;--
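+
+;
+; The overall policy is roughly the following (an illustrative C sketch
+; only; "release the lock" stands for clearing KiDispatcherLock on MP
+; systems):
+;
+;   if (Prcb->NextThread == NULL) {
+;       release the lock;                       // nothing to switch to
+;       KfLowerIrql(OldIrql);
+;   } else if (OldIrql >= DISPATCH_LEVEL) {
+;       release the lock;                       // defer the switch to the
+;       if (!Prcb->DpcRoutineActive) {          // dispatch interrupt
+;           HalRequestSoftwareInterrupt(DISPATCH_LEVEL);
+;       }
+;       KfLowerIrql(OldIrql);
+;   } else {
+;       KiReadyThread(CurrentThread);           // switch immediately
+;       SwapContext(Prcb->NextThread);
+;       KfLowerIrql(OldIrql);
+;   }
+;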
+
+cPublicFastCall KiUnlockDispatcherDatabase, 1
+
+;
+; Check if a new thread is scheduled for execution.
+;
+
+ cmp PCR[PcPrcbData+PbNextThread], 0 ; check if next thread
+ jne short Kiu20 ; if ne, new thread scheduled
+
+;
+; Release dispatcher database lock, lower IRQL to its previous level,
+; and return.
+;
+
+Kiu00: ;
+
+ifndef NT_UP
+
+ mov _KiDispatcherLock, 0 ; release dispatcher lock
+
+endif
+
+;
+; N.B. This exit jumps directly to the lower IRQL routine which has a
+; compatible fastcall interface.
+;
+
+ jmp dword ptr [__imp_@KfLowerIrql@4] ; lower IRQL to previous level
+
+;
+; A new thread has been selected to run on the current processor, but
+; the new IRQL is not below dispatch level. If the current processor is
+; not executing a DPC, then request a dispatch interrupt on the current
+; processor before restoring IRQL.
+;
+
+Kiu10: cmp dword ptr PCR[PcPrcbData.PbDpcRoutineActive],0 ; check if DPC routine active
+ jne short Kiu00 ; if ne, DPC routine is active
+
+ifndef NT_UP
+
+ mov _KiDispatcherLock, 0 ; release dispatcher lock
+
+endif
+
+ push ecx ; save new IRQL
+ mov cl, DISPATCH_LEVEL ; request dispatch interrupt
+ fstCall HalRequestSoftwareInterrupt ;
+ pop ecx ; restore new IRQL
+
+;
+; N.B. This exit jumps directly to the lower IRQL routine which has a
+; compatible fastcall interface.
+;
+
+ jmp dword ptr [__imp_@KfLowerIrql@4] ; lower IRQL to previous level
+
+;
+; Check if the previous IRQL is less than dispatch level.
+;
+
+Kiu20: cmp cl, DISPATCH_LEVEL ; check if IRQL below dispatch level
+ jge short Kiu10 ; if ge, not below dispatch level
+
+;
+; There is a new thread scheduled for execution and the previous IRQL is
+; less than dispatch level. Context switch to the new thread immediately.
+;
+;
+; N.B. The following registers MUST be saved such that ebp is saved last.
+; This is done so the debugger can find the saved ebp for a thread
+; that is not currently in the running state.
+;
+
+.fpo (4, 0, 0, 0, 0, 0)
+ sub esp, 4*4
+ mov [esp+12], ebx ; save registers
+ mov [esp+8], esi ;
+ mov [esp+4], edi ;
+ mov [esp+0], ebp ;
+ mov ebx, PCR[PcSelfPcr] ; get address of PCR
+ mov esi, [ebx].PcPrcbData.PbNextThread ; get next thread address
+ mov edi, [ebx].PcPrcbData.PbCurrentThread ; get current thread address
+ mov dword ptr [ebx].PcPrcbData.PbNextThread, 0 ; clear next thread address
+ mov [ebx].PcPrcbData.PbCurrentThread, esi ; set current thread address
+ mov [edi].ThWaitIrql, cl ; save previous IRQL
+ mov ecx, edi ; set address of current thread
+ fstCall KiReadyThread ; reready thread for execution
+ mov cl, [edi].ThWaitIrql ; set APC interrupt bypass disable
+ call SwapContext ; swap context
+ or al, al ; check if kernel APC pending
+ mov cl, [esi].ThWaitIrql ; get original wait IRQL
+ jnz short Kiu50 ; if nz, kernel APC pending
+
+Kiu30: mov ebp, [esp+0] ; restore registers
+ mov edi, [esp+4] ;
+ mov esi, [esp+8] ;
+ mov ebx, [esp+12] ;
+ add esp, 4*4
+
+;
+; N.B. This exit jumps directly to the lower IRQL routine which has a
+; compatible fastcall interface.
+;
+
+ jmp dword ptr [__imp_@KfLowerIrql@4] ; lower IRQL to previous level
+
+Kiu50: mov cl, APC_LEVEL ; lower IRQL to APC level
+ fstCall KfLowerIrql ;
+ xor eax, eax ; set previous mode to kernel
+ stdCall _KiDeliverApc, <eax, eax, eax> ; deliver kernel mode APC
+ inc dword ptr [ebx].PcPrcbData.PbApcBypassCount ; increment count
+ xor ecx, ecx ; set original wait IRQL
+ jmp short Kiu30
+
+fstENDP KiUnlockDispatcherDatabase
+
+ page ,132
+ subttl "Swap Thread"
+;++
+;
+; VOID
+; KiSwapThread (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine is called to select the next thread to run on the
+; current processor and to perform a context switch to the thread.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; Wait completion status (eax).
+;
+;--
+
+cPublicFastCall KiSwapThread, 0
+.fpo (4, 0, 0, 0, 1, 0)
+
+;
+; N.B. The following registers MUST be saved such that ebp is saved last.
+; This is done so the debugger can find the saved ebp for a thread
+; that is not currently in the running state.
+;
+
+ sub esp, 4*4
+ mov [esp+12], ebx ; save registers
+ mov [esp+8], esi ;
+ mov [esp+4], edi ;
+ mov [esp+0], ebp ;
+
+ mov ebx, PCR[PcSelfPcr] ; get address of PCR
+ mov edx, [ebx].PcPrcbData.PbNextThread ; get next thread address
+ or edx, edx ; check if next thread selected
+ jnz Swt140 ; if nz, next thread selected
+
+;
+; Find the highest nibble in the ready summary that contains a set bit
+; and left justify so the nibble is in bits <31:28>
+;
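+; Together with the shift loop that follows, the net effect is to scan the
+; ready queues from the highest nonempty priority downward; roughly, in C
+; (an illustrative sketch only):
+;
+;   for (Priority = 31; Priority >= 0; Priority -= 1) {
+;       if (KiReadySummary & (1 << Priority)) {
+;           /* scan KiDispatcherReadyListHead[Priority] for a candidate */
+;       }
+;   }
+;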
+
+ mov ecx, 16 ; set base bit number
+ mov edi, _KiReadySummary ; get ready summary
+ mov esi, edi ; copy ready summary
+ shr esi, 16 ; isolate bits <31:16> of summary
+ jnz short Swt10 ; if nz, bits <31:16> are nonzero
+ xor ecx, ecx ; set base bit number
+ mov esi, edi ; set bits <15:0> of summary
+Swt10: shr esi, 8 ; isolate bits <15:8> of low bits
+ jz short Swt20 ; if z, bits <15:8> are zero
+ add ecx, 8 ; add offset to nonzero byte
+Swt20: mov esi, edi ; isolate highest nonzero byte
+ shr esi, cl ;
+ add ecx, 3 ; adjust to high bit of nibble
+ cmp esi, 10h ; check if high nibble nonzero
+ jb short Swt30 ; if b, then high nibble is zero
+ add ecx, 4 ; compute ready queue priority
+Swt30: mov esi, ecx ; left justify ready summary nibble
+ not ecx ;
+ shl edi, cl ;
+ or edi, edi ;
+
+;
+; If the next bit is set in the ready summary, then scan the corresponding
+; dispatcher ready queue.
+;
+
+Swt40: js short Swt60 ; if s, queue contains an entry
+Swt50: sub esi, 1 ; decrement ready queue priority
+ shl edi, 1 ; position next ready summary bit
+ jnz short Swt40 ; if nz, more queues to scan
+
+;
+; All ready queues were scanned without finding a runnable thread so
+; default to the idle thread and set the appropriate bit in idle summary.
+;
+
+ifdef _COLLECT_SWITCH_DATA_
+
+ inc _KeThreadSwitchCounters + TwSwitchToIdle ; increment counter
+
+endif
+
+ifdef NT_UP
+
+ mov _KiIdleSummary, 1 ; set idle summary bit
+
+else
+
+ mov eax, [ebx].PcPrcbData.PbSetMember ; get processor set member
+ or _KiIdleSummary, eax ; set idle summary bit
+
+endif
+
+ mov edx, [ebx].PcPrcbData.PbIdleThread ; set idle thread address
+ jmp Swt140 ;
+
+;
+; If the thread can execute on the current processor, then remove it from
+; the dispatcher ready queue.
+;
+
+ align 4
+Swt60: lea ebp, [esi*8] + _KiDispatcherReadyListHead ; get ready queue address
+ mov ecx, [ebp].LsFlink ; get address of first queue entry
+Swt70: mov edx, ecx ; compute address of thread object
+ sub edx, ThWaitListEntry ;
+
+ifndef NT_UP
+
+ mov eax, [edx].ThAffinity ; get thread affinity
+ test eax, [ebx].PcPrcbData.PbSetMember ; test if compatible affinity
+ jnz short Swt80 ; if nz, thread affinity compatible
+ mov ecx, [ecx].LsFlink ; get address of next entry
+ cmp ebp, ecx ; check if end of list
+ jnz short Swt70 ; if nz, not end of list
+ jmp short Swt50 ;
+
+;
+; If the thread last ran on the current processor, has been waiting for
+; longer than a quantum, or its priority is greater than low realtime
+; plus 9, then select the thread. Otherwise, an attempt is made to find
+; a more appropriate candidate.
+;
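+; The selection test is approximately (an illustrative C sketch only; the
+; names follow the fields and constants referenced below):
+;
+;   Select = (Thread->NextProcessor == Prcb->Number) ||
+;            (Thread->IdealProcessor == Prcb->Number) ||
+;            (Priority >= LOW_REALTIME_PRIORITY + 9) ||
+;            ((KeTickCount - Thread->WaitTime) > READY_SKIP_QUANTUM);
+;
+;   if (!Select) {
+;       /* search further down this ready queue for a better candidate */
+;   }
+;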
+
+ align 4
+Swt80: cmp _KiThreadSelectNotifyRoutine, 0 ; check for callout routine
+ je short Swt85 ; if eq, no callout routine registered
+ push edx ; save volatile registers
+ push ecx ;
+ mov ecx, [edx].EtCid.CidUniqueThread ; set trial thread unique id
+ call [_KiThreadSelectNotifyRoutine] ; notify callout routine
+ pop ecx ; restore volatile registers
+ pop edx ;
+ or eax, eax ; check if trial thread selectable
+ jnz Swt120 ; if nz, trial thread selectable
+ jmp Swt87 ;
+
+ align 4
+Swt85: mov al, [edx].ThNextProcessor ; get last processor number
+ cmp al, [ebx].PcPrcbData.PbNumber ; check if current processor
+ jz Swt120 ; if z, same as current processor
+ mov al, [edx].ThIdealProcessor ; get ideal processor number
+ cmp al, [ebx].PcPrcbData.PbNumber ; check if current processor
+ jz short Swt120 ; if z, same as current processor
+Swt87: cmp esi, LOW_REALTIME_PRIORITY + 9 ; check if priority in range
+ jae short Swt120 ; if ae, priority not in range
+ mov edi, _KeTickCount + 0 ; get low part of tick count
+ sub edi, [edx].ThWaitTime ; compute length of wait
+ cmp edi, READY_SKIP_QUANTUM + 1 ; check if wait time exceeded
+ jae short Swt120 ; if ae, wait time exceeded
+ mov edi, edx ; set address of thread
+
+;
+; Search forward in the ready queue until the end of the list is reached
+; or a more appropriate thread is found.
+;
+
+Swt90: mov edi, [edi].ThWaitListEntry ; get address of next entry
+ cmp ebp, edi ; check if end of list
+ jz short Swt120 ; if z, end of list
+ sub edi, ThWaitListEntry ; compute address of thread
+ mov eax, [edi].ThAffinity ; get thread affinity
+ test eax, [ebx].PcPrcbData.PbSetMember ; test if compatible affinity
+ jz short Swt100 ; if z, thread affinity not compatible
+ cmp _KiThreadSelectNotifyRoutine, 0 ; check for callout routine
+ je short Swt95 ; if eq, no callout routine registered
+ push edx ; save volatile registers
+ push ecx ;
+ mov ecx, [edi].EtCid.CidUniqueThread ; set trial thread unique id
+ call [_KiThreadSelectNotifyRoutine] ; notify callout routine
+ pop ecx ; restore volatile registers
+ pop edx ;
+ or eax, eax ; check if trial thread selectable
+ jnz short Swt110 ; if nz, trial thread selectable
+ jmp short Swt100 ;
+
+ align 4
+Swt95: mov al, [edi].ThNextProcessor ; get last processor number
+ cmp al, [ebx].PcPrcbData.PbNumber ; check if current processor
+ jz short Swt110 ; if z, same as current processor
+ mov al, [edi].ThIdealProcessor ; get ideal processor number
+ cmp al, [ebx].PcPrcbData.PbNumber ; check if current processor
+ jz short Swt110 ; if z, same as current processor
+Swt100: mov eax, _KeTickCount + 0 ; get low part of tick count
+ sub eax, [edi].ThWaitTime ; compute length of wait
+ cmp eax, READY_SKIP_QUANTUM + 1 ; check if wait time exceeded
+ jb short Swt90 ; if b, wait time not exceeded
+ jmp short Swt120 ;
+
+ align 4
+Swt110: mov edx, edi ; set address of thread
+ mov ecx, edi ; compute address of list entry
+ add ecx, ThWaitListEntry ;
+Swt120: mov al, [ebx].PcPrcbData.PbNumber ; get current processor number
+
+ifdef _COLLECT_SWITCH_DATA_
+
+ lea ebp, _KeThreadSwitchCounters + TwFindIdeal ; get counter address
+ cmp al, [edx].ThIdealProcessor ; check if same as ideal processor
+ jz short Swt130 ; if z, same as ideal processor
+ add ebp, TwFindLast - TwFindIdeal ; compute address of last counter
+ cmp al, [edx].ThNextProcessor ; check if same as last processor
+ jz short Swt130 ; if z, same as last processor
+ add ebp,TwFindAny - TwFindLast ; compute address of correct counter
+Swt130: inc dword ptr [ebp] ; increment appropriate switch counter
+
+endif
+
+ mov [edx].ThNextProcessor, al ; set next processor number
+
+endif
+
+;
+; Remove the selected thread from the ready queue.
+;
+
+ mov eax, [ecx].LsFlink ; get list entry forward link
+ mov ebp, [ecx].LsBlink ; get list entry backward link
+ mov [ebp].LsFlink, eax ; set forward link in previous entry
+ mov [eax].LsBlink, ebp ; set backward link in next entry
+ cmp eax, ebp ; check if list is empty
+ jnz short Swt140 ; if nz, list is not empty
+ mov ebp, 1 ; clear ready summary bit
+ mov ecx, esi ;
+ shl ebp, cl ;
+ xor _KiReadySummary, ebp ;
+
+;
+; Swap context to the next thread.
+;
+
+Swt140: mov esi, edx ; set address of next thread
+ mov edi, [ebx].PcPrcbData.PbCurrentThread ; set current thread address
+ mov dword ptr [ebx].PcPrcbData.PbNextThread, 0 ; clear next thread address
+ mov [ebx].PcPrcbData.PbCurrentThread, esi ; set current thread address
+ mov cl, [edi].ThWaitIrql ; set APC interrupt bypass disable
+ call SwapContext ; swap context
+ or al, al ; check if kernel APC pending
+ mov edi, [esi].ThWaitStatus ; save wait completion status
+ mov cl, [esi].ThWaitIrql ; get wait IRQL
+ jnz short Swt160 ; if nz, kernel APC pending
+
+Swt150: fstCall KfLowerIrql ; lower IRQL to previous value
+ mov eax, edi ; set wait completion status
+ mov ebp, [esp+0] ; restore registers
+ mov edi, [esp+4] ;
+ mov esi, [esp+8] ;
+ mov ebx, [esp+12] ;
+ add esp, 4*4 ;
+ fstRET KiSwapThread ;
+
+Swt160: mov cl, APC_LEVEL ; lower IRQL to APC level
+ fstCall KfLowerIrql ;
+ xor eax, eax ; set previous mode to kernel
+ stdCall _KiDeliverApc, <eax, eax, eax> ; deliver kernel mode APC
+ inc dword ptr [ebx].PcPrcbData.PbApcBypassCount ; increment count
+ xor ecx, ecx ; set original wait IRQL
+ jmp short Swt150
+
+fstENDP KiSwapThread
+
+ page ,132
+ subttl "Dispatch Interrupt"
+;++
+;
+; Routine Description:
+;
+; This routine is entered as the result of a software interrupt generated
+; at DISPATCH_LEVEL. Its function is to process the Deferred Procedure Call
+; (DPC) list, and then perform a context switch if a new thread has been
+; selected for execution on the processor.
+;
+; This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+; database unlocked. When a return to the caller finally occurs, the
+; IRQL remains at DISPATCH_LEVEL, and the dispatcher database is still
+; unlocked.
+;
+; Arguments:
+;
+; None
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+ align 16
+cPublicProc _KiDispatchInterrupt ,0
+cPublicFpo 0, 0
+
+ mov ebx, PCR[PcSelfPcr] ; get address of PCR
+kdi00: lea eax, [ebx].PcPrcbData.PbDpcListHead ; get DPC listhead address
+
+;
+; Disable interrupts and check if there is any work in the DPC list
+; of the current processor.
+;
+
+kdi10: cli ; disable interrupts
+ cmp eax, [eax].LsFlink ; check if DPC List is empty
+ je short kdi40 ; if eq, list is empty
+ push ebp ; save register
+ mov ebp, eax ; set address of DPC listhead
+ call KiRetireDpcList ; process the current DPC list
+ pop ebp ; restore register
+
+;
+; Check to determine if quantum end is requested.
+;
+; N.B. If a new thread is selected as a result of processing the quantum
+; end request, then the new thread is returned with the dispatcher
+; database locked. Otherwise, NULL is returned with the dispatcher
+; database unlocked.
+;
+
+kdi40: sti ; enable interrupts
+ cmp dword ptr [ebx].PcPrcbData.PbQuantumEnd, 0 ; quantum end requested
+ jne kdi90 ; if neq, quantum end request
+
+;
+; Check to determine if a new thread has been selected for execution on this
+; processor.
+;
+
+kdi50: cmp dword ptr [ebx].PcPrcbData.PbNextThread, 0 ; check addr of next thread object
+ je short kdi70 ; if eq, then no new thread
+
+;
+; Disable interrupts and attempt to acquire the dispatcher database lock.
+;
+
+ifndef NT_UP
+
+ lea eax, _KiDispatcherLock ; get dispatch database lock address
+ cli ; disable interrupts
+ TEST_SPINLOCK eax, <short kdi80> ; Is it busy?
+ ACQUIRE_SPINLOCK eax, <short kdi80> ; Try to acquire dispatch database lock
+
+endif
+
+;
+; Raise IRQL to synchronization level.
+;
+
+ mov ecx,SYNCH_LEVEL ; raise IRQL to synchronization level
+ fstCall KfRaiseIrql ;
+ sti ; enable interrupts
+ mov eax, [ebx].PcPrcbData.PbNextThread ; get next thread address
+
+;
+; N.B. The following registers MUST be saved such that ebp is saved last.
+; This is done so the debugger can find the saved ebp for a thread
+; that is not currently in the running state.
+;
+
+.fpo (3, 0, 0, 0, 0, 0)
+
+kdi60: sub esp, 3*4
+ mov [esp+8], esi ; save registers
+ mov [esp+4], edi ;
+ mov [esp+0], ebp ;
+ mov esi, eax ; set next thread address
+ mov edi, [ebx].PcPrcbData.PbCurrentThread ; get current thread address
+ mov dword ptr [ebx].PcPrcbData.PbNextThread, 0 ; clear next thread address
+ mov [ebx].PcPrcbData.PbCurrentThread, esi ; set current thread address
+ mov ecx, edi ; set address of current thread
+ fstCall KiReadyThread ; ready thread (ecx) for execution
+ mov cl, 1 ; set APC interrupt bypass disable
+ call SwapContext ; call context swap routine
+ mov ebp, [esp+0] ; restore registers
+ mov edi, [esp+4] ;
+ mov esi, [esp+8] ;
+ add esp, 3*4
+kdi70: stdRET _KiDispatchInterrupt ; return
+
+;
+; Enable interrupts and check DPC queue.
+;
+
+ifndef NT_UP
+
+kdi80: sti ; enable interrupts
+ jmp kdi00 ;
+
+endif
+
+;
+; Process quantum end event.
+;
+; N.B. If the quantum end code returns a NULL value, then no next thread
+; has been selected for execution. Otherwise, a next thread has been
+; selected and the dispatcher database is locked.
+;
+
+kdi90: mov dword ptr [ebx].PcPrcbData.PbQuantumEnd, 0 ; clear quantum end indicator
+ stdCall _KiQuantumEnd ; process quantum end
+ or eax, eax ; check if new thread selected
+ jne short kdi60 ; if ne, new thread selected
+ stdRET _KiDispatchInterrupt ; return
+
+stdENDP _KiDispatchInterrupt
+
+ page ,132
+ subttl "Swap Context to Next Thread"
+;++
+;
+; Routine Description:
+;
+; This routine is called to swap context from one thread to the next.
+; It swaps context, flushes the data, instruction, and translation
+; buffer caches, restores nonvolatile integer registers, and returns
+; to its caller.
+;
+; N.B. It is assumed that the caller (the only callers are within this
+; module) saved the nonvolatile registers ebx, esi, edi, and
+; ebp. This enables the caller to have more registers available.
+;
+; Arguments:
+;
+; cl - APC interrupt bypass disable (zero enable, nonzero disable).
+; edi - Address of previous thread.
+; esi - Address of next thread.
+; ebx - Address of PCR.
+;
+; Return value:
+;
+; al - Kernel APC pending.
+; ebx - Address of PCR.
+; esi - Address of current thread object.
+;
+;--
+
+ align 16
+ public SwapContext
+SwapContext proc
+cPublicFpo 0, 2
+
+;
+; NOTE: The ES: override on the move to ThState is part of the
+; lazy-segment load system. It assures that ES has a valid
+; selector in it, thus preventing us from propagating a bad
+; ES across a context switch.
+;
+; Note that if segments, other than the standard flat segments,
+; with limits above 2 gig exist, neither this nor the rest of
+; lazy segment loads are reliable.
+;
+; Note that ThState must be set before the dispatcher lock is released
+; to prevent KiSetPriorityThread from seeing a stale value.
+;
+
+ mov byte ptr es:[esi]+ThState, Running ; set thread state to running
+
+;
+; Acquire the context swap lock so the address space of the old process
+; cannot be deleted and then release the dispatcher database lock.
+;
+; N.B. This lock is used to protect the address space until the context
+; switch has sufficiently progressed to the point where the address
+; space is no longer needed. This lock is also acquired by the reaper
+; thread before it finishes thread termination.
+;
+
+ifndef NT_UP
+
+ lea eax,_KiContextSwapLock ; get context swap lock address
+
+sc00: ACQUIRE_SPINLOCK eax, sc100, NoChecking ; acquire context swap lock
+
+ mov _KiDispatcherLock, 0 ; release dispatcher lock
+
+endif
+
+;
+; Save the APC disable flag and the exception listhead.
+;
+
+ or cl, cl ; set zf in flags
+ mov ecx, [ebx]+PcExceptionList ; save exception list
+ pushfd ; save flags
+ push ecx ;
+
+;
+; Notify registered callout routine of swap context.
+;
+
+ifndef NT_UP
+
+ cmp _KiSwapContextNotifyRoutine, 0 ; check for callout routine
+ je short sc03 ; if eq, no callout routine registered
+ mov edx, [esi].EtCid.CidUniqueThread ; set new thread unique id
+ mov ecx, [edi].EtCid.CidUniqueThread ; set old thread unique id
+ call [_KiSwapContextNotifyRoutine] ; notify callout routine
+sc03: ;
+
+endif
+
+;
+; Accumulate the total time spent in a thread.
+;
+
+ifdef PERF_DATA
+
+ test _KeFeatureBits, KF_RDTSC ; feature supported?
+ jz short @f ; if z, feature not present
+
+.586p
+ rdtsc ; read cycle counter
+.486p
+
+ sub eax, [ebx].PcPrcbData.PbThreadStartCount.LiLowPart ; sub off thread
+ sbb edx, [ebx].PcPrcbData.PbThreadStartCount.LiHighPart ; starting time
+ add [edi].EtPerformanceCountLow, eax ; accumulate thread run time
+ adc [edi].EtPerformanceCountHigh, edx ;
+ add [ebx].PcPrcbData.PbThreadStartCount.LiLowPart, eax ; set new thread
+ adc [ebx].PcPrcbData.PbThreadStartCount.LiHighPart, edx ; starting time
+@@: ;
+
+endif
+
+;
+; On a uniprocessor system the NPX state is swapped in a lazy manner.
+; If a thread whose state is not in the coprocessor attempts to perform
+; a coprocessor operation, the current NPX state is swapped out (if needed),
+; and the new state is swapped in during the fault. (KiTrap07)
+;
+; On a multiprocessor system we still fault in the NPX state on demand, but
+; we save the state when the thread switches out (assuming the NPX state
+; was loaded). This is because it could be difficult to obtain the thread's
+; NPX state in the trap handler if it was loaded into a different processor's
+; coprocessor.
+;
+ mov ebp, cr0 ; get current CR0
+ mov edx, ebp
+
+ifndef NT_UP
+ cmp byte ptr [edi]+ThNpxState, NPX_STATE_LOADED ; check if NPX state
+ je sc_save_npx_state
+endif
+
+
+sc05: mov cl, [esi]+ThDebugActive ; get debugger active state
+ mov [ebx]+PcDebugActive, cl ; set new debugger active state
+
+;
+; Switch stacks:
+;
+; 1. Save old esp in old thread object.
+; 2. Copy stack base and stack limit into TSS AND PCR
+; 3. Load esp from new thread object
+;
+; Keep interrupts off so we don't confuse the trap handler into thinking
+; we've overrun the kernel stack.
+;
+
+ cli ; disable interrupts
+ mov [edi]+ThKernelStack, esp ; save old kernel stack pointer
+ mov eax, [esi]+ThInitialStack ; get new initial stack pointer
+ lea ecx, [eax]-KERNEL_STACK_SIZE ; get new kernel stack limit
+ sub eax, NPX_FRAME_LENGTH ; space for NPX_FRAME & NPX CR0 flags
+ mov [ebx]+PcStackLimit, ecx ; set new stack limit
+ mov [ebx]+PcInitialStack, eax ; set new stack base
+
+.errnz (NPX_STATE_NOT_LOADED - CR0_TS - CR0_MP)
+.errnz (NPX_STATE_LOADED - 0)
+
+; (eax) = Initial Stack
+; (ebx) = Prcb
+; (edi) = OldThread
+; (esi) = NewThread
+; (ebp) = Current CR0
+; (edx) = Current CR0
+
+ xor ecx, ecx
+ mov cl, [esi]+ThNpxState ; New NPX state is (or is not) loaded
+
+ and edx, NOT (CR0_MP+CR0_EM+CR0_TS) ; clear thread-settable NPX bits
+ or ecx, edx ; or in new threads cr0
+ or ecx, [eax]+FpCr0NpxState ; merge new thread's settable state
+ cmp ebp, ecx ; check if old and new CR0 match
+ jne sc_reload_cr0 ; if ne, CR0 must be reloaded
+
+;
+; N.B. It is important that the following adjustment NOT be applied to
+; the initial stack value in the PCR. If it is, it will cause the
+; locations in memory where the processor pushes the V86 mode segment
+; registers and the first 4 ULONGs of the FLOATING_SAVE_AREA to
+; overlap, which could result in either trashed segment registers
+; in V86 mode or a trashed NPX state.
+;
+; Adjust ESP0 so that V86 mode threads and 32 bit threads can share
+; a trapframe structure, and the NPX save area will be accessible
+; in the same manner on all threads.
+;
+; This test checks the user mode flags. On threads with no user
+; mode context, the value of esp0 does not matter (we will never run
+; in user mode without a user mode context, and if we don't run in user
+; mode the processor will never use the esp0 value).
+;
+
+ align 4
+sc06: test dword ptr [eax] - KTRAP_FRAME_LENGTH + TsEFlags, EFLAGS_V86_MASK
+ jnz short sc07 ; if nz, V86 frame, no adjustment
+ sub eax, TsV86Gs - TsHardwareSegSs ; bias for missing fields
+sc07: mov ecx, [ebx]+PcTss ;
+ mov [ecx]+TssEsp0, eax ;
+ mov esp, [esi]+ThKernelStack ; set new stack pointer
+ mov eax, [esi]+ThTeb ; get user TEB address
+ mov [ebx]+PcTeb, eax ; set user TEB address
+
+;
+; Edit the TEB descriptor to point to the TEB
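+; (the 32-bit linear address in eax is split across the KGDT_R3_TEB
+; descriptor: bits 0-15 into BaseLow, bits 16-23 into BaseMid, and
+; bits 24-31 into BaseHi)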
+;
+
+ sti ; enable interrupts
+ mov ecx, [ebx]+PcGdt ;
+ mov [ecx]+(KGDT_R3_TEB+KgdtBaseLow), ax ;
+ shr eax, 16 ;
+ mov [ecx]+(KGDT_R3_TEB+KgdtBaseMid), al ;
+ shr eax, 8
+ mov [ecx]+(KGDT_R3_TEB+KgdtBaseHi), al
+
+;
+; NOTE: Keep KiSwapProcess (below) in sync with this code!
+;
+; If the new process is not the same as the old process, then switch the
+; address space to the new process.
+;
+
+ mov eax, [edi].ThApcState.AsProcess ; get old process address
+ cmp eax, [esi].ThApcState.AsProcess ; check if process match
+ jz short sc22 ; if z, old and new process match
+ mov edi, [esi].ThApcState.AsProcess ; get new process address
+
+;
+; Update the processor set masks.
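+; N.B. The xor instructions below toggle the per-processor bit in each mask:
+; they clear the bit in the old process's active set (where it is currently
+; set) and set it in the new process's active set (where it is currently
+; clear).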
+;
+
+ifndef NT_UP
+
+if DBG
+
+ mov cl, [esi]+ThNextProcessor ; get current processor number
+ cmp cl, [ebx]+PcPrcbData+PbNumber ; same as running processor?
+ jne sc_error2 ; if ne, processor number mismatch
+
+endif
+
+ mov ecx, [ebx]+PcSetMember ; get processor set member
+ xor [eax]+PrActiveProcessors, ecx ; clear bit in old processor set
+ xor [edi]+PrActiveProcessors, ecx ; set bit in new processor set
+
+if DBG
+ test [eax]+PrActiveProcessors, ecx ; test if bit clear in old set
+ jnz sc_error4 ; if nz, bit not clear in old set
+ test [edi]+PrActiveProcessors, ecx ; test if bit set in new set
+ jz sc_error5 ; if z, bit not set in new set
+
+endif
+endif
+
+;
+; New CR3, flush tb, sync tss, set IOPM
+; CS, SS, DS, ES all have flat (GDT) selectors in them.
+; FS has the pcr selector.
+; Therefore, GS is the only selector we need to flush. We null it out;
+; it will be reloaded from a stack frame somewhere above us.
+; Note: this load of GS before CR3 works around P6 step B0 errata 11
+;
+
+ xor eax, eax ; clear eax for null GS selector
+ mov gs, ax ;
+ mov eax, [edi]+PrDirectoryTableBase ; get new directory base
+ mov ebp, [ebx]+PcTss ; get new TSS
+ mov ecx, [edi]+PrIopmOffset ; get IOPM offset
+ mov cr3, eax ; flush TLB and set new directory base
+ mov [ebp]+TssCR3, eax ; make TSS be in sync with hardware
+ mov [ebp]+TssIoMapBase, cx ;
+
+;
+; LDT switch
+;
+
+ xor eax, eax ; check if null LDT limit
+ cmp word ptr [edi]+PrLdtDescriptor, ax
+ jnz short sc_load_ldt ; if nz, process has an LDT
+
+ lldt ax ; set LDT
+
+;
+; Release the context swap lock.
+;
+
+ align 4
+sc22: ;
+
+ifndef NT_UP
+
+ mov _KiContextSwapLock, 0 ; release context swap lock
+
+endif
+
+;
+; Update context switch counters.
+;
+
+ inc dword ptr [esi]+ThContextSwitches ; thread count
+ inc dword ptr [ebx]+PcPrcbData+PbContextSwitches ; processor count
+ pop ecx ; restore exception list
+ mov [ebx].PcExceptionList, ecx ;
+
+;
+; If the new thread has a kernel mode APC pending, then request an APC
+; interrupt.
+;
+
+ cmp byte ptr [esi].ThApcState.AsKernelApcPending, 0 ; APC pending?
+ jne short sc80 ; if ne, kernel APC pending
+ popfd ; restore flags
+ xor eax, eax ; clear kernel APC pending
+ ret ; return
+
+;
+; The new thread has an APC interrupt pending. If APC interrupt bypass is
+; enable, then return kernel APC pending. Otherwise, request a software
+; interrupt at APC_LEVEL and return no kernel APC pending.
+;
+
+sc80: popfd ; restore flags
+ jnz short sc90 ; if nz, APC interrupt bypass disabled
+ mov al, 1 ; set kernel APC pending
+ ret ;
+
+sc90: mov cl, APC_LEVEL ; request software interrupt level
+ fstCall HalRequestSoftwareInterrupt ;
+ xor eax, eax ; clear kernel APC pending
+ ret ;
+
+;
+; Wait for context swap lock to be released.
+;
+
+ifndef NT_UP
+
+sc100: SPIN_ON_SPINLOCK eax, sc00 ;
+
+endif
+
+;
+; Set for new LDT value
+;
+
+sc_load_ldt:
+ mov ebp, [ebx]+PcGdt ;
+ mov eax, [edi+PrLdtDescriptor] ;
+ mov [ebp+KGDT_LDT], eax ;
+ mov eax, [edi+PrLdtDescriptor+4] ;
+ mov [ebp+KGDT_LDT+4], eax ;
+ mov eax, KGDT_LDT ;
+
+;
+; Set up the int 21 descriptor of the IDT. If the process does not have an
+; Ldt, it should never make any int 21 call. If it does, an exception is
+; generated. If the process has an Ldt, we need to update the int 21 entry
+; of the IDT for the process. Note the Int21Descriptor of the process may
+; simply indicate an invalid entry, in which case the int 21 will be trapped
+; to the kernel.
+;
+
+ mov ebp, [ebx]+PcIdt ;
+ mov ecx, [edi+PrInt21Descriptor] ;
+ mov [ebp+21h*8], ecx ;
+ mov ecx, [edi+PrInt21Descriptor+4] ;
+ mov [ebp+21h*8+4], ecx ;
+ lldt ax ; set LDT
+ jmp short sc22
+
+;
+; CR0 has changed (e.g., different NPX state for the new thread); load the new value
+;
+
+sc_reload_cr0:
+if DBG
+
+ test byte ptr [esi]+ThNpxState, NOT (CR0_TS+CR0_MP)
+ jnz sc_error ;
+ test dword ptr [eax]+FpCr0NpxState, NOT (CR0_PE+CR0_MP+CR0_EM+CR0_TS)
+ jnz sc_error3 ;
+
+endif
+ mov cr0,ecx ; set new CR0 NPX state
+ jmp sc06
+
+
+ifndef NT_UP
+
+
+; Save the coprocessor's current context. FpCr0NpxState is the current thread's
+; CR0 state. The following bits are valid: CR0_MP, CR0_EM, CR0_TS. MVDMs
+; may set and clear MP & EM as they please and the settings will be reloaded
+; on a context switch (but they will not be saved from CR0 to Cr0NpxState).
+; The kernel sets and clears TS as required.
+;
+; (ebp) = Current CR0
+; (edx) = Current CR0
+
+sc_save_npx_state:
+ and edx, NOT (CR0_MP+CR0_EM+CR0_TS) ; we need access to the NPX state
+
+ mov ecx,[ebx]+PcInitialStack ; get NPX save area address
+
+ cmp ebp, edx ; Does CR0 need to be reloaded?
+ je short sc_npx10
+
+ mov cr0, edx ; set new cr0
+ mov ebp, edx ; (ebp) = (edx) = current cr0 state
+
+sc_npx10:
+;
+; The fwait following the fnsave is to make sure that the fnsave has stored the
+; data into the save area before this coprocessor state could possibly be
+; context switched in and used on a different (co)processor. I've added up
+; the clocks from when the dispatcher lock is released and don't believe
+; that is a possibility. I've also timed the impact this fwait seems to have
+; on a 486 when performing lots of numeric calculations. It appears as if
+; there is nothing to wait for after the fnsave (although the 486 manual says
+; there is), and therefore the calculation time far outweighed the 3-clock
+; fwait and it didn't make a noticeable difference.
+;
+
+ fnsave [ecx] ; save NPX state
+ fwait ; wait until NPX state is saved
+ mov byte ptr [edi]+ThNpxState, NPX_STATE_NOT_LOADED ; set no NPX state
+
+if DBG
+ mov dword ptr [ebx]+PcPrcbData+PbNpxThread, 0 ; owner of coprocessors state
+endif
+ jmp sc05
+endif
+
+
+if DBG
+sc_error5: int 3
+sc_error4: int 3
+sc_error3: int 3
+sc_error2: int 3
+sc_error: int 3
+endif
+
+SwapContext endp
+
+ page , 132
+ subttl "Flush Data Cache"
+;++
+;
+; VOID
+; KiFlushDcache (
+; )
+;
+; VOID
+; KiFlushIcache (
+; )
+;
+; Routine Description:
+;
+; This routine does nothing on i386 and i486 systems. Why? Because
+; (a) their caches are completely transparent, and (b) they don't have
+; instructions to flush their caches.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiFlushDcache ,0
+cPublicProc _KiFlushIcache ,0
+
+ stdRET _KiFlushIcache
+
+stdENDP _KiFlushIcache
+stdENDP _KiFlushDcache
+
+ page , 132
+ subttl "Flush Entire Translation Buffer"
+;++
+;
+; VOID
+; KeFlushCurrentTb (
+; )
+;
+; Routine Description:
+;
+; This function flushes the entire translation buffer (TB) on the current
+; processor and also flushes the data cache if an entry in the translation
+; buffer has become invalid.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KeFlushCurrentTb ,0
+
+if DBG
+ pushfd ; ensure all flushes occur at dispatch_level or higher...
+ pop eax
+ test eax, EFLAGS_INTERRUPT_MASK
+ jz short @f
+
+ stdCall _KeGetCurrentIrql
+ cmp al, DISPATCH_LEVEL
+ jnc short @f
+ int 3
+@@:
+endif
+
+ktb00: mov eax, cr3 ; (eax) = directory table base
+ mov cr3, eax ; flush TLB
+ stdRET _KeFlushCurrentTb
+
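+;
+; The alternate sequence below is copied over ktb00 by Ki386EnableGlobalPage
+; when global pages are in use; reloading CR3 does not flush global TLB
+; entries, so CR4.PGE is cleared and then set again to flush the entire TB.
+;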
+.586p
+ktb_gb: mov eax, cr4 ; *** see Ki386EnableGlobalPage ***
+ and eax, not CR4_PGE ; This FlushCurrentTb version gets copied into
+ mov cr4, eax ; ktb00 at initialization time if needed.
+ or eax, CR4_PGE
+ mov cr4, eax
+ktb_eb: stdRET _KeFlushCurrentTb
+.486p
+
+stdENDP _KeFlushCurrentTb
+
+_TEXT$00 ends
+
+INIT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+;++
+;
+; VOID
+; Ki386EnableGlobalPage (
+; IN volatile PLONG Number
+; )
+;
+; /*++
+;
+; Routine Description:
+;
+; This routine enables the global page PDE/PTE support in the system.
+; It stalls until all processors have reached this routine and then sets
+; the current processor's cr4 register to enable global page support.
+;
+; Arguments:
+;
+; Number - Supplies a pointer to count of the number of processors in
+; the configuration.
+;
+; Return Value:
+;
+; None.
+;--
+
+cPublicProc _Ki386EnableGlobalPage,1
+ push esi
+ push edi
+ push ebx
+
+ mov edx, [esp+16] ; pointer to Number
+ pushfd
+ cli
+
+;
+; Wait for all processors
+;
+ lock dec dword ptr [edx] ; count down
+egp10: cmp dword ptr [edx], 0 ; wait for all processors to signal
+ jnz short egp10
+
+ cmp PCR[PcNumber], 0 ; processor 0?
+ jne short egp20
+
+;
+; Install the proper KeFlushCurrentTb function.
+;
+
+ mov edi, ktb00
+ mov esi, ktb_gb
+ mov ecx, ktb_eb - ktb_gb + 1
+ rep movsb
+
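+;
+; Zero the byte at ktb_eb to signal the other processors, which spin at
+; egp20 below until this byte becomes zero.
+;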
+ mov byte ptr [ktb_eb], 0
+
+;
+; Wait for P0 to signal that proper flush tb handlers have been installed
+;
+egp20: cmp byte ptr [ktb_eb], 0
+ jnz short egp20
+
+;
+; Flush TB, and enable global page support
+; (note the load of CR4 is explicitly done before the load of CR3
+; to work around P6 step B0 errata 11)
+;
+.586p
+ mov eax, cr4
+ and eax, not CR4_PGE ; should not be set, but let's be safe
+ mov ecx, cr3
+ mov cr4, eax
+
+ mov cr3, ecx ; Flush TB
+
+ or eax, CR4_PGE ; enable global TBs
+ mov cr4, eax
+.486p
+ popfd
+ pop ebx
+ pop edi
+ pop esi
+
+ stdRET _Ki386EnableGlobalPage
+stdENDP _Ki386EnableGlobalPage
+
+
+;++
+;
+; VOID
+; Ki386EnableCurrentLargePage (
+; IN ULONG IdentityAddr,
+; IN ULONG IdentityCr3
+; )
+;
+; /*++
+;
+; Routine Description:
+;
+; This routine enables the large page PDE support in the processor
+;
+; Arguments:
+;
+; IdentityAddr - Supplies the linear address of the label within this
+; function where (linear == physical).
+;
+; IdentityCr3 - Supplies a pointer to temporary page directory and
+; page tables that provide both the kernel (virtual ->physical) and
+; identity (linear->physical) mappings needed for this function.
+;
+; Return Value:
+;
+; None.
+;--
+
+public _Ki386LargePageIdentityLabel
+cPublicProc _Ki386EnableCurrentLargePage,2
+ mov ecx,[esp]+4 ; (ecx)-> IdentityAddr
+ mov edx,[esp]+8 ; (edx)-> IdentityCr3
+ pushfd ; save current IF state
+ cli ; disable interrupts
+
+ mov eax, cr3 ; (eax)-> original Cr3
+ mov cr3, edx ; load Cr3 with Identity mapping
+ jmp ecx ; jump to (linear == physical)
+
+_Ki386LargePageIdentityLabel:
+ mov ecx, cr0
+ and ecx, NOT CR0_PG ; clear PG bit to disable paging
+ mov cr0, ecx ; disable paging
+ jmp $+2 ; flush the prefetch queue after paging is disabled
+
+.586p
+ mov edx, cr4
+ or edx, CR4_PSE ; enable Page Size Extensions
+ mov cr4, edx
+
+.486p
+ mov edx, offset OriginalMapping
+ or ecx, CR0_PG ; set PG bit to enable paging
+ mov cr0, ecx ; enable paging
+ jmp edx ; Return to original mapping.
+
+OriginalMapping:
+ mov cr3, eax ; restore original Cr3
+ popfd ; restore interrupts to previous
+
+ stdRET _Ki386EnableCurrentLargePage
+stdENDP _Ki386EnableCurrentLargePage
+
+INIT ends
+
+_TEXT$00 SEGMENT PARA PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ page , 132
+ subttl "Flush Single Translation Buffer"
+;++
+;
+; VOID
+; FASTCALL
+; KiFlushSingleTb (
+; IN BOOLEAN Invalid,
+; IN PVOID Virtual
+; )
+;
+; Routine Description:
+;
+; This function flushes a single TB entry.
+;
+; It only works on a 486 or greater.
+;
+; Arguments:
+;
+; Invalid - Supplies a boolean value that specifies the reason for
+; flushing the translation buffer.
+;
+; Virtual - Supplies the virtual address of the single entry that is
+; to be flushed from the translation buffer.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicFastCall KiFlushSingleTb ,2
+
+;
+; 486 or above code
+;
+ invlpg [edx]
+ fstRET KiFlushSingleTb
+
+fstENDP KiFlushSingleTb
+
+ page , 132
+ subttl "Swap Process"
+;++
+;
+; VOID
+; KiSwapProcess (
+; IN PKPROCESS NewProcess,
+; IN PKPROCESS OldProcess
+; )
+;
+; Routine Description:
+;
+; This function swaps the address space to another process by flushing
+; the data cache, the instruction cache, the translation buffer, and
+; establishes a new directory table base.
+;
+; It also swaps in the LDT and IOPM of the new process. This is necessary
+; to avoid bogus mismatches in SwapContext.
+;
+; NOTE: keep in sync with process switch part of SwapContext
+;
+; Arguments:
+;
+; NewProcess - Supplies a pointer to the process to switch to.
+;
+; OldProcess - Supplies a pointer to the process being switched from.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiSwapProcess ,2
+cPublicFpo 2, 0
+
+ mov edx,[esp]+4 ; (edx)-> New process
+ mov eax,[esp]+8 ; (eax)-> Old Process
+
+;
+; Acquire the context swap lock, clear the processor set member in the old
+; process, set the processor member in the new process, and release the
+; context swap lock.
+;
+
+ifndef NT_UP
+
+ lea ecx,_KiContextSwapLock ; get context swap lock address
+
+sp10: ACQUIRE_SPINLOCK ecx, sp20, NoChecking ; acquire context swap lock
+
+ mov ecx, PCR[PcSetMember]
+ xor [eax]+PrActiveProcessors,ecx ; clear bit in old processor set
+ xor [edx]+PrActiveProcessors,ecx ; set bit in new processor set
+
+if DBG
+
+ test [eax]+PrActiveProcessors,ecx ; test if bit clear in old set
+ jnz kisp_error ; if nz, bit not clear in old set
+ test [edx]+PrActiveProcessors,ecx ; test if bit set in new set
+ jz kisp_error1 ; if z, bit not set in new set
+
+endif
+
+ mov _KiContextSwapLock, 0 ; release context swap lock
+
+endif
+
+ mov ecx,PCR[PcTss] ; (ecx)-> TSS
+
+;
+; Change address space
+;
+
+ xor eax,eax ; assume ldtr is to be NULL
+ mov gs,ax ; Clear gs. (also workarounds
+ ; P6 step B0 errata 11)
+ mov eax,[edx]+PrDirectoryTableBase
+ mov cr3,eax
+ mov [ecx]+TssCR3,eax ; be sure TSS in sync with processor
+
+;
+; Change IOPM
+;
+
+ mov ax,[edx]+PrIopmOffset
+ mov [ecx]+TssIoMapBase,ax
+
+;
+; Change LDT
+;
+
+ xor eax, eax
+ cmp word ptr [edx]+PrLdtDescriptor,ax ; limit 0?
+ jz short kisp10 ; null LDT, go load NULL ldtr
+
+;
+; Edit LDT descriptor
+;
+
+ mov ecx,PCR[PcGdt]
+ mov eax,[edx+PrLdtDescriptor]
+ mov [ecx+KGDT_LDT],eax
+ mov eax,[edx+PrLdtDescriptor+4]
+ mov [ecx+KGDT_LDT+4],eax
+
+;
+; Set up the int 21 descriptor of the IDT. If the process does not have an
+; Ldt, it should never make any int 21 call. If it does, an exception is
+; generated. If the process has an Ldt, we need to update the int 21 entry
+; of the IDT for the process. Note the Int21Descriptor of the process may
+; simply indicate an invalid entry, in which case the int 21 will be trapped
+; to the kernel.
+;
+
+ mov ecx, PCR[PcIdt]
+ mov eax, [edx+PrInt21Descriptor]
+ mov [ecx+21h*8], eax
+ mov eax, [edx+PrInt21Descriptor+4]
+ mov [ecx+21h*8+4], eax
+
+ mov eax,KGDT_LDT ;@@32-bit op to avoid prefix
+
+;
+; Load LDTR
+;
+
+kisp10: lldt ax
+ stdRET _KiSwapProcess
+
+;
+; Wait for context swap lock to be released.
+;
+
+ifndef NT_UP
+
+sp20: SPIN_ON_SPINLOCK ecx, sp10 ;
+
+endif
+
+if DBG
+kisp_error1: int 3
+kisp_error: int 3
+endif
+
+stdENDP _KiSwapProcess
+
+ page , 132
+ subttl "Adjust TSS ESP0 value"
+;++
+;
+; VOID
+; Ki386AdjustEsp0 (
+; IN PKTRAP_FRAME TrapFrame
+; )
+;
+; Routine Description:
+;
+; This routine puts the appropriate ESP0 value in the esp0 field of the
+; TSS. This allows protected mode and V86 mode to use the same stack
+; frame. The ESP0 value for protected mode is 16 bytes lower than
+; for V86 mode to compensate for the missing segment registers.
+;
+; Arguments:
+;
+; TrapFrame - Supplies a pointer to the TrapFrame
+;
+; Return Value:
+;
+; None.
+;
+;--
+cPublicProc _Ki386AdjustEsp0 ,1
+
+ stdCall _KeGetCurrentThread
+
+ mov edx,[esp + 4] ; edx -> trap frame
+ mov eax,[eax]+thInitialStack ; eax = base of stack
+ test dword ptr [edx]+TsEFlags,EFLAGS_V86_MASK ; is this a V86 frame?
+ jnz short ae10
+
+ sub eax,TsV86Gs - TsHardwareSegSS ; compensate for missing regs
+ae10: sub eax,NPX_FRAME_LENGTH
+ pushfd ; Make sure we don't move
+ cli ; processors while we do this
+ mov edx,PCR[PcTss]
+ mov [edx]+TssEsp0,eax ; set Esp0 value
+ popfd
+ stdRET _Ki386AdjustEsp0
+
+stdENDP _Ki386AdjustEsp0
+
+;++
+;
+; NTSTATUS
+; KiSwitchToThread (
+; IN PKTHREAD NextThread,
+; IN ULONG WaitReason,
+; IN ULONG WaitMode,
+; IN PKEVENT WaitObject
+; )
+;
+; Routine Description:
+;
+; This function performs an optimal switch to the specified target thread
+; if possible. No timeout is associated with the wait, thus the issuing
+; thread will wait until the wait event is signaled or an APC is delivered.
+;
+; N.B. This routine is called with the dispatcher database locked.
+;
+; N.B. The wait IRQL is assumed to be set for the current thread and the
+; wait status is assumed to be set for the target thread.
+;
+; N.B. It is assumed that if a queue is associated with the target thread,
+; then the concurrency count has been incremented.
+;
+; N.B. Control is returned from this function with the dispatcher database
+; unlocked.
+;
+; Arguments:
+;
+; NextThread - Supplies a pointer to a dispatcher object of type thread.
+;
+; WaitReason - Supplies the reason for the wait operation.
+;
+; WaitMode - Supplies the processor wait mode.
+;
+; WaitObject - Supplies a pointer to a dispatcher object of type event
+; or semaphore.
+;
+; Return Value:
+;
+; The wait completion status. A value of STATUS_SUCCESS is returned if
+; the specified object satisfied the wait. A value of STATUS_USER_APC is
+; returned if the wait was aborted to deliver a user APC to the current
+; thread.
+;
+;--
+
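+;
+; Argument offsets relative to esp after the four nonvolatile register save
+; slots have been allocated below the return address (16 bytes of saved
+; registers plus the 4-byte return address = 20).
+;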
+NextThread equ 20 ; next thread offset
+WaitReason equ 24 ; wait reason offset
+WaitMode equ 28 ; wait mode offset
+WaitObject equ 32 ; wait object offset
+
+cPublicProc _KiSwitchToThread, 4
+.fpo (4, 4, 0, 0, 1, 0)
+
+;
+; N.B. The following registers MUST be saved such that ebp is saved last.
+; This is done so the debugger can find the saved ebp for a thread
+; that is not currently in the running state.
+;
+
+ sub esp,4*4 ; save registers
+ mov [esp + 12],ebx ;
+ mov [esp + 8],esi ;
+ mov [esp + 4],edi ;
+ mov [esp + 0],ebp ;
+
+;
+; If the target thread's kernel stack is resident, the target thread's
+; process is in the balance set, the target thread can run on the
+; current processor, and another thread has not already been selected
+; to run on the current processor, then do a direct dispatch to the
+; target thread bypassing all the general wait logic, thread priorities
+; permitting.
+;
+
+ mov esi,[esp] + NextThread ; get target thread address
+ mov ebx,PCR[PcSelfPcr] ; get address of PCR
+ mov ebp,[esi].ThApcState.AsProcess ; get target process address
+ mov edi,[ebx].PcPrcbData.PbCurrentThread ; get current thread address
+ cmp byte ptr [esi].ThKernelStackResident,1 ; check if kernel stack resident
+ jne short LongWay ; if ne, kernel stack not resident
+ cmp byte ptr [ebp].PrState,ProcessInMemory ; check if process in memory
+ jne short LongWay ; if ne, process not in memory
+
+ifndef NT_UP
+
+ cmp dword ptr [ebx].PcPrcbData.PbNextThread,0 ; check if next thread
+ jne short LongWay ; if ne, next thread already selected
+ mov ecx,[esi].ThAffinity ; get target thread affinity
+ test [ebx].PcSetMember,ecx ; check if compatible affinity
+ jz short LongWay ; if z, affinity not compatible
+
+endif
+
+;
+; Compute the new thread priority.
+;
+
+ mov cl,[edi].ThPriority ; get client thread priority
+ mov dl,[esi].ThPriority ; get server thread priority
+ cmp cl,LOW_REALTIME_PRIORITY ; check if realtime client
+ jae short ClientRealtime ; if ae, realtime client thread
+ cmp dl,LOW_REALTIME_PRIORITY ; check if realtime server
+ jae short ServerRealtime ; if ae, realtime server thread
+ cmp byte ptr [esi].ThPriorityDecrement,0 ; check if boost active
+ jne short BoostActive ; if ne, priority boost already active
+
+;
+; Both the client and the server are not realtime and a priority boost
+; is not currently active for the server. Under these conditions an
+; optimal switch to the server can be performed if the base priority
+; of the server is above a minimum threshold or the boosted priority
+; of the server is not less than the client priority.
+;
+
+ mov al,[esi].ThBasePriority ; get server thread base priority
+ inc al ; compute boosted priority level
+ mov [esi].ThPriority,al ; assume boosted priority is okay
+ cmp al,cl ; check if high enough boost
+ jb short BoostTooLow ; if b, boosted priority less
+ cmp al,LOW_REALTIME_PRIORITY ; check if less than realtime
+ jb short SetProcessor ; if b, boosted priority not realtime
+ dec byte ptr [esi].ThPriority ; reduce priority back to base
+ jmp short SetProcessor ;
+
+;
+; The boosted priority of the server is less than the current priority of
+; the client. If the server base priority is above the required threshold,
+; then an optimal switch to the server can be performed by temporarily
+; raising the priority of the server to that of the client.
+;
+
+BoostTooLow: ;
+ cmp byte ptr [esi].ThBasePriority,BASE_PRIORITY_THRESHOLD ; check if above threshold
+ jb short LongWay ; if b, priority below threshold
+ mov [esi].ThPriority,cl ; set server thread priority
+ sub cl,[esi].ThBasePriority ; compute priority decrement value
+ mov [esi].ThPriorityDecrement,cl ; set priority decrement count value
+ mov byte ptr [esi].ThDecrementCount,ROUND_TRIP_DECREMENT_COUNT ; set count
+ jmp short SetProcessor ;
+
+;
+; A server boost has previously been applied to the server thread. Count
+; down the decrement count to determine if another optimal server switch
+; is allowed.
+;
+
+BoostActive: ;
+ dec byte ptr [esi].ThDecrementCount ; decrement server count value
+ jz short LastSwitch ; if z, no more switches allowed
+
+;
+; Another optimal switch to the server is allowed provided that the
+; server priority is not less than the client priority.
+;
+
+ cmp dl,cl ; check if server higher priority
+ jae short SetProcessor ; if ae, server higher priority
+ jmp short LongWay ;
+
+;
+; The server has exhausted the number of times an optimal switch may
+; be performed without reducing its priority. Reduce the priority of
+; the server back to its original unboosted (base) value.
+;
+
+LastSwitch: ;
+ mov byte ptr [esi].ThPriorityDecrement,0 ; clear server decrement
+ mov al,[esi].ThBasePriority ; set server thread priority to base
+ mov [esi].ThPriority,al ;
+
+;
+; Ready the target thread for execution and wait on the specified wait
+; object.
+;
+
+LongWay: ;
+ mov ecx,esi ; set address of server thread
+ fstCall KiReadyThread ; ready thread for execution
+ jmp ContinueWait ;
+
+;
+; The client is realtime. In order for an optimal switch to occur, the
+; server must also be realtime and run at a higher or equal priority.
+;
+
+ClientRealtime: ;
+ cmp dl,cl ; check if server lower priority
+ jb short LongWay ; if b, server is lower priority
+
+;
+; The client is not realtime and the server is realtime. An optimal switch
+; to the server can be performed.
+;
+
+ServerRealtime: ;
+ mov al,[ebp].PrThreadQuantum ; set server thread quantum
+ mov [esi].ThQuantum,al ;
+
+;
+; Set the next processor for the server thread.
+;
+
+SetProcessor: ;
+
+ifndef NT_UP
+
+ mov al,[edi].ThNextProcessor ; set server next processor number
+ mov [esi].ThNextProcessor,al ;
+
+endif
+
+;
+; Set the address of the wait block list in the client thread, initialize
+; the event wait block, and insert the wait block in the client event wait list.
+;
+
+ mov edx,edi ; compute wait block address
+ add edx,EVENT_WAIT_BLOCK_OFFSET ;
+ mov [edi].ThWaitBlockList,edx ; set address of wait block list
+ mov dword ptr [edi].ThWaitStatus,0 ; set initial wait status
+ mov ecx,[esp] + WaitObject ; get address of wait object
+ mov [edx].WbNextWaitBlock,edx ; set next wait block address
+ mov [edx].WbObject,ecx ; set address of wait object
+ mov dword ptr [edx].WbWaitKey,WaitAny shl 16; set wait key and wait type
+ add ecx,EvWaitListHead ; compute wait object listhead address
+ add edx,WbWaitListEntry ; compute wait block list entry address
+ mov eax,[ecx].LsBlink ; get backward link of listhead
+ mov [ecx].LsBlink,edx ; set backward link of listhead
+ mov [eax].LsFlink,edx ; set forward link in last entry
+ mov [edx].LsFlink,ecx ; set forward link in wait entry
+ mov [edx].LsBlink,eax ; set backward link wait entry
+
+;
+; Set the client thread wait parameters, set the thread state to Waiting,
+; and insert the thread in the proper wait queue.
+;
+
+ mov byte ptr [edi].ThAlertable,0 ; set alertable FALSE
+ mov al,[esp] + WaitReason ; set wait reason
+ mov [edi].ThWaitReason,al ;
+ mov al,[esp] + WaitMode ; set wait mode
+ mov [edi].ThWaitMode,al ;
+ mov ecx,_KeTickCount + 0 ; get low part of tick count
+ mov [edi].ThWaitTime,ecx ; set thread wait time
+ mov byte ptr [edi].ThState,Waiting ; set thread state
+ lea edx,_KiWaitInListHead ; get address of wait in listhead
+ cmp al,0 ; check if wait mode is kernel
+ je short Stt10 ; if e, wait mode is kernel
+ cmp byte ptr [edi].ThEnableStackSwap,0 ; check if kernel stack swap enabled
+ je short Stt10 ; if e, kernel stack swap disabled
+ cmp [edi].ThPriority,LOW_REALTIME_PRIORITY + 9 ; check if priority in range
+ jb short Stt20 ; if b, thread priority in range
+Stt10: lea edx,_KiWaitOutListHead ; get address of wait out listhead
+Stt20: mov eax,[edx].LsBlink ; get backlink of wait listhead
+ mov ecx,edi ; compute list entry address
+ add ecx,ThWaitListEntry ;
+ mov [edx].LsBlink,ecx ; set backlink of wait listhead
+ mov [eax].LsFlink,ecx ; set forward link in last entry
+ mov [ecx].LsFlink,edx ; set forward link in wait entry
+ mov [ecx].LsBlink,eax ; set backward link in wait entry
+
+;
+; If the current thread is processing a queue entry, then attempt to
+; activate another thread that is blocked on the queue object.
+;
+; N.B. The next thread address can change if the routine to activate
+; a queue waiter is called.
+;
+
+ cmp dword ptr [edi].ThQueue,0 ; check if thread processing queue
+ je short Stt30 ; if e, thread not processing queue
+ mov ecx,[edi].ThQueue ; get queue object address
+ mov [ebx].PcPrcbData.PbNextThread,esi ; set next thread address
+ fstCall KiActivateWaiterQueue ; attempt to activate waiter (ecx)
+ mov esi,[ebx].PcPrcbData.PbNextThread ; get next thread address
+ mov dword ptr [ebx].PcPrcbData.PbNextThread, 0 ; set next thread to NULL
+Stt30: mov [ebx].PcPrcbData.PbCurrentThread,esi ; set current thread object address
+ mov cl,1 ; set APC interrupt bypass disable
+ call SwapContext ; swap context to target thread
+
+;
+; Lower IRQL to its previous level.
+;
+; N.B. SwapContext releases the dispatcher database lock.
+;
+; N.B. The register esi contains the address of the new thread on return.
+;
+
+ mov ebp,[esi].ThWaitStatus ; get wait completion status
+ mov cl,[esi].ThWaitIrql ; get original IRQL
+ fstCall KfLowerIrql ; set new IRQL
+
+;
+; If the wait was not interrupted to deliver a kernel APC, then return the
+; completion status.
+;
+
+ cmp ebp,STATUS_KERNEL_APC ; check if awakened for kernel APC
+ je short KernelApc ; if e, thread awakened for kernel APC
+ mov eax, ebp ; set wait completion status
+ mov ebp,[esp + 0] ; restore registers
+ mov edi,[esp + 4] ;
+ mov esi,[esp + 8] ;
+ mov ebx,[esp + 12] ;
+ add esp,4 * 4 ;
+
+ stdRET _KiSwitchToThread ; return
+
+;
+; Disable interrupts and attempt to acquire the dispatcher database lock.
+;
+
+KernelApc: ;
+
+ifndef NT_UP
+
+ lea ecx,_KiDispatcherLock ; get dispatcher database lock address
+Stt40: cli ; disable interrupts
+ ACQUIRE_SPINLOCK ecx,<short Stt50> ; acquire dispatcher database lock
+
+endif
+
+;
+; Raise IRQL to synchronization level and save wait IRQL.
+;
+
+ mov ecx,SYNCH_LEVEL ; raise IRQL to synchronization level
+ fstCall KfRaiseIrql ;
+ sti ; enable interrupts
+ mov [esi].ThWaitIrql,al ; set wait IRQL
+
+ContinueWait: ;
+ mov eax,[esp] + WaitObject ; get wait object address
+ mov ecx,[esp] + WaitReason ; get wait reason
+ mov edx,[esp] + WaitMode ; get wait mode
+ stdCall _KiContinueClientWait,<eax, ecx, edx> ; continue client wait
+ mov ebp,[esp + 0] ; restore registers
+ mov edi,[esp + 4] ;
+ mov esi,[esp + 8] ;
+ mov ebx,[esp + 12] ;
+ add esp,4 * 4 ;
+
+ stdRET _KiSwitchToThread ; return
+
+;
+; Spin until dispatcher database lock is available.
+;
+
+ifndef NT_UP
+
+Stt50: sti ; enable interrupts
+ SPIN_ON_SPINLOCK ecx,<short Stt40> ; wait for dispatcher database lock
+
+endif
+
+stdENDP _KiSwitchToThread
+
+_TEXT$00 ends
+ end
diff --git a/private/ntos/ke/i386/cyrix.c b/private/ntos/ke/i386/cyrix.c
new file mode 100644
index 000000000..9cc786ff6
--- /dev/null
+++ b/private/ntos/ke/i386/cyrix.c
@@ -0,0 +1,350 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ cyrix.c
+
+Abstract:
+
+ Detects and initializes Cyrix processors
+
+Author:
+
+ Ken Reneris (kenr) 24-Feb-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+#define Cx486_SLC 0x0
+#define Cx486_DLC 0x1
+#define Cx486_SLC2 0x2
+#define Cx486_DLC2 0x3
+#define Cx486_SRx 0x4 // Retail Upgrade Cx486SLC
+#define Cx486_DRx 0x5 // Retail Upgrade Cx486DLC
+#define Cx486_SRx2 0x6 // Retail Upgrade 2x Cx486SLC
+#define Cx486_DRx2 0x7 // Retail Upgrade 2x Cx486DLC
+#define Cx486DX 0x1a
+#define Cx486DX2 0x1b
+#define M1 0x30
+
+#define CCR0 0xC0
+#define CCR1 0xC1
+#define CCR2 0xC2
+#define CCR3 0xC3
+
+#define DIR0 0xFE
+#define DIR1 0xFF
+
+
+// SRx & DRx flags
+#define CCR0_NC0 0x01 // No cache 64k @ 1M boundaries
+#define CCR0_NC1 0x02 // No cache 640k - 1M
+#define CCR0_A20M 0x04 // Enables A20M#
+#define CCR0_KEN 0x08 // Enables KEN#
+#define CCR0_FLUSH 0x10 // Enables FLUSH#
+
+// DX flags
+#define CCR1_NO_LOCK 0x10 // Ignore lock prefixes
+
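+
+// N.B. The Cyrix configuration registers are accessed indirectly: the
+// register index is written to I/O port 22h and the data byte is then
+// read or written at port 23h (see ReadCyrixRegister and
+// WriteCyrixRegister below).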
+
+ULONG
+Ke386CyrixId (
+ VOID
+ );
+
+UCHAR
+ReadCyrixRegister (
+ IN UCHAR Register
+ );
+
+VOID
+WriteCyrixRegister (
+ IN UCHAR Register,
+ IN UCHAR Value
+ );
+
+VOID
+Ke386ConfigureCyrixProcessor (
+ VOID
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,Ke386CyrixId)
+#pragma alloc_text(PAGELK,Ke386ConfigureCyrixProcessor)
+#endif
+
+
+extern UCHAR CmpCyrixID[];
+
+
+
+ULONG
+Ke386CyrixId (
+ VOID
+ )
+/*++
+
+Routine Description:
+
+ Detects and returns the Cyrix ID of the processor.
+ This function only detects Cyrix processors which have internal
+ cache support.
+
+Arguments:
+
+ None.
+
+
+Return Value:
+
+ Cyrix ID of the processor
+ 0 if not a Cyrix processor
+
+--*/
+
+{
+ ULONG CyrixID;
+ UCHAR r3, c;
+ UCHAR flags;
+ PKPRCB Prcb;
+
+ CyrixID = 0;
+
+ Prcb = KeGetCurrentPrcb();
+ if (Prcb->CpuID && strcmp (Prcb->VendorString, CmpCyrixID)) {
+
+ //
+ // Not a Cyrix processor
+ //
+
+ return 0;
+ }
+
+ //
+ // Test whether the DIV instruction leaves the flags unchanged.
+ //
+
+ _asm {
+ xor eax, eax
+ sahf ; flags = ah
+
+ lahf ; ah = flags
+ mov flags, ah ; save flags
+
+ mov eax, 5
+ mov ecx, 2
+ div cl ; 5 / 2 = ?
+
+ lahf
+ sub flags, ah ; flags = orig_flags - new_flags
+ }
+
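+ //
+ // The flag image was unchanged only if the DIV left the flags alone,
+ // which is the behavior expected of Cyrix parts; probe CCR3 to confirm.
+ //
+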
+ if (flags == 0) {
+
+ //
+ // See if the Cyrix CCR3 register bit 0x80 can be edited.
+ //
+
+ r3 = ReadCyrixRegister(CCR3); // Read CCR3
+ c = r3 ^ 0x80; // flip bit 80
+ WriteCyrixRegister(CCR3, c); // Write CCR3
+ ReadCyrixRegister(CCR0); // select new register
+ c = ReadCyrixRegister(CCR3); // Read new CCR3 value
+
+ if (ReadCyrixRegister(CCR3) != r3) {
+
+ //
+ // Read the Cyrix ID type register
+ //
+
+ CyrixID = ReadCyrixRegister(DIR0) + 1;
+ }
+
+ WriteCyrixRegister(CCR3, r3); // restore original CCR3 value
+ }
+
+ if (CyrixID > 0x7f) {
+ // invalid setting
+ CyrixID = 0;
+ }
+
+ return CyrixID;
+}
+
+static UCHAR
+ReadCyrixRegister (
+ IN UCHAR Register
+ )
+/*++
+
+Routine Description:
+
+ Reads an internal Cyrix ID register. Note the internal register
+ space is accessed via I/O addresses which are hooked internally
+ to the processor.
+
+ The caller is responsible for only calling this function on
+ a Cyrix processor.
+
+Arguments:
+
+ Register - Which Cyrix register to read
+
+Return Value:
+
+ The register's value.
+
+--*/
+
+{
+ UCHAR Value;
+
+ _asm {
+ mov al, Register
+ cli
+ out 22h, al
+ in al, 23h
+ sti
+ mov Value, al
+ }
+ return Value;
+}
+
+
+static VOID
+WriteCyrixRegister (
+ IN UCHAR Register,
+ IN UCHAR Value
+ )
+/*++
+
+Routine Description:
+
+ Write an internal Cyrix ID register. Note the internal register
+ space is accessed via I/O addresses which are hooked internally
+ to the processor.
+
+ The caller is responsible for only calling this function on
+ a Cyrix processor.
+
+Arguments:
+
+ Register - Which Cyrix register to write to
+ Value - Value to write into the register
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ _asm {
+ mov al, Register
+ mov cl, Value
+ cli
+ out 22h, al
+ mov al, cl
+ out 23h, al
+ sti
+ }
+}
+
+
+VOID
+Ke386ConfigureCyrixProcessor (
+ VOID
+ )
+{
+ UCHAR r0, r1;
+ ULONG id, rev;
+ PVOID LockHandle;
+
+
+ PAGED_CODE();
+
+ id = Ke386CyrixId();
+ if (id) {
+
+ LockHandle = MmLockPagableCodeSection (&Ke386ConfigureCyrixProcessor);
+
+ id = id - 1;
+ rev = ReadCyrixRegister(DIR1);
+
+ if ((id >= 0x20 && id <= 0x27) ||
+ ((id & 0xF0) == M1 && rev < 0x17)) {
+
+ //
+ // These steppings have a write-back cache problem.
+ // On these chips the L1 w/b cache can be disabled by
+ // setting only the NW bit.
+ //
+
+ _asm {
+ cli
+
+ mov eax, cr0
+ or eax, CR0_NW
+ mov cr0, eax
+
+ sti
+ }
+ }
+
+
+ switch (id) {
+ case Cx486_SRx:
+ case Cx486_DRx:
+ case Cx486_SRx2:
+ case Cx486_DRx2:
+
+ //
+ // These processors have an internal cache feature
+ // let's turn it on.
+ //
+
+ r0 = ReadCyrixRegister(CCR0);
+ r0 |= CCR0_NC1 | CCR0_FLUSH;
+ r0 &= ~CCR0_NC0;
+ WriteCyrixRegister(CCR0, r0);
+
+ // Clear Non-Cacheable Region 1
+ WriteCyrixRegister(0xC4, 0);
+ WriteCyrixRegister(0xC5, 0);
+ WriteCyrixRegister(0xC6, 0);
+ break;
+
+ case Cx486DX:
+ case Cx486DX2:
+ //
+ // Set NO_LOCK flag on these processors according to
+ // the number of booted processors
+ //
+
+ r1 = ReadCyrixRegister(CCR1);
+ r1 |= CCR1_NO_LOCK;
+ if (KeNumberProcessors > 1) {
+ r1 &= ~CCR1_NO_LOCK;
+ }
+ WriteCyrixRegister(CCR1, r1);
+ break;
+ }
+
+ MmUnlockPagableImageSection (LockHandle);
+ }
+}
diff --git a/private/ntos/ke/i386/dmpstate.c b/private/ntos/ke/i386/dmpstate.c
new file mode 100644
index 000000000..d0a2f51c8
--- /dev/null
+++ b/private/ntos/ke/i386/dmpstate.c
@@ -0,0 +1,363 @@
+/*++
+
+Copyright (c) 1992 Microsoft Corporation
+
+Module Name:
+
+ dmpstate.c
+
+Abstract:
+
+ This module implements the architecture specific routine that dumps
+ the machine state when a bug check occurs and no debugger is hooked
+ to the system. It is assumed that it is called from bug check.
+
+Author:
+
+ David N. Cutler (davec) 17-Jan-1992
+
+Environment:
+
+ Kernel mode.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+
+BOOLEAN
+KiReadStackValue(
+ IN ULONG Address,
+ OUT PULONG Value
+ );
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ );
+
+//
+// Define external data.
+//
+
+extern ULONG ExBuildVersion;
+extern LIST_ENTRY PsLoadedModuleList;
+
+VOID
+KeDumpMachineState (
+ IN PKPROCESSOR_STATE ProcessorState,
+ IN PCHAR Buffer,
+ IN PULONG BugCheckParameters,
+ IN ULONG NumberOfParameters,
+ IN PKE_BUGCHECK_UNICODE_TO_ANSI UnicodeToAnsiRoutine
+ )
+
+/*++
+
+Routine Description:
+
+ This function formats and displays the machine state at the time of the
+ bug check.
+
+Arguments:
+
+ ProcessorState - Supplies a pointer to the processor's state
+
+ Buffer - Supplies a pointer to a buffer to be used to output machine
+ state information.
+
+ BugCheckParameters - Supplies additional bugcheck information
+
+ NumberOfParameters - Supplies the number of entries in the BugCheckParameters array
+
+ UnicodeToAnsiRoutine - Supplies a pointer to a routine to convert Unicode strings
+ to Ansi strings without touching paged translation tables.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PLIST_ENTRY ModuleListHead;
+ PLIST_ENTRY Next;
+ ULONG StackAddr;
+ ULONG PossiblePc;
+ ULONG Index, NoLines;
+ ULONG DisplayWidth, DisplayHeight;
+ ULONG CursorColumn, CursorRow;
+ ULONG i, j;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PVOID ImageBase;
+ PKPRCB Prcb;
+ UCHAR AnsiBuffer[ 32 ];
+ ULONG DateStamp;
+
+
+ //
+ // Query display parameters.
+ //
+
+ HalQueryDisplayParameters(&DisplayWidth,
+ &DisplayHeight,
+ &CursorColumn,
+ &CursorRow);
+
+ //
+ // At this point the context record contains the machine state at the
+ // call to bug check.
+ //
+ // Put out the processor identification and the system version.
+ //
+
+ //
+ // Check to see if any BugCheckParameters are valid code addresses.
+ // If so, print them for the user
+ //
+
+ NoLines = 8;
+ for (i=0; i < NumberOfParameters; i++) {
+ ImageBase = KiPcToFileHeader((PVOID) BugCheckParameters[i], &DataTableEntry);
+ if (ImageBase == NULL) {
+ continue;
+ }
+
+ sprintf (Buffer, "*** Address %08lx has base at %08lx - %-12.12s\n",
+ BugCheckParameters[i], ImageBase,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+ HalDisplayString(Buffer);
+ NoLines++;
+ }
+ Prcb = KeGetCurrentPrcb();
+ if (Prcb->CpuID) {
+ sprintf(Buffer, "\n\nCPUID:%.12s %x.%x.%x",
+ Prcb->VendorString,
+ Prcb->CpuType,
+ Prcb->CpuStep >> 8,
+ Prcb->CpuStep & 0x0f
+ );
+
+ } else {
+ sprintf(Buffer, "\n\np%x-%04x", Prcb->CpuType, Prcb->CpuStep);
+ }
+ HalDisplayString (Buffer);
+
+ sprintf(Buffer, " irql:%x%s SYSVER 0x%08x\n\n",
+ KeGetCurrentIrql(),
+ KeIsExecutingDpc() ? " DPC" : " ",
+ NtBuildNumber);
+ HalDisplayString(Buffer);
+ NoLines += 3;
+
+ //
+ // Dump the loaded module list
+ //
+
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ Next = ModuleListHead->Flink;
+
+ if (Next != NULL) {
+ for (i=0; i < DisplayWidth; i += 40) {
+ HalDisplayString ("Dll Base DateStmp - Name ");
+ }
+ HalDisplayString ("\n");
+ NoLines += 2;
+
+ while (NoLines < DisplayHeight && Next != ModuleListHead) {
+ for (i=0; i < 2 && Next != ModuleListHead; i++) {
+ DataTableEntry = CONTAINING_RECORD(Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ Next = Next->Flink;
+ if (MmDbgReadCheck(DataTableEntry->DllBase) != NULL) {
+ PIMAGE_NT_HEADERS NtHeaders;
+
+ NtHeaders = RtlImageNtHeader(DataTableEntry->DllBase);
+ DateStamp = NtHeaders->FileHeader.TimeDateStamp;
+
+ } else {
+ DateStamp = 0;
+ }
+ sprintf (Buffer, "%08lx %08lx - %-18.18s ",
+ DataTableEntry->DllBase,
+ DateStamp,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+
+ HalDisplayString (Buffer);
+ }
+ HalDisplayString ("\n");
+ NoLines++;
+ }
+ HalDisplayString ("\n");
+ }
+
+ //
+ // Dump some of the current stack
+ //
+
+ StackAddr = ProcessorState->ContextFrame.Esp - sizeof(ULONG);
+ j = 0;
+ while (NoLines < DisplayHeight) {
+
+ StackAddr += sizeof(ULONG);
+ if (!KiReadStackValue(StackAddr, &PossiblePc)) {
+ HalDisplayString ("\n");
+ break;
+ }
+
+ ImageBase = KiPcToFileHeader((PVOID) PossiblePc, &DataTableEntry);
+ if (ImageBase == NULL) {
+ continue;
+ }
+
+ if (j == 0) {
+ sprintf(Buffer, "Address dword dump Build [%ld] %25s - Name\n",
+ NtBuildNumber & 0xFFFFFFF,
+ " "
+ );
+
+ HalDisplayString(Buffer);
+ NoLines++;
+ j++;
+ }
+
+ sprintf(Buffer, "%08lx %08lx ", StackAddr, PossiblePc);
+ HalDisplayString (Buffer);
+
+ for (i=0; i < 5; i++) {
+ if (KiReadStackValue(StackAddr+i*sizeof(ULONG), &PossiblePc)) {
+ sprintf (Buffer, "%08lx ", PossiblePc);
+ HalDisplayString (Buffer);
+ } else {
+ HalDisplayString (" ");
+ }
+ }
+
+ sprintf (Buffer, "- %-14.14s\n",
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+ HalDisplayString(Buffer);
+ NoLines++;
+ }
+
+ return;
+}
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the base of an image that contains the
+ specified PcValue. An image contains the PcValue if the PcValue
+ is between the ImageBase and the ImageBase plus the size of the
+ virtual image.
+
+Arguments:
+
+ PcValue - Supplies a PcValue.
+
+ DataTableEntry - Supplies a pointer to a variable that receives the
+ address of the data table entry that describes the image.
+
+Return Value:
+
+ NULL - No image was found that contains the PcValue.
+
+ NON-NULL - Returns the base address of the image that contains the
+ PcValue.
+
+--*/
+
+{
+
+ PLIST_ENTRY ModuleListHead;
+ PLDR_DATA_TABLE_ENTRY Entry;
+ PLIST_ENTRY Next;
+ ULONG Bounds;
+ PVOID ReturnBase, Base;
+
+ //
+ // If the module list has been initialized, then scan the list to
+ // locate the appropriate entry.
+ //
+
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ ReturnBase = NULL;
+ Next = ModuleListHead->Flink;
+ if (Next != NULL) {
+ while (Next != ModuleListHead) {
+ Entry = CONTAINING_RECORD(Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ Next = Next->Flink;
+ Base = Entry->DllBase;
+ Bounds = (ULONG)Base + Entry->SizeOfImage;
+ if ((ULONG)PcValue >= (ULONG)Base && (ULONG)PcValue < Bounds) {
+ *DataTableEntry = Entry;
+ ReturnBase = Base;
+ break;
+ }
+ }
+ }
+
+ return ReturnBase;
+}
+
+BOOLEAN
+KiReadStackValue(
+ IN ULONG Address,
+ OUT PULONG Value
+ )
+/*++
+
+Routine Description:
+
+ This function reads a dword off the current stack.
+
+Arguments:
+
+ Address - Stack address to read
+
+ Value - value of dword at the supplied stack address
+
+Return Value:
+
+ FALSE - Address was out of range
+ TRUE - dword returned
+
+--*/
+{
+ PKPCR Pcr;
+
+ Pcr = KeGetPcr();
+ if (Address > (ULONG) Pcr->NtTib.StackBase ||
+ Address < (ULONG) Pcr->NtTib.StackLimit) {
+ return FALSE;
+ }
+
+ *Value = *((PULONG) Address);
+ return TRUE;
+}
diff --git a/private/ntos/ke/i386/emv86.asm b/private/ntos/ke/i386/emv86.asm
new file mode 100644
index 000000000..e0642c180
--- /dev/null
+++ b/private/ntos/ke/i386/emv86.asm
@@ -0,0 +1,1973 @@
+ title "Vdm Instruction Emulation"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; emv86.asm
+;
+; Abstract:
+;
+; This module contains the routines for emulating instructions and
+; faults from v86 mode.
+;
+; Author:
+;
+; sudeep bharati (sudeepb) 16-Nov-1992
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Notes:
+;
+;
+; Revision History:
+;
+;--
+.386p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include mac386.inc
+include i386\mi.inc
+include callconv.inc
+include ..\..\vdm\i386\vdm.inc
+include ..\..\vdm\i386\vdmtb.inc
+ .list
+
+ extrn VdmOpcode0f:proc
+ extrn _DbgPrint:proc
+ extrn _KeI386VdmIoplAllowed:dword
+ extrn _KeI386VirtualIntExtensions:dword
+ EXTRNP _Ki386VdmDispatchIo,5
+ EXTRNP _Ki386VdmDispatchStringIo,8
+ EXTRNP _KiDispatchException,5
+ EXTRNP _Ki386VdmReflectException,1
+ EXTRNP _VdmEndExecution,2
+ extrn VdmDispatchBop:near
+ EXTRNP _VdmPrinterStatus,3
+ EXTRNP _VdmPrinterWriteData, 3
+ EXTRNP _VdmDispatchInterrupts,2
+ EXTRNP _KeBugCheck,1
+ EXTRNP _VdmSkipNpxInstruction,4
+ifdef VDMDBG
+ extrn TraceOpcode:near
+endif
+
+ extrn _ExVdmOpcodeDispatchCounts:dword
+ extrn OpcodeIndex:byte
+ extrn _VdmUserCr0MapIn:byte
+
+; SUPPORT Intel CPU/Non PC/AT machine
+ extrn _VdmFixedStateLinear:dword
+ extrn _KeI386MachineType:dword
+
+ page ,132
+
+ifdef VDMDBG
+%out Debugging version
+endif
+
+; Force assume into place
+
+_PAGE SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:NOTHING, FS:NOTHING, GS:NOTHING
+_PAGE ENDS
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:NOTHING, FS:NOTHING, GS:NOTHING
+_TEXT$00 ENDS
+
+_DATA SEGMENT DWORD PUBLIC 'DATA'
+
+;
+; Instruction emulation emulates the following instructions.
+; The emulation affects the noted user mode registers.
+;
+;
+; In V86 mode, the following instructions are emulated in the kernel
+;
+; Registers (E)Flags (E)SP SS CS
+; PUSHF X X
+; POPF X X
+; INTnn X X X
+; INTO X X X
+; IRET X X X
+; CLI X
+; STI X
+;
+;
+; INSB
+; INSW
+; OUTSB
+; OUTSW
+; INBimm
+; INWimm
+; OUTBimm
+; OUTWimm
+; INB
+; INW
+; OUTB
+; OUTW
+;
+; WARNING What do we do about 32 bit io instructions??
+
+
+; OpcodeDispatchV86 - table of routines used to emulate instructions
+; in v86 mode.
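+; The first opcode byte is translated through the OpcodeIndex table to a
+; VDM_INDEX_* value, which selects the handler from this table (see
+; Ki386DispatchOpcodeV86 below).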
+
+ public OpcodeDispatchV86
+dtBEGIN OpcodeDispatchV86,OpcodeInvalidV86
+ dtS VDM_INDEX_0F , Opcode0FV86
+ dtS VDM_INDEX_ESPrefix , OpcodeESPrefixV86
+ dtS VDM_INDEX_CSPrefix , OpcodeCSPrefixV86
+ dtS VDM_INDEX_SSPrefix , OpcodeSSPrefixV86
+ dtS VDM_INDEX_DSPrefix , OpcodeDSPrefixV86
+ dtS VDM_INDEX_FSPrefix , OpcodeFSPrefixV86
+ dtS VDM_INDEX_GSPrefix , OpcodeGSPrefixV86
+ dtS VDM_INDEX_OPER32Prefix , OpcodeOPER32PrefixV86
+ dtS VDM_INDEX_ADDR32Prefix , OpcodeADDR32PrefixV86
+ dtS VDM_INDEX_INSB , OpcodeINSBV86
+ dtS VDM_INDEX_INSW , OpcodeINSWV86
+ dtS VDM_INDEX_OUTSB , OpcodeOUTSBV86
+ dtS VDM_INDEX_OUTSW , OpcodeOUTSWV86
+ dtS VDM_INDEX_PUSHF , OpcodePUSHFV86
+ dtS VDM_INDEX_POPF , OpcodePOPFV86
+ dtS VDM_INDEX_INTnn , OpcodeINTnnV86
+ dtS VDM_INDEX_INTO , OpcodeINTOV86
+ dtS VDM_INDEX_IRET , OpcodeIRETV86
+ dts VDM_INDEX_NPX , OpcodeNPXV86
+ dtS VDM_INDEX_INBimm , OpcodeINBimmV86
+ dtS VDM_INDEX_INWimm , OpcodeINWimmV86
+ dtS VDM_INDEX_OUTBimm , OpcodeOUTBimmV86
+ dtS VDM_INDEX_OUTWimm , OpcodeOUTWimmV86
+ dtS VDM_INDEX_INB , OpcodeINBV86
+ dtS VDM_INDEX_INW , OpcodeINWV86
+ dtS VDM_INDEX_OUTB , OpcodeOUTBV86
+ dtS VDM_INDEX_OUTW , OpcodeOUTWV86
+ dtS VDM_INDEX_LOCKPrefix , OpcodeLOCKPrefixV86
+ dtS VDM_INDEX_REPNEPrefix , OpcodeREPNEPrefixV86
+ dtS VDM_INDEX_REPPrefix , OpcodeREPPrefixV86
+ dtS VDM_INDEX_CLI , OpcodeCLIV86
+ dtS VDM_INDEX_STI , OpcodeSTIV86
+ dtS VDM_INDEX_HLT , OpcodeHLTV86
+dtEND MAX_VDM_INDEX
+
+_DATA ENDS
+
+_PAGE SEGMENT DWORD USE32 PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:FLAT, FS:NOTHING, GS:NOTHING
+
+ page ,132
+ subttl "Override Prefix Macro"
+;++
+;
+; Routine Description:
+;
+; This macro generates the code for handling override prefixes
+; The routine name generated is OpcodeXXXXPrefix, where XXXX is
+; the name used in the macro invocation. The code will set the
+; PREFIX_XXXX bit in the Prefix flags.
+;
+; Arguments
+; name = name of prefix
+; esi = address of reg info
+; edx = opcode
+;
+; Returns
+; user mode Eip advanced
+; eax advanced
+; edx contains next byte of opcode
+;
+; NOTE: This routine exits by dispatching through the table again.
+;--
+opPrefix macro name
+ public Opcode&name&PrefixV86
+Opcode&name&PrefixV86 proc
+
+ or ebx,PREFIX_&name
+
+
+ifdef VDMDBG
+_DATA segment
+Msg&name&Prefix db 'NTVDM: Encountered override prefix &name& %lx at '
+ db 'address %lx', 0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:Msg&name&Prefix
+ call _DbgPrint
+ add esp,12
+
+endif
+
+ jmp OpcodeGenericPrefixV86 ; dispatch to next handler
+
+Opcode&name&PrefixV86 endp
+endm
+
+irp prefix, <ES, CS, SS, DS, FS, GS, OPER32, ADDR32, LOCK, REPNE, REP>
+
+ opPrefix prefix
+
+endm
+
+ page ,132
+ subttl "Instruction Emulation Dispatcher for V86"
+;++
+;
+; Routine Description:
+;
+; This routine dispatches to the opcode specific emulation routine,
+; based on the first byte of the opcode. Two-byte opcodes and prefixes
+; result in another level of dispatching from the handling routine.
+;
+; Arguments:
+;
+; ebp = pointer to trap frame
+;
+; Returns:
+;
+; EAX = 0 failure
+; 1 success
+
+cPublicProc _Ki386DispatchOpcodeV86,0
+
+ifdef VDMDBG
+ push 0
+ call TraceOpcode
+endif
+
+ mov esi,[ebp].TsSegCs
+ shl esi,4
+ add esi,[ebp].TsEip
+ movzx ecx, byte ptr [esi]
+ movzx edx, OpcodeIndex[ecx] ;get opcode index
+
+ mov edi,1
+ xor ebx,ebx
+
+ ; All handler routines will get the following on entry
+ ; ebx -> prefix flags
+ ; ebp -> trap frame
+ ; cl -> byte at the faulting address
+ ; interrupts enabled and Irql at APC level
+ ; esi -> address of faulting instruction
+ ; edi -> instruction length count
+ ; All handler routines return
+ ; EAX = 0 for failure
+ ; EAX = 1 for success
+
+if DEVL
+ inc _ExVdmOpcodeDispatchCounts[edx * type _ExVdmOpcodeDispatchCounts]
+endif
+ jmp dword ptr OpcodeDispatchV86[edx * type OpcodeDispatchV86]
+
+stdENDP _Ki386DispatchOpcodeV86
+
+
+ page ,132
+ subttl "Invalid Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an invalid opcode. It prints the invalid
+; opcode message, and causes a GP fault to be reflected to the
+; debugger.
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeInvalidV86
+OpcodeInvalidV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgInvalidOpcode db 'NTVDM: An invalid opcode %lx was encountered at '
+ db 'address %x:%x',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ and ecx,0ffh
+ push ecx
+ push offset FLAT:MsgInvalidOpcode
+ call _DbgPrint ; display invalid opcode message
+ add esp,16
+endif
+
+ xor eax,eax ; ret fail
+ ret
+
+OpcodeInvalidV86 endp
+
+
+ page ,132
+ subttl "Generic Prefix Handler"
+;++
+;
+; Routine Description:
+;
+; This routine handles the generic portion of all of the prefixes,
+; and dispatches the next byte of the opcode.
+;
+; Arguments:
+;
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeGenericPrefixV86
+OpcodeGenericPrefixV86 proc
+
+ inc esi
+ inc edi
+ movzx ecx, byte ptr [esi]
+ movzx edx, OpcodeIndex[ecx] ;get opcode index
+if DEVL
+ inc _ExVdmOpcodeDispatchCounts[edx * type _ExVdmOpcodeDispatchCounts]
+endif
+ jmp OpcodeDispatchV86[edx * type OpcodeDispatchV86]
+
+OpcodeGenericPrefixV86 endp
+
+
+ page ,132
+ subttl "Byte string in Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INSB opcode by dispatching the operation to the
+; VDM string I/O handler (Ki386VdmDispatchStringIo).
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+; WARNING size override? ds override?
+
+ public OpcodeINSBV86
+OpcodeINSBV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINSBOpcode db 'NTVDM: An INSB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push ebx
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINSBOpcode
+ call _DbgPrint ; display INSB opcode message
+ add esp,12
+ pop ebx
+
+endif
+ push ebp ; trap frame
+ push edi ; size of insb
+ movzx eax,word ptr [ebp].TsV86Es
+ shl eax,16
+ movzx ecx,word ptr [ebp].TsEdi
+ or eax,ecx
+ push eax ; address
+ mov eax,1
+ xor ecx, ecx
+ test ebx,PREFIX_REP ; prefixREP
+ jz oisb20
+
+ mov ecx, 1
+ movzx eax,word ptr [ebp].TsEcx
+oisb20:
+ push eax ; number of io ops
+ push TRUE ; read op
+ push ecx ; REP prefix ?
+ push 1 ; byte op
+ movzx eax,word ptr [ebp].TsEdx
+ push eax ; port number
+
+ ; Ki386VdmDispatchStringIo enables interrupts
+IFDEF STD_CALL
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+ELSE
+ call _Ki386VdmDispatchStringIo ; use retval
+ add esp,24
+ENDIF
+ ret
+
+OpcodeINSBV86 endp
+
+ page ,132
+ subttl "Word String In Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INSW opcode by dispatching the operation to the
+; VDM string I/O handler (Ki386VdmDispatchStringIo).
+;
+; Arguments:
+;
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeINSWV86
+OpcodeINSWV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINSWOpcode db 'NTVDM: An INSW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push ebx
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINSWOpcode
+ call _DbgPrint ; display INSW opcode message
+ add esp,12
+ pop ebx
+endif
+
+ push ebp ; trap frame
+ push edi ; size of insw
+ movzx eax,word ptr [ebp].TsV86Es
+ shl eax,16
+ movzx ecx,word ptr [ebp].TsEdi
+ or eax,ecx
+ push eax ; address
+ mov eax,1
+ xor ecx, ecx
+ test ebx,PREFIX_REP ; prefixREP
+ jz oisw20
+
+ mov ecx, 1
+ movzx eax,word ptr [ebp].TsEcx
+oisw20:
+ push eax ; number of io ops
+ push TRUE ; read op
+ push ecx ; REP prefix ?
+ push 2 ; word op
+ movzx eax,word ptr [ebp].TsEdx
+ push eax ; port number
+
+ ; Ki386VdmDispatchStringIo enables interrupts
+IFDEF STD_CALL
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+ELSE
+ call _Ki386VdmDispatchStringIo ; use retval
+ add esp,24
+ENDIF
+ ret
+
+OpcodeINSWV86 endp
+
+ page ,132
+ subttl "Byte String Out Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an OUTSB opcode by dispatching the operation to the
+; VDM string I/O handler (Ki386VdmDispatchStringIo).
+;
+; Arguments:
+;
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeOUTSBV86
+OpcodeOUTSBV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTSBOpcode db 'NTVDM: An OUTSB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push ebx
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTSBOpcode
+ call _DbgPrint ; display OUTSB opcode message
+ add esp,12
+ pop ebx
+endif
+
+ push ebp ; trap frame
+ push edi ; size of outsb
+ movzx eax,word ptr [ebp].TsV86Ds
+ shl eax,16
+ movzx ecx,word ptr [ebp].TsEsi
+ or eax,ecx
+ push eax ; address
+ mov eax,1
+ xor ecx, ecx
+ test ebx,PREFIX_REP ; prefixREP
+ jz oosb20
+
+ mov ecx, 1
+ movzx eax,word ptr [ebp].TsEcx
+oosb20:
+ push eax ; number of io ops
+ push FALSE ; write op
+ push ecx ; REP prefix ?
+ push 1 ; byte op
+ movzx eax,word ptr [ebp].TsEdx
+ push eax ; port number
+
+ ; Ki386VdmDispatchStringIo enables interrupts
+IFDEF STD_CALL
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+ELSE
+ call _Ki386VdmDispatchStringIo ; use retval
+ add esp,24
+ENDIF
+ ret
+
+OpcodeOUTSBV86 endp
+
+ page ,132
+ subttl "Word String Out Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an OUTSW opcode by dispatching the operation to the
+; VDM string I/O handler (Ki386VdmDispatchStringIo).
+;
+; Arguments:
+;
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeOUTSWV86
+OpcodeOUTSWV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTSWOpcode db 'NTVDM: An OUTSW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push ebx
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTSWOpcode
+ call _DbgPrint ; display OUTSW opcode message
+ add esp,12
+ pop ebx
+endif
+
+ push ebp ; trap frame
+ push edi ; size of outsw
+ movzx eax,word ptr [ebp].TsV86Ds
+ shl eax,16
+ movzx ecx,word ptr [ebp].TsEsi
+ or eax,ecx
+ push eax ; address
+
+ mov eax,1
+ xor ecx, ecx
+ test ebx,PREFIX_REP ; prefixREP
+ jz oosw20
+
+ mov ecx, 1
+ movzx eax,word ptr [ebp].TsEcx
+oosw20:
+ push eax ; number of io ops
+ push FALSE ; write op
+ push ecx ; REP prefix ?
+ push 2 ; word op
+ movzx eax,word ptr [ebp].TsEdx
+ push eax ; port number
+
+ ; Ki386VdmDispatchStringIo enables interrupts
+IFDEF STD_CALL
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+ELSE
+ call _Ki386VdmDispatchStringIo ; use retval
+ add esp,24
+ENDIF
+ ret
+
+OpcodeOUTSWV86 endp
+
+ page ,132
+ subttl "PUSHF Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates a PUSHF opcode by simulating the instruction
+;    as outlined below. Under VDMDBG it also prints a debug message.
+;
+; Get SS
+; shift left 4
+; get SP
+; subtract 2
+; get flags
+; put in virtual interrupt flag
+; put on stack
+; update sp
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+ public OpcodePUSHFV86
+OpcodePUSHFV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgPUSHFOpcode db 'NTVDM: An PUSHF opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push eax
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+
+ push offset FLAT:MsgPUSHFOpcode
+ call _DbgPrint ; display PUSHF opcode message
+ add esp,12
+ pop eax
+endif
+
+ mov eax,_VdmFixedStateLinear ; get pointer to VDM State
+ mov eax, dword ptr [eax] ; get virtual int flag
+ and eax,VDM_VIRTUAL_INTERRUPTS OR VDM_VIRTUAL_AC OR VDM_VIRTUAL_NT
+ mov edx,dword ptr [ebp].TsEFlags
+ and edx,NOT EFLAGS_INTERRUPT_MASK
+ or eax,edx
+ or eax,EFLAGS_IOPL_MASK
+ movzx ecx,word ptr [ebp].TsHardwareSegSS
+ shl ecx,4
+ movzx edx,word ptr [ebp].TsHardwareEsp
+ sub dx,2
+
+ test ebx,PREFIX_OPER32 ; check operand size
+ jnz puf10
+
+ mov [ecx + edx],ax
+puf05:
+ mov word ptr [ebp].TsHardwareEsp,dx ; update client esp
+ add dword ptr [ebp].TsEip,edi
+
+ mov eax,1
+ ret
+
+puf10: sub dx,2
+ mov [ecx + edx],eax
+ jmp puf05
+
+OpcodePUSHFV86 endp
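+
+; Worked example for the PUSHF emulation above (illustrative values): with
+; SS=1234h and SP=0100h the 16-bit flags image is written to linear address
+; (1234h shl 4) + (0100h - 2) = 1243Eh and the client SP becomes 00FEh. The
+; IF bit in that image comes from the virtual interrupt flag kept in the
+; shared VDM state rather than from the real EFlags, and IOPL reads back as 3.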
+
+ page ,132
+ subttl "POPF Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates a POPF opcode, updating the client flags and
+;    the virtual interrupt state. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodePOPFV86
+OpcodePOPFV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgPOPFOpcode db 'NTVDM: An POPF opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push eax
+ push ebx
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgPOPFOpcode
+ call _DbgPrint ; display POPF opcode message
+ add esp,12
+ pop ebx
+ pop eax
+endif
+ mov eax,_VdmFixedStateLinear ; get pointer to VDM State
+ mov ecx,[ebp].TsHardwareSegSS
+ shl ecx,4
+ movzx edx,word ptr [ebp].TsHardwareEsp
+ mov ecx,[ecx + edx] ; get flags from stack
+ add edx,4
+ test ebx,PREFIX_OPER32 ; check operand size
+ jnz pof10
+ and ecx,0ffffh
+ sub edx,2
+pof10:
+ mov [ebp].TsHardwareEsp,edx
+ and ecx, NOT EFLAGS_IOPL_MASK
+ mov ebx,ecx
+ and ebx, NOT EFLAGS_NT_MASK
+
+ test _KeI386VirtualIntExtensions, dword ptr V86_VIRTUAL_INT_EXTENSIONS
+ jz short pof15
+ or ebx, EFLAGS_VIF
+ test ebx, EFLAGS_INTERRUPT_MASK
+ jnz pof15
+ and ebx, NOT EFLAGS_VIF
+
+pof15:
+ or ebx, (EFLAGS_INTERRUPT_MASK OR EFLAGS_V86_MASK)
+ mov [ebp].TsEFlags,ebx
+ and ecx, (EFLAGS_INTERRUPT_MASK OR EFLAGS_ALIGN_CHECK OR EFLAGS_NT_MASK)
+ MPLOCK and [eax],NOT (EFLAGS_INTERRUPT_MASK OR EFLAGS_ALIGN_CHECK OR EFLAGS_NT_MASK)
+ MPLOCK or [eax],ecx
+ add dword ptr [ebp].TsEip,edi
+
+ mov eax,dword ptr [eax]
+ test eax,VDM_INTERRUPT_PENDING
+ jz pof25
+
+ test eax,VDM_VIRTUAL_INTERRUPTS
+ jz pof25
+
+ call VdmDispatchIntAck
+
+pof25:
+ mov eax,1 ; handled
+ ret
+OpcodePOPFV86 endp
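+
+; Note on the POPF emulation above: the IF/AC/NT bits of the popped image are
+; stored into the shared VDM state dword under MPLOCK, while the EFlags value
+; written back to the trap frame always has IF and the V86 bit forced on, so
+; the processor itself never runs the VDM with interrupts disabled. If an
+; interrupt was latched while the virtual IF was clear, it is delivered here
+; via VdmDispatchIntAck.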
+
+ page ,132
+ subttl "INTnn Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INTnn opcode. It retrieves the handler
+; from the IVT, pushes the current cs:ip and flags on the stack,
+; and dispatches to the handler.
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeINTnnV86
+OpcodeINTnnV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINTnnOpcode db 'NTVDM: An INTnn opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push eax
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+
+ push offset FLAT:MsgINTnnOpcode
+ call _DbgPrint ; display INTnn opcode message
+ add esp,12
+ pop eax
+endif
+
+;
+; Int nn in v86 mode always disables interrupts
+;
+
+ mov edx,[ebp].TsEflags
+;
+; If KeI386VdmIoplAllowed is true, direct IF manipulation is allowed
+;
+ test _KeI386VdmIoplAllowed,1
+ jz oinnv10
+
+ mov eax,edx ; save original flags
+ and edx,NOT EFLAGS_INTERRUPT_MASK
+ jmp oinnv20
+
+;
+; Else, IF and some other flags bits are virtualized
+;
+oinnv10:
+ mov eax,_VdmFixedStateLinear ; get pointer to VDM State
+ mov ecx,dword ptr [eax]
+ MPLOCK and [eax],NOT VDM_VIRTUAL_INTERRUPTS
+ mov eax,ecx
+ and eax,VDM_VIRTUAL_INTERRUPTS OR VDM_VIRTUAL_AC
+ and edx,NOT EFLAGS_INTERRUPT_MASK
+ or eax,edx
+ or edx, EFLAGS_INTERRUPT_MASK
+
+oinnv20:
+ and edx,NOT (EFLAGS_NT_MASK OR EFLAGS_TF_MASK)
+ mov [ebp].TsEflags,edx
+ or eax, EFLAGS_IOPL_MASK
+ movzx ecx,word ptr [ebp].TsHardwareSegSS
+ shl ecx,4
+ movzx edx,word ptr [ebp].TsHardwareEsp ; ecx+edx is user stack
+ sub dx,2
+ mov word ptr [ecx+edx],ax ; push flags
+ mov ax,word ptr [ebp].TsSegCS
+ sub dx,2
+ mov word ptr [ecx+edx],ax ; push cs
+ movzx eax,word ptr [ebp].TsEip
+ add eax, edi
+ inc eax
+ sub dx,2
+ mov word ptr [ecx+edx],ax ; push ip
+ mov [ebp].TsHardwareEsp,dx ; update sp on trap frame
+
+ inc esi
+ movzx ecx,byte ptr [esi] ; ecx is int#
+ mov ebx,[ecx*4]
+ mov word ptr [ebp].TsEip,bx
+        shr     ebx,16                  ; high word of ebx is the handler cs
+ mov [ebp].TsSegCs,bx ; cs:ip on trap frame is updated
+
+ mov eax,1
+ ret
+
+OpcodeINTnnV86 endp
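+
+; Worked example for the INTnn emulation above (illustrative values): for
+; INT 21h the vector byte is 21h, so the handler address is read from the
+; real-mode IVT entry at linear address 21h*4 = 84h; if that dword were
+; 012300A5h, the new CS:IP would be 0123:00A5. Three words are pushed on the
+; client stack first: the (virtualized) flags, CS, and the return IP past
+; the INT nn instruction.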
+
+ page ,132
+ subttl "INTO Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INTO opcode. Currently, it prints
+; a message, and reflects a GP fault to the debugger.
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeINTOV86
+OpcodeINTOV86 proc
+
+
+ifdef VDMDBG
+_DATA segment
+MsgINTOOpcode db 'NTVDM: An INTO opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+
+ push offset FLAT:MsgINTOOpcode
+ call _DbgPrint ; display INTO opcode message
+ add esp,12
+endif
+ xor eax,eax ; ret fail
+ ret
+
+OpcodeINTOV86 endp
+
+ page ,132
+ subttl "IRET Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an IRET opcode. It retrieves the flags,
+; and new instruction pointer from the stack and puts them into
+; the user context.
+;
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+;
+
+ public OpcodeIRETV86
+OpcodeIRETV86 proc
+
+
+ifdef VDMDBG
+_DATA segment
+MsgIRETOpcode db 'NTVDM: An IRET opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push eax
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+
+ push offset FLAT:MsgIRETOpcode
+ call _DbgPrint ; display IRET opcode message
+ add esp,12
+ pop eax
+endif
+
+ mov eax,_VdmFixedStateLinear ; get pointer to VDM State
+ movzx ecx,word ptr [ebp].TsHardwareSegSS
+ shl ecx,4
+        movzx   edx,word ptr [ebp].TsHardwareEsp ; ecx+edx is user stack
+ add ecx,edx
+ test ebx,PREFIX_OPER32
+ jnz irt50 ; normally not
+
+ movzx edi,word ptr [ecx] ; get ip value
+ mov [ebp].TsEip,edi
+ movzx esi,word ptr [ecx+2] ; get cs value
+ mov [ebp].TsSegCs,esi
+ add edx,6
+ mov [ebp].TsHardwareEsp,edx ; update sp on trap frame
+ movzx ebx,word ptr [ecx+4] ; get flag value
+
+irt10:
+ and ebx, NOT (EFLAGS_IOPL_MASK OR EFLAGS_NT_MASK)
+ mov ecx,ebx
+
+ test _KeI386VirtualIntExtensions, dword ptr V86_VIRTUAL_INT_EXTENSIONS
+ jz short irt15
+ or ebx, EFLAGS_VIF
+ test ebx, EFLAGS_INTERRUPT_MASK
+ jnz irt15
+ and ebx, NOT EFLAGS_VIF
+
+irt15:
+ or ebx, (EFLAGS_V86_MASK OR EFLAGS_INTERRUPT_MASK)
+        mov     [ebp].TsEFlags,ebx      ; update flags on trap frame
+ and ecx, EFLAGS_INTERRUPT_MASK
+ MPLOCK and [eax],NOT VDM_VIRTUAL_INTERRUPTS
+ MPLOCK or [eax],ecx
+ mov ebx,[eax]
+
+
+ ; at this point esi is the cs and edi is the ip where v86 mode
+ ; will return. Now we will check if this returning instruction
+ ; is a bop. if so we will directly dispatch the bop from here
+ ; saving a full round trip. This will be really helpful to
+ ; com apps.
+
+ shl esi,4
+ add esi,edi
+ mov ax, word ptr [esi]
+ cmp ax, 0c4c4h
+ je irtbop
+
+ test ebx,VDM_INTERRUPT_PENDING
+ jz short irt25
+
+ test ebx,VDM_VIRTUAL_INTERRUPTS
+ jz short irt25
+
+ call VdmDispatchIntAck ; VdmDispatchIntAck enables interrupts
+
+irt25:
+ mov eax,1 ; handled
+ ret
+
+ ; ireting to a bop
+irtbop:
+ call VdmDispatchBop ; this expects ebp to be trap frame
+ jmp short irt25
+
+irt50:
+ mov edi, [ecx] ; get ip value
+ mov [ebp].TsEip,edi
+ movzx esi,word ptr [ecx+4] ; get cs value
+ mov [ebp].TsSegCs,esi
+ add edx,12
+ mov [ebp].TsHardwareEsp,edx ; update sp on trap frame
+ mov ebx, [ecx+8] ; get flag value
+ jmp irt10 ; rejoin the common path
+
+OpcodeIRETV86 endp
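+
+; Note on the BOP fast path above: after the return CS:IP is popped, the word
+; at linear address (cs shl 4) + ip is compared with 0C4C4h, the NTVDM BOP
+; signature. When it matches, VdmDispatchBop is called directly instead of
+; returning to the monitor, which saves a full user/kernel round trip.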
+
+
+ page ,132
+ subttl "In Byte Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an in byte immediate opcode by dispatching
+;    the I/O to the VDM. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeINBimmV86
+OpcodeINBimmV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINBimmOpcode db 'NTVDM: An INBimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINBimmOpcode
+ call _DbgPrint ; display INBimm opcode message
+ add esp,12
+endif
+
+ inc esi
+ inc edi
+ movzx ecx,byte ptr [esi]
+
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ecx, 1, TRUE, edi, ebp>
+ ret
+
+OpcodeINBimmV86 endp
+
+ page ,132
+ subttl "Word In Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an in word immediate opcode by dispatching
+;    the I/O to the VDM. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeINWimmV86
+OpcodeINWimmV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINWimmOpcode db 'NTVDM: An INWimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINWimmOpcode
+ call _DbgPrint ; display INWimm opcode message
+ add esp,12
+endif
+
+ inc esi
+ inc edi
+ movzx ecx,byte ptr [esi]
+; edi - instruction size
+; TRUE - read op
+; 2 - word op
+; ecx - port number
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ecx, 2, TRUE, edi, ebp>
+
+ ret
+
+OpcodeINWimmV86 endp
+
+ page ,132
+ subttl "Out Byte Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an out byte immediate opcode by dispatching
+;    the I/O to the VDM. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeOUTBimmV86
+OpcodeOUTBimmV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTBimmOpcode db 'NTVDM: An OUTBimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTBimmOpcode
+ call _DbgPrint ; display OUTBimm opcode message
+ add esp,12
+endif
+
+
+ inc edi
+ inc esi
+ movzx ecx,byte ptr [esi]
+; edi - instruction size
+; FALSE - write op
+; 1 - byte op
+; ecx - port #
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ecx, 1, FALSE, edi, ebp>
+
+ ret
+
+OpcodeOUTBimmV86 endp
+
+ page ,132
+ subttl "Out Word Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an out word immediate opcode by dispatching
+;    the I/O to the VDM. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+;
+
+ public OpcodeOUTWimmV86
+OpcodeOUTWimmV86 proc
+
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTWimmOpcode db 'NTVDM: An OUTWimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTWimmOpcode
+ call _DbgPrint ; display OUTWimm opcode message
+ add esp,12
+endif
+
+
+ inc esi
+ inc edi
+ movzx ecx,byte ptr [esi]
+; edi - instruction size
+; FALSE - write op
+; 2 - word op
+; ecx - port number
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ecx, 2, FALSE, edi, ebp>
+
+ ret
+
+OpcodeOUTWimmV86 endp
+
+ page ,132
+ subttl "INB Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an INB opcode, either handling it as a printer
+;    status read or dispatching the I/O to the VDM. Under VDMDBG it also
+;    prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeINBV86
+OpcodeINBV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINBOpcode db 'NTVDM: An INB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINBOpcode
+ call _DbgPrint ; display INB opcode message
+ add esp,12
+endif
+
+ movzx ebx,word ptr [ebp].TsEdx
+
+
+; JAPAN - SUPPORT Intel CPU/Non PC/AT compatible machine
+; Get Hardware Id, PC_AT_COMPATIBLE is 0x00XX
+ test _KeI386MachineType, MACHINE_TYPE_MASK
+ jnz oib_reflect
+
+; edi - instruction size
+; TRUE - read op
+; 1 - byte op
+; ebx - port number
+
+ cmp ebx, 3bdh
+ jz oib_prt1
+ cmp ebx, 379h
+ jz oib_prt1
+ cmp ebx, 279h
+ jz oib_prt1
+
+oib_reflect:
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ebx, 1, TRUE, edi, ebp>
+oib_com:
+ ret
+
+oib_prt1:
+ ; call printer status routine with port number, size, trap frame
+ stdCall _VdmPrinterStatus, <ebx, edi, ebp>
+ or al,al
+ jz short oib_reflect
+ jmp short oib_com
+
+OpcodeINBV86 endp
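+
+; Note: the ports given the fast path above (3BDh, 379h, 279h) are the
+; printer status registers, one above the data ports (3BCh, 378h, 278h)
+; that get the corresponding fast path in OpcodeOUTBV86 below. Everything
+; else, and any port on a non PC/AT compatible machine, is reflected
+; through Ki386VdmDispatchIo.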
+
+ page ,132
+ subttl "INW Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an INW opcode by dispatching the I/O to the
+;    VDM. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+;
+
+ public OpcodeINWV86
+OpcodeINWV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINWOpcode db 'NTVDM: An INW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINWOpcode
+ call _DbgPrint ; display INW opcode message
+ add esp,12
+endif
+
+ movzx ebx,word ptr [ebp].TsEdx
+
+; edi - instruction size
+; TRUE - read operation
+; 2 - word op
+; ebx - port number
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ebx, 2, TRUE, edi, ebp>
+
+ ret
+OpcodeINWV86 endp
+
+ page ,132
+ subttl "OUTB Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an OUTB opcode, either handling it as a printer
+;    data write or dispatching the I/O to the VDM. Under VDMDBG it also
+;    prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+;
+
+ public OpcodeOUTBV86
+OpcodeOUTBV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTBOpcode db 'NTVDM: An OUTB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTBOpcode
+ call _DbgPrint ; display OUTB opcode message
+ add esp,12
+endif
+
+ movzx ebx,word ptr [ebp].TsEdx
+
+ cmp ebx, 3bch
+ jz oob_prt1
+ cmp ebx, 378h
+ jz oob_prt1
+ cmp ebx, 278h
+ jz oob_prt1
+
+oob_reflect:
+
+; edi - instruction size
+; FALSE - write op
+; 1 - byte op
+; ebx - port number
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ebx, 1, FALSE, edi, ebp>
+
+ ret
+oob_prt1:
+ ; call printer write data routine with port number, size, trap frame
+ stdCall _VdmPrinterWriteData, <ebx, edi, ebp>
+ or al,al
+ jz short oob_reflect
+ ;al already has TRUE
+ ret
+OpcodeOUTBV86 endp
+
+ page ,132
+ subttl "OUTW Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an OUTW opcode by dispatching the I/O to the
+;    VDM. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+;
+
+ public OpcodeOUTWV86
+OpcodeOUTWV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTWOpcode db 'NTVDM: An OUTW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTWOpcode
+ call _DbgPrint ; display OUTW opcode message
+ add esp,12
+endif
+
+
+ movzx ebx,word ptr [ebp].TsEdx
+; edi - instruction size
+; FALSE - write op
+; 2 - word op
+; ebx - port #
+ ; Ki386VdmDispatchIo enables interrupts
+ stdCall _Ki386VdmDispatchIo, <ebx, 2, FALSE, edi, ebp>
+
+ ret
+
+OpcodeOUTWV86 endp
+
+
+ page ,132
+ subttl "CLI Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates a CLI opcode by clearing the virtual interrupt
+;    flag in the shared VDM state. Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+;
+
+ public OpcodeCLIV86
+OpcodeCLIV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgCLIOpcode db 'NTVDM: An CLI opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push eax
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgCLIOpcode
+ call _DbgPrint ; display CLI opcode message
+ add esp,12
+ pop eax
+endif
+
+ mov eax,_VdmFixedStateLinear ; get pointer to VDM State
+ MPLOCK and dword ptr [eax],NOT VDM_VIRTUAL_INTERRUPTS
+ add dword ptr [ebp].TsEip,edi
+
+ mov eax,1
+ ret
+
+OpcodeCLIV86 endp
+
+ page ,132
+ subttl "STI Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine emulates an STI opcode by setting the virtual interrupt
+;    flag in the shared VDM state and dispatching any pending interrupt.
+;    Under VDMDBG it also prints a debug message.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+;
+
+ public OpcodeSTIV86
+OpcodeSTIV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgSTIOpcode db 'NTVDM: An STI opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push eax
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgSTIOpcode
+ call _DbgPrint ; display STI opcode message
+ add esp,12
+ pop eax
+endif
+
+ mov eax,_VdmFixedStateLinear ; get pointer to VDM State
+ test _KeI386VirtualIntExtensions, dword ptr V86_VIRTUAL_INT_EXTENSIONS
+ jz short os10
+
+ or [ebp].TsEFlags, dword ptr EFLAGS_VIF
+
+os10: MPLOCK or dword ptr [eax],EFLAGS_INTERRUPT_MASK
+os20: add dword ptr [ebp].TsEip,edi
+ mov eax,dword ptr [eax]
+ test eax,VDM_INTERRUPT_PENDING
+ jz short os30
+
+ call VdmDispatchIntAck
+os30: mov eax,1
+ ret
+
+OpcodeSTIV86 endp
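+
+; Note on the STI emulation above: once the virtual interrupt flag is set
+; (and, with the virtual interrupt extensions enabled, VIF in the client
+; EFlags), any interrupt that was latched in VDM_INTERRUPT_PENDING while the
+; flag was clear is delivered immediately via VdmDispatchIntAck.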
+
+
+;
+; If we get here, we have executed an NPX instruction in user mode
+; with the emulator installed. If the EM bit was not set in CR0, the
+; app really wanted to execute the instruction for detection purposes.
+; In this case, we need to clear the TS bit, and restart the instruction.
+; Otherwise we need to reflect the exception
+;
+;
+; Reginfo structure
+;
+
+ public Opcode0FV86
+Opcode0FV86 proc
+
+RI equ [ebp - REGINFOSIZE]
+
+ push ebp
+ mov ebp,esp
+ sub esp,REGINFOSIZE
+ push esi
+ push edi
+
+
+ ; Initialize RegInfo
+do10: mov esi,[ebp]
+
+
+        ; initialize the rest of the trap frame which wasn't initialized
+        ; for v86 mode
+ mov eax, [esi].TsV86Es
+ mov [esi].TsSegEs,eax
+ mov eax, [esi].TsV86Ds
+ mov [esi].TsSegDs,eax
+ mov eax, [esi].TsV86Fs
+ mov [esi].TsSegFs,eax
+ mov eax, [esi].TsV86Gs
+ mov [esi].TsSegGs,eax
+
+ mov RI.RiTrapFrame,esi
+ mov eax,[esi].TsHardwareSegSs
+ mov RI.RiSegSs,eax
+ mov eax,[esi].TsHardwareEsp
+ mov RI.RiEsp,eax
+ mov eax,[esi].TsEFlags
+ mov RI.RiEFlags,eax
+ mov eax,[esi].TsSegCs
+ mov RI.RiSegCs,eax
+ mov eax,[esi].TsEip
+ dec edi
+ add eax,edi ; for prefixes
+ mov RI.RiEip,eax
+
+ mov RI.RiPrefixFlags,ebx
+ lea esi,RI
+
+ CsToLinearV86
+ call VdmOpcode0f ; enables interrupts
+
+ test eax,0FFFFh
+ jz do20
+
+ mov edi,RI.RiTrapFrame
+ mov eax,RI.RiEip ; advance eip
+ mov [edi].TsEip,eax
+do19: mov eax,1
+do20:
+ pop edi
+ pop esi
+ mov esp,ebp
+ pop ebp
+ ret
+
+Opcode0FV86 endp
+
+
+;++
+;
+; Routine Description: VdmDispatchIntAck
+; pushes stack arguments for VdmDispatchInterrupts
+; and invokes VdmDispatchInterrupts
+;
+; Expects VDM_INTERRUPT_PENDING, and VDM_VIRTUAL_INTERRUPTS
+;
+; Arguments:
+; EBP -> trap frame
+;
+; Returns:
+; nothing
+;
+;
+ public VdmDispatchIntAck
+VdmDispatchIntAck proc
+
+ mov eax,_VdmFixedStateLinear ; get pointer to VDM State
+ test [eax],VDM_INT_HARDWARE ; check for hardware int
+ mov eax, PCR[PcTeb]
+ mov eax,[eax].TbVdm ; get pointer to VdmTib
+ jz short dia20
+
+ ;
+ ; dispatch hardware int directly from kernel
+ ;
+ stdCall _VdmDispatchInterrupts, <ebp, eax> ; TrapFrame, VdmTib
+dia10:
+ ret
+
+
+ ;
+ ; Switch to monitor context to dispatch timer int
+ ;
+dia20:
+ mov dword ptr [eax].VtEIEvent,VdmIntAck
+ mov dword ptr [eax].VtEIInstSize,0
+ mov dword ptr [eax].VtEiIntAckInfo,0
+ stdCall _VdmEndExecution, <ebp, eax> ; TrapFrame, VdmTib
+ jmp short dia10
+
+VdmDispatchIntAck endp
+
+
+ public vdmDebugPoint
+vdmDebugPoint proc
+ ret
+vdmDebugPoint endp
+
+
+
+ page ,132
+ subttl "HLT Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an HLT opcode. If the halt instruction is
+; followed by the magic number (to be found in a crackerjack box),
+; we use the hlt + magic number as a prefix, and emulate the following
+; instruction. This allows code running in segmented protected mode to
+; access the virtual interrupt flag.
+;
+; Arguments:
+; EAX -> pointer to vdm state in DOS arena
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; interrupts disabled
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;
+
+ public OpcodeHLTV86
+OpcodeHLTV86 proc
+
+ifdef VDMDBG
+_DATA segment
+MsgHLTOpcode db 'NTVDM: An HLT opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push eax
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgHLTOpcode
+ call _DbgPrint ; display HLT opcode message
+ add esp,12
+ pop eax
+endif
+
+ add dword ptr [ebp].TsEip,edi
+ mov eax,1
+ ret
+
+OpcodeHLTV86 endp
+
+_PAGE ends
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ subttl "NPX Opcode Handler"
+;++
+;
+; Routine Description:
+;
+;    This routine handles all NPX opcodes when the system has the R3
+;    emulator installed and a v86 app takes a trap 07.
+;
+; Arguments:
+; EBX -> prefix flags
+; EBP -> trap frame
+; CL -> byte at the faulting address
+; ESI -> address of faulting instruction
+; EDI -> instruction length count
+;
+; Returns:
+; EAX = 0 for failure
+; EAX = 1 for success
+;
+; All registers can be trashed except ebp/esp.
+;    moved from emv86.asm as it must be nonpageable
+
+ public OpcodeNPXV86
+OpcodeNPXV86 proc
+ mov edx, PCR[PcInitialStack]
+ mov edx, [edx].FpCr0NpxState
+ test edx, CR0_EM ; Does app want NPX traps?
+ jnz short onp40
+
+ ; MP bit can never be set while the EM bit is cleared, so we know
+ ; the faulting instruction is not an FWAIT
+
+onp30: and ebx, PREFIX_ADDR32
+ stdCall _VdmSkipNpxInstruction, <ebp, ebx, esi, edi>
+ or al, al ; was it handled?
+ jnz short onp60 ; no, go raise exception to app
+
+onp40: stdCall _Ki386VdmReflectException, <7> ; trap #
+
+onp60: mov eax,1
+ ret
+
+
+OpcodeNPXV86 endp
+
+
+;++ KiVdmSetUserCR0
+;
+; eax
+;
+ public KiVdmSetUserCR0
+KiVdmSetUserCR0 proc
+
+ and eax, CR0_MP OR CR0_EM ; Sanitize parameter
+ shr eax, 1
+ movzx eax, _VdmUserCr0MapIn[eax]
+
+ push esp ; Pass current Esp to handler
+ push offset scr_fault ; Set Handler address
+ push PCR[PcExceptionList] ; Set next pointer
+ mov PCR[PcExceptionList],esp ; Link us on
+
+ mov edx, PCR[PcTeb] ; Shadow user's CR0 state to
+ mov edx,[edx].TbVdm ; Teb for R3 emulator
+ mov [edx].VtVdmContext.CsFloatSave.FpCr0NpxState, eax
+
+scr10: pop     PCR[PcExceptionList]    ; Remove our exception handler
+ add esp, 8 ; clear stack
+
+ mov edx, PCR[PcInitialStack] ; Get fp save area
+ mov ebx, PCR[PcPrcbData + PbCurrentThread] ; (ebx) = current thread
+
+scr20: cli ; sync with context swap
+ and [edx].FpCr0NpxState, NOT (CR0_MP+CR0_EM+CR0_PE)
+ or [edx].FpCr0NpxState,eax ; set fp save area bits
+
+ mov eax,cr0
+ and eax, NOT (CR0_MP+CR0_EM+CR0_TS) ; turn off bits we will change
+ or al, [ebx].ThNpxState ; set scheduler bits
+ or eax,[edx].FpCr0NpxState ; set user's bits
+ mov cr0,eax
+ sti
+ ret
+
+scr_fault:
+;
+; WARNING: Here we directly unlink the exception handler from the
+; exception registration chain. NO unwind is performed. We can take
+; this short cut because we know that our handler is a leaf-node.
+;
+
+ mov esp, [esp+8] ; (esp)-> ExceptionList
+ jmp short scr10
+
+
+KiVdmSetUserCR0 endp
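+
+; Note on the fault protection above: the three pushes build an exception
+; registration record on the stack and link it at PCR[PcExceptionList]. If
+; the Teb access faults, scr_fault resets esp so that it points back at the
+; record and falls into scr10, which unlinks it and cleans the stack; as the
+; warning notes, skipping the normal unwind is safe only because this handler
+; is a leaf node.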
+
+_TEXT$00 ENDS
+
+ end
diff --git a/private/ntos/ke/i386/emxcptn.asm b/private/ntos/ke/i386/emxcptn.asm
new file mode 100644
index 000000000..58e2ed253
--- /dev/null
+++ b/private/ntos/ke/i386/emxcptn.asm
@@ -0,0 +1,657 @@
+ page 78,132
+;*******************************************************************************
+; Copyright (c) Microsoft Corporation 1991
+; All Rights Reserved
+;
+; ke\i386\emxcptn.asm
+;
+; Module to support getting/setting context to and from the R3
+; emulator.
+;
+;Revision History:
+;
+;
+;*******************************************************************************
+
+ .386p
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+
+;;*******************************************************************************
+;;
+;; Include some more macros and constants.
+;;
+;;*******************************************************************************
+;
+
+NT386 equ 1
+
+ include ks386.inc
+ include em387.inc ; Emulator TEB data layout
+ include callconv.inc
+
+ EXTRNP _KeGetCurrentIrql,0
+ EXTRNP _KeBugCheck,1
+ EXTRNP _ExRaiseStatus,1
+ extrn _Ki387RoundModeTable:dword
+
+
+ subttl _KiEm87StateToNpxFrame
+ page
+
+;*** _KiEm87StateToNpxFrame
+;
+;   Translates the R3 emulator's state to the NpxFrame.
+;
+;   Returns TRUE if the NpxFrame was successfully completed,
+;   else FALSE.
+;
+; Warning: This function can only be called at Irql 0 with interrupts
+; enabled. It is intended to be called only to deal with R3 exceptions
+; when the emulator is being used.
+;
+; Revision History:
+;
+;
+;*******************************************************************************
+
+cPublicProc _KiEm87StateToNpxFrame, 1
+ push ebp
+ mov ebp, esp
+        push    ebx                     ; Save C runtime variables
+ push edi
+ push esi
+
+ push esp ; Pass current Esp to handler
+ push offset stnpx_30 ; Set Handler address
+ push PCR[PcExceptionList] ; Set next pointer
+ mov PCR[PcExceptionList],esp ; Link us on
+
+if DBG
+ pushfd ; Sanity check
+ pop ecx ; make sure interrupts are enabled
+ test ecx, EFLAGS_INTERRUPT_MASK
+ jz short stnpx_err
+
+ stdCall _KeGetCurrentIrql ; Sanity check
+ cmp al, DISPATCH_LEVEL ; make sure Irql is below DPC level
+ jnc short stnpx_err
+endif
+
+ xor eax, eax ; set FALSE
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jnz short stnpx_10 ; Yes, then not supported
+
+ mov ebx, PCR[PcTeb] ; R3 Teb
+ cmp [ebx].Einstall, 0 ; Initialized?
+ je short stnpx_10 ; No, then don't return NpxFrame
+
+ test [ebx].CURErr, Summary ; Completed?
+ jz short stnpx_10 ; No, then don't return NpxFrame
+
+ mov esi, [ebp+8] ; (esi) = NpxFrame
+ call SaveState
+
+ mov eax, 1 ; Return TRUE
+stnpx_10:
+        pop     PCR[PcExceptionList]    ; Remove our exception handler
+ add esp, 8 ; clear stack
+ pop esi
+ pop edi
+ pop ebx
+ pop ebp
+ stdRET _KiEm87StateToNpxFrame
+
+stnpx_30:
+;
+; WARNING: Here we directly unlink the exception handler from the
+; exception registration chain. NO unwind is performed. We can take
+; this short cut because we know that our handler is a leaf-node.
+;
+
+ mov esp, [esp+8] ; (esp)-> ExceptionList
+ xor eax, eax ; Return FALSE
+ jmp short stnpx_10
+
+if DBG
+stnpx_err:
+ stdCall _KeBugCheck <IRQL_NOT_LESS_OR_EQUAL>
+endif
+_KiEm87StateToNpxFrame ENDP
+
+
+;*** SaveEnv
+;
+;
+; ARGUMENTS
+;
+; (esi) = NpxFrame
+; (ebx) = PcTeb
+;
+;
+; DESCRIPTION
+;
+
+SaveEnv:
+ xor ax,ax
+ mov [esi].reserved1,ax
+ mov [esi].reserved2,ax
+ mov [esi].reserved3,ax
+ mov [esi].reserved4,ax
+ mov [esi].reserved5,ax
+ mov ax,[ebx].ControlWord
+ mov [esi].E32_ControlWord,ax
+ call GetEMSEGStatusWord
+ mov [esi].E32_StatusWord,ax
+ call GetTagWord
+ mov [esi].E32_TagWord,ax
+ mov ax,cs
+ mov [esi].E32_CodeSeg,ax ; NOTE: Not R0 code & stack
+ mov ax,ss
+ mov [esi].E32_DataSeg,ax
+ mov eax,[ebx].PrevCodeOff
+ mov [esi].E32_CodeOff,eax
+ mov eax,[ebx].PrevDataOff
+ mov [esi].E32_DataOff,eax
+ ret
+
+
+;*** SaveState -
+;
+; ARGUMENTS
+; (esi) = where to store environment
+; (ebx) = PcTeb
+;
+; DESCRIPTION
+;
+; REGISTERS
+; Destroys ALL, but EBX
+;
+
+SaveState: ; Enter here for debugger save state
+ mov dword ptr [esi].FpCr0NpxState, CR0_EM
+
+ call SaveEnv
+ add esi,size Env80x87_32 ;Skip over environment
+ mov ebp,NumLev ;Save entire stack
+ mov edi,[ebx].CURstk
+ss_loop:
+ mov eax,[ebx+edi].ExpSgn
+ call StoreTempReal ;in emstore.asm
+ add esi,10
+
+ mov edi,[ebx].CURstk
+;;; NextStackElem edi,SaveState
+ cmp edi,INITstk
+ jae short ss_wrap
+ add edi,Reg87Len
+ss_continue:
+ mov [ebx].CURstk,edi
+ dec ebp
+ jnz short ss_loop
+ ret
+ss_wrap:
+ mov edi, BEGstk
+ jmp short ss_continue
+
+
+;*** GetTagWord - figures out what the tag word is from the numeric stack
+; and returns the value of the tag word in ax.
+;
+; ARGUMENTS
+; (ebx) = PcTeb
+;
+
+GetTagWord:
+ push esi
+ xor eax, eax
+ mov ecx, NumLev ; get tags for regs. 0, 7 - 1
+ mov esi, INITstk
+GetTagLoop:
+ mov dh, [ebx+esi].bTag ; The top 2 bits of Tag are the X87 tag bits.
+ shld ax, dx, 2
+ sub esi, Reg87Len
+ loop GetTagLoop
+ rol ax, 2 ; This moves Tag(0) into the low 2 bits
+ pop esi
+ ret
+
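+; Note on GetTagWord above: the loop packs two tag bits per register, walking
+; the register file in the order noted (0, then 7 down to 1); the final rol
+; leaves Tag(n) in bits 2n+1..2n of AX, with register 0's tag in the low two
+; bits. With every register empty (x87 tag 11b) the result would be 0FFFFh.
+;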
+
+;*** GetEMSEGStatusWord
+;
+; User status word returned in ax.
+; Uses status word in per-thread data area, otherwise
+; identical to GetStatusWord
+;
+; ARGUMENTS
+; (ebx) = PcTeb
+
+GetEMSEGStatusWord:
+ mov eax, [ebx].CURstk
+ sub eax, BEGstk
+
+ ;
+    ; Make sure the 'div' won't overflow.
+ ;
+
+ cmp eax, Reg87Len * (NumLev + 2)
+ ja short @f
+
+ mov dl,Reg87Len
+ div dl
+ inc eax
+ and eax, 7 ; eax is now the stack number
+ shl ax, 11
+ or ax, [ebx].StatusWord ; or in the rest of the status word.
+ ret
+@@:
+ mov eax, STATUS_INTEGER_OVERFLOW
+ stdCall _ExRaiseStatus, <eax>
+ ret ; Should never come here ...
+
+;*** StoreTempReal
+;
+;
+; ARGUMENTS
+; ??
+; (ebx) = PcTeb
+;
+
+StoreTempReal:
+ mov edx,[ebx+edi].lManHi
+ mov edi,[ebx+edi].lManLo
+;mantissa in edx:edi, exponent in high eax, sign in ah bit 7, tag in al
+;memory destination is esi
+ mov ecx,eax ;get copy of sign and tag
+ shr ecx,16 ;Bring exponent down
+ cmp al,bTAG_ZERO
+ jz short StoreIEEE80 ;Skip bias if zero
+ add ecx,IexpBias-TexpBias ;Correct bias
+ cmp al,bTAG_DEN
+ jz short Denorm80
+StoreIEEE80:
+ and eax,bSign shl 8
+ or ecx,eax ;Combine sign with exponent
+ mov [esi],edi
+ mov [esi+4],edx
+ mov [esi+8],cx
+ ret
+
+Denorm80:
+;Must change it to a denormal
+ dec ecx
+ neg ecx ;Use as shift count
+ cmp cl,32 ;Long shift?
+ jae LongDenorm
+ shrd edi,edx,cl
+ shr edx,cl
+ xor ecx,ecx ;Exponent is zero
+ jmp short StoreIEEE80
+
+LongDenorm:
+;edi must be zero if we have 32 bits to shift
+ xchg edx,edi ;32-bit right shift
+ shr edi,cl ;shift count is modulo-32
+ xor ecx,ecx ;Exponent is zero
+ jmp short StoreIEEE80
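+
+;   Layout written by StoreTempReal/StoreIEEE80 above (10-byte temp real):
+;   bytes 0-3 hold the low half of the mantissa, bytes 4-7 the high half,
+;   and bytes 8-9 the sign bit plus the 15-bit biased exponent. Denormals
+;   are produced by shifting the mantissa right and storing an exponent of
+;   zero, as in Denorm80/LongDenorm.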
+
+
+;****************************************************
+;****************************************************
+;****************************************************
+;****************************************************
+
+
+;*** _KiNpxFrameToEm87State
+;
+;   Translates the NpxFrame to the R3 emulator's state.
+;
+;   Returns TRUE if the NpxFrame state was successfully transferred,
+; else FALSE
+;
+; Warning: This function can only be called at Irql 0 with interrupts
+; enabled. It is intended to be called only to deal with R3 exceptions
+; when the emulator is being used.
+;
+; Revision History:
+;
+;
+;*******************************************************************************
+
+cPublicProc _KiNpxFrameToEm87State, 1
+ push ebp
+ mov ebp, esp
+        push    ebx                     ; Save C runtime variables
+ push edi
+ push esi
+
+ push esp ; Pass current Esp to handler
+ push offset npxts_30 ; Set Handler address
+ push PCR[PcExceptionList] ; Set next pointer
+ mov PCR[PcExceptionList],esp ; Link us on
+
+if DBG
+ pushfd ; Sanity check
+ pop ecx ; make sure interrupts are enabled
+ test ecx, EFLAGS_INTERRUPT_MASK
+ jz short npxts_err
+
+ stdCall _KeGetCurrentIrql ; Sanity check
+ cmp al, DISPATCH_LEVEL ; make sure Irql is below DPC level
+ jnc short npxts_err
+endif
+
+ xor eax, eax ; set FALSE
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jnz short npxts_10 ; Yes, then not supported
+
+ mov ebx, PCR[PcTeb] ; R3 Teb
+ cmp [ebx].Einstall, 0 ; Initialized?
+ je short npxts_10 ; No, then don't set NpxFrame
+
+ mov esi, [ebp+8] ; (esi) = NpxFrame
+ call StorState
+ or [ebx].CURErr, Summary ; Set completed
+
+ mov eax, 1 ; Return TRUE
+npxts_10:
+        pop     PCR[PcExceptionList]    ; Remove our exception handler
+ add esp, 8 ; clear stack
+ pop esi
+ pop edi
+ pop ebx
+ pop ebp
+ stdRet _KiNpxFrameToEm87State
+
+npxts_30:
+;
+; WARNING: Here we directly unlink the exception handler from the
+; exception registration chain. NO unwind is performed. We can take
+; this short cut because we know that our handler is a leaf-node.
+;
+
+ mov esp, [esp+8] ; (esp)-> ExceptionList
+ xor eax, eax ; Return FALSE
+ jmp short npxts_10
+ ret
+
+if DBG
+npxts_err:
+ stdCall _KeBugCheck <IRQL_NOT_LESS_OR_EQUAL>
+endif
+_KiNpxFrameToEm87State ENDP
+
+
+
+;*** StorState - emulate FRSTOR [address]
+;
+; ARGUMENTS
+; (esi) = where to get the environment
+; (ebx) = PcTeb
+;
+;
+; DESCRIPTION
+; This routine emulates an 80387 FRSTOR (restore state)
+
+StorState:
+;First we set up the status word so that [CURstk] is initialized.
+;The floating-point registers are stored in logical ST(0) - ST(7) order,
+;not physical register order. We don't do a full load of the environment
+;because we're not ready to use the tag word yet.
+
+ mov ax, [esi].E32_StatusWord
+ call SetEmStatusWord ;Initialize [CURstk]
+ add esi,size Env80x87_32 ;Skip over environment
+
+;Load of temp real has one difference from real math chip: it is an invalid
+;operation to load an unsupported format. By ensuring the exception is
+;masked, we will convert unsupported format to Indefinite. Note that the
+;mask and [CURerr] will be completely restored by the FLDENV at the end.
+
+ mov [ebx].CWmask,3FH ;Mask off invalid operation exception
+ mov edi,[ebx].CURstk
+ mov ebp,NumLev
+FrstorLoadLoop:
+ push esi
+ call LoadTempReal ;In emload.asm
+ pop esi
+ add esi,10 ;Point to next temp real
+;;; NextStackElem edi,Frstor
+ cmp edi,INITstk
+ jae short fr_wrap
+ add edi,Reg87Len
+fr_continue:
+ dec ebp
+ jnz short FrstorLoadLoop
+ sub esi,NumLev*10+size Env80x87_32 ;Point to start of env.
+;
+; Store Environment
+; (esi) = where to get the environment
+; (ebx) = PcTeb
+;
+
+ mov ax, [esi].E32_StatusWord
+ call SetEmStatusWord ; set up status word
+ mov ax, [esi].E32_ControlWord
+ call SetControlWord
+ mov ax, [esi].E32_TagWord
+ call UseTagWord
+
+ mov eax, [esi].E32_CodeOff
+ mov [ebx].PrevCodeOff, eax
+ mov eax, [esi].E32_DataOff
+ mov [ebx].PrevDataOff, eax
+ ret
+
+fr_wrap:
+ mov edi, BEGstk
+ jmp short fr_continue
+
+
+;*** SetEmStatusWord -
+;
+; Given user status word in ax, set into emulator.
+; Destroys ebx only.
+
+
+SetEmStatusWord:
+ and ax,7F7FH
+ mov cx,ax
+ and cx,3FH ; set up CURerr in case user
+ mov [ebx].CURerr,cl ; wants to force an exception
+ mov ecx, eax
+ and ecx, not (7 shl 11) ; remove stack field.
+ mov [ebx].StatusWord, cx
+
+ sub ah, 8 ; adjust for emulator's stack layout
+ and ah, 7 shl 3
+ mov al, ah
+ shr ah, 1
+ add al, ah ; stack field * 3 * 4
+.erre Reg87Len eq 12
+ and eax, 255 ; eax is now 12*stack number
+ add eax, BEGstk
+ mov [ebx].CURstk, eax
+ ret
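+
+;   Worked example (illustrative): a status word with a TOP field of 3
+;   (bits 11-13) yields CURstk = BEGstk + 12*((3-1) and 7) = BEGstk + 24;
+;   GetEMSEGStatusWord inverts this as ((24/12) + 1) and 7 = 3 when it
+;   rebuilds the status word.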
+
+SetControlWord:
+ and ax,0F3FH ; Limit to valid values
+ mov [ebx].ControlWord, ax ; Store in the emulated control word
+ not al ;Flip mask bits for fast compare
+ and al,3FH ;Limit to valid mask bits
+ mov [ebx].ErrMask,al
+ and eax,(RoundControl + PrecisionControl) shl 8
+.erre RoundControl eq 1100B
+.erre PrecisionControl eq 0011B
+ shr eax,6 ;Put PC and RC in bits 2-5
+ mov ecx,_Ki387RoundModeTable
+ mov ecx,[ecx+eax] ;Get correct RoundMode vector
+ mov [ebx].RoundMode,ecx
+ mov [ebx].SavedRoundMode,ecx
+ and eax,RoundControl shl (8-6) ;Mask off precision control
+ mov ecx,_Ki387RoundModeTable
+ mov ecx,[ecx+(eax+PC64 shl (8-6))];Get correct RoundMode vector
+ mov [ebx].TransRound,ecx ;Round mode w/o precision
+ ret
+
+
+;*** UseTagWord - Set up tags using tag word from environment
+;
+; ARGUMENTS
+; ax - should contain the tag word
+;
+; Destroys ax,bx,cx,dx,di
+
+UseTagWord:
+ ror ax, 2 ; mov Tag(0) into top bits of ax
+ mov edi,INITstk
+ mov ecx, NumLev
+UseTagLoop:
+ mov dl,bTAG_EMPTY
+ cmp ah, 0c0h ;Is register to be tagged Empty?
+ jae short SetTag ;Yes, go mark it
+ mov dl,[ebx+edi].bTag ;Get current tag
+ cmp dl,bTAG_EMPTY ;Is register currently Empty?
+ je short SetTagNotEmpty ;If so, go figure out tag for it
+SetTag:
+ mov [ebx+edi].bTag,dl
+UseTagLoopCheck:
+ sub edi, Reg87Len
+ shl eax, 2
+ loop UseTagLoop
+ ret
+
+SetTagEmpty:
+ mov [ebx+edi].bTag, bTAG_EMPTY
+ jmp short UseTagLoopCheck
+
+SetTagNotEmpty:
+;Register is currently tagged empty, but new tag word says it is not empty.
+;Figure out a new tag for it. The rules are:
+;
+;1. Everything is either normalized or zero--unnormalized formats cannot
+;get in. So if the high half mantissa is zero, the number is zero.
+;
+;2. Although the exponent bias is different, NANs and Infinities are in
+;standard IEEE format - exponent is TexpMax, mantissa indicates NAN vs.
+;infinity (mantissa for infinity is 800..000H).
+;
+;3. Denormals have an exponent less than TexpMin.
+;
+;4. If the low half of the mantissa is zero, it is tagged bTAG_SNGL
+;
+;5. Everything else is bTAG_VALID
+
+ cmp [ebx+edi].lManHi, 0
+ mov dl,bTAG_ZERO ;Try zero first
+ jz short SetTag ;Is mantissa zero?
+ mov edx,[ebx+edi].ExpSgn
+ mov dl,bTAG_DEN
+ cmp edx,TexpMin shl 16 ;Is it denormal?
+ jl short SetTag
+ cmp [ebx+edi].lManLo,0 ;Is low half zero?
+.erre bTAG_VALID eq 1
+.erre bTAG_SNGL eq 0
+ setnz dl ;if low half==0 then dl=0 else dl=1
+ cmp edx,TexpMax shl 16 ;Is it NAN or Infinity?
+ jl short SetTag ;If not, it's valid
+.erre (bTAG_VALID - bTAG_SNGL) shl TAG_SHIFT eq (bTAG_NAN - bTAG_INF)
+ shl dl,TAG_SHIFT
+ add dl,bTAG_INF - bTAG_SNGL
+;If the low bits were zero we have just changed bTAG_SNGL to bTAG_INF
+;If the low bits weren't zero, we changed bTAG_VALID to bTAG_NAN
+;See if infinity is really possible: is high half 80..00H?
+ cmp [ebx+edi].lManHi,1 shl 31 ;Is it infinity?
+ jz short SetTag ;Store tag for infinity or NAN
+ mov dl,bTAG_NAN
+ jmp short SetTag
+
+
+;*** LoadTempReal
+;
+;
+;
+
+LoadTempReal:
+ mov ebx,[esi+4] ;Get high half of mantissa
+ mov cx,[esi+8] ;Get exponent and sign
+ mov esi,[esi] ;Get low half of mantissa
+ mov eax,ecx
+ and ch,7FH ;Mask off sign bit
+ shl ecx,16 ;Move exponent to high end
+ mov ch,ah ;Restore sign
+ jz short ZeroOrDenorm80
+;Check for unsupported format: unnormals (MSB not set)
+ or ebx,ebx
+ jns short Unsupported
+ sub ecx,(IexpBias-TexpBias) shl 16 ;Correct the bias
+ cmp ecx,TexpMax shl 16
+ jge short NANorInf80
+SetupTag:
+ or esi,esi ;Any bits in low half?
+.erre bTAG_VALID eq 1
+.erre bTAG_SNGL eq 0
+ setnz cl ;if low half==0 then cl=0 else cl=1
+ jmp short SaveStack
+
+NANorInf80:
+ mov cl,bTAG_NAN
+ cmp ebx,1 shl 31 ;Only 1 bit set means infinity
+ jnz short SaveStack
+ or esi,esi
+ jnz short SaveStack
+ mov cl,bTAG_INF
+ jmp short SaveStack
+
+ZeroOrDenorm80:
+;Exponent is zero. Number is either zero or denormalized
+ or ebx,ebx
+ jnz short ShortNorm80 ;Are top 32 bits zero?
+ or esi,esi ;Are low 32 bits zero too?
+ jnz LongNorm80
+ mov cl,bTAG_ZERO
+ jmp short SaveStack
+
+;This code accepts and works correctly with pseudo-denormals (MSB already set)
+LongNorm80:
+ xchg ebx,esi ;Shift up 32 bits
+ sub ecx,32 shl 16 ;Correct exponent
+ShortNorm80:
+ add ecx,(TexpBias-IexpBias+1-31) shl 16 ;Fix up bias
+ bsr edx,ebx ;Scan for MSB
+;Bit number in edx ranges from 0 to 31
+ mov cl,dl
+ not cl ;Convert bit number to shift count
+ shld ebx,esi,cl
+ shl esi,cl
+ shl edx,16 ;Move exp. adjustment to high end
+ add ecx,edx ;Adjust exponent
+        jmp     short SetupTag
+
+SaveStack:
+ mov eax, PCR[PcTeb]
+ mov [eax].CURstk,edi
+ mov [eax+edi].lManLo,esi
+ mov [eax+edi].lManHi,ebx
+ mov [eax+edi].ExpSgn,ecx
+ mov ebx, eax ; (ebx) = PcTeb
+ ret
+
+Unsupported:
+ mov ebx, PCR[PcTeb]
+ or [ebx].CURerr,Invalid ; (assume it's masked?)
+ mov [ebx+edi].lManLo,0
+ mov [ebx+edi].lManHi,0C0000000H
+ mov [ebx+edi].ExpSgn,TexpMax shl 16 + bSign shl 8 + bTAG_NAN
+ mov [ebx].CURstk,edi ;Update top of stack
+ ret
+
+_TEXT ENDS
+END
diff --git a/private/ntos/ke/i386/exceptn.c b/private/ntos/ke/i386/exceptn.c
new file mode 100644
index 000000000..7c700e591
--- /dev/null
+++ b/private/ntos/ke/i386/exceptn.c
@@ -0,0 +1,1270 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ exceptn.c
+
+Abstract:
+
+    This module implements the code necessary to dispatch exceptions to the
+ proper mode and invoke the exception dispatcher.
+
+Author:
+
+ David N. Cutler (davec) 30-Apr-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ 14-Feb-1990 shielint
+
+ Modified for NT386 interrupt manager
+
+ 6-April-1990 bryanwi
+
+ Fix non-canonical stack case for 386.
+
+--*/
+
+#include "ki.h"
+
+extern UCHAR VdmUserCr0MapIn[];
+
+VOID
+Ki386AdjustEsp0(
+ IN PKTRAP_FRAME TrapFrame
+ );
+
+BOOLEAN
+KiEm87StateToNpxFrame(
+    OUT PFLOATING_SAVE_AREA NpxFrame
+ );
+
+BOOLEAN
+KiNpxFrameToEm87State(
+    IN PFLOATING_SAVE_AREA NpxFrame
+ );
+
+
+ULONG
+KiEspFromTrapFrame(
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine fetches the correct esp from a trapframe, accounting
+ for whether the frame is a user or kernel mode frame, and whether
+ it has been edited.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+Return Value:
+
+ Value of Esp.
+
+--*/
+
+{
+ if (((TrapFrame->SegCs & MODE_MASK) != KernelMode) ||
+ (TrapFrame->EFlags & EFLAGS_V86_MASK)) {
+
+ // User mode frame, real value of Esp is always in HardwareEsp.
+
+ return TrapFrame->HardwareEsp;
+
+ } else {
+
+ if ((TrapFrame->SegCs & FRAME_EDITED) == 0) {
+
+ // Kernel mode frame which has had esp edited,
+ // value of Esp is in TempEsp.
+
+ return TrapFrame->TempEsp;
+
+ } else {
+
+            // Kernel mode frame which has not had esp edited, compute esp.
+
+ return (ULONG)&TrapFrame->HardwareEsp;
+ }
+ }
+}
+
+VOID
+KiEspToTrapFrame(
+ IN PKTRAP_FRAME TrapFrame,
+ IN ULONG Esp
+ )
+
+/*++
+
+Routine Description:
+
+ This routine sets the specified value Esp into the trap frame,
+ accounting for whether the frame is a user or kernel mode frame,
+ and whether it has been edited before.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+ Esp - New value for Esp.
+
+Return Value:
+
+ None.
+
+--*/
+{
+ ULONG OldEsp;
+
+ OldEsp = KiEspFromTrapFrame(TrapFrame);
+
+ if (((TrapFrame->SegCs & MODE_MASK) != KernelMode) ||
+ (TrapFrame->EFlags & EFLAGS_V86_MASK)) {
+
+ //
+ // User mode trap frame
+ //
+
+ TrapFrame->HardwareEsp = Esp;
+
+ } else {
+
+ //
+ // Kernel mode esp can't be lowered or iret emulation will fail
+ //
+
+ if (Esp < OldEsp)
+ KeBugCheck(SET_OF_INVALID_CONTEXT);
+
+ //
+ // Edit frame, setting edit marker as needed.
+ //
+
+ if ((TrapFrame->SegCs & FRAME_EDITED) == 0) {
+
+ // Kernel frame that has already been edited,
+ // store value in TempEsp.
+
+ TrapFrame->TempEsp = Esp;
+
+ } else {
+
+ // Kernel frame for which Esp is being edited first time.
+            // Save real SegCs, set marker in SegCs, save Esp value.
+
+ if (OldEsp != Esp) {
+ TrapFrame->TempSegCs = TrapFrame->SegCs;
+ TrapFrame->SegCs = TrapFrame->SegCs & ~FRAME_EDITED;
+ TrapFrame->TempEsp = Esp;
+ }
+ }
+ }
+}
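+
+//
+// Summary of the edited-frame convention used above: an unedited kernel mode
+// frame keeps the FRAME_EDITED bit set in SegCs, and its Esp is simply the
+// address just past HardwareEsp. The first time Esp is edited, the real SegCs
+// is saved in TempSegCs, FRAME_EDITED is cleared, and the new value goes to
+// TempEsp; KiEspFromTrapFrame tests the bit to pick the right source.
+//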
+
+ULONG
+KiSegSsFromTrapFrame(
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine fetches the correct ss from a trapframe, accounting
+ for whether the frame is a user or kernel mode frame.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+Return Value:
+
+ Value of SegSs.
+
+--*/
+
+{
+ if (TrapFrame->EFlags & EFLAGS_V86_MASK){
+ return TrapFrame->HardwareSegSs;
+ } else if ((TrapFrame->SegCs & MODE_MASK) != KernelMode) {
+
+ //
+ // It's user mode. The HardwareSegSs contains R3 data selector.
+ //
+
+ return TrapFrame->HardwareSegSs | RPL_MASK;
+ } else {
+ return KGDT_R0_DATA;
+ }
+}
+
+VOID
+KiSegSsToTrapFrame(
+ IN PKTRAP_FRAME TrapFrame,
+ IN ULONG SegSs
+ )
+
+/*++
+
+Routine Description:
+
+ It turns out that in a flat system there are only two legal values
+ for SS. Therefore, this procedure forces the appropriate one
+ of those values to be used. The legal SS value is a function of
+ which CS value is already set.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+ SegSs - value of SS caller would like to set.
+
+Return Value:
+
+ Nothing.
+
+--*/
+
+{
+ SegSs &= SEGMENT_MASK; // Throw away the high order trash bits
+
+ if (TrapFrame->EFlags & EFLAGS_V86_MASK) {
+ TrapFrame->HardwareSegSs = SegSs;
+ } else if ((TrapFrame->SegCs & MODE_MASK) == UserMode) {
+
+ //
+        // If user mode, we simply put SegSs into the trap frame.  If SegSs
+        // is a bogus value, the trap 0d handler will be able to detect
+        // this and handle it appropriately.
+ //
+
+ TrapFrame->HardwareSegSs = SegSs | RPL_MASK;
+ }
+
+ //
+ // else {
+ // The frame is a kernel mode frame, which does not have
+ // a place to store SS. Therefore, do nothing.
+ //
+}
+
+VOID
+KeContextFromKframes (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PCONTEXT ContextFrame
+ )
+
+/*++
+
+Routine Description:
+
+    This routine moves the selected contents of the specified trap and exception
+    frames into the specified context frame according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame from which context
+ should be copied into the context record. This argument is ignored since
+ there is no exception frame on NT386.
+
+ ContextFrame - Supplies a pointer to the context frame that receives the
+ context copied from the trap and exception frames.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PFLOATING_SAVE_AREA NpxFrame;
+ BOOLEAN StateSaved;
+ ULONG i;
+
+ UNREFERENCED_PARAMETER( ExceptionFrame );
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set registers ebp, eip, cs, eflag, esp and ss.
+ //
+
+ ContextFrame->Ebp = TrapFrame->Ebp;
+ ContextFrame->Eip = TrapFrame->Eip;
+
+ if (((TrapFrame->SegCs & FRAME_EDITED) == 0) &&
+ ((TrapFrame->EFlags & EFLAGS_V86_MASK) == 0)) {
+ ContextFrame->SegCs = TrapFrame->TempSegCs & SEGMENT_MASK;
+ } else {
+ ContextFrame->SegCs = TrapFrame->SegCs & SEGMENT_MASK;
+ }
+ ContextFrame->EFlags = TrapFrame->EFlags;
+ ContextFrame->SegSs = KiSegSsFromTrapFrame(TrapFrame);
+ ContextFrame->Esp = KiEspFromTrapFrame(TrapFrame);
+ }
+
+ //
+ // Set segment register contents if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS) {
+
+ //
+ // Set segment registers gs, fs, es, ds.
+ //
+ // These values are junk most of the time, but useful
+ // for debugging under certain conditions. Therefore,
+ // we report whatever was in the frame.
+ //
+ if (TrapFrame->EFlags & EFLAGS_V86_MASK) {
+ ContextFrame->SegGs = TrapFrame->V86Gs & SEGMENT_MASK;
+ ContextFrame->SegFs = TrapFrame->V86Fs & SEGMENT_MASK;
+ ContextFrame->SegEs = TrapFrame->V86Es & SEGMENT_MASK;
+ ContextFrame->SegDs = TrapFrame->V86Ds & SEGMENT_MASK;
+ }
+ else {
+ if (TrapFrame->SegCs == KGDT_R0_CODE) {
+ //
+ // Trap frames created from R0_CODE traps do not save
+ // the following selectors. Set them in the frame now.
+ //
+
+ TrapFrame->SegGs = 0;
+ TrapFrame->SegFs = KGDT_R0_PCR;
+ TrapFrame->SegEs = KGDT_R3_DATA | RPL_MASK;
+ TrapFrame->SegDs = KGDT_R3_DATA | RPL_MASK;
+ }
+
+ ContextFrame->SegGs = TrapFrame->SegGs & SEGMENT_MASK;
+ ContextFrame->SegFs = TrapFrame->SegFs & SEGMENT_MASK;
+ ContextFrame->SegEs = TrapFrame->SegEs & SEGMENT_MASK;
+ ContextFrame->SegDs = TrapFrame->SegDs & SEGMENT_MASK;
+ }
+
+ }
+
+ //
+ // Set integer register contents if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Set integer registers edi, esi, ebx, edx, ecx, eax
+ //
+
+ ContextFrame->Edi = TrapFrame->Edi;
+ ContextFrame->Esi = TrapFrame->Esi;
+ ContextFrame->Ebx = TrapFrame->Ebx;
+ ContextFrame->Ecx = TrapFrame->Ecx;
+ ContextFrame->Edx = TrapFrame->Edx;
+ ContextFrame->Eax = TrapFrame->Eax;
+ }
+
+ //
+ // Fetch floating register contents if requested, and type of target
+ // is user. (system frames have no fp state, so ignore request)
+ //
+
+ if ( ((ContextFrame->ContextFlags & CONTEXT_FLOATING_POINT) ==
+ CONTEXT_FLOATING_POINT) &&
+ ((TrapFrame->SegCs & MODE_MASK) == UserMode)) {
+
+ //
+ // This is the base TrapFrame, and the NpxFrame is on the base
+ // of the kernel stack, just above it in memory.
+ //
+
+ NpxFrame = (PFLOATING_SAVE_AREA)(TrapFrame + 1);
+
+ if (KeI386NpxPresent) {
+
+ //
+ // Force the coprocessors state to the save area and copy it
+            // Force the coprocessor's state to the save area and copy it
+ //
+
+ KiFlushNPXState ();
+ ContextFrame->FloatSave.ControlWord = NpxFrame->ControlWord;
+ ContextFrame->FloatSave.StatusWord = NpxFrame->StatusWord;
+ ContextFrame->FloatSave.TagWord = NpxFrame->TagWord;
+ ContextFrame->FloatSave.ErrorOffset = NpxFrame->ErrorOffset;
+ ContextFrame->FloatSave.ErrorSelector = NpxFrame->ErrorSelector;
+ ContextFrame->FloatSave.DataOffset = NpxFrame->DataOffset;
+ ContextFrame->FloatSave.DataSelector = NpxFrame->DataSelector;
+ ContextFrame->FloatSave.Cr0NpxState = NpxFrame->Cr0NpxState;
+ for (i = 0; i < SIZE_OF_80387_REGISTERS; i++) {
+ ContextFrame->FloatSave.RegisterArea[i] = NpxFrame->RegisterArea[i];
+ }
+ } else {
+
+ //
+ // The 80387 is being emulated by the R3 emulator.
+ // ** The only time the Npx state is ever obtained or set is
+ // ** for userlevel handling. Current Irql must be 0 or 1.
+ // Go slurp the emulator's R3 data and generate the
+ // floating point context
+ //
+
+ StateSaved = KiEm87StateToNpxFrame(&ContextFrame->FloatSave);
+ if (StateSaved) {
+ ContextFrame->FloatSave.Cr0NpxState = NpxFrame->Cr0NpxState;
+ } else {
+
+ //
+                // The floating point state cannot be determined.
+                // Remove the floating point flag from the context frame flags.
+ //
+
+ ContextFrame->ContextFlags &= (~CONTEXT_FLOATING_POINT) | CONTEXT_i386;
+ }
+ }
+ }
+
+ //
+ // Fetch Dr register contents if requested. Values may be trash.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_DEBUG_REGISTERS) ==
+ CONTEXT_DEBUG_REGISTERS) {
+
+ ContextFrame->Dr0 = TrapFrame->Dr0;
+ ContextFrame->Dr1 = TrapFrame->Dr1;
+ ContextFrame->Dr2 = TrapFrame->Dr2;
+ ContextFrame->Dr3 = TrapFrame->Dr3;
+ ContextFrame->Dr6 = TrapFrame->Dr6;
+
+ //
+ // If it's a user mode frame, and the thread doesn't have DRs set,
+ // and we just return the trash in the frame, we risk accidentally
+        // and we just return the trash in the frame, we risk accidentally
+        // making the thread debug-active with trash values on a later set.  Therefore,
+ //
+
+ if ((((TrapFrame->SegCs & MODE_MASK) != KernelMode) ||
+ ((TrapFrame->EFlags & EFLAGS_V86_MASK) != 0)) &&
+ (KeGetCurrentThread()->DebugActive == TRUE)) {
+
+ ContextFrame->Dr7 = TrapFrame->Dr7;
+
+ } else {
+
+ ContextFrame->Dr7 = 0L;
+
+ }
+ }
+
+}
+
+VOID
+KeContextToKframes (
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN PCONTEXT ContextFrame,
+ IN ULONG ContextFlags,
+ IN KPROCESSOR_MODE PreviousMode
+ )
+
+/*++
+
+Routine Description:
+
+ This routine moves the selected contents of the specified context frame into
+ the specified trap and exception frames according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame that receives the volatile
+ context from the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame that receives
+ the nonvolatile context from the context record. This argument is
+ ignored since there is no exception frame on NT386.
+
+ ContextFrame - Supplies a pointer to a context frame that contains the
+ context that is to be copied into the trap and exception frames.
+
+ ContextFlags - Supplies the set of flags that specify which parts of the
+ context frame are to be copied into the trap and exception frames.
+
+ PreviousMode - Supplies the processor mode for which the trap and exception
+ frames are being built.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PFLOATING_SAVE_AREA NpxFrame;
+ ULONG i;
+ BOOLEAN StateSaved;
+ BOOLEAN ModeChanged;
+#if DBG
+ PKPCR Pcr;
+ KIRQL OldIrql;
+#endif
+
+ UNREFERENCED_PARAMETER( ExceptionFrame );
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ if ((ContextFrame->EFlags & EFLAGS_V86_MASK) !=
+ (TrapFrame->EFlags & EFLAGS_V86_MASK)) {
+ ModeChanged = TRUE;
+ } else {
+ ModeChanged = FALSE;
+ }
+
+
+ //
+        // Set registers eflags, ebp, eip, cs, esp and ss.
+        // EFlags is set first so that the auxiliary routines can check
+        // the v86 bit, as well as cs, to determine whether the frame is
+        // kernel or user mode. (A v86-mode cs can have any value.)
+ //
+
+ TrapFrame->EFlags = SANITIZE_FLAGS(ContextFrame->EFlags, PreviousMode);
+ TrapFrame->Ebp = ContextFrame->Ebp;
+ TrapFrame->Eip = ContextFrame->Eip;
+ if (TrapFrame->EFlags & EFLAGS_V86_MASK) {
+ TrapFrame->SegCs = ContextFrame->SegCs;
+ } else {
+ TrapFrame->SegCs = SANITIZE_SEG(ContextFrame->SegCs, PreviousMode);
+ if (PreviousMode != KernelMode && TrapFrame->SegCs < 8) {
+
+ //
+                // If user mode and the selector value is less than 8, we
+                // know it is an invalid selector.  Set it to the flat user
+                // mode selector.  Another reason we need to check for this
+                // is that any cs value less than 8 causes our exit kernel
+                // macro to treat its exit trap frame as an edited frame.
+ //
+
+ TrapFrame->SegCs = KGDT_R3_CODE | RPL_MASK;
+ }
+ }
+ KiSegSsToTrapFrame(TrapFrame, ContextFrame->SegSs);
+ KiEspToTrapFrame(TrapFrame, ContextFrame->Esp);
+ if (ModeChanged) {
+ Ki386AdjustEsp0(TrapFrame); // realign esp0 in the tss
+ }
+ }
+
+ //
+ // Set segment register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS) {
+
+ //
+ // Set segment registers gs, fs, es, ds.
+ //
+
+ //
+        // There is only one legal value for DS and ES, so simply set it.
+        // This allows KeContextFromKframes to report the real values in
+        // the frame (which are junk most of the time, but sometimes useful
+        // for debugging).
+        // There are only two legal values for FS; let either one be set.
+        // Force GS to 0 to deal with entry via SysCall and exit
+        // via exception.
+ //
+ // For V86 mode, the FS, GS, DS, and ES registers must be properly
+ // set from the supplied context.
+ //
+
+ if (TrapFrame->EFlags & EFLAGS_V86_MASK) {
+ TrapFrame->V86Fs = ContextFrame->SegFs;
+ TrapFrame->V86Es = ContextFrame->SegEs;
+ TrapFrame->V86Ds = ContextFrame->SegDs;
+ TrapFrame->V86Gs = ContextFrame->SegGs;
+ } else if (((TrapFrame->SegCs & MODE_MASK) == KernelMode)) {
+
+ //
+ // set up the standard selectors
+ //
+
+ TrapFrame->SegFs = SANITIZE_SEG(ContextFrame->SegFs, PreviousMode);
+ TrapFrame->SegEs = KGDT_R3_DATA | RPL_MASK;
+ TrapFrame->SegDs = KGDT_R3_DATA | RPL_MASK;
+ TrapFrame->SegGs = 0;
+ } else {
+
+ //
+            // If user mode, we simply return whatever is left in the context
+            // frame and let trap 0d handle it (if we later trap while popping
+            // the trap frame).  V86 mode also gets handled here.
+ //
+
+ TrapFrame->SegFs = ContextFrame->SegFs;
+ TrapFrame->SegEs = ContextFrame->SegEs;
+ TrapFrame->SegDs = ContextFrame->SegDs;
+ if (TrapFrame->SegCs == (KGDT_R3_CODE | RPL_MASK)) {
+ TrapFrame->SegGs = 0;
+ } else {
+ TrapFrame->SegGs = ContextFrame->SegGs;
+ }
+ }
+ }
+ //
+ // Set integer registers contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Set integer registers edi, esi, ebx, edx, ecx, eax.
+ //
+        // Can NOT call RtlMoveMemory here because the registers are not
+        // contiguous in the pusha frame, and we do not want to export
+        // bits of junk into the context record.
+ //
+
+ TrapFrame->Edi = ContextFrame->Edi;
+ TrapFrame->Esi = ContextFrame->Esi;
+ TrapFrame->Ebx = ContextFrame->Ebx;
+ TrapFrame->Ecx = ContextFrame->Ecx;
+ TrapFrame->Edx = ContextFrame->Edx;
+ TrapFrame->Eax = ContextFrame->Eax;
+
+ }
+
+ //
+    // Set floating-point register contents if requested and the target
+    // frame is a user frame. (System frames have no FP state, so ignore
+    // the request.)
+ //
+
+ if (((ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) &&
+ ((TrapFrame->SegCs & MODE_MASK) == UserMode)) {
+
+ //
+ // This is the base TrapFrame, and the NpxFrame is on the base
+ // of the kernel stack, just above it in memory.
+ //
+
+ NpxFrame = (PFLOATING_SAVE_AREA)(TrapFrame + 1);
+
+ if (KeI386NpxPresent) {
+
+ //
+ // Set coprocessor stack, control and status registers
+ //
+
+ KiFlushNPXState ();
+ NpxFrame->ControlWord = ContextFrame->FloatSave.ControlWord;
+ NpxFrame->StatusWord = ContextFrame->FloatSave.StatusWord;
+ NpxFrame->TagWord = ContextFrame->FloatSave.TagWord;
+ NpxFrame->ErrorOffset = ContextFrame->FloatSave.ErrorOffset;
+ NpxFrame->ErrorSelector = ContextFrame->FloatSave.ErrorSelector;
+ NpxFrame->DataOffset = ContextFrame->FloatSave.DataOffset;
+ NpxFrame->DataSelector = ContextFrame->FloatSave.DataSelector;
+
+ //
+ // Make sure only valid floating state bits are moved to Cr0NpxState.
+ //
+
+ NpxFrame->Cr0NpxState &= ~(CR0_EM | CR0_MP | CR0_TS);
+
+ //
+ // Only let VDMs turn on the EM bit. The kernel can't do
+ // anything for FLAT apps
+ //
+ if (KeGetCurrentThread()->ApcState.Process->VdmFlag & 0xf) {
+ NpxFrame->Cr0NpxState |= ContextFrame->FloatSave.Cr0NpxState &
+ (CR0_EM | CR0_MP);
+ }
+
+ for (i = 0; i < SIZE_OF_80387_REGISTERS; i++) {
+ NpxFrame->RegisterArea[i] = ContextFrame->FloatSave.RegisterArea[i];
+ }
+
+ } else {
+
+ if (KeGetCurrentThread()->ApcState.Process->VdmFlag & 0xf) {
+
+ //
+ // This is a special hack to allow SetContext for VDMs to
+                // turn on/off its CR0_EM bit.
+ //
+
+ NpxFrame->Cr0NpxState &= ~(CR0_MP | CR0_TS | CR0_EM | CR0_PE);
+ NpxFrame->Cr0NpxState |=
+ VdmUserCr0MapIn[ContextFrame->FloatSave.Cr0NpxState & (CR0_EM | CR0_MP)];
+
+ } else {
+
+ //
+ // The 80387 is being emulated by the R3 emulator.
+ // ** The only time the Npx state is ever obtained or set is
+ // ** for userlevel handling. Current Irql must be 0 or 1.
+ // And the context being set must be for the current thread.
+                // Go smash the floating-point context into the R3 emulator's
+ // data area.
+ //
+#if DBG
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ Pcr = KeGetPcr();
+ ASSERT (Pcr->Prcb->CurrentThread->Teb == Pcr->NtTib.Self);
+ KeLowerIrql (OldIrql);
+#endif
+
+ StateSaved = KiNpxFrameToEm87State(&ContextFrame->FloatSave);
+ if (StateSaved) {
+
+ //
+ // Make sure only valid floating state bits are moved to
+ // Cr0NpxState. Since we are emulating, don't allow
+ // resetting CR0_EM.
+ //
+
+ NpxFrame->Cr0NpxState &= ~(CR0_MP | CR0_TS);
+ NpxFrame->Cr0NpxState |=
+ ContextFrame->FloatSave.Cr0NpxState & CR0_MP;
+ }
+ }
+ }
+ }
+
+ //
+ // Set debug register state if specified. If previous mode is user
+    // mode (i.e. it is a user frame we are setting) and the effect will be
+    // to cause at least one of the LE (local enable) bits in Dr7 to be
+    // set (i.e. at least one of Dr0-Dr3 is active), then set DebugActive
+    // in the thread object to TRUE.  Otherwise set it to FALSE.
+ //
+
+ if ((ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS) {
+
+ TrapFrame->Dr0 = SANITIZE_DRADDR(ContextFrame->Dr0, PreviousMode);
+ TrapFrame->Dr1 = SANITIZE_DRADDR(ContextFrame->Dr1, PreviousMode);
+ TrapFrame->Dr2 = SANITIZE_DRADDR(ContextFrame->Dr2, PreviousMode);
+ TrapFrame->Dr3 = SANITIZE_DRADDR(ContextFrame->Dr3, PreviousMode);
+ TrapFrame->Dr6 = SANITIZE_DR6(ContextFrame->Dr6, PreviousMode);
+ TrapFrame->Dr7 = SANITIZE_DR7(ContextFrame->Dr7, PreviousMode);
+
+ if (PreviousMode != KernelMode) {
+ KeGetPcr()->DebugActive = KeGetCurrentThread()->DebugActive =
+ (BOOLEAN)((ContextFrame->Dr7 & DR7_ACTIVE) != 0);
+ }
+ }
+
+ //
+ // If thread is supposed to have IOPL, then force it on in eflags
+ //
+ if (KeGetCurrentThread()->Iopl) {
+ TrapFrame->EFlags |= (EFLAGS_IOPL_MASK & -1); // IOPL = 3
+ }
+
+ return;
+}
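+
+#if 0
+
+//
+// Illustrative sketch only (not compiled): a hypothetical caller that uses
+// the two routines above to edit the Eip of a user-mode trap frame.  The
+// routine name and its arguments are invented for the example; the pattern
+// of setting ContextFlags, capturing, editing, and writing back is the one
+// the routines above expect.
+//
+
+VOID
+KiExampleEditUserEip (
+    IN PKTRAP_FRAME TrapFrame,
+    IN ULONG NewEip
+    )
+{
+    CONTEXT Context;
+
+    //
+    // Capture control and integer state from the trap frame.
+    //
+
+    Context.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;
+    KeContextFromKframes(TrapFrame, NULL, &Context);
+
+    //
+    // Edit the continuation address and write the state back, sanitized
+    // for user mode.
+    //
+
+    Context.Eip = NewEip;
+    KeContextToKframes(TrapFrame, NULL, &Context,
+                       Context.ContextFlags, UserMode);
+}
+
+#endif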
+
+VOID
+KiDispatchException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN BOOLEAN FirstChance
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to dispatch an exception to the proper mode and
+ to cause the exception dispatcher to be called. If the previous mode is
+ kernel, then the exception dispatcher is called directly to process the
+ exception. Otherwise the exception record, exception frame, and trap
+ frame contents are copied to the user mode stack. The contents of the
+    exception frame and trap frame are then modified such that when control is
+    returned, execution will commence in user mode in a routine which will
+ call the exception dispatcher.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame. For NT386,
+ this should be NULL.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ PreviousMode - Supplies the previous processor mode.
+
+ FirstChance - Supplies a boolean value that specifies whether this is
+ the first (TRUE) or second (FALSE) chance for the exception.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ CONTEXT ContextFrame;
+ EXCEPTION_RECORD ExceptionRecord1, ExceptionRecord2;
+ LONG Length;
+ ULONG UserStack1;
+ ULONG UserStack2;
+
+ //
+ // Move machine state from trap and exception frames to a context frame,
+ // and increment the number of exceptions dispatched.
+ //
+
+ KeGetCurrentPrcb()->KeExceptionDispatchCount += 1;
+ ContextFrame.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS;
+ if (PreviousMode == UserMode) {
+ //
+        // For user-mode exceptions always try to dispatch the floating
+        // point state.  This allows exception handlers & debuggers to
+        // examine/edit the npx context if required.  Plus it allows
+        // exception handlers to use fp instructions without destroying
+ // the npx state at the time of the exception.
+ //
+ // Note: If there's no 80387, ContextTo/FromKFrames will use the
+ // emulator's current state. If the emulator can not give the
+ // current state, then the context_floating_point bit will be
+ // turned off by ContextFromKFrames.
+ //
+
+ ContextFrame.ContextFlags |= CONTEXT_FLOATING_POINT;
+ }
+
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextFrame);
+
+ //
+    // If it is a BREAK_POINT exception, we subtract 1 from EIP and report
+    // the updated EIP to the user.  This is because Cruiser requires that EIP
+    // point to the int 3 instruction (not the instruction following int 3).
+    // In this case the BreakPoint exception is fatal; otherwise we would step
+    // on the int 3 over and over again if the user does not handle it.
+    //
+    // If the BREAK_POINT occurred in V86 mode, the debugger running in the
+    // VDM will expect CS:EIP to point after the exception (the way the
+    // processor left it).  This is also true for protected mode DOS
+    // app debuggers.  We will need a way to detect this.
+ //
+ //
+
+// if ((ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) &&
+// !(ContextFrame.EFlags & EFLAGS_V86_MASK)) {
+
+ switch (ExceptionRecord->ExceptionCode) {
+ case STATUS_BREAKPOINT:
+ ContextFrame.Eip--;
+ break;
+ }
+
+ //
+ // Select the method of handling the exception based on the previous mode.
+ //
+
+ ASSERT ((
+ !((PreviousMode == KernelMode) &&
+ (ContextFrame.EFlags & EFLAGS_V86_MASK))
+ ));
+
+ if (PreviousMode == KernelMode) {
+
+ //
+ // Previous mode was kernel.
+ //
+ // If the kernel debugger is active, then give the kernel debugger the
+ // first chance to handle the exception. If the kernel debugger handles
+ // the exception, then continue execution. Else attempt to dispatch the
+ // exception to a frame based handler. If a frame based handler handles
+ // the exception, then continue execution.
+ //
+ // If a frame based handler does not handle the exception,
+ // give the kernel debugger a second chance, if it's present.
+ //
+ // If the exception is still unhandled, call KeBugCheck().
+ //
+
+ if (FirstChance == TRUE) {
+
+ if ((KiDebugRoutine != NULL) &&
+ (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ FALSE)) != FALSE)) {
+
+ goto Handled1;
+ }
+
+ // Kernel debugger didn't handle exception.
+
+ if (RtlDispatchException(ExceptionRecord, &ContextFrame) == TRUE) {
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ TRUE)) != FALSE)) {
+
+ goto Handled1;
+ }
+
+ KeBugCheckEx(
+ KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]
+ );
+
+ } else {
+
+ //
+ // Previous mode was user.
+ //
+ // If this is the first chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // transfer the exception information to the user stack, transition to
+ // user mode, and attempt to dispatch the exception to a frame based
+ // handler. If a frame based handler handles the exception, then continue
+ // execution with the continue system service. Else execute the
+ // NtRaiseException system service with FirstChance == FALSE, which
+ // will call this routine a second time to process the exception.
+ //
+ // If this is the second chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // if the current process has a subsystem port, then send a message to
+ // the subsystem port and wait for a reply. If the subsystem handles the
+ // exception, then continue execution. Else terminate the thread.
+ //
+
+
+ if (FirstChance == TRUE) {
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if ( PsGetCurrentProcess()->DebugPort ) {
+ if ( (KiDebugRoutine != NULL) &&
+ KdIsThisAKdTrap(ExceptionRecord, &ContextFrame, UserMode) ) {
+
+ if ((((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ FALSE)) != FALSE)) {
+
+ goto Handled1;
+ }
+ }
+ } else {
+ if ((KiDebugRoutine != NULL) &&
+ (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ FALSE)) != FALSE)) {
+
+ goto Handled1;
+ }
+ }
+
+ if (DbgkForwardException(ExceptionRecord, TRUE, FALSE)) {
+ goto Handled2;
+ }
+
+ //
+ // Transfer exception information to the user stack, transition
+ // to user mode, and attempt to dispatch the exception to a frame
+ // based handler.
+
+ repeat:
+ try {
+
+ //
+                // If the SS segment is not the 32-bit flat selector, there is
+                // no point in dispatching the exception to a frame-based handler.
+ //
+
+ if (TrapFrame->HardwareSegSs != (KGDT_R3_DATA | RPL_MASK) ||
+ TrapFrame->EFlags & EFLAGS_V86_MASK ) {
+ ExceptionRecord2.ExceptionCode = STATUS_ACCESS_VIOLATION;
+ ExceptionRecord2.ExceptionFlags = 0;
+ ExceptionRecord2.NumberParameters = 0;
+ ExRaiseException(&ExceptionRecord2);
+ }
+
+ //
+ // Compute length of context record and new aligned user stack
+ // pointer.
+ //
+
+ Length = (sizeof(CONTEXT) + CONTEXT_ROUND) & ~CONTEXT_ROUND;
+ UserStack1 = (ContextFrame.Esp & ~CONTEXT_ROUND) - Length;
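+
+                //
+                // N.B. The first expression rounds sizeof(CONTEXT) up to a
+                //      CONTEXT_ALIGN boundary (assuming CONTEXT_ROUND is
+                //      CONTEXT_ALIGN - 1); the second aligns the user Esp
+                //      down and then makes room for the record below it.
+                //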
+
+ //
+                // Probe the user stack area for writability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack1, Length, CONTEXT_ALIGN);
+ RtlMoveMemory((PULONG)UserStack1, &ContextFrame, sizeof(CONTEXT));
+
+ //
+ // Compute length of exception record and new aligned stack
+ // address.
+ //
+
+ Length = (sizeof(EXCEPTION_RECORD) - (EXCEPTION_MAXIMUM_PARAMETERS -
+ ExceptionRecord->NumberParameters) * sizeof(ULONG) +3) &
+ (~3);
+ UserStack2 = UserStack1 - Length;
+
+ //
+                // Probe the user stack area for writability and then transfer
+                // the exception record to the user stack area.
+                // N.B. The probed length is Length+8 because two arguments
+                //      must be pushed onto the user stack later.
+ //
+
+ ProbeForWrite((PCHAR)(UserStack2 - 8), Length + 8, sizeof(ULONG));
+ RtlMoveMemory((PULONG)UserStack2, ExceptionRecord, Length);
+
+ //
+ // Push address of exception record, context record to the
+ // user stack. They are the two parameters required by
+ // _KiUserExceptionDispatch.
+ //
+
+ *(PULONG)(UserStack2 - sizeof(ULONG)) = UserStack1;
+ *(PULONG)(UserStack2 - 2*sizeof(ULONG)) = UserStack2;
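+
+                //
+                // The resulting user stack layout (summarizing the stores
+                // above) is:
+                //
+                //      UserStack1        CONTEXT record
+                //      UserStack2        EXCEPTION_RECORD
+                //      UserStack2 - 4    address of the CONTEXT record
+                //      UserStack2 - 8    address of the EXCEPTION_RECORD
+                //                        (this becomes the new Esp)
+                //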
+
+ //
+ // Set new stack pointer to the trap frame.
+ //
+
+ KiSegSsToTrapFrame(TrapFrame, KGDT_R3_DATA);
+ KiEspToTrapFrame(TrapFrame, (UserStack2 - sizeof(ULONG)*2));
+
+ //
+ // Force correct R3 selectors into TrapFrame.
+ //
+
+ TrapFrame->SegCs = SANITIZE_SEG(KGDT_R3_CODE, PreviousMode);
+ TrapFrame->SegDs = SANITIZE_SEG(KGDT_R3_DATA, PreviousMode);
+ TrapFrame->SegEs = SANITIZE_SEG(KGDT_R3_DATA, PreviousMode);
+ TrapFrame->SegFs = SANITIZE_SEG(KGDT_R3_TEB, PreviousMode);
+ TrapFrame->SegGs = 0;
+
+ //
+ // Set the address of the exception routine that will call the
+ // exception dispatcher and then return to the trap handler.
+ // The trap handler will restore the exception and trap frame
+ // context and continue execution in the routine that will
+ // call the exception dispatcher.
+ //
+
+ TrapFrame->Eip = (ULONG)KeUserExceptionDispatcher;
+ return;
+
+ } except (KiCopyInformation(&ExceptionRecord1,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // If the exception is a stack overflow, then attempt
+ // to raise the stack overflow exception. Otherwise,
+ // the user's stack is not accessible, or is misaligned,
+ // and second chance processing is performed.
+ //
+
+ if (ExceptionRecord1.ExceptionCode == STATUS_STACK_OVERFLOW) {
+ ExceptionRecord1.ExceptionAddress = ExceptionRecord->ExceptionAddress;
+ RtlMoveMemory((PVOID)ExceptionRecord,
+ &ExceptionRecord1, sizeof(EXCEPTION_RECORD));
+ goto repeat;
+ }
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ if (DbgkForwardException(ExceptionRecord, TRUE, TRUE)) {
+ goto Handled2;
+ } else if (DbgkForwardException(ExceptionRecord, FALSE, TRUE)) {
+ goto Handled2;
+ } else {
+ ZwTerminateThread(NtCurrentThread(), ExceptionRecord->ExceptionCode);
+ KeBugCheckEx(
+ KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]
+ );
+ }
+ }
+
+ //
+ // Move machine state from context frame to trap and exception frames and
+ // then return to continue execution with the restored state.
+ //
+
+Handled1:
+ KeContextToKframes(TrapFrame, ExceptionFrame, &ContextFrame,
+ ContextFrame.ContextFlags, PreviousMode);
+
+ //
+ // Exception was handled by the debugger or the associated subsystem
+ // and state was modified, if necessary, using the get state and set
+ // state capabilities. Therefore the context frame does not need to
+    // be transferred to the trap and exception frames.
+ //
+
+Handled2:
+ return;
+}
+
+ULONG
+KiCopyInformation (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord1,
+ IN PEXCEPTION_RECORD ExceptionRecord2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called from an exception filter to copy the exception
+ information from one exception record to another when an exception occurs.
+
+Arguments:
+
+ ExceptionRecord1 - Supplies a pointer to the destination exception record.
+
+ ExceptionRecord2 - Supplies a pointer to the source exception record.
+
+Return Value:
+
+ A value of EXCEPTION_EXECUTE_HANDLER is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Copy one exception record to another and return value that causes
+ // an exception handler to be executed.
+ //
+
+ RtlMoveMemory((PVOID)ExceptionRecord1,
+ (PVOID)ExceptionRecord2,
+ sizeof(EXCEPTION_RECORD));
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+
+NTSTATUS
+KeRaiseUserException(
+ IN NTSTATUS ExceptionCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function causes an exception to be raised in the calling thread's user-mode
+ context. It does this by editing the trap frame the kernel was entered with to
+ point to trampoline code that raises the requested exception.
+
+Arguments:
+
+ ExceptionCode - Supplies the status value to be used as the exception
+ code for the exception that is to be raised.
+
+Return Value:
+
+ The status value that should be returned by the caller.
+
+--*/
+
+{
+ PKTHREAD Thread;
+ PKTRAP_FRAME TrapFrame;
+ PTEB Teb;
+ ULONG PreviousEip;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ Thread = KeGetCurrentThread();
+ TrapFrame = Thread->TrapFrame;
+ Teb = (PTEB)Thread->Teb;
+
+ //
+ // In order to create the correct call stack, we return the previous
+    // EIP as the status code. The user-mode trampoline code will push this
+ // onto the stack for use as the return address. The status code to
+ // be raised is passed in the TEB.
+ //
+
+ try {
+ Teb->ExceptionCode = ExceptionCode;
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return(ExceptionCode);
+ }
+
+ PreviousEip = TrapFrame->Eip;
+ TrapFrame->Eip = KeRaiseUserExceptionDispatcher;
+
+ return((NTSTATUS)PreviousEip);
+}
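+
+//
+// N.B. The user-mode half of this protocol is an assumption not shown in
+//      this module: the trampoline at KeRaiseUserExceptionDispatcher
+//      presumably pushes the returned status (the previous Eip) as the
+//      return address, fetches the exception code from the TEB, and raises
+//      it with RtlRaiseException.
+//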
diff --git a/private/ntos/ke/i386/flush.c b/private/ntos/ke/i386/flush.c
new file mode 100644
index 000000000..1d7f41d9b
--- /dev/null
+++ b/private/ntos/ke/i386/flush.c
@@ -0,0 +1,170 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ flush.c
+
+Abstract:
+
+ This module implements i386 machine dependent kernel functions to flush
+ the data and instruction caches and to stall processor execution.
+
+Author:
+
+ David N. Cutler (davec) 26-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+
+// i386 and i486 have transparent caches, so these routines are no-ops;
+// they are defined away by macros in i386.h.
+
+#if 0
+
+VOID
+KeSweepDcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the data cache on all processors that are currently
+ running threads which are children of the current process or flushes the
+ data cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which data
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ HalSweepDcache();
+ return;
+}
+
+VOID
+KeSweepIcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the instruction cache on all processors that are
+ currently running threads which are children of the current process or
+ flushes the instruction cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ HalSweepIcache();
+
+#if defined(R4000)
+
+ HalSweepDcache();
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcacheRange (
+ IN BOOLEAN AllProcessors,
+ IN PVOID BaseAddress,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+    This function flushes a range of virtual addresses from the primary
+ instruction cache on all processors that are currently running threads
+ which are children of the current process or flushes the range of virtual
+ addresses from the primary instruction cache on all processors in the host
+ configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Offset;
+
+ //
+ // If the length of the range is greater than the size of the primary
+ // instruction cache, then set the length of the flush to the size of
+    // the primary instruction cache and set the base address to zero.
+ //
+ // N.B. It is assumed that the size of the primary instruction and
+ // data caches are the same.
+ //
+
+ if (Length > PCR->FirstLevelIcacheSize) {
+ BaseAddress = (PVOID)0;
+ Length = PCR->FirstLevelIcacheSize;
+ }
+
+ //
+ // Flush the specified range of virtual addresses from the primary
+ // instruction cache.
+ //
+
+ Offset = (ULONG)BaseAddress & PCR->DcacheAlignment;
+ Length = (Offset + Length + PCR->DcacheAlignment) & ~PCR->DcacheAlignment;
+ BaseAddress = (PVOID)((ULONG)BaseAddress & ~PCR->DcacheAlignment);
+ HalSweepIcacheRange(BaseAddress, Length);
+
+#if defined(R4000)
+
+ HalSweepDcacheRange(BaseAddress, Length);
+
+#endif
+
+ return;
+}
+#endif
diff --git a/private/ntos/ke/i386/flushtb.c b/private/ntos/ke/i386/flushtb.c
new file mode 100644
index 000000000..77e8a5511
--- /dev/null
+++ b/private/ntos/ke/i386/flushtb.c
@@ -0,0 +1,565 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+    flushtb.c
+
+Abstract:
+
+ This module implements machine dependent functions to flush
+ the translation buffers in an Intel x86 system.
+
+ N.B. This module contains only MP versions of the TB flush routines.
+ The UP versions are macros in ke.h
+ KeFlushEntireTb remains a routine for the UP system since it is
+ exported from the kernel for backwards compatibility.
+
+Author:
+
+ David N. Cutler (davec) 13-May-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ Shie-Lin Tzong (shielint) 30-Aug-1990
+ Implement MP version of KeFlushSingleTb and KeFlushEntireTb.
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiFlushTargetEntireTb (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Invalid,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushTargetMultipleTb (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+
+VOID
+KiFlushTargetSingleTb (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+#if defined(NT_UP)
+#undef KeFlushEntireTb
+#endif
+
+
+VOID
+KeFlushEntireTb (
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the entire translation buffer (TB) on all processors
+ that are currently running threads which are child of the current process
+ or flushes the entire translation buffer on all processors in the host
+ configuration.
+
+Arguments:
+
+ Invalid - Supplies a boolean value that specifies the reason for flushing
+ the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which translation
+ buffers are to be flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+
+ //
+ // Compute the target set of processors, disable context switching,
+ // and send the flush entire parameters to the target processors,
+ // if any, for execution.
+ //
+
+#if defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#else
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ Prcb = KeGetCurrentPrcb();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ KiLockContextSwap(&OldIrql);
+ Prcb = KeGetCurrentPrcb();
+ Process = Prcb->CurrentThread->ApcState.Process;
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= ~Prcb->SetMember;
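+
+    //
+    // For example (illustrative values): on a four-processor system with
+    // KeActiveProcessors == 0xF, a caller running on processor 2
+    // (SetMember == 0x4) computes TargetProcessors == 0xB, so the flush
+    // packet is sent to processors 0, 1, and 3.
+    //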
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushTargetEntireTb,
+ NULL,
+ NULL,
+ NULL);
+
+ IPI_INSTRUMENT_COUNT (Prcb->Number, FlushEntireTb);
+ }
+
+#endif
+
+ //
+ // Flush TB on current processor.
+ //
+
+ KeFlushCurrentTb();
+
+ //
+ // Wait until all target processors have finished and complete packet.
+ //
+
+#if defined(NT_UP)
+
+ KeLowerIrql(OldIrql);
+
+#else
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+
+ return;
+}
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushTargetEntireTb (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing the entire TB.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the entire TB on the current processor.
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+ KeFlushCurrentTb();
+ return;
+}
+
+VOID
+KeFlushMultipleTb (
+ IN ULONG Number,
+ IN PVOID *Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE *PtePointer OPTIONAL,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes multiple entries from the translation buffer
+ on all processors that are currently running threads which are
+    children of the current process or flushes multiple entries from
+ the translation buffer on all processors in the host configuration.
+
+Arguments:
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies an optional pointer to an array of pointers to
+ page table entries that receive the specified page table entry
+ value.
+
+    PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+    None.
+
+--*/
+
+{
+
+ ULONG Index;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
+
+ //
+ // Compute target set of processors.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ KiLockContextSwap(&OldIrql);
+ Process = Prcb->CurrentThread->ApcState.Process;
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= ~Prcb->SetMember;
+
+ //
+ // If a page table entry address array is specified, then set the
+ // specified page table entries to the specific value.
+ //
+
+ if (ARGUMENT_PRESENT(PtePointer)) {
+ for (Index = 0; Index < Number; Index += 1) {
+ *PtePointer[Index] = PteValue;
+ }
+ }
+
+ //
+ // If any target processors are specified, then send a flush multiple
+ // packet to the target set of processors.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushTargetMultipleTb,
+ (PVOID)Invalid,
+ (PVOID)Number,
+ (PVOID)Virtual);
+
+ IPI_INSTRUMENT_COUNT (Prcb->Number, FlushMultipleTb);
+ }
+
+ //
+ // Flush the specified entries from the TB on the current processor.
+ //
+
+ for (Index = 0; Index < Number; Index += 1) {
+ KiFlushSingleTb(Invalid, Virtual[Index]);
+ }
+
+ //
+ // Wait until all target processors have finished and complete packet.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Release the context swap lock.
+ //
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+ return;
+}
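+
+#if 0
+
+//
+// Illustrative sketch only (not compiled): a hypothetical caller that clears
+// two page table entries and invalidates the corresponding TB entries on all
+// processors running threads of the current process.  The routine name, its
+// arguments, and the zeroed PTE value are invented for the example; the call
+// itself matches the KeFlushMultipleTb interface above.
+//
+
+VOID
+MiExampleFlushTwoPages (
+    IN PVOID Va0,
+    IN PVOID Va1,
+    IN PHARDWARE_PTE Pte0,
+    IN PHARDWARE_PTE Pte1
+    )
+{
+    PVOID VirtualAddresses[2];
+    PHARDWARE_PTE PtePointers[2];
+    HARDWARE_PTE ZeroPte = {0};
+
+    VirtualAddresses[0] = Va0;
+    VirtualAddresses[1] = Va1;
+    PtePointers[0] = Pte0;
+    PtePointers[1] = Pte1;
+
+    //
+    // The caller must already be at or above DISPATCH_LEVEL, as asserted
+    // by KeFlushMultipleTb.
+    //
+
+    KeFlushMultipleTb(2, VirtualAddresses, TRUE, FALSE, PtePointers, ZeroPte);
+}
+
+#endif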
+
+VOID
+KiFlushTargetMultipleTb (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Invalid,
+ IN PVOID Number,
+ IN PVOID Virtual
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing multiple TB entries.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+    Invalid - Supplies a boolean value that determines whether the virtual
+ address is invalid.
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+ PVOID VirtualAddress[FLUSH_MULTIPLE_MAXIMUM];
+
+ //
+    // Capture the virtual addresses that are to be flushed from the TB
+    // on the current processor and then signal packet done.  The addresses
+    // must be copied locally before the packet is signaled done, since the
+    // requesting processor may release or reuse the parameter area once all
+    // targets have acknowledged.
+ //
+
+ for (Index = 0; Index < (ULONG) Number; Index += 1) {
+ VirtualAddress[Index] = ((PVOID *)(Virtual))[Index];
+ }
+
+ KiIpiSignalPacketDone(SignalDone);
+
+ //
+    // Flush the specified virtual addresses from the TB on the current
+ // processor.
+ //
+
+ for (Index = 0; Index < (ULONG) Number; Index += 1) {
+ KiFlushSingleTb((BOOLEAN)Invalid, VirtualAddress [Index]);
+ }
+}
+
+HARDWARE_PTE
+KeFlushSingleTb (
+ IN PVOID Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE PtePointer,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+    This function flushes a single entry from the translation buffer (TB) on all
+    processors that are currently running threads which are children of the current
+    process or flushes the single entry from the translation buffer on all
+    processors in the host configuration.
+
+Arguments:
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for flushing
+ the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which translation
+ buffers are to be flushed.
+
+ PtePointer - Address of Pte to update with new value.
+
+ PteValue - New value to put in the Pte. Will simply be assigned to
+ *PtePointer, in a fashion correct for the hardware.
+
+Return Value:
+
+ Returns the contents of the PtePointer before the new value
+ is stored.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ PKPROCESS Process;
+ HARDWARE_PTE OldPteValue;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
+
+ //
+ // Compute target set of processors.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ KiLockContextSwap(&OldIrql);
+ Process = Prcb->CurrentThread->ApcState.Process;
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= ~Prcb->SetMember;
+
+ //
+ // Capture the previous contents of the page table entry and set the
+ // page table entry to the new value.
+ //
+
+ OldPteValue = *PtePointer;
+ *PtePointer = PteValue;
+
+ //
+ // If any target processors are specified, then send a flush single
+ // packet to the target set of processors.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushTargetSingleTb,
+ (PVOID)Invalid,
+ (PVOID)Virtual,
+ NULL);
+
+ IPI_INSTRUMENT_COUNT(Prcb->Number, FlushSingleTb);
+ }
+
+
+ //
+ // Flush the specified entry from the TB on the current processor.
+ //
+
+ KiFlushSingleTb(Invalid, Virtual);
+
+ //
+ // Wait until all target processors have finished and complete packet.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Release the context swap lock.
+ //
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+ return(OldPteValue);
+}
+
+VOID
+KiFlushTargetSingleTb (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Invalid,
+ IN PVOID VirtualAddress,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing a single TB entry.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+        requested operation has been performed.
+
+    Invalid - Supplies a boolean value that determines whether the virtual
+        address is invalid.
+
+    VirtualAddress - Supplies a virtual address that is within the page whose
+        translation buffer entry is to be flushed.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush a single entry from the TB on the current processor.
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+ KiFlushSingleTb((BOOLEAN)Invalid, (PVOID)VirtualAddress);
+}
+
+#endif
diff --git a/private/ntos/ke/i386/gdtsup.c b/private/ntos/ke/i386/gdtsup.c
new file mode 100644
index 000000000..f4d6d12a1
--- /dev/null
+++ b/private/ntos/ke/i386/gdtsup.c
@@ -0,0 +1,174 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ gdtsup.c
+
+Abstract:
+
+ This module implements interfaces that support manipulation of i386 Gdts.
+ These entry points only exist on i386 machines.
+
+Author:
+
+ Dave Hastings (daveh) 28 May 1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,KeI386SetGdtSelector)
+#endif
+
+VOID
+Ke386GetGdtEntryThread(
+ IN PKTHREAD Thread,
+ IN ULONG Offset,
+ IN PKGDTENTRY Descriptor
+ )
+/*++
+
+Routine Description:
+
+ This routine returns the contents of an entry in the Gdt. If the
+ entry is thread specific, the entry for the specified thread is
+ created and returned (KGDT_LDT, and KGDT_R3_TEB). If the selector
+ is processor dependent, the entry for the current processor is
+ returned (KGDT_R0_PCR).
+
+Arguments:
+
+ Thread -- Supplies a pointer to the thread to return the entry for.
+
+ Offset -- Supplies the offset in the Gdt. This value must be 0
+ mod 8.
+
+ Descriptor -- Returns the contents of the Gdt descriptor
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PKGDTENTRY Gdt;
+ PKPROCESS Process;
+
+ //
+ // If the entry is out of range, don't return anything
+ //
+
+ if (Offset >= KGDT_NUMBER * sizeof(KGDTENTRY)) {
+ return ;
+ }
+
+ if (Offset == KGDT_LDT) {
+
+ //
+ // Materialize Ldt selector
+ //
+
+ Process = Thread->ApcState.Process;
+ RtlMoveMemory( Descriptor,
+ &(Process->LdtDescriptor),
+ sizeof(KGDTENTRY)
+ );
+
+ } else {
+
+ //
+        // Copy the descriptor from the Gdt
+ //
+ // N.B. We will change the base later, if it is KGDT_R3_TEB
+ //
+
+
+ Gdt = KiPcr()->GDT;
+
+ RtlMoveMemory(Descriptor, (PCHAR)Gdt + Offset, sizeof(KGDTENTRY));
+
+ //
+ // if it is the TEB selector, fix the base
+ //
+
+ if (Offset == KGDT_R3_TEB) {
+ Descriptor->BaseLow = (USHORT)((ULONG)(Thread->Teb) & 0xFFFF);
+ Descriptor->HighWord.Bytes.BaseMid =
+ (UCHAR) ( ( (ULONG)(Thread->Teb) & 0xFF0000L) >> 16);
+ Descriptor->HighWord.Bytes.BaseHi =
+ (CHAR) ( ( (ULONG)(Thread->Teb) & 0xFF000000L) >> 24);
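+
+            //
+            // For example (illustrative value): a Teb of 0x7FFDE000 yields
+            // BaseLow == 0xE000, BaseMid == 0xFD, and BaseHi == 0x7F.
+            //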
+ }
+ }
+
+ return ;
+}
+
+NTSTATUS
+KeI386SetGdtSelector (
+ ULONG Selector,
+ PKGDTENTRY GdtValue
+ )
+/*++
+
+Routine Description:
+
+    Sets a GDT entry returned via KeI386AllocateGdtSelectors to the supplied
+ GdtValue.
+
+Arguments:
+
+    Selector - Which GDT entry to set
+
+    GdtValue - Descriptor value to store into the GDT entry
+
+Return Value:
+
+ status code
+
+--*/
+{
+ KAFFINITY TargetSet;
+ PKPRCB Prcb;
+ PKPCR Pcr;
+ PKGDTENTRY GdtEntry;
+ ULONG GdtIndex, BitNumber;
+
+ PAGED_CODE ();
+
+ //
+    // Verify the selector passed is aligned and above the kernel GDT entries
+ //
+
+ GdtIndex = Selector >> 3;
+ if ((Selector & 0x7) != 0 || GdtIndex < KGDT_NUMBER) {
+ return STATUS_UNSUCCESSFUL;
+ }
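+
+    //
+    // For example, a selector of 0x0080 refers to GDT index 0x80 >> 3 == 16;
+    // any selector with its RPL or TI bits set (low three bits nonzero) is
+    // rejected above.
+    //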
+
+ //
+    // Set the gdt entry in each processor's GDT
+ //
+
+ TargetSet = KeActiveProcessors;
+ while (TargetSet != 0) {
+ BitNumber = KiFindFirstSetRightMember(TargetSet);
+ ClearMember(BitNumber, TargetSet);
+
+ Prcb = KiProcessorBlock[BitNumber];
+ Pcr = CONTAINING_RECORD (Prcb, KPCR, PrcbData);
+ GdtEntry = Pcr->GDT + GdtIndex;
+
+ // set it
+ *GdtEntry = *GdtValue;
+ }
+
+ return STATUS_SUCCESS;
+}
diff --git a/private/ntos/ke/i386/geni386.c b/private/ntos/ke/i386/geni386.c
new file mode 100644
index 000000000..cfefb3611
--- /dev/null
+++ b/private/ntos/ke/i386/geni386.c
@@ -0,0 +1,812 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+    geni386.c
+
+Abstract:
+
+ This module implements a program which generates structure offset
+ definitions for kernel structures that are accessed in assembly code.
+
+Author:
+
+ Bryan M. Willman (bryanwi) 16-Oct-90
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+
+#include "nturtl.h"
+#include "vdmntos.h"
+#include "abios.h"
+
+//
+// Define architecture specific generation macros.
+//
+
+#define genAlt(Name, Type, Member) \
+ p2(#Name, OFFSET(Type, Member))
+
+#define genCom(Comment) \
+ p1("\n"); \
+ p1(";\n"); \
+ p1("; " Comment "\n"); \
+ p1(";\n"); \
+ p1("\n")
+
+#define genDef(Prefix, Type, Member) \
+ p2(#Prefix #Member, OFFSET(Type, Member))
+
+#define genVal(Name, Value) \
+ p2(#Name, Value)
+
+#define genSpc() p1("\n");
+
+//
+// Define member offset computation macro.
+//
+
+#define OFFSET(type, field) ((LONG)(&((type *)0)->field))
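+
+//
+// For example, genDef(Pb, KPRCB, CurrentThread) expands to
+// p2("PbCurrentThread", OFFSET(KPRCB, CurrentThread)), which emits a line
+// of the form "PbCurrentThread equ <hex offset>" into the enabled include
+// files.
+//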
+
+FILE *OutKs386;
+FILE *OutHal386;
+
+ULONG OutputEnabled;
+
+#define KS386 0x01
+#define HAL386 0x02
+
+#define KERNEL KS386
+#define HAL HAL386
+
+//
+// p1 prints a single string.
+//
+
+VOID p1(PUCHAR outstring);
+
+//
+// p2 prints the first argument as a string, followed by " equ " and
+// the hexadecimal value of the second argument.
+//
+
+VOID p2(PUCHAR a, LONG b);
+
+//
+// p2a's first argument is the format string; the second argument is passed
+// to the printf-style output function.
+//
+
+VOID p2a(PUCHAR a, LONG b);
+
+//
+// EnableInc(a) - Enables output to go to the specified include file
+//
+
+#define EnableInc(a) OutputEnabled |= a;
+
+//
+// DisableInc(a) - Disables output to go to the specified include file
+//
+
+#define DisableInc(a) OutputEnabled &= ~a;
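+
+//
+// N.B. It is assumed (the definitions are later in this module) that p1, p2
+//      and p2a write to OutKs386 and/or OutHal386 according to which bits
+//      are currently set in OutputEnabled.
+//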
+
+int
+_CRTAPI1
+main(
+ int argc,
+ char *argv[]
+ )
+{
+ char *outName;
+
+ printf ("Sizeof DeviceObject %d\n", sizeof (DEVICE_OBJECT));
+ printf ("Sizeof DeviceObject Ext %d\n", sizeof (DEVOBJ_EXTENSION));
+
+ outName = argc >= 2 ? argv[1] : "\\nt\\public\\sdk\\inc\\ks386.inc";
+ OutKs386 = fopen(outName, "w" );
+ if (OutKs386 == NULL) {
+ fprintf(stderr, "GENi386: Could not create output file '%s'.\n", outName);
+ fprintf( stderr, "sizeof( EPROCESS ) == %04x\n", sizeof( EPROCESS ) );
+ fprintf( stderr, "Win32Process %08x\n",OFFSET(EPROCESS, Win32Process));
+ exit (1);
+ }
+
+ fprintf( stderr, "GENi386: Writing %s header file.\n", outName );
+
+ outName = argc >= 3 ? argv[2] : "\\nt\\private\\ntos\\inc\\hal386.inc";
+ OutHal386 = fopen( outName, "w" );
+ if (OutHal386 == NULL) {
+ fprintf(stderr, "GENi386: Could not create output file '%s'.\n", outName);
+ fprintf(stderr, "GENi386: Execution continuing. Hal results ignored '%s'.\n", outName);
+ }
+
+ fprintf( stderr, "GENi386: Writing %s header file.\n", outName );
+
+ fprintf( stderr, "sizeof( TEB ) == %04x %s\n", sizeof( TEB ), sizeof( TEB ) >= PAGE_SIZE ? "Warning, TEB too Large" : "" );
+ fprintf( stderr, "sizeof( PEB ) == %04x %s\n", sizeof( PEB ), sizeof( PEB ) >= PAGE_SIZE ? "Warning, PEB too Large" : "" );
+ fprintf( stderr, "sizeof( KTHREAD ) == %04x\n", sizeof( KTHREAD ) );
+ fprintf( stderr, "sizeof( ETHREAD ) == %04x\n", sizeof( ETHREAD ) );
+ fprintf( stderr, "sizeof( KPROCESS ) == %04x\n", sizeof( KPROCESS ) );
+ fprintf( stderr, "sizeof( EPROCESS ) == %04x\n", sizeof( EPROCESS ) );
+ fprintf( stderr, "sizeof( KEVENT ) == %04x\n", sizeof( KEVENT ) );
+ fprintf( stderr, "sizeof( KSEMAPHORE ) == %04x\n", sizeof( KSEMAPHORE ) );
+
+ EnableInc (KS386);
+
+ //
+ // Include architecture independent definitions.
+ //
+
+#include "..\genxx.inc"
+
+ //
+ // Generate architecture dependent definitions.
+ //
+
+ p1("\n");
+ p1("; \n");
+ p1("; Apc Record Structure Offset Definitions\n");
+ p1("; \n");
+ p1("\n");
+ p2("ArNormalRoutine", OFFSET(KAPC_RECORD, NormalRoutine));
+ p2("ArNormalContext", OFFSET(KAPC_RECORD, NormalContext));
+ p2("ArSystemArgument1", OFFSET(KAPC_RECORD, SystemArgument1));
+ p2("ArSystemArgument2", OFFSET(KAPC_RECORD, SystemArgument2));
+ p2("ApcRecordLength", sizeof(KAPC_RECORD));
+ p1("\n");
+
+ EnableInc(HAL386);
+ p1("\n");
+ p1("; \n");
+ p1("; Processor Control Registers Structure Offset Definitions\n");
+ p1("; \n");
+ p1("\n");
+ p2("KI_BEGIN_KERNEL_RESERVED", KI_BEGIN_KERNEL_RESERVED);
+ p1("ifdef NT_UP\n");
+ p2a(" P0PCRADDRESS equ 0%lXH\n", KIP0PCRADDRESS);
+ p2a(" PCR equ ds:[0%lXH]\n", KIP0PCRADDRESS);
+ p1("else\n");
+ p1(" PCR equ fs:\n");
+ p1("endif\n\n");
+ p2("PcExceptionList", OFFSET(KPCR, NtTib.ExceptionList));
+ p2("PcInitialStack", OFFSET(KPCR, NtTib.StackBase));
+ p2("PcStackLimit", OFFSET(KPCR, NtTib.StackLimit));
+ p2("PcSelfPcr", OFFSET(KPCR, SelfPcr));
+ p2("PcPrcb", OFFSET(KPCR, Prcb));
+ p2("PcTeb", OFFSET(KPCR, NtTib.Self));
+ p2("PcIrql", OFFSET(KPCR, Irql));
+ p2("PcIRR", OFFSET(KPCR, IRR));
+ p2("PcIrrActive", OFFSET(KPCR, IrrActive));
+ p2("PcIDR", OFFSET(KPCR, IDR));
+ p2("PcIdt", OFFSET(KPCR, IDT));
+ p2("PcGdt", OFFSET(KPCR, GDT));
+ p2("PcTss", OFFSET(KPCR, TSS));
+ p2("PcDebugActive", OFFSET(KPCR, DebugActive));
+ p2("PcNumber", OFFSET(KPCR, Number));
+ p2("PcVdmAlert", OFFSET(KPCR, VdmAlert));
+ p2("PcSetMember", OFFSET(KPCR, SetMember));
+ p2("PcStallScaleFactor", OFFSET(KPCR, StallScaleFactor));
+ p2("PcHal", OFFSET(KPCR, HalReserved));
+ p2("PcKernel", OFFSET(KPCR, KernelReserved));
+ DisableInc (HAL386);
+ p2("PcPrcbData", OFFSET(KPCR, PrcbData));
+ p2("ProcessorControlRegisterLength", sizeof(KPCR));
+ p2("TebPeb", OFFSET(TEB, ProcessEnvironmentBlock));
+ p2("PebBeingDebugged", OFFSET(PEB, BeingDebugged));
+ p2("PebKernelCallbackTable", OFFSET(PEB, KernelCallbackTable));
+
+ EnableInc (HAL386);
+ p1("\n");
+ p1(";\n");
+ p1("; Defines for user shared data\n");
+ p1(";\n");
+ p2("USER_SHARED_DATA", KI_USER_SHARED_DATA);
+ p2("MM_SHARED_USER_DATA_VA", MM_SHARED_USER_DATA_VA);
+ p2a("USERDATA equ ds:[0%lXH]\n", KI_USER_SHARED_DATA);
+ p2("UsTickCountLow", OFFSET(KUSER_SHARED_DATA, TickCountLow));
+ p2("UsTickCountMultiplier", OFFSET(KUSER_SHARED_DATA, TickCountMultiplier));
+ p2("UsInterruptTime", OFFSET(KUSER_SHARED_DATA, InterruptTime));
+ p2("UsSystemTime", OFFSET(KUSER_SHARED_DATA, SystemTime));
+
+ p1("\n");
+ p1(";\n");
+ p1("; Tss Structure Offset Definitions\n");
+ p1(";\n\n");
+ p2("TssEsp0", OFFSET(KTSS, Esp0));
+ p2("TssCR3", OFFSET(KTSS, CR3));
+ p2("TssIoMapBase", OFFSET(KTSS, IoMapBase));
+ p2("TssIoMaps", OFFSET(KTSS, IoMaps));
+ p2("TssLength", sizeof(KTSS));
+ p1("\n");
+ DisableInc (HAL386);
+
+ EnableInc (HAL386);
+ p1(";\n");
+ p1("; Gdt Descriptor Offset Definitions\n");
+ p1(";\n\n");
+ p2("KGDT_R3_DATA", KGDT_R3_DATA);
+ p2("KGDT_R3_CODE", KGDT_R3_CODE);
+ p2("KGDT_R0_CODE", KGDT_R0_CODE);
+ p2("KGDT_R0_DATA", KGDT_R0_DATA);
+ p2("KGDT_R0_PCR", KGDT_R0_PCR);
+ p2("KGDT_STACK16", KGDT_STACK16);
+ p2("KGDT_CODE16", KGDT_CODE16);
+ p2("KGDT_TSS", KGDT_TSS);
+ DisableInc (HAL386);
+ p2("KGDT_R3_TEB", KGDT_R3_TEB);
+ p2("KGDT_DF_TSS", KGDT_DF_TSS);
+ p2("KGDT_NMI_TSS", KGDT_NMI_TSS);
+ p2("KGDT_LDT", KGDT_LDT);
+ p1("\n");
+
+ EnableInc (HAL386);
+ p1(";\n");
+ p1("; GdtEntry Offset Definitions\n");
+ p1(";\n\n");
+ p2("KgdtBaseLow", OFFSET(KGDTENTRY, BaseLow));
+ p2("KgdtBaseMid", OFFSET(KGDTENTRY, HighWord.Bytes.BaseMid));
+ p2("KgdtBaseHi", OFFSET(KGDTENTRY, HighWord.Bytes.BaseHi));
+ p2("KgdtLimitHi", OFFSET(KGDTENTRY, HighWord.Bytes.Flags2));
+ p2("KgdtLimitLow", OFFSET(KGDTENTRY, LimitLow));
+ p1("\n");
+
+ //
+ // Processor block structure definitions.
+ //
+
+ genCom("Processor Block Structure Offset Definitions");
+
+ genDef(Pb, KPRCB, CurrentThread);
+ genDef(Pb, KPRCB, NextThread);
+ genDef(Pb, KPRCB, IdleThread);
+ genDef(Pb, KPRCB, Number);
+ genDef(Pb, KPRCB, SetMember);
+ genDef(Pb, KPRCB, CpuID);
+ genDef(Pb, KPRCB, CpuType);
+ genDef(Pb, KPRCB, CpuStep);
+ genDef(Pb, KPRCB, HalReserved);
+ genDef(Pb, KPRCB, ProcessorState);
+
+ DisableInc (HAL386);
+
+ genDef(Pb, KPRCB, NpxThread);
+ genDef(Pb, KPRCB, InterruptCount);
+ genDef(Pb, KPRCB, KernelTime);
+ genDef(Pb, KPRCB, UserTime);
+ genDef(Pb, KPRCB, DpcTime);
+ genDef(Pb, KPRCB, InterruptTime);
+ genDef(Pb, KPRCB, ApcBypassCount);
+ genDef(Pb, KPRCB, DpcBypassCount);
+ genDef(Pb, KPRCB, AdjustDpcThreshold);
+ genDef(Pb, KPRCB, ThreadStartCount);
+ genAlt(PbAlignmentFixupCount, KPRCB, KeAlignmentFixupCount);
+ genAlt(PbContextSwitches, KPRCB, KeContextSwitches);
+ genAlt(PbDcacheFlushCount, KPRCB, KeDcacheFlushCount);
+ genAlt(PbExceptionDispatchCount, KPRCB, KeExceptionDispatchCount);
+ genAlt(PbFirstLevelTbFills, KPRCB, KeFirstLevelTbFills);
+ genAlt(PbFloatingEmulationCount, KPRCB, KeFloatingEmulationCount);
+ genAlt(PbIcacheFlushCount, KPRCB, KeIcacheFlushCount);
+ genAlt(PbSecondLevelTbFills, KPRCB, KeSecondLevelTbFills);
+ genAlt(PbSystemCalls, KPRCB, KeSystemCalls);
+ genDef(Pb, KPRCB, CurrentPacket);
+ genDef(Pb, KPRCB, TargetSet);
+ genDef(Pb, KPRCB, WorkerRoutine);
+ genDef(Pb, KPRCB, IpiFrozen);
+ genDef(Pb, KPRCB, RequestSummary);
+ genDef(Pb, KPRCB, SignalDone);
+ genDef(Pb, KPRCB, IpiFrame);
+ genDef(Pb, KPRCB, DpcInterruptRequested);
+ genDef(Pb, KPRCB, MaximumDpcQueueDepth);
+ genDef(Pb, KPRCB, MinimumDpcRate);
+ genDef(Pb, KPRCB, DpcListHead);
+ genDef(Pb, KPRCB, DpcQueueDepth);
+ genDef(Pb, KPRCB, DpcRoutineActive);
+ genDef(Pb, KPRCB, DpcCount);
+ genDef(Pb, KPRCB, DpcLastCount);
+ genDef(Pb, KPRCB, DpcRequestRate);
+ genDef(Pb, KPRCB, DpcLock);
+ genDef(Pb, KPRCB, SkipTick);
+ genDef(Pb, KPRCB, QuantumEnd);
+ genVal(ProcessorBlockLength, ((sizeof(KPRCB) + 15) & ~15));
+
+ //
+ // Interprocessor command definitions.
+ //
+
+ genCom("Immediate Interprocessor Command Definitions");
+
+ genVal(IPI_APC, IPI_APC);
+ genVal(IPI_DPC, IPI_DPC);
+ genVal(IPI_FREEZE, IPI_FREEZE);
+ genVal(IPI_PACKET_READY, IPI_PACKET_READY);
+
+ p1("; \n");
+ p1("; Thread Environment Block Structure Offset Definitions\n");
+ p1("; \n");
+ p1("\n");
+
+ p2("TbExceptionList", OFFSET(TEB, NtTib.ExceptionList));
+ p2("TbStackBase", OFFSET(TEB, NtTib.StackBase));
+ p2("TbStackLimit", OFFSET(TEB, NtTib.StackLimit));
+ p2("TbEnvironmentPointer", OFFSET(TEB, EnvironmentPointer));
+ p2("TbVersion", OFFSET(TEB, NtTib.Version));
+ p2("TbFiberData", OFFSET(TEB, NtTib.FiberData));
+ p2("TbArbitraryUserPointer", OFFSET(TEB, NtTib.ArbitraryUserPointer));
+ p2("TbClientId", OFFSET(TEB, ClientId));
+ p2("TbThreadLocalStoragePointer", OFFSET(TEB,
+ ThreadLocalStoragePointer));
+ p2("TbCountOfOwnedCriticalSections", OFFSET(TEB, CountOfOwnedCriticalSections));
+ p2("TbSystemReserved1", OFFSET(TEB, SystemReserved1));
+ p2("TbSystemReserved2", OFFSET(TEB, SystemReserved2));
+ p2("TbVdm", OFFSET(TEB, Vdm));
+ p2("TbCsrClientThread", OFFSET(TEB, CsrClientThread));
+ p2("TbGdiClientPID", OFFSET(TEB, GdiClientPID));
+ p2("TbGdiClientTID", OFFSET(TEB, GdiClientTID));
+ p2("TbGdiThreadLocalInfo", OFFSET(TEB, GdiThreadLocalInfo));
+ p2("TbglDispatchTable", OFFSET(TEB, glDispatchTable));
+ p2("TbglSectionInfo", OFFSET(TEB, glSectionInfo));
+ p2("TbglSection", OFFSET(TEB, glSection));
+ p2("TbglTable", OFFSET(TEB, glTable));
+ p2("TbglCurrentRC", OFFSET(TEB, glCurrentRC));
+ p2("TbglContext", OFFSET(TEB, glContext));
+ p2("TbWin32ClientInfo", OFFSET(TEB, Win32ClientInfo));
+ p2("TbWOW32Reserved", OFFSET(TEB, WOW32Reserved));
+ p2("TbWin32ThreadInfo", OFFSET(TEB, Win32ThreadInfo));
+ p2("TbSpare1", OFFSET(TEB, Spare1));
+ p2("TbExceptionCode", OFFSET(TEB, ExceptionCode));
+ p2("TbDeallocationStack", OFFSET(TEB, DeallocationStack));
+ p2("TbGdiBatchCount", OFFSET(TEB, GdiBatchCount));
+
+ EnableInc (HAL386);
+ p1(";\n");
+ p1(";\n");
+ p1("; Time Fields (TIME_FIELDS) Structure Offset Definitions\n");
+ p1(";\n\n");
+ p2("TfSecond", OFFSET(TIME_FIELDS, Second));
+ p2("TfMinute", OFFSET(TIME_FIELDS, Minute));
+ p2("TfHour", OFFSET(TIME_FIELDS, Hour));
+ p2("TfWeekday", OFFSET(TIME_FIELDS, Weekday));
+ p2("TfDay", OFFSET(TIME_FIELDS, Day));
+ p2("TfMonth", OFFSET(TIME_FIELDS, Month));
+ p2("TfYear", OFFSET(TIME_FIELDS, Year));
+ p2("TfMilliseconds", OFFSET(TIME_FIELDS, Milliseconds));
+ p1("\n");
+ DisableInc (HAL386);
+
+ EnableInc (HAL386);
+ p1("; \n");
+ p1("; constants for system irql and IDT vector conversion\n");
+ p1("; \n");
+ p1("\n");
+ p2("MAXIMUM_IDTVECTOR", MAXIMUM_IDTVECTOR);
+ p2("MAXIMUM_PRIMARY_VECTOR", MAXIMUM_PRIMARY_VECTOR);
+ p2("PRIMARY_VECTOR_BASE", PRIMARY_VECTOR_BASE);
+ p2("RPL_MASK", RPL_MASK);
+ p2("MODE_MASK", MODE_MASK);
+ p1("\n");
+ p1("; \n");
+ p1("; Flags in the CR0 register\n");
+ p1("; \n");
+ p1("\n");
+ p2("CR0_PG", CR0_PG);
+ p2("CR0_ET", CR0_ET);
+ p2("CR0_TS", CR0_TS);
+ p2("CR0_EM", CR0_EM);
+ p2("CR0_MP", CR0_MP);
+ p2("CR0_PE", CR0_PE);
+ p2("CR0_CD", CR0_CD);
+ p2("CR0_NW", CR0_NW);
+ p2("CR0_AM", CR0_AM);
+ p2("CR0_WP", CR0_WP);
+ p2("CR0_NE", CR0_NE);
+ p1("\n");
+ p1("; \n");
+ p1("; Flags in the CR4 register\n");
+ p1("; \n");
+ p1("\n");
+ p2("CR4_VME", CR4_VME);
+ p2("CR4_PVI", CR4_PVI);
+ p2("CR4_TSD", CR4_TSD);
+ p2("CR4_DE", CR4_DE);
+ p2("CR4_PSE", CR4_PSE);
+ p2("CR4_PAE", CR4_PAE);
+ p2("CR4_MCE", CR4_MCE);
+ p2("CR4_PGE", CR4_PGE);
+ p1("; \n");
+ p1("; Miscellaneous Definitions\n");
+ p1("; \n");
+ p1("\n");
+ p2("MAXIMUM_PROCESSORS", MAXIMUM_PROCESSORS);
+ p2("INITIAL_STALL_COUNT", INITIAL_STALL_COUNT);
+ p2("IRQL_NOT_GREATER_OR_EQUAL", IRQL_NOT_GREATER_OR_EQUAL);
+ p2("IRQL_NOT_LESS_OR_EQUAL", IRQL_NOT_LESS_OR_EQUAL);
+ DisableInc (HAL386);
+ p2("BASE_PRIORITY_THRESHOLD", BASE_PRIORITY_THRESHOLD);
+ p2("EVENT_PAIR_INCREMENT", EVENT_PAIR_INCREMENT);
+ p2("LOW_REALTIME_PRIORITY", LOW_REALTIME_PRIORITY);
+ p2("BlackHole", 0xffffa000);
+ p2("KERNEL_LARGE_STACK_COMMIT", KERNEL_LARGE_STACK_COMMIT);
+ p2("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE);
+ p2("DOUBLE_FAULT_STACK_SIZE", DOUBLE_FAULT_STACK_SIZE);
+ p2("EFLAG_SELECT", EFLAG_SELECT);
+ p2("BREAKPOINT_BREAK ", BREAKPOINT_BREAK);
+ p2("IPI_FREEZE", IPI_FREEZE);
+ p2("CLOCK_QUANTUM_DECREMENT", CLOCK_QUANTUM_DECREMENT);
+ p2("READY_SKIP_QUANTUM", READY_SKIP_QUANTUM);
+ p2("THREAD_QUANTUM", THREAD_QUANTUM);
+ p2("WAIT_QUANTUM_DECREMENT", WAIT_QUANTUM_DECREMENT);
+ p2("ROUND_TRIP_DECREMENT_COUNT", ROUND_TRIP_DECREMENT_COUNT);
+
+ //
+ // Print trap frame offsets relative to sp.
+ //
+
+ EnableInc (HAL386);
+ p1("\n");
+ p1("; \n");
+ p1("; Trap Frame Offset Definitions and Length\n");
+ p1("; \n");
+ p1("\n");
+
+ p2("TsExceptionList", OFFSET(KTRAP_FRAME, ExceptionList));
+ p2("TsPreviousPreviousMode", OFFSET(KTRAP_FRAME, PreviousPreviousMode));
+ p2("TsSegGs", OFFSET(KTRAP_FRAME, SegGs));
+ p2("TsSegFs", OFFSET(KTRAP_FRAME, SegFs));
+ p2("TsSegEs", OFFSET(KTRAP_FRAME, SegEs));
+ p2("TsSegDs", OFFSET(KTRAP_FRAME, SegDs));
+ p2("TsEdi", OFFSET(KTRAP_FRAME, Edi));
+ p2("TsEsi", OFFSET(KTRAP_FRAME, Esi));
+ p2("TsEbp", OFFSET(KTRAP_FRAME, Ebp));
+ p2("TsEbx", OFFSET(KTRAP_FRAME, Ebx));
+ p2("TsEdx", OFFSET(KTRAP_FRAME, Edx));
+ p2("TsEcx", OFFSET(KTRAP_FRAME, Ecx));
+ p2("TsEax", OFFSET(KTRAP_FRAME, Eax));
+ p2("TsErrCode", OFFSET(KTRAP_FRAME, ErrCode));
+ p2("TsEip", OFFSET(KTRAP_FRAME, Eip));
+ p2("TsSegCs", OFFSET(KTRAP_FRAME, SegCs));
+ p2("TsEflags", OFFSET(KTRAP_FRAME, EFlags));
+ p2("TsHardwareEsp", OFFSET(KTRAP_FRAME, HardwareEsp));
+ p2("TsHardwareSegSs", OFFSET(KTRAP_FRAME, HardwareSegSs));
+ p2("TsTempSegCs", OFFSET(KTRAP_FRAME, TempSegCs));
+ p2("TsTempEsp", OFFSET(KTRAP_FRAME, TempEsp));
+ p2("TsDbgEbp", OFFSET(KTRAP_FRAME, DbgEbp));
+ p2("TsDbgEip", OFFSET(KTRAP_FRAME, DbgEip));
+ p2("TsDbgArgMark", OFFSET(KTRAP_FRAME, DbgArgMark));
+ p2("TsDbgArgPointer", OFFSET(KTRAP_FRAME, DbgArgPointer));
+ p2("TsDr0", OFFSET(KTRAP_FRAME, Dr0));
+ p2("TsDr1", OFFSET(KTRAP_FRAME, Dr1));
+ p2("TsDr2", OFFSET(KTRAP_FRAME, Dr2));
+ p2("TsDr3", OFFSET(KTRAP_FRAME, Dr3));
+ p2("TsDr6", OFFSET(KTRAP_FRAME, Dr6));
+ p2("TsDr7", OFFSET(KTRAP_FRAME, Dr7));
+ p2("TsV86Es", OFFSET(KTRAP_FRAME, V86Es));
+ p2("TsV86Ds", OFFSET(KTRAP_FRAME, V86Ds));
+ p2("TsV86Fs", OFFSET(KTRAP_FRAME, V86Fs));
+ p2("TsV86Gs", OFFSET(KTRAP_FRAME, V86Gs));
+ p2("KTRAP_FRAME_LENGTH", KTRAP_FRAME_LENGTH);
+ p2("KTRAP_FRAME_ALIGN", KTRAP_FRAME_ALIGN);
+ p2("FRAME_EDITED", FRAME_EDITED);
+ p2("EFLAGS_ALIGN_CHECK", EFLAGS_ALIGN_CHECK);
+ p2("EFLAGS_V86_MASK", EFLAGS_V86_MASK);
+ p2("EFLAGS_INTERRUPT_MASK", EFLAGS_INTERRUPT_MASK);
+ p2("EFLAGS_VIF", EFLAGS_VIF);
+ p2("EFLAGS_VIP", EFLAGS_VIP);
+ p2("EFLAGS_USER_SANITIZE", EFLAGS_USER_SANITIZE);
+ p1("\n");
+
+
+ p1(";\n");
+ p1("; Context Frame Offset and Flag Definitions\n");
+ p1(";\n");
+ p1("\n");
+ p2("CONTEXT_FULL", CONTEXT_FULL);
+ p2("CONTEXT_DEBUG_REGISTERS", CONTEXT_DEBUG_REGISTERS);
+ p2("CONTEXT_CONTROL", CONTEXT_CONTROL);
+ p2("CONTEXT_FLOATING_POINT", CONTEXT_FLOATING_POINT);
+ p2("CONTEXT_INTEGER", CONTEXT_INTEGER);
+ p2("CONTEXT_SEGMENTS", CONTEXT_SEGMENTS);
+ p1("\n");
+
+ //
+ // Print context frame offsets relative to sp.
+ //
+
+ p2("CsContextFlags", OFFSET(CONTEXT, ContextFlags));
+ p2("CsFloatSave", OFFSET(CONTEXT, FloatSave));
+ p2("CsSegGs", OFFSET(CONTEXT, SegGs));
+ p2("CsSegFs", OFFSET(CONTEXT, SegFs));
+ p2("CsSegEs", OFFSET(CONTEXT, SegEs));
+ p2("CsSegDs", OFFSET(CONTEXT, SegDs));
+ p2("CsEdi", OFFSET(CONTEXT, Edi));
+ p2("CsEsi", OFFSET(CONTEXT, Esi));
+ p2("CsEbp", OFFSET(CONTEXT, Ebp));
+ p2("CsEbx", OFFSET(CONTEXT, Ebx));
+ p2("CsEdx", OFFSET(CONTEXT, Edx));
+ p2("CsEcx", OFFSET(CONTEXT, Ecx));
+ p2("CsEax", OFFSET(CONTEXT, Eax));
+ p2("CsEip", OFFSET(CONTEXT, Eip));
+ p2("CsSegCs", OFFSET(CONTEXT, SegCs));
+ p2("CsEflags", OFFSET(CONTEXT, EFlags));
+ p2("CsEsp", OFFSET(CONTEXT, Esp));
+ p2("CsSegSs", OFFSET(CONTEXT, SegSs));
+ p2("CsDr0", OFFSET(CONTEXT, Dr0));
+ p2("CsDr1", OFFSET(CONTEXT, Dr1));
+ p2("CsDr2", OFFSET(CONTEXT, Dr2));
+ p2("CsDr3", OFFSET(CONTEXT, Dr3));
+ p2("CsDr6", OFFSET(CONTEXT, Dr6));
+ p2("CsDr7", OFFSET(CONTEXT, Dr7));
+ p2("ContextFrameLength", (sizeof(CONTEXT) + 15) & (~15));
+ p2("DR6_LEGAL", DR6_LEGAL);
+ p2("DR7_LEGAL", DR7_LEGAL);
+ p2("DR7_ACTIVE", DR7_ACTIVE);
+
+ //
+ // Print Registration Record Offsets relative to base
+ //
+
+ p2("ErrHandler",
+ OFFSET(EXCEPTION_REGISTRATION_RECORD, Handler));
+ p2("ErrNext",
+ OFFSET(EXCEPTION_REGISTRATION_RECORD, Next));
+ p1("\n");
+
+ //
+ // Print floating point field offsets relative to Context.FloatSave
+ //
+
+ p1(";\n");
+ p1("; Floating save area field offset definitions\n");
+ p1(";\n");
+ p2("FpControlWord ", OFFSET(FLOATING_SAVE_AREA, ControlWord));
+ p2("FpStatusWord ", OFFSET(FLOATING_SAVE_AREA, StatusWord));
+ p2("FpTagWord ", OFFSET(FLOATING_SAVE_AREA, TagWord));
+ p2("FpErrorOffset ", OFFSET(FLOATING_SAVE_AREA, ErrorOffset));
+ p2("FpErrorSelector", OFFSET(FLOATING_SAVE_AREA, ErrorSelector));
+ p2("FpDataOffset ", OFFSET(FLOATING_SAVE_AREA, DataOffset));
+ p2("FpDataSelector ", OFFSET(FLOATING_SAVE_AREA, DataSelector));
+ p2("FpRegisterArea ", OFFSET(FLOATING_SAVE_AREA, RegisterArea));
+ p2("FpCr0NpxState ", OFFSET(FLOATING_SAVE_AREA, Cr0NpxState));
+
+ p1("\n");
+ p2("NPX_FRAME_LENGTH", sizeof(FLOATING_SAVE_AREA));
+
+ //
+ // Processor State Frame offsets relative to base
+ //
+
+ p1(";\n");
+ p1("; Processor State Frame Offset Definitions\n");
+ p1(";\n");
+ p1("\n");
+ p2("PsContextFrame",
+ OFFSET(KPROCESSOR_STATE, ContextFrame));
+ p2("PsSpecialRegisters",
+ OFFSET(KPROCESSOR_STATE, SpecialRegisters));
+ p2("SrCr0", OFFSET(KSPECIAL_REGISTERS, Cr0));
+ p2("SrCr2", OFFSET(KSPECIAL_REGISTERS, Cr2));
+ p2("SrCr3", OFFSET(KSPECIAL_REGISTERS, Cr3));
+ p2("SrCr4", OFFSET(KSPECIAL_REGISTERS, Cr4));
+ p2("SrKernelDr0", OFFSET(KSPECIAL_REGISTERS, KernelDr0));
+ p2("SrKernelDr1", OFFSET(KSPECIAL_REGISTERS, KernelDr1));
+ p2("SrKernelDr2", OFFSET(KSPECIAL_REGISTERS, KernelDr2));
+ p2("SrKernelDr3", OFFSET(KSPECIAL_REGISTERS, KernelDr3));
+ p2("SrKernelDr6", OFFSET(KSPECIAL_REGISTERS, KernelDr6));
+ p2("SrKernelDr7", OFFSET(KSPECIAL_REGISTERS, KernelDr7));
+ p2("SrGdtr", OFFSET(KSPECIAL_REGISTERS, Gdtr.Limit));
+
+ p2("SrIdtr", OFFSET(KSPECIAL_REGISTERS, Idtr.Limit));
+ p2("SrTr", OFFSET(KSPECIAL_REGISTERS, Tr));
+ p2("SrLdtr", OFFSET(KSPECIAL_REGISTERS, Ldtr));
+ p2("ProcessorStateLength", ((sizeof(KPROCESSOR_STATE) + 15) & ~15));
+ DisableInc (HAL386);
+
+ //
+ // E Process fields relative to base
+ //
+
+ p1(";\n");
+ p1("; EPROCESS\n");
+ p1(";\n");
+ p1("\n");
+ p2("EpDebugPort",
+ OFFSET(EPROCESS, DebugPort));
+
+ //
+ // E Resource fields relative to base
+ //
+
+ p1("\n");
+ p1(";\n");
+ p1("; NTDDK Resource\n");
+ p1(";\n");
+ p1("\n");
+ p2("RsOwnerThreads", OFFSET(NTDDK_ERESOURCE, OwnerThreads));
+ p2("RsOwnerCounts", OFFSET(NTDDK_ERESOURCE, OwnerCounts));
+ p2("RsTableSize", OFFSET(NTDDK_ERESOURCE, TableSize));
+ p2("RsActiveCount", OFFSET(NTDDK_ERESOURCE, ActiveCount));
+ p2("RsFlag", OFFSET(NTDDK_ERESOURCE, Flag));
+ p2("RsInitialOwnerThreads", OFFSET(NTDDK_ERESOURCE, InitialOwnerThreads));
+ p2("RsOwnedExclusive", ResourceOwnedExclusive);
+
+ //
+ // Define machine type (temporarily)
+ //
+
+ EnableInc (HAL386);
+ p1(";\n");
+ p1("; Machine type definitions (Temporarily)\n");
+ p1(";\n");
+ p1("\n");
+ p2("MACHINE_TYPE_ISA", MACHINE_TYPE_ISA);
+ p2("MACHINE_TYPE_EISA", MACHINE_TYPE_EISA);
+ p2("MACHINE_TYPE_MCA", MACHINE_TYPE_MCA);
+
+ DisableInc (HAL386);
+ p1(";\n");
+ p1("; KeFeatureBits defines\n");
+ p1(";\n");
+ p1("\n");
+ p2("KF_V86_VIS", KF_V86_VIS);
+ p2("KF_RDTSC", KF_RDTSC);
+ p2("KF_CR4", KF_CR4);
+ p2("KF_GLOBAL_PAGE", KF_GLOBAL_PAGE);
+ p2("KF_LARGE_PAGE", KF_LARGE_PAGE);
+ p2("KF_CMPXCHG8B", KF_CMPXCHG8B);
+
+ EnableInc (HAL386);
+ p1(";\n");
+ p1("; LoaderParameterBlock offsets relative to base\n");
+ p1(";\n");
+ p1("\n");
+ p2("LpbLoadOrderListHead",OFFSET(LOADER_PARAMETER_BLOCK,LoadOrderListHead));
+ p2("LpbMemoryDescriptorListHead",OFFSET(LOADER_PARAMETER_BLOCK,MemoryDescriptorListHead));
+ p2("LpbKernelStack",OFFSET(LOADER_PARAMETER_BLOCK,KernelStack));
+ p2("LpbPrcb",OFFSET(LOADER_PARAMETER_BLOCK,Prcb));
+ p2("LpbProcess",OFFSET(LOADER_PARAMETER_BLOCK,Process));
+ p2("LpbThread",OFFSET(LOADER_PARAMETER_BLOCK,Thread));
+ p2("LpbI386",OFFSET(LOADER_PARAMETER_BLOCK,u.I386));
+ p2("LpbRegistryLength",OFFSET(LOADER_PARAMETER_BLOCK,RegistryLength));
+ p2("LpbRegistryBase",OFFSET(LOADER_PARAMETER_BLOCK,RegistryBase));
+ p2("LpbConfigurationRoot",OFFSET(LOADER_PARAMETER_BLOCK,ConfigurationRoot));
+ p2("LpbArcBootDeviceName",OFFSET(LOADER_PARAMETER_BLOCK,ArcBootDeviceName));
+ p2("LpbArcHalDeviceName",OFFSET(LOADER_PARAMETER_BLOCK,ArcHalDeviceName));
+ DisableInc (HAL386);
+
+ p2("PAGE_SIZE",PAGE_SIZE);
+
+ //
+ // Define the VDM instruction emulation count indexes
+ //
+
+ p1("\n");
+ p1(";\n");
+ p1("; VDM equates.\n");
+ p1(";\n");
+ p1("\n");
+ p2("VDM_INDEX_Invalid", VDM_INDEX_Invalid);
+ p2("VDM_INDEX_0F", VDM_INDEX_0F);
+ p2("VDM_INDEX_ESPrefix", VDM_INDEX_ESPrefix);
+ p2("VDM_INDEX_CSPrefix", VDM_INDEX_CSPrefix);
+ p2("VDM_INDEX_SSPrefix", VDM_INDEX_SSPrefix);
+ p2("VDM_INDEX_DSPrefix", VDM_INDEX_DSPrefix);
+ p2("VDM_INDEX_FSPrefix", VDM_INDEX_FSPrefix);
+ p2("VDM_INDEX_GSPrefix", VDM_INDEX_GSPrefix);
+ p2("VDM_INDEX_OPER32Prefix", VDM_INDEX_OPER32Prefix);
+ p2("VDM_INDEX_ADDR32Prefix", VDM_INDEX_ADDR32Prefix);
+ p2("VDM_INDEX_INSB", VDM_INDEX_INSB);
+ p2("VDM_INDEX_INSW", VDM_INDEX_INSW);
+ p2("VDM_INDEX_OUTSB", VDM_INDEX_OUTSB);
+ p2("VDM_INDEX_OUTSW", VDM_INDEX_OUTSW);
+ p2("VDM_INDEX_PUSHF", VDM_INDEX_PUSHF);
+ p2("VDM_INDEX_POPF", VDM_INDEX_POPF);
+ p2("VDM_INDEX_INTnn", VDM_INDEX_INTnn);
+ p2("VDM_INDEX_INTO", VDM_INDEX_INTO);
+ p2("VDM_INDEX_IRET", VDM_INDEX_IRET);
+ p2("VDM_INDEX_NPX", VDM_INDEX_NPX);
+ p2("VDM_INDEX_INBimm", VDM_INDEX_INBimm);
+ p2("VDM_INDEX_INWimm", VDM_INDEX_INWimm);
+ p2("VDM_INDEX_OUTBimm", VDM_INDEX_OUTBimm);
+ p2("VDM_INDEX_OUTWimm", VDM_INDEX_OUTWimm);
+ p2("VDM_INDEX_INB", VDM_INDEX_INB);
+ p2("VDM_INDEX_INW", VDM_INDEX_INW);
+ p2("VDM_INDEX_OUTB", VDM_INDEX_OUTB);
+ p2("VDM_INDEX_OUTW", VDM_INDEX_OUTW);
+ p2("VDM_INDEX_LOCKPrefix", VDM_INDEX_LOCKPrefix);
+ p2("VDM_INDEX_REPNEPrefix", VDM_INDEX_REPNEPrefix);
+ p2("VDM_INDEX_REPPrefix", VDM_INDEX_REPPrefix);
+ p2("VDM_INDEX_CLI", VDM_INDEX_CLI);
+ p2("VDM_INDEX_STI", VDM_INDEX_STI);
+ p2("VDM_INDEX_HLT", VDM_INDEX_HLT);
+ p2("MAX_VDM_INDEX", MAX_VDM_INDEX);
+
+ //
+ // Vdm feature bits
+ //
+
+ p1("\n");
+ p1(";\n");
+ p1("; VDM feature bits.\n");
+ p1(";\n");
+ p1("\n");
+ p2("V86_VIRTUAL_INT_EXTENSIONS",V86_VIRTUAL_INT_EXTENSIONS);
+ p2("PM_VIRTUAL_INT_EXTENSIONS",PM_VIRTUAL_INT_EXTENSIONS);
+
+ //
+ // Selector type
+ //
+ p1("\n");
+ p1(";\n");
+ p1("; Selector types.\n");
+ p1(";\n");
+ p1("\n");
+ p2("SEL_TYPE_NP",SEL_TYPE_NP);
+
+ //
+ // Usermode callout frame
+ //
+ DisableInc (HAL386);
+ genCom("Usermode callout frame definitions");
+ p2("CuInStk", OFFSET(KCALLOUT_FRAME, InStk));
+ p2("CuTrFr", OFFSET(KCALLOUT_FRAME, TrFr));
+ p2("CuCbStk", OFFSET(KCALLOUT_FRAME, CbStk));
+ p2("CuEdi", OFFSET(KCALLOUT_FRAME, Edi));
+ p2("CuEsi", OFFSET(KCALLOUT_FRAME, Esi));
+ p2("CuEbx", OFFSET(KCALLOUT_FRAME, Ebx));
+ p2("CuEbp", OFFSET(KCALLOUT_FRAME, Ebp));
+ p2("CuRet", OFFSET(KCALLOUT_FRAME, Ret));
+ p2("CuOutBf", OFFSET(KCALLOUT_FRAME, OutBf));
+ p2("CuOutLn", OFFSET(KCALLOUT_FRAME, OutLn));
+ EnableInc (HAL386);
+
+ return 0;
+}
+
+
+VOID
+p1 (PUCHAR a)
+{
+ if (OutputEnabled & KS386) {
+ fprintf(OutKs386,a);
+ }
+
+ if (OutputEnabled & HAL386) {
+ if ( OutHal386 ) {
+ fprintf(OutHal386,a);
+ }
+ }
+}
+
+VOID
+p2 (PUCHAR a, LONG b)
+{
+ if (OutputEnabled & KS386) {
+ fprintf(OutKs386, "%s equ 0%lXH\n", a, b);
+ }
+
+ if (OutputEnabled & HAL386) {
+ if ( OutHal386 ) {
+ fprintf(OutHal386, "%s equ 0%lXH\n", a, b);
+ }
+ }
+}
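+
+//
+// For illustration only (not part of the original source): a call such as
+//
+//     p2("TsEip", OFFSET(KTRAP_FRAME, Eip));
+//
+// emits a MASM equate of the form
+//
+//     TsEip equ 068H
+//
+// into ks386.inc (and into the HAL include file when HAL386 output is
+// enabled). The value 068H is hypothetical; the real value depends on the
+// KTRAP_FRAME layout of the build.
+//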
+
+VOID
+p2a (PUCHAR b, LONG c)
+{
+ if (OutputEnabled & KS386) {
+ fprintf(OutKs386, b, c);
+ }
+
+ if (OutputEnabled & HAL386) {
+ if ( OutHal386 ) {
+ fprintf(OutHal386, b, c);
+ }
+ }
+}
diff --git a/private/ntos/ke/i386/i386init.c b/private/ntos/ke/i386/i386init.c
new file mode 100644
index 000000000..69bcce12b
--- /dev/null
+++ b/private/ntos/ke/i386/i386init.c
@@ -0,0 +1,223 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ i386init.c
+
+Abstract:
+
+ This module contains code to manipulate i386 hardware structures used
+ only by the kernel.
+
+Author:
+
+ Bryan Willman 22 Feb 90
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiInitializeMachineType (
+ VOID
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,KiInitializeGDT)
+#pragma alloc_text(INIT,KiInitializeGdtEntry)
+#pragma alloc_text(INIT,KiInitializeMachineType)
+#endif
+
+
+KIRQL KiProfileIrql = PROFILE_LEVEL;
+ULONG KeI386MachineType = 0;
+BOOLEAN KeI386NpxPresent;
+ULONG KeI386ForceNpxEmulation;
+ULONG KeI386CpuType;
+ULONG KeI386CpuStep;
+PVOID Ki387RoundModeTable; // R3 emulators RoundingMode vector table
+ULONG KiBootFeatureBits;
+
+#if DBG
+UCHAR MsgDpcTrashedEsp[] = "\n*** DPC routine %lx trashed ESP\n";
+UCHAR MsgDpcTimeout[] = "\n*** DPC routine %lx > 1sec (%lx interrupts, %lx isr time)\n";
+UCHAR MsgISRTimeout[] = "\n*** ISR at %lx took over .5 second\n";
+UCHAR MsgISROverflow[] = "\n*** ISR at %lx - %d interrupts per .5 second\n";
+
+ULONG KiDPCTimeout = 110;
+ULONG KiISRTimeout = 55;
+ULONG KiISROverflow = 5500;
+ULONG KiSpinlockTimeout = 55;
+#endif
+
+
+
+VOID
+KiInitializeGDT (
+ IN OUT PKGDTENTRY Gdt,
+ IN USHORT GdtLimit,
+ IN PKPCR Pcr,
+ IN USHORT PcrLimit,
+ IN PKTSS Tss,
+ IN USHORT TssLimit,
+ IN USHORT TebLimit
+ )
+
+/*++
+
+Routine Description:
+
+ This procedure initializes a GDT. It will set standard values
+ for all descriptors.
+
+ It will not set PCR->GDT.
+
+ It will set the PCR address in KGDT_PCR.
+
+ KGDT_R3_TEB will be set to a base of 0, with a limit of 1 page.
+
+
+Arguments:
+
+ Gdt - Supplies a pointer to an array of KGDTENTRYs.
+
+ GdtLimit - Supplies size (in bytes) of Gdt. Used to detect a
+ GDT which is too small (which will cause a BUGCHECK)
+
+ Pcr - FLAT address of Pcr for processor Gdt is for.
+
+ PcrLimit - Size Limit of PCR in bytes.
+
+ Tss - FLAT address of TSS for processor Gdt is for.
+
+ TssLimit - Size limit of TSS in bytes.
+
+ TebLimit - Size limit of Teb in bytes.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ if ((KGDT_NUMBER * 8) > GdtLimit)
+ KeBugCheck(MEMORY_MANAGEMENT);
+
+ KiInitializeGdtEntry(&Gdt[KGDT_NULL], 0, 0, 0, 0, GRAN_PAGE);
+
+ KiInitializeGdtEntry(
+ &Gdt[KGDT_R0_CODE], 0, (ULONG)-1, TYPE_CODE, DPL_SYSTEM, GRAN_PAGE);
+
+ KiInitializeGdtEntry(
+ &Gdt[KGDT_R0_DATA], 0, (ULONG)-1, TYPE_DATA, DPL_SYSTEM, GRAN_PAGE);
+
+ KiInitializeGdtEntry(&Gdt[KGDT_R3_CODE], 0,
+ (ULONG)-1, TYPE_CODE, DPL_USER, GRAN_PAGE);
+
+ KiInitializeGdtEntry(&Gdt[KGDT_R3_DATA], 0,
+ (ULONG)-1, TYPE_DATA, DPL_USER, GRAN_PAGE);
+
+ KiInitializeGdtEntry(
+ &Gdt[KGDT_TSS], (ULONG)Tss, TssLimit-1,
+ TYPE_TSS, DPL_SYSTEM, GRAN_BYTE);
+
+ KiInitializeGdtEntry(
+ &Gdt[KGDT_R0_PCR], (ULONG)Pcr, PcrLimit-1,
+ TYPE_DATA, DPL_SYSTEM, GRAN_BYTE);
+
+ KiInitializeGdtEntry(
+ &Gdt[KGDT_R3_TEB], 0, TebLimit-1, TYPE_DATA, DPL_USER, GRAN_BYTE);
+}
+
+VOID
+KiInitializeGdtEntry (
+ OUT PKGDTENTRY GdtEntry,
+ IN ULONG Base,
+ IN ULONG Limit,
+ IN USHORT Type,
+ IN USHORT Dpl,
+ IN USHORT Granularity
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a GDT entry. Base, Limit, Type (code,
+ data), and Dpl (0 or 3) are set according to parameters. All other
+ fields of the entry are set to match standard system values.
+
+Arguments:
+
+ GdtEntry - GDT descriptor to be filled in.
+
+ Base - Linear address of the first byte mapped by the selector.
+
+ Limit - Size limit of the segment, in the units selected by the
+ Granularity argument. With page granularity, 0 is 1 page and
+ 0xfffff is 1 megapage = 4 gigabytes.
+
+ Type - Code or Data. All code selectors are marked readable,
+ all data selectors are marked writeable.
+
+ Dpl - User (3) or System (0)
+
+ Granularity - 0 for byte, 1 for page
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ GdtEntry->LimitLow = (USHORT)(Limit & 0xffff);
+ GdtEntry->BaseLow = (USHORT)(Base & 0xffff);
+ GdtEntry->HighWord.Bytes.BaseMid = (UCHAR)((Base & 0xff0000) >> 16);
+ GdtEntry->HighWord.Bits.Type = Type;
+ GdtEntry->HighWord.Bits.Dpl = Dpl;
+ GdtEntry->HighWord.Bits.Pres = 1;
+ GdtEntry->HighWord.Bits.LimitHi = (Limit & 0xf0000) >> 16;
+ GdtEntry->HighWord.Bits.Sys = 0;
+ GdtEntry->HighWord.Bits.Reserved_0 = 0;
+ GdtEntry->HighWord.Bits.Default_Big = 1;
+ GdtEntry->HighWord.Bits.Granularity = Granularity;
+ GdtEntry->HighWord.Bytes.BaseHi = (UCHAR)((Base & 0xff000000) >> 24);
+}
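+
+//
+// Illustrative sketch (not part of the original source): the inverse of the
+// packing above, reassembling the 32-bit linear base from a descriptor that
+// KiInitializeGdtEntry has filled in. GetMachineBootPointers in i386pcr.asm
+// performs the same reassembly in assembly. The routine name is hypothetical.
+//
+
+ULONG
+KiSketchGdtEntryBase (
+    IN PKGDTENTRY GdtEntry
+    )
+{
+    return ((ULONG)GdtEntry->BaseLow) |
+           ((ULONG)GdtEntry->HighWord.Bytes.BaseMid << 16) |
+           ((ULONG)GdtEntry->HighWord.Bytes.BaseHi << 24);
+}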
+
+VOID
+KiInitializeMachineType (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes machine type, i.e. MCA, ABIOS, ISA
+ or EISA.
+ N.B. This is a temporary routine. Machine type:
+ Byte 0 - Machine Type, ISA, EISA or MCA
+ Byte 1 - CPU type, i386 or i486
+ Byte 2 - Cpu Step, A or B ... etc.
+ Highest bit indicates if NPX is present.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KeI386MachineType = KeLoaderBlock->u.I386.MachineType & 0x000ff;
+}
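+
+//
+// For illustration only (not part of the original source): the packed
+// machine type word documented above decomposes as
+//
+//     (MachineType >>  0) & 0xff   - machine type (ISA, EISA, or MCA)
+//     (MachineType >>  8) & 0xff   - CPU type (i386 or i486)
+//     (MachineType >> 16) & 0xff   - CPU step (A, B, ...)
+//     high bit                     - NPX present
+//
+// Only byte 0 is retained in KeI386MachineType by the routine above; the
+// decomposition of the remaining bytes is taken from the routine header and
+// is illustrative.
+//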
diff --git a/private/ntos/ke/i386/i386pcr.asm b/private/ntos/ke/i386/i386pcr.asm
new file mode 100644
index 000000000..fed059531
--- /dev/null
+++ b/private/ntos/ke/i386/i386pcr.asm
@@ -0,0 +1,200 @@
+ title "I386 PCR"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; i386pcr.asm
+;
+; Abstract:
+;
+; This module implements routines for accessing and initializing the PCR.
+;
+; Author:
+;
+; Bryan Willman (bryanwi) 20 Mar 90
+;
+; Environment:
+;
+; Kernel mode, early init of first processor.
+;
+; Revision History:
+;
+;--
+
+.386p
+ .xlist
+include ks386.inc
+include callconv.inc ; calling convention macros
+ .list
+
+;
+; NOTE - This definition of PCR gives us 2 instructions to get to some
+; variables that need to be addressable in one instruction. Any
+; such variable (such as current thread) must be accessed via its
+; own access procedure (see below), NOT by KeGetPcr()->PbCurrentThread.
+; (This is only an issue on MP machines.)
+;
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+cPublicProc _KeGetPcr ,0
+
+ mov eax,PCR[PcSelfPcr]
+ stdRET _KeGetPcr
+
+stdENDP _KeGetPcr
+
+
+;++
+;
+; PKPRCB
+; KeGetCurrentPrcb()
+;
+; Return Value:
+;
+; Pointer to current PRCB.
+;
+;--
+cPublicProc _KeGetCurrentPrcb ,0
+
+ mov eax,PCR[PcPrcb]
+ stdRET _KeGetCurrentPrcb
+
+stdENDP _KeGetCurrentPrcb
+
+
+;++
+;
+; PKTHREAD
+; KeGetCurrentThread()
+;
+; Return Value:
+;
+; Pointer to current Thread object.
+;
+;--
+cPublicProc _KeGetCurrentThread ,0
+
+ mov eax,PCR[PcPrcbData+PbCurrentThread]
+ stdRET _KeGetCurrentThread
+
+stdENDP _KeGetCurrentThread
+
+
+;++
+;
+; KPROCESSOR_MODE
+; KeGetPreviousMode()
+;
+; Return Value:
+;
+; PreviousMode of current thread.
+;
+;--
+cPublicProc _KeGetPreviousMode
+
+ mov eax,PCR[PcPrcbData+PbCurrentThread] ; (eax) -> Thread
+ movzx eax,byte ptr [eax]+ThPreviousMode ; (eax) = PreviousMode
+ stdRET _KeGetPreviousMode
+
+stdENDP _KeGetPreviousMode
+
+
+;++
+;
+; BOOLEAN
+; KeIsExecutingDpc(
+; VOID
+; );
+;
+; Return Value:
+;
+; Value of flag which indicates whether we're executing in DPC context
+;
+;--
+
+cPublicProc _KeIsExecutingDpc ,0
+
+ mov eax,PCR[PcPrcbData.PbDpcRoutineActive]
+ stdRET _KeIsExecutingDpc
+
+stdENDP _KeIsExecutingDpc
+
+
+;++
+;
+; VOID
+; GetMachineBootPointers(
+; )
+;
+; Routine Description:
+;
+; This routine is called at system startup to extract the address of
+; the PCR and machine control values. It is useful only for the P0
+; case, where the boot loader has already initialized the machine before it
+; turns on paging and calls us.
+;
+; Pcr address is extracted from the base of KGDT_R0_PCR.
+;
+; Gdt and Idt are extracted from the machine GDTR and IDTR.
+;
+; TSS is derived from the TR (task register) and the related descriptor.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+;
+; (edi) -> gdt
+; (esi) -> pcr
+; (edx) -> tss
+; (eax) -> idt
+;
+;--
+
+cPublicProc GetMachineBootPointers
+
+ push ebp
+ mov ebp,esp
+ sub esp,8
+
+ sgdt fword ptr [ebp-8]
+ mov edi,[ebp-6] ; (edi) = gdt address
+
+ mov cx,fs
+ and cx,(NOT RPL_MASK)
+ movzx ecx,cx
+ add ecx,edi ; (ecx) -> pcr descriptor
+
+ mov dh,[ecx+KgdtBaseHi]
+ mov dl,[ecx+KgdtBaseMid]
+ shl edx,16
+ mov dx,[ecx+KgdtBaseLow] ; (edx) -> pcr
+ mov esi,edx ; (esi) -> pcr
+
+ str cx
+ movzx ecx,cx
+ add ecx,edi ; (ecx) -> TSS descriptor
+
+ mov dh,[ecx+KgdtBaseHi]
+ mov dl,[ecx+KgdtBaseMid]
+ shl edx,16
+ mov dx,[ecx+KgdtBaseLow] ; (edx) -> TSS
+
+ sidt fword ptr [ebp-8]
+ mov eax,[ebp-6] ; (eax) -> Idt
+
+ mov esp,ebp
+ pop ebp
+ stdRET GetMachineBootPointers
+
+stdENDP GetMachineBootPointers
+
+_TEXT$00 ENDS
+ end
+
diff --git a/private/ntos/ke/i386/instemul.asm b/private/ntos/ke/i386/instemul.asm
new file mode 100644
index 000000000..2c7da00c4
--- /dev/null
+++ b/private/ntos/ke/i386/instemul.asm
@@ -0,0 +1,2873 @@
+ title "Vdm Instuction Emulation"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; instemul.asm
+;
+; Abstract:
+;
+; This module contains the routines for emulating instructions and
+; faults to a VDM.
+;
+; Author:
+;
+; Dave Hastings (daveh) 29-March-1991
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Notes:
+;
+;
+;sudeepb 09-Dec-1992 Very soon this file will be deleted and protected
+; mode instruction emulation will be merged into
+; emv86.asm. In particular, the following routines will
+; simply become OpcodeInvalid.
+; OpcodeIret
+; OpcodePushf
+; OpcodePopf
+; OpcodeHlt
+; Other routines such as
+; OpcodeCli
+; OpcodeSti
+; OpcodeIN/OUT/SB/Immb etc
+; will map exactly like emv86.asm
+; OpcodeInt will be the main differing routine.
+;
+; OpcodeDispatch Table will be deleted.
+;
+; So before making any major changes in this file please see
+; Sudeepb or Daveh.
+;
+;neilsa 19-Oct-1993 Size and performance enhancements
+;jonle 15-Nov-1993 - The Debug messages for each opcode may no longer work
+; correctly, because interrupts may not have been enabled
+;
+;
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; Revision History:
+;
+;--
+.386p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include mac386.inc
+include i386\mi.inc
+include callconv.inc
+include ..\..\vdm\i386\vdm.inc
+include ..\..\vdm\i386\vdmtb.inc
+ .list
+
+ extrn VdmOpcode0f:proc
+ extrn OpcodeNPXV86:proc
+ extrn VdmDispatchIntAck:proc ;; only OpcodeSti uses this
+ extrn CommonDispatchException:proc ;; trap.asm
+ extrn _DbgPrint:proc
+ extrn _KeI386VdmIoplAllowed:dword
+ extrn _KeI386VirtualIntExtensions:dword
+ EXTRNP _KeBugCheck,1
+ EXTRNP _Ki386GetSelectorParameters,4
+ EXTRNP _Ki386VdmDispatchIo,5
+ EXTRNP _Ki386VdmDispatchStringIo,8
+ EXTRNP _KiDispatchException,5
+ EXTRNP _VdmPrinterStatus,3
+ EXTRNP KfLowerIrql,1,IMPORT, FASTCALL
+ EXTRNP _VdmPrinterWriteData, 3
+
+; JAPAN - SUPPORT Intel CPU/Non PC/AT machine
+ extrn _VdmFixedStateLinear:dword
+
+ page ,132
+
+ifdef VDMDBG
+%out Debugging version
+endif
+
+;
+; Force assume into place
+;
+
+_PAGE SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:NOTHING, FS:NOTHING, GS:NOTHING
+_PAGE ENDS
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:NOTHING, FS:NOTHING, GS:NOTHING
+_TEXT$00 ENDS
+
+_DATA SEGMENT DWORD PUBLIC 'DATA'
+
+
+;
+; Instruction emulation emulates the following instructions.
+; The emulation affects the noted user mode registers.
+;
+; In protected mode, the following instructions are emulated in the kernel
+;
+; Registers (E)Flags (E)SP SS CS
+; INTnn X X X X
+; INTO X X X X
+; CLI X
+; STI X
+;
+; The following instructions are always emulated by reflection to the
+; Usermode VDM monitor
+;
+; INSB
+; INSW
+; OUTSB
+; OUTSW
+; INBimm
+; INWimm
+; OUTBimm
+; OUTWimm
+; INB
+; INW
+; OUTB
+; OUTW
+;
+; WARNING What do we do about 32 bit io instructions??
+
+
+;
+; OpcodeIndex - packed 1st level table to index OpcodeDispatch table
+;
+ public OpcodeIndex
+diBEGIN OpcodeIndex,VDM_INDEX_Invalid
+ dtI 0fh, VDM_INDEX_0F
+ dtI 26h, VDM_INDEX_ESPrefix
+ dtI 2eh, VDM_INDEX_CSPrefix
+ dtI 36h, VDM_INDEX_SSPrefix
+ dtI 3eh, VDM_INDEX_DSPrefix
+ dtI 64h, VDM_INDEX_FSPrefix
+ dtI 65h, VDM_INDEX_GSPrefix
+ dtI 66h, VDM_INDEX_OPER32Prefix
+ dtI 67h, VDM_INDEX_ADDR32Prefix
+ dtI 6ch, VDM_INDEX_INSB
+ dtI 6dh, VDM_INDEX_INSW
+ dtI 6eh, VDM_INDEX_OUTSB
+ dtI 6fh, VDM_INDEX_OUTSW
+ dtI 9bh, VDM_INDEX_NPX
+ dtI 9ch, VDM_INDEX_PUSHF
+ dtI 9dh, VDM_INDEX_POPF
+ dtI 0cdh, VDM_INDEX_INTnn
+ dtI 0ceh, VDM_INDEX_INTO
+ dtI 0cfh, VDM_INDEX_IRET
+ dtI 0d8h, VDM_INDEX_NPX
+ dtI 0d9h, VDM_INDEX_NPX
+ dtI 0dah, VDM_INDEX_NPX
+ dtI 0dbh, VDM_INDEX_NPX
+ dtI 0dch, VDM_INDEX_NPX
+ dtI 0ddh, VDM_INDEX_NPX
+ dtI 0deh, VDM_INDEX_NPX
+ dtI 0dfh, VDM_INDEX_NPX
+ dtI 0e4h, VDM_INDEX_INBimm
+ dtI 0e5h, VDM_INDEX_INWimm
+ dtI 0e6h, VDM_INDEX_OUTBimm
+ dtI 0e7h, VDM_INDEX_OUTWimm
+ dtI 0ech, VDM_INDEX_INB
+ dtI 0edh, VDM_INDEX_INW
+ dtI 0eeh, VDM_INDEX_OUTB
+ dtI 0efh, VDM_INDEX_OUTW
+ dtI 0f0h, VDM_INDEX_LOCKPrefix
+ dtI 0f2h, VDM_INDEX_REPNEPrefix
+ dtI 0f3h, VDM_INDEX_REPPrefix
+ dtI 0f4h, VDM_INDEX_HLT
+ dtI 0fah, VDM_INDEX_CLI
+ dtI 0fbh, VDM_INDEX_STI
+diEND NUM_OPCODE
+
+;
+; OpcodeDispatch - table of routines used to emulate instructions
+;
+
+ public OpcodeDispatch
+dtBEGIN OpcodeDispatch,OpcodeInvalid
+ dtS VDM_INDEX_0F , Opcode0F
+ dtS VDM_INDEX_ESPrefix , OpcodeESPrefix
+ dtS VDM_INDEX_CSPrefix , OpcodeCSPrefix
+ dtS VDM_INDEX_SSPrefix , OpcodeSSPrefix
+ dtS VDM_INDEX_DSPrefix , OpcodeDSPrefix
+ dtS VDM_INDEX_FSPrefix , OpcodeFSPrefix
+ dtS VDM_INDEX_GSPrefix , OpcodeGSPrefix
+ dtS VDM_INDEX_OPER32Prefix, OpcodeOPER32Prefix
+ dtS VDM_INDEX_ADDR32Prefix, OpcodeADDR32Prefix
+ dtS VDM_INDEX_INSB , OpcodeINSB
+ dtS VDM_INDEX_INSW , OpcodeINSW
+ dtS VDM_INDEX_OUTSB , OpcodeOUTSB
+ dtS VDM_INDEX_OUTSW , OpcodeOUTSW
+ dtS VDM_INDEX_INTnn , OpcodeINTnn
+ dtS VDM_INDEX_INTO , OpcodeINTO
+ dtS VDM_INDEX_INBimm , OpcodeINBimm
+ dtS VDM_INDEX_INWimm , OpcodeINWimm
+ dtS VDM_INDEX_OUTBimm , OpcodeOUTBimm
+ dtS VDM_INDEX_OUTWimm , OpcodeOUTWimm
+ dtS VDM_INDEX_INB , OpcodeINB
+ dtS VDM_INDEX_INW , OpcodeINW
+ dtS VDM_INDEX_OUTB , OpcodeOUTB
+ dtS VDM_INDEX_OUTW , OpcodeOUTW
+ dtS VDM_INDEX_LOCKPrefix , OpcodeLOCKPrefix
+ dtS VDM_INDEX_REPNEPrefix , OpcodeREPNEPrefix
+ dtS VDM_INDEX_REPPrefix , OpcodeREPPrefix
+ dtS VDM_INDEX_CLI , OpcodeCLI
+ dtS VDM_INDEX_STI , OpcodeSTI
+dtEND MAX_VDM_INDEX
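+
+;
+; Dispatch is two level: OpcodeIndex maps the raw opcode byte to a packed
+; VDM_INDEX_xxx value, and OpcodeDispatch maps that index to a handler.
+; For example, opcode 0FAh maps to VDM_INDEX_CLI, which dispatches to
+; OpcodeCLI; any opcode with no OpcodeIndex entry yields VDM_INDEX_Invalid
+; and therefore OpcodeInvalid.
+;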
+
+ public _ExVdmOpcodeDispatchCounts,_ExVdmSegmentNotPresent
+_ExVdmOpcodeDispatchCounts dd MAX_VDM_INDEX dup(0)
+_ExVdmSegmentNotPresent dd 0
+ifdef VDMDBG
+NUM_TRACE_ENTRIES equ 64
+TRACE_ENTRY_SIZE equ 16
+OpcodeTrace dd NUM_TRACE_ENTRIES*TRACE_ENTRY_SIZE dup (0)
+TracePointer dd OpcodeTrace
+endif
+_DATA ENDS
+
+_PAGE SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:FLAT, FS:NOTHING, GS:NOTHING
+
+ page ,132
+ subttl "Overide Prefix Macro"
+;++
+;
+; Routine Description:
+;
+; This macro generates the code for handling override prefixes.
+; The routine name generated is OpcodeXXXXPrefix, where XXXX is
+; the name used in the macro invocation. The code will set the
+; PREFIX_XXXX bit in the Prefix flags.
+;
+; Arguments
+; name = name of prefix
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns
+; PREFIX_XXXX set in the reginfo prefix flags; control then passes to
+; OpcodeGenericPrefix, which advances past the prefix byte and
+; dispatches on the next opcode byte
+;
+; NOTE: This routine exits by dispatching through the table again.
+;--
+opPrefix macro name
+ public Opcode&name&Prefix
+Opcode&name&Prefix proc
+
+ or [esi].RiPrefixFlags,PREFIX_&name
+
+
+ifdef VDMDBG
+_DATA segment
+Msg&name&Prefix db 'NTVDM: Encountered override prefix &name& %lx at '
+ db 'address %lx', 0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsSegCs
+ push offset FLAT:Msg&name&Prefix
+ call _DbgPrint
+ add esp,12
+
+endif
+
+ jmp OpcodeGenericPrefix ; dispatch to next handler
+
+Opcode&name&Prefix endp
+endm
+
+irp prefix, <ES, CS, SS, DS, FS, GS, OPER32, ADDR32, LOCK, REPNE, REP>
+
+ opPrefix prefix
+
+endm
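+
+;
+; The irp above expands opPrefix once for each prefix, generating
+; OpcodeESPrefix through OpcodeREPPrefix; each generated handler sets its
+; PREFIX_xxx bit in the reginfo flags and then jumps to OpcodeGenericPrefix
+; to dispatch the byte that follows the prefix.
+;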
+
+ page ,132
+ subttl "Instruction Emulation Dispatcher"
+;++
+;
+; Routine Description:
+;
+; This routine dispatches to the opcode specific emulation routine,
+; based on the first byte of the opcode. Two byte opcodes, and prefixes
+; result in another level of dispatching, from the handling routine.
+;
+; Arguments:
+;
+; ebp = pointer to trap frame
+;
+; Returns:
+;
+; EAX = 1 if the instruction was emulated, 0 if it was not handled
+;
+;
+
+cPublicProc _Ki386DispatchOpcode,0
+
+ifdef VDMDBG
+ push 1
+ call TraceOpcode
+endif
+
+ sub esp,REGINFOSIZE
+ mov esi, esp ; scratch area
+
+ CsToLinearPM [ebp].TsSegCs, doerr ; initialize reginfo
+
+ mov edi,[ebp].TsEip ; get fault instruction address
+ cmp edi,[esi].RiCsLimit ; check eip
+ ja doerr
+
+ add edi,[esi].RiCsBase
+ movzx ecx,byte ptr [edi] ; get faulting opcode
+
+ mov eax,ecx
+ and eax,0F8h ; check for npx instr
+ cmp eax,0D8h
+ je do30 ; dispatch
+
+ movzx eax, OpcodeIndex[ecx]
+ mov ebx,1 ; length count, flags
+
+ ; All handler routines will get the following on entry
+ ; ebp -> trap frame
+ ; ebx -> prefix flags, instruction length count
+ ; ecx -> byte at the faulting address
+ ; edx -> pointer to vdm state in DOS arena
+ ; interrupts enabled and Irql at APC level
+ ; edi -> address of faulting instruction
+ ; esi -> reginfo struct
+ ; All handler routines will return
+ ; EAX = 0 for failure
+ ; EAX = 1 for success
+if DEVL
+ inc _ExVdmOpcodeDispatchCounts[eax * type _ExVdmOpcodeDispatchCounts]
+endif
+
+ call OpcodeDispatch[eax * type OpcodeDispatch]
+do20:
+ add esp,REGINFOSIZE
+ stdRET _Ki386DispatchOpcode
+
+doerr: xor eax,eax
+ jmp do20
+
+ ;
+ ; If we get here, we have executed an NPX instruction in user mode
+ ; with the emulator installed. If the EM bit was not set in CR0, the
+ ; app really wanted to execute the instruction for detection purposes.
+ ; In this case, we need to clear the TS bit, and restart the instruction.
+ ; Otherwise we need to reflect the exception
+ ;
+do30:
+ call OpcodeNPXV86
+ jmp short do20
+
+stdENDP _Ki386DispatchOpcode
+
+
+ page ,132
+ subttl "Invalid Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an invalid opcode. It prints the invalid
+; opcode message, and causes a GP fault to be reflected to the
+; debugger
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeInvalid
+OpcodeInvalid proc
+ifdef VDMDBG
+_DATA segment
+MsgInvalidOpcode db 'NTVDM: An invalid opcode %lx was encountered at '
+ db 'address %x:%x',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push edx
+ push offset FLAT:MsgInvalidOpcode
+ call _DbgPrint ; display invalid opcode message
+ add esp,16
+endif
+ xor eax,eax ; ret fail
+ ret
+
+OpcodeInvalid endp
+
+
+ page ,132
+ subttl "Generic Prefix Handler"
+;++
+;
+; Routine Description:
+;
+; This routine handles the generic portion of all of the prefixes,
+; and dispatches the next byte of the opcode.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; EDI advanced past the prefix byte, BL (length count) incremented
+; ECX contains the next opcode byte, which is dispatched through
+; the OpcodeDispatch table
+;
+
+ public OpcodeGenericPrefix
+OpcodeGenericPrefix proc
+
+ inc edi ; increment eip
+ inc ebx ; increment size
+ cmp bl, 128 ; set arbitrary inst size limit
+ ja ogperr ; in case of pointless prefixes
+
+ mov eax,edi ; current linear address
+ sub eax,[esi].RiCsBase ; make address eip
+ cmp eax,[esi].RiCsLimit ; check eip
+ ja ogperr
+
+ mov cl,byte ptr [edi] ; get next opcode
+
+ movzx eax, OpcodeIndex[ecx]
+if DEVL
+ inc _ExVdmOpcodeDispatchCounts[eax * type _ExVdmOpcodeDispatchCounts]
+endif
+ jmp OpcodeDispatch[eax * type OpcodeDispatch]
+
+ogperr:
+ xor eax,eax ; opcode was NOT handled
+ ret
+
+OpcodeGenericPrefix endp
+
+
+ page ,132
+ subttl "0F Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine handles a 0Fh (two byte) opcode by passing the
+; instruction to VdmOpcode0F for further decoding, updating the
+; trap frame Eip if the instruction is handled.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public Opcode0F
+Opcode0F proc
+
+ifdef VDMDBG
+_DATA segment
+Msg0FOpcode db 'NTVDM: A 0F opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:Msg0FOpcode
+ call _DbgPrint ; display invalid opcode message
+ add esp,12
+endif
+
+ mov eax,[ebp].TsEip ; get fault instruction address
+ mov [esi].RiEip,eax
+ mov [esi].RiTrapFrame,ebp
+ mov [esi].RiPrefixFlags,ebx
+ mov eax,dword ptr [ebp].TsEFlags
+ mov [esi].RiEFlags,eax
+
+ call VdmOpcode0F ; enables interrupts
+ test eax,0FFFFh
+ jz o0f20
+
+ mov eax,[esi].RiEip
+ mov [ebp].TsEip,eax
+ mov eax,1
+o0f20:
+ ret
+
+Opcode0F endp
+
+ page ,132
+ subttl "Byte string in Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INSB opcode by dispatching the string
+; input operation through Ki386VdmDispatchStringIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+; WARNING what to do about size override? ds override?
+
+ public OpcodeINSB
+OpcodeINSB proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINSBOpcode db 'NTVDM: An INSB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINSBOpcode
+ call _DbgPrint ; display INSB opcode message
+ add esp,12
+endif
+
+ push ebp ; Trap Frame
+ push ebx ; size of insb
+
+ movzx eax,word ptr [ebp].TsSegEs
+ shl eax,16
+ ; WARNING no support for 32bit edi
+ mov ax,word ptr [ebp].TsEdi ; don't support 32bit'ness
+ push eax ; address
+
+ xor eax, eax
+ mov ecx,1
+ test ebx,PREFIX_REP
+ jz @f
+
+ mov eax, 1
+ ; WARNING no support for 32bit ecx
+ movzx ecx,word ptr [ebp].TsEcx
+@@:
+
+ push ecx ; number of io ops
+ push TRUE ; read op
+ push eax ; REP prefix
+ push 1 ; byte op
+ movzx edx,word ptr [ebp].TsEdx
+ push edx ; port number
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+
+ ret
+
+OpcodeINSB endp
+
+ page ,132
+ subttl "Word String In Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INSW opcode by dispatching the string
+; input operation through Ki386VdmDispatchStringIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeINSW
+OpcodeINSW proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINSWOpcode db 'NTVDM: An INSW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINSWOpcode
+ call _DbgPrint ; display INSW opcode message
+ add esp,12
+endif
+
+ push ebp ; Trap frame
+ push ebx ; sizeof insw
+
+ movzx eax,word ptr [ebp].TsSegEs
+ shl eax,16
+ ; WARNING no support for 32bit edi
+ mov ax,word ptr [ebp].TsEdi
+ push eax ; address
+
+ xor eax, eax
+ mov ecx,1
+ test ebx,PREFIX_REP
+ jz @f
+
+ mov eax, 1
+ ; WARNING no support for 32bit ecx
+ movzx ecx,word ptr [ebp].TsEcx
+@@:
+ movzx edx,word ptr [ebp].TsEdx
+ push ecx ; number of io ops
+ push TRUE ; read op
+ push eax ; REP prefix
+ push 2 ; word size
+ push edx ; port number
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+
+ ret
+
+OpcodeINSW endp
+
+ page ,132
+ subttl "Byte String Out Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an OUTSB opcode by dispatching the string
+; output operation through Ki386VdmDispatchStringIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeOUTSB
+OpcodeOUTSB proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTSBOpcode db 'NTVDM: An OUTSB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTSBOpcode
+ call _DbgPrint ; display OUTSB opcode message
+ add esp,12
+endif
+
+ push ebp ; Trap Frame
+ push ebx ; size of outsb
+
+ movzx eax,word ptr [ebp].TsSegDs
+ shl eax,16
+ ; WARNING don't support 32bit'ness, esi
+ mov ax,word ptr [ebp].TsEsi
+ push eax ; address
+
+ xor eax, eax
+ mov ecx,1
+ test ebx,PREFIX_REP
+ jz @f
+
+ mov eax, 1
+ ; WARNING don't support 32bit'ness ecx
+ movzx ecx,word ptr [ebp].TsEcx
+@@:
+ movzx edx,word ptr [ebp].TsEdx
+ push ecx ; number of io ops
+ push FALSE ; write op
+ push eax ; REP prefix
+ push 1 ; byte op
+ push edx ; port number
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+
+ ret
+
+OpcodeOUTSB endp
+
+ page ,132
+ subttl "Word String Out Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an OUTSW opcode by dispatching the string
+; output operation through Ki386VdmDispatchStringIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeOUTSW
+OpcodeOUTSW proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTSWOpcode db 'NTVDM: An OUTSW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTSWOpcode
+ call _DbgPrint ; display OUTSW opcode message
+ add esp,12
+endif
+ push ebp ; Trap Frame
+ push ebx ; size of outsw
+
+ movzx eax,word ptr [ebp].TsSegDs
+ shl eax,16
+ ; WARNING don't support 32bit'ness esi
+ mov ax,word ptr [ebp].TsEsi
+ push eax ; address
+
+ xor eax, eax
+ mov ecx,1
+ test ebx,PREFIX_REP
+ jz @f
+
+ mov eax, 1
+ ; WARNING don't support 32bit'ness ecx
+ movzx ecx,word ptr [ebp].TsEcx
+@@:
+ movzx edx,word ptr [ebp].TsEdx
+
+ push ecx ; number of io ops
+ push FALSE ; write op
+ push eax ; REP prefix
+ push 2 ; word op
+ push edx ; port number
+ call _Ki386VdmDispatchStringIo@32 ; use retval
+
+ ret
+
+OpcodeOUTSW endp
+
+ page ,132
+ subttl "INTnn Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INTnn opcode. It retrieves the handler
+; from the IVT, pushes the current cs:ip and flags on the stack,
+; and dispatches to the handler.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; Current CS:IP on user stack
+; RiCs:RiEip -> handler from IVT
+;
+
+ public OpcodeINTnn
+OpcodeINTnn proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINTnnOpcode db 'NTVDM: An INTnn opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINTnnOpcode
+ call _DbgPrint ; display INTnn opcode message
+ add esp,12
+endif
+
+ mov eax,dword ptr [ebp].TsEFlags
+ call GetVirtualBits ; set interrupt flag
+ mov [esi].RiEFlags,eax
+ movzx eax,word ptr [ebp].TsHardwareSegSs
+ call SsToLinear
+ test al,0FFh
+ jz oinerr
+
+ inc edi ; point to int #
+ mov eax,edi ; current linear address
+ sub eax,[esi].RiCsBase ; make address eip
+ cmp eax,[esi].RiCsLimit ; check eip
+ ja oinerr
+
+ movzx ecx,byte ptr [edi] ; get int #
+ inc eax ; inc past end of instruction
+ mov [esi].RiEip,eax ; save for pushint's benefit
+ call PushInt ; will return retcode in al
+ test al,0FFh
+ jz oinerr ; error!
+
+ mov eax,[esi].RiEsp
+ mov [ebp].TsHardwareEsp,eax
+ mov ax,word ptr [esi].RiSegCs
+ mov word ptr [ebp].TsSegCs,ax
+ mov eax,[esi].RiEFlags
+ mov [ebp].TsEFlags,eax
+ mov eax,[esi].RiEip
+ mov [ebp].TsEip,eax
+ mov eax,1
+ ret
+
+oinerr:
+ xor eax,eax
+ ret
+
+
+OpcodeINTnn endp
+
+ page ,132
+ subttl "INTO Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine handles an INTO opcode. It is not emulated here;
+; failure is returned so that the fault is reflected.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeINTO
+OpcodeINTO proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINTOOpcode db 'NTVDM: An INTO opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINTOOpcode
+ call _DbgPrint ; display INTO opcode message
+ add esp,12
+endif
+ xor eax,eax
+ ret
+
+OpcodeINTO endp
+
+
+ page ,132
+ subttl "In Byte Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an in byte immediate opcode by decoding the
+; immediate port number and dispatching the read through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeINBimm
+OpcodeINBimm proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINBimmOpcode db 'NTVDM: An INBimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINBimmOpcode
+ call _DbgPrint ; display INBimm opcode message
+ add esp,12
+endif
+
+ inc ebx ; length count
+ inc edi
+ mov eax,edi ; current linear address
+ sub eax,[esi].RiCsBase ; make address eip
+ cmp eax,[esi].RiCsLimit ; check eip
+ ja oibi20
+
+ movzx ecx,byte ptr [edi]
+
+; (ecx) = port number
+; 1 - byte op
+; TRUE - read op
+; (ebx) = instruction size
+
+ stdCall _Ki386VdmDispatchIo, <ecx, 1, TRUE, ebx, ebp>
+ ret
+oibi20:
+ xor eax, eax ; not handled
+ ret
+
+OpcodeINBimm endp
+
+ page ,132
+ subttl "Word In Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an in word immediate opcode by decoding the
+; immediate port number and dispatching the read through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeINWimm
+OpcodeINWimm proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINWimmOpcode db 'NTVDM: An INWimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINWimmOpcode
+ call _DbgPrint ; display INWimm opcode message
+ add esp,12
+endif
+
+ inc ebx ; length count
+ inc edi
+ mov eax,edi ; current linear address
+ sub eax,[esi].RiCsBase ; make address eip
+ cmp eax,[esi].RiCsLimit ; check eip
+ ja oiwi20
+
+ movzx ecx,byte ptr [edi]
+
+; TRUE - read op
+; 2 - word op
+; ecx - port number
+ stdCall _Ki386VdmDispatchIo, <ecx, 2, TRUE, ebx, ebp>
+ ret
+oiwi20:
+ xor eax, eax ; not handled
+ ret
+
+OpcodeINWimm endp
+
+ page ,132
+ subttl "Out Byte Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an out byte immediate opcode by decoding the
+; immediate port number and dispatching the write through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeOUTBimm
+OpcodeOUTBimm proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTBimmOpcode db 'NTVDM: An OUTBimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTBimmOpcode
+ call _DbgPrint ; display OUTBimm opcode message
+ add esp,12
+endif
+
+ inc ebx ; length count
+ inc edi
+ mov eax,edi ; current linear address
+ sub eax,[esi].RiCsBase ; make address eip
+ cmp eax,[esi].RiCsLimit ; check eip
+ ja oobi20
+
+ movzx ecx,byte ptr [edi]
+
+; FALSE - write op
+; 1 - byte op
+; ecx - port #
+
+ stdCall _Ki386VdmDispatchIo, <ecx, 1, FALSE, ebx, ebp>
+ ret
+oobi20:
+ xor eax, eax ; not handled
+ ret
+
+OpcodeOUTBimm endp
+
+ page ,132
+ subttl "Out Word Immediate Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an out word immediate opcode by decoding the
+; immediate port number and dispatching the write through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeOUTWimm
+OpcodeOUTWimm proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTWimmOpcode db 'NTVDM: An OUTWimm opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTWimmOpcode
+ call _DbgPrint ; display OUTWimm opcode message
+ add esp,12
+endif
+
+ inc ebx ; length count
+ inc edi
+ mov eax,edi ; current linear address
+ sub eax,[esi].RiCsBase ; make address eip
+ cmp eax,[esi].RiCsLimit ; check eip
+ ja oowi20
+
+ movzx ecx,byte ptr [edi]
+
+; FALSE - write op
+; 2 - word op
+; ecx - port number
+ stdCall _Ki386VdmDispatchIo, <ecx, 2, FALSE, ebx, ebp>
+ ret
+
+oowi20:
+ xor eax, eax ; not handled
+ ret
+
+OpcodeOUTWimm endp
+
+ page ,132
+ subttl "INB Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INB opcode. Printer status ports are
+; handled via VdmPrinterStatus when possible; all other I/O is
+; dispatched through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeINB
+OpcodeINB proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINBOpcode db 'NTVDM: An INB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINBOpcode
+ call _DbgPrint ; display INB opcode message
+ add esp,12
+endif
+
+ movzx eax,word ptr [ebp].TsEdx
+
+; TRUE - read op
+; 1 - byte op
+; eax - port number
+
+ cmp eax, 3bdh
+ jz oib_prt1
+ cmp eax, 379h
+ jz oib_prt1
+ cmp eax, 279h
+ jz oib_prt1
+
+oib_reflect:
+ stdCall _Ki386VdmDispatchIo, <eax, 1, TRUE, ebx, ebp>
+ ret
+oib20:
+ xor eax, eax ; not handled
+ ret
+
+oib_prt1:
+ ; call printer status routine with port number, size, trap frame
+ movzx ebx, bl ;clear prefix flags
+ push eax
+ stdCall _VdmPrinterStatus, <eax, ebx, ebp>
+ or al,al
+ pop eax
+ jz short oib_reflect
+ mov al, 1
+ ret
+
+OpcodeINB endp
+
+ page ,132
+ subttl "INW Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an INW opcode by dispatching the word read
+; through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeINW
+OpcodeINW proc
+
+ifdef VDMDBG
+_DATA segment
+MsgINWOpcode db 'NTVDM: An INW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgINWOpcode
+ call _DbgPrint ; display INW opcode message
+ add esp,12
+endif
+
+ movzx eax,word ptr [ebp].TsEdx
+
+; TRUE - read operation
+; 2 - word op
+; eax - port number
+ stdCall _Ki386VdmDispatchIo, <eax, 2, TRUE, ebx, ebp>
+ ret
+
+OpcodeINW endp
+
+ page ,132
+ subttl "OUTB Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an OUTB opcode. Printer data ports are
+; handled via VdmPrinterWriteData when possible; all other I/O is
+; dispatched through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeOUTB
+OpcodeOUTB proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTBOpcode db 'NTVDM: An OUTB opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTBOpcode
+ call _DbgPrint ; display OUTB opcode message
+ add esp,12
+endif
+
+ movzx eax,word ptr [ebp].TsEdx
+ cmp eax, 03BCh
+ je short oob_printerVDD
+ cmp eax, 0378h
+ je short oob_printerVDD
+ cmp eax, 0278h
+ jz short oob_printerVDD
+
+oob_reflect:
+; FALSE - write op
+; 1 - byte op
+; eax - port number
+ stdCall _Ki386VdmDispatchIo, <eax, 1, FALSE, ebx, ebp>
+ ret
+
+oob_printerVDD:
+ movzx ebx, bl ; instruction size
+ push eax ; save port address
+ stdCall _VdmPrinterWriteData, <eax, ebx, ebp>
+ or al,al ;
+ pop eax
+ jz short oob_reflect
+ mov al, 1
+ ret
+
+OpcodeOUTB endp
+
+ page ,132
+ subttl "OUTW Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an OUTW opcode by dispatching the word write
+; through Ki386VdmDispatchIo.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeOUTW
+OpcodeOUTW proc
+
+ifdef VDMDBG
+_DATA segment
+MsgOUTWOpcode db 'NTVDM: An OUTW opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgOUTWOpcode
+ call _DbgPrint ; display OUTW opcode message
+ add esp,12
+endif
+
+ movzx eax,word ptr [ebp].TsEdx
+
+; FALSE - write op
+; 2 - word op
+; eax - port number
+ stdCall _Ki386VdmDispatchIo, <eax, 2, FALSE, ebx, ebp>
+ ret
+
+OpcodeOUTW endp
+
+ page ,132
+ subttl "CLI Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates a CLI opcode. It clears the virtual interrupt
+; flag in the VDM state and advances Eip past the instruction.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeCLI
+OpcodeCLI proc
+
+ifdef VDMDBG
+_DATA segment
+MsgCLIOpcode db 'NTVDM: An CLI opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgCLIOpcode
+ call _DbgPrint ; display CLI opcode message
+ add esp,12
+endif
+
+ mov eax,[ebp].TsEFlags
+ and eax,NOT EFLAGS_INTERRUPT_MASK
+ call SetVirtualBits
+ inc dword ptr [ebp].TsEip
+
+ mov eax,1
+ ret
+
+OpcodeCLI endp
+
+ page ,132
+ subttl "STI Opcode Handler"
+;++
+;
+; Routine Description:
+;
+; This routine emulates an STI opcode. It sets the virtual interrupt
+; flag in the VDM state, advances Eip past the instruction, and
+; dispatches any pending virtual interrupt.
+;
+; Arguments:
+; EBP -> trap frame
+; EBX -> prefix flags, BL = instruction length count
+; ECX -> byte at the faulting address
+; EDX -> pointer to vdm state in DOS arena
+; ESI -> Reginfo struct
+; EDI -> address of faulting instruction
+;
+; Returns:
+;
+; nothing
+;
+
+ public OpcodeSTI
+OpcodeSTI proc
+
+ifdef VDMDBG
+_DATA segment
+MsgSTIOpcode db 'NTVDM: An STI opcode %lx was encountered at '
+ db 'address %lx',0ah, 0dh, 0
+_DATA ends
+ push [ebp].TsEip
+ push [ebp].TsSegCs
+ push offset FLAT:MsgSTIOpcode
+ call _DbgPrint ; display STI opcode message
+ add esp,12
+endif
+
+ mov eax,[ebp].TsEFlags
+ or eax,EFLAGS_INTERRUPT_MASK
+ call SetVirtualBits
+ inc dword ptr [ebp].TsEip
+ mov eax,_VdmFixedStateLinear
+ mov eax,dword ptr [eax]
+ test eax,VDM_INTERRUPT_PENDING
+ jz os10
+
+ call VdmDispatchIntAck
+os10:
+ mov eax,1
+ ret
+
+OpcodeSTI endp
+
+ page ,132
+ subttl "Check Vdm Flags"
+;++
+;
+; Routine Description:
+;
+; This routine checks the flags that are going to be used for the
+; dos or windows application.
+;
+; Arguments:
+;
+; ecx = EFlags to be set
+; esi = address of reg info
+;
+; Returns:
+;
+; ecx = fixed flags
+;
+ public CheckVdmFlags
+CheckVdmFlags proc
+
+ push eax
+ mov eax,[esi].RiEFlags
+ and eax,EFLAGS_V86_MASK
+ test _KeI386VdmIoplAllowed,1
+ jnz cvf30
+
+ test _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS OR PM_VIRTUAL_INT_EXTENSIONS
+ jnz cvf40
+
+cvf10: or ecx,EFLAGS_INTERRUPT_MASK
+cvf20: and ecx,NOT (EFLAGS_IOPL_MASK OR EFLAGS_NT_MASK OR EFLAGS_V86_MASK OR EFLAGS_VIF OR EFLAGS_VIP)
+ or ecx,eax ; restore original v86 bit
+ pop eax
+ ret
+
+cvf30: test eax,EFLAGS_V86_MASK
+ jz cvf10
+
+ jmp cvf20
+
+cvf40: test eax,EFLAGS_V86_MASK
+ jz cvf60
+
+ test _KeI386VirtualIntExtensions,V86_VIRTUAL_INT_EXTENSIONS
+ jz cvf10
+
+cvf50: push eax
+ mov eax,ecx
+ and eax,EFLAGS_INTERRUPT_MASK
+ shl eax,0ah
+ pop eax
+ jmp cvf10
+
+cvf60: test _KeI386VirtualIntExtensions,PM_VIRTUAL_INT_EXTENSIONS
+ jz cvf10
+
+ jmp cvf50
+CheckVdmFlags endp
+
+ page ,132
+ subttl "Get Virtual Interrupt Flag"
+;++
+;
+; Routine Description:
+;
+; This routine correctly gets the VDM's virtual interrupt flag and
+; puts it into an EFlags image to be put on the stack.
+;
+; Arguments:
+;
+; eax = EFlags value
+;
+; Returns:
+;
+; eax = EFlags value with correct setting for IF
+;
+; Uses:
+; ecx
+;
+ public GetVirtualBits
+GetVirtualBits proc
+
+ test _KeI386VdmIoplAllowed,1
+ jnz gvb60
+
+ test _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS OR PM_VIRTUAL_INT_EXTENSIONS
+ jnz gvb30
+
+gvb10: and eax,NOT EFLAGS_INTERRUPT_MASK
+ mov ecx,_VdmFixedStateLinear ; get pointer to Teb
+ mov ecx,dword ptr [ecx] ; get virtual int flag
+ and ecx,VDM_VIRTUAL_INTERRUPTS OR VDM_VIRTUAL_AC
+ or eax,ecx ; put virtual int flag into flags
+ or eax,EFLAGS_IOPL_MASK ; make it look like a 386
+ ret
+
+gvb30: test eax, EFLAGS_V86_MASK
+ jz gvb50
+
+ test _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS
+ jz gvb10
+
+gvb40: mov ecx,eax
+ and ecx,EFLAGS_VIF
+ shr ecx,0ah ; mov vif to if posn
+ and eax,NOT EFLAGS_INTERRUPT_MASK
+ or eax,ecx
+
+ mov ecx,_VdmFixedStateLinear
+ mov ecx,dword ptr [ecx]
+ and ecx,VDM_VIRTUAL_AC
+ and eax,NOT EFLAGS_ALIGN_CHECK
+ or eax,ecx
+ or eax,EFLAGS_IOPL_MASK
+ ret
+
+gvb50: test _KeI386VirtualIntExtensions, PM_VIRTUAL_INT_EXTENSIONS
+ jz gvb10
+
+ jmp gvb40
+
+gvb60: test eax,EFLAGS_V86_MASK
+ jz gvb10
+
+ mov ecx,_VdmFixedStateLinear
+ mov ecx,dword ptr [ecx]
+ and ecx,VDM_VIRTUAL_AC
+ and eax,NOT EFLAGS_ALIGN_CHECK
+ or eax,ecx
+ or eax,EFLAGS_IOPL_MASK
+ ret
+
+GetVirtualBits endp
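+
+;
+; N.B. The "shr ecx,0ah" above relies on the EFLAGS layout: VIF is bit 19
+; and IF is bit 9, so a shift by 10 (0ah) moves the virtual interrupt flag
+; into the IF position; SetVirtualBits shifts left by the same amount for
+; the reverse mapping.
+;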
+
+ page ,132
+ subttl "Set Virtual Interrupt Flag"
+;++
+;
+; Routine Description:
+;
+; This routine correctly sets the VDM's virtual interrupt flag.
+;
+; Arguments:
+;
+; eax = EFlags value
+;
+; Returns:
+;
+; Virtual interrupt flag set
+;
+ public SetVirtualBits
+SetVirtualBits proc
+Flags equ [ebp - 4]
+
+ push ebp
+ mov ebp,esp
+ sub esp,4
+
+ push edx
+ mov Flags,eax
+ mov edx,_VdmFixedStateLinear ; get pointer to Teb
+ and eax,EFLAGS_INTERRUPT_MASK ; isolate int flag
+ MPLOCK and [edx],NOT VDM_VIRTUAL_INTERRUPTS
+ MPLOCK or [edx],eax ; place virtual int flag value
+ test _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS OR PM_VIRTUAL_INT_EXTENSIONS
+ jnz svb40
+svb20:
+ ; WARNING 32 bit support!
+ test ebx,PREFIX_OPER32
+ jz svb30 ; 16 bit instr
+
+ mov eax,Flags
+ and eax,EFLAGS_ALIGN_CHECK
+ MPLOCK and dword ptr [edx],NOT EFLAGS_ALIGN_CHECK
+ MPLOCK or [edx],eax
+svb30: pop edx
+ mov esp,ebp
+ pop ebp
+ ret
+
+svb40: test Flags,dword ptr EFLAGS_V86_MASK
+ jz svb60
+
+ test _KeI386VirtualIntExtensions,V86_VIRTUAL_INT_EXTENSIONS
+ jz svb20
+
+svb50: shl eax,0ah
+ jmp svb20
+
+svb60: test _KeI386VirtualIntExtensions,PM_VIRTUAL_INT_EXTENSIONS
+ jz svb20
+
+ jmp svb50
+SetVirtualBits endp
+
+
+ page ,132
+ subttl "Reflect Exception to a Vdm"
+;++
+;
+; Routine Description:
+;
+; This routine reflects an exception to a VDM. It uses the information
+; in the trap frame to determine what exception to reflect, and updates
+; the trap frame with the new CS, EIP, SS, and SP values
+;
+; Arguments:
+;
+; ebp -> Trap frame
+; ss:esp + 4 = trap number
+;
+; Returns
+;
+; EAX = 1 if the exception was reflected, 0 if the reflection failed
+;
+; Notes:
+; Interrupts are enabled upon entry; Irql is at APC level.
+; This routine may not preserve all of the non-volatile registers if
+; a fault occurs.
+;
+cPublicProc _Ki386VdmReflectException,1
+
+RI equ [ebp - REGINFOSIZE]
+
+ push ebp
+ mov ebp,esp
+ sub esp,REGINFOSIZE
+
+ pushad
+
+ mov esi,_VdmFixedStateLinear
+
+ cmp word ptr [ebp + 8],0dh
+ jne vre00
+
+ test dword ptr [esi],VDM_BREAK_EXCEPTIONS
+ jnz vrexcd ; reflect the exception to 32
+vre00:
+ test dword ptr [esi],VDM_BREAK_DEBUGGER
+ jz vre04 ; reflect to vdm
+
+ cmp word ptr [ebp + 8],1
+ je vrexc1
+
+ cmp word ptr [ebp + 8],3
+ je vrexc3
+vre04:
+ mov esi,[ebp]
+ cmp word ptr [esi].TsSegCs, KGDT_R3_CODE OR RPL_MASK ; int sim after fault?
+ je vre28
+if DEVL
+ cmp word ptr [ebp + 8],11
+ jne xyzzy1
+ inc _ExVdmSegmentNotPresent
+xyzzy1:
+endif
+
+ mov RI.RiTrapFrame,esi
+ mov eax,[esi].TsHardwareSegSs
+ mov RI.RiSegSs,eax
+ mov eax,[esi].TsHardwareEsp
+ mov RI.RiEsp,eax
+ mov eax,[esi].TsEFlags
+ mov RI.RiEFlags,eax
+ mov eax,[esi].TsEip
+ mov RI.RiEip,eax
+ mov eax,[esi].TsSegCs
+ mov RI.RiSegCs,eax
+ lea esi,RI
+ call CsToLinear ; uses eax as selector
+ test al,0FFh
+ jz vrerr
+
+ mov eax,[esi].RiSegSs
+ call SsToLinear
+ test al,0FFh
+ jz vrerr
+
+ mov ecx,[ebp + 8]
+ call PushException
+ test al,0FFh
+ jz vrerr
+
+ mov esi,RI.RiTrapFrame
+ mov eax,RI.RiEsp
+ mov [esi].TsHardwareEsp,eax
+ mov eax,RI.RiSegSs
+ mov [esi].TsHardwareSegSs,eax
+ mov eax,RI.RiEFlags
+ mov [esi].TsEFlags,eax
+ mov eax,RI.RiSegCs
+ mov [esi].TsSegCs,eax
+ mov eax,RI.RiEip
+ mov [esi].TsEip,eax
+ cmp word ptr [ebp + 8],1
+ jne vre28
+ and dword ptr [esi].TsEFlags, NOT EFLAGS_TF_MASK
+
+vre28:
+ popad
+ mov eax,1 ; handled
+
+vre30:
+ mov esp,ebp
+ pop ebp
+ stdRET _Ki386VdmReflectException
+
+vrerr:
+ popad
+ xor eax,eax
+ jmp vre30
+
+vrexc1:
+ mov eax, [ebp]
+ and dword ptr [eax]+TsEflags, not EFLAGS_TF_MASK
+ mov eax, [eax]+TsEip ; (eax)-> faulting instruction
+ stdCall _VdmDispatchException <[ebp],STATUS_SINGLE_STEP,eax,0,0,0,0>
+ jmp short vre28
+
+vrexc3:
+ mov eax,BREAKPOINT_BREAK
+ mov ebx, [ebp]
+ mov ebx, [ebx]+TsEip
+ dec ebx ; (eax)-> int3 instruction
+ stdCall _VdmDispatchException <[ebp],STATUS_BREAKPOINT,ebx,3,eax,ecx,edx>
+ jmp short vre28
+
+vrexcd:
+ mov eax, [ebp]
+ mov eax, [eax]+TsEip
+ stdCall _VdmDispatchException <[ebp],STATUS_ACCESS_VIOLATION,eax,2,0,-1,0>
+ jmp short vre28
+
+stdENDP _Ki386VdmReflectException
+
+
+ page ,132
+ subttl "Reflect Segment Not Present Exception to a Vdm"
+;++
+;
+; Routine Description:
+;
+; This routine reflects a trap 0B (segment not present) to a VDM. It
+; uses the information in the trap frame to determine what exception
+; to reflect, and updates the trap frame with the new CS, EIP, SS,
+; and SP values.
+;
+; Arguments:
+;
+; ebp -> Trap frame
+;
+; Returns
+;
+; 0 is returned if the reflection fails.
+;
+
+cPublicProc _Ki386VdmSegmentNotPresent,0
+
+ mov edi,PCR[PcTeb]
+ mov ecx,VDM_FAULT_HANDLER_SIZE * 0Bh
+ mov edi,[edi].TbVdm
+ xor ebx, ebx
+ lea esi,[edi].VtPmStackInfo ; (edi)->dpmi stack struct
+ lea edi,[edi+ecx].VtFaultHandlers ; (edi)->FaultHandler
+ cmp word ptr [esi].VpLockCount, 0 ; switching stacks?
+ jz short @f ; yes, we can handle it
+ ; no, let normal code check
+ ; for stack faults
+
+ pop eax ; (eax) = return addr
+ push 0bh
+ push eax
+ jmp _Ki386VdmReflectException
+
+@@:
+if DEVL
+ inc _ExVdmSegmentNotPresent
+endif
+ inc word ptr [esi].VpLockCount
+
+; save stuff just like SwitchToHandlerStack does
+ mov eax, [ebp].TsEip
+ mov [esi].VpSaveEip, eax
+ mov eax, [ebp].TsHardwareEsp
+ mov [esi].VpSaveEsp, eax
+ mov ax, [ebp].TsHardwareSegSs
+ mov [esi].VpSaveSsSelector, ax
+
+ mov bx,word ptr [esi].VpSsSelector
+ mov eax, PCR[PcGdt]
+ lea eax, [eax]+KGDT_LDT
+ mov ch, [eax+KgdtBaseHi]
+ mov cl, [eax+KgdtBaseMid]
+ shl ecx, 16
+ and ebx, 0fffffff8h
+ mov cx, [eax+KgdtBaseLow]
+ lea eax, [ecx+ebx]
+ mov bh, [eax+KgdtBaseHi]
+ mov bl, [eax+KgdtBaseMid]
+ shl ebx, 16
+ mov bx, [eax+KgdtBaseLow] ; (ebx) = Base of SS
+
+ mov eax, [ebp].TsEFlags
+ call GetVirtualBits ; (eax) = app's eflags
+ push esi
+ mov edx, 0fe0h ; dpmistack offset (per win31)
+ test word ptr [esi].VpFlags, 1 ; 32-bit frame?
+ jz short @f
+ sub edx, 8 * 4
+ add edx, ebx
+ mov esi, [ebp].TsHardwareEsp
+ mov ecx, [ebp].TsHardwareSegSs
+ mov [edx + 20], eax ; push flags
+ mov [edx + 24], esi ; put esp on new stack
+ mov [edx + 28], ecx ; put ss on new stack
+ mov ecx, [ebp].TsSegCs
+ mov eax, [ebp].TsEip
+ mov esi, [ebp].TsErrCode
+ mov [edx + 16], ecx ; push cs
+ mov [edx + 12], eax ; push ip
+ mov [edx], esi ; push error code
+ pop esi
+ mov ecx, [esi].VpDosxFaultIretD
+ mov eax, ecx
+ shr eax, 16
+ and ecx, 0ffffh
+ mov [edx + 4], eax ; push fault iret seg
+ mov [edx], ecx ; push fault iret offset
+ jmp short vsnp_update
+@@:
+ sub edx, 8 * 2
+ add edx, ebx
+ mov esi, [ebp].TsHardwareEsp
+ mov ecx, [ebp].TsHardwareSegSs
+ mov [edx + 10], ax ; push flags
+ mov [edx + 12], si ; put esp on new stack
+ mov [edx + 14], cx ; put ss on new stack
+ mov ecx, [ebp].TsSegCs
+ mov eax, [ebp].TsEip
+ mov esi, [ebp].TsErrCode
+ mov [edx + 8], cx ; push cs
+ mov [edx + 6], ax ; push ip
+ mov [edx + 4], si ; push error code
+ pop esi
+ mov ecx, [esi].VpDosxFaultIret
+ mov eax, ecx
+ shr eax, 16
+ mov [edx + 2], ax ; push fault iret seg
+ mov [edx], cx ; push fault iret offset
+
+vsnp_update:
+ mov eax,[edi].VfEip
+ sub edx, ebx
+ mov cx, word ptr [edi].VfCsSelector
+ mov bx, word ptr [esi].VpSsSelector
+ test dword ptr [edi].VfFlags, VDM_INT_INT_GATE
+ jz short @f
+
+ mov esi,_VdmFixedStateLinear ; get pointer to Teb
+ MPLOCK and [esi],NOT VDM_VIRTUAL_INTERRUPTS
+ and dword ptr [ebp].TsEflags, 0FFF7FFFFH ; clear VIF
+@@:
+ mov [ebp].TsSegCs, cx
+ mov [ebp].TsEip, eax
+ mov [ebp].TsHardwareEsp,edx
+ mov [ebp].TsHardwareSegSs,bx
+
+ mov eax, 1
+ stdRET _Ki386VdmSegmentNotPresent
+
+stdENDP _Ki386VdmSegmentNotPresent
+
+ page ,132
+ subttl "Dispatch UserMode Exception to a Vdm"
+;++
+;
+; Routine Description:
+;
+; Dispatches exception for vdm in from the kernel, by invoking
+; CommonDispatchException.
+;
+; Arguments: See CommonDispatchException for parameter description
+;
+; VOID
+; VdmDispatchException(
+; PKTRAP_FRAME TrapFrame,
+; NTSTATUS ExcepCode,
+; PVOID ExcepAddr,
+; ULONG NumParms,
+; ULONG Parm1,
+; ULONG Parm2,
+; ULONG Parm3
+; )
+;
+; Returns
+;
+; Nothing
+;
+; Notes:
+;
+; This routine may not preserve all of the non-volatile registers if
+; a fault occurs.
+;
+cPublicProc _VdmDispatchException,7
+
+TrapFrame equ [ebp+8]
+ExcepCode equ [ebp+12]
+ExcepAddr equ [ebp+16]
+NumParms equ [ebp+20]
+Parm1 equ [ebp+24]
+Parm2 equ [ebp+28]
+Parm3 equ [ebp+32]
+
+ push ebp
+ mov ebp,esp
+ pushad
+
+ xor ecx, ecx ; lower irql to 0
+ fstCall KfLowerIrql ; allow apc's and debuggers in!
+
+ mov eax, ExcepCode
+ mov ebx, ExcepAddr
+ mov ecx, NumParms
+ mov edx, Parm1
+ mov esi, Parm2
+ mov edi, Parm3
+ mov ebp, TrapFrame
+ call CommonDispatchException
+
+ popad
+ stdRET _VdmDispatchException
+
+stdENDP _VdmDispatchException
+
+
+
+
+ page ,132
+ subttl "Push Interrupt frame on user stack"
+;++
+;
+; Routine Description:
+;
+; This routine pushes an interrupt frame on the user stack
+;
+; Arguments:
+;
+; ecx = interrupt #
+; esi = address of reg info
+; Returns:
+;
+; interrupt frame pushed on stack
+; reg info updated
+;
+ public PushInt
+PushInt proc
+
+ push ebx
+ push edi
+
+pi100:
+;
+; Handle dispatching interrupts directly to the handler, rather than
+; to the dos extender
+;
+ ;
+ ; Get the information on the interrupt handler
+ ;
+ .errnz (VDM_INTERRUPT_HANDLER_SIZE - 8)
+ mov eax,PCR[PcTeb]
+ mov eax,[eax].TbVdm
+ lea eax,[eax].VtInterruptHandlers[ecx*8]
+
+ ;
+ ; Get SP
+ ;
+ mov edi,[ebp].TsHardwareEsp
+ test [esi].RiSsFlags,SEL_TYPE_BIG
+ jnz pi110
+
+ movzx edi,di ; zero high bits for 64k stack
+
+ ;
+ ; Update SP
+ ;
+pi110: test [eax].ViFlags,dword ptr VDM_INT_32
+ jz pi120
+
+ ;
+ ; 32 bit iret frame
+ ;
+ cmp edi,12 ; enough space on stack?
+ jb pierr ; no, go fault
+
+ sub edi,12
+ mov [esi].RiEsp,edi
+ jmp pi130
+
+ ;
+ ; 16 bit iret frame
+ ;
+pi120: cmp edi,6 ; enough space on stack?
+ jb pierr ; no, go fault
+
+ sub edi,6
+ mov [esi].RiEsp,edi
+
+ ;
+ ; Check limit
+ ;
+pi130: test [esi].RiSsFlags,SEL_TYPE_ED
+ jz pi140
+
+ ;
+ ; Expand down, Sp must be above limit
+ ;
+ cmp edi,[esi].RiSsLimit
+ jna pierr
+ jmp pi150
+
+ ;
+ ; Normal, Sp must be below limit
+ ;
+pi140: cmp edi,[esi].RiSsLimit
+ jnb pierr
+
+ ;
+ ; Get base of ss
+ ;
+pi150: mov ebx,[esi].RiSsBase
+ test [eax].ViFlags,dword ptr VDM_INT_32
+ jz pi160
+
+ ;
+ ; "push" 32 bit iret frame
+ ;
+ mov edx,[esi].RiEip
+ mov [edi + ebx],edx
+ mov dx,word ptr [ebp].TsSegCs
+ mov [edi + ebx] + 4,edx
+ push eax
+ mov eax,[esi].RiEFlags
+ call GetVirtualBits
+
+ mov [edi + ebx] + 8,eax
+ pop eax
+ jmp pi170
+
+ ;
+ ; push 16 bit iret frame
+ ;
+pi160: mov dx,word ptr [esi].RiEip
+ mov [edi + ebx],dx
+ mov dx,word ptr [ebp].TsSegCs
+ mov [edi + ebx] + 2,dx
+ push eax
+ mov eax,[esi].RiEFlags
+ call GetVirtualBits
+
+ mov [edi + ebx] + 4,ax
+ pop eax
+
+ ;
+ ; Update CS and IP
+ ;
+pi170: mov ebx,eax ; save int info
+ mov dx,[eax].ViCsSelector
+ mov word ptr [esi].RiSegCs,dx
+ mov edx,[eax].ViEip
+ mov [esi].RiEip,edx
+
+ movzx eax, word ptr [esi].RiSegCs
+ call CsToLinear ; uses eax as selector
+
+ test al,0ffh
+ jnz pi175
+
+ ;
+ ; Check for destination not present
+ ;
+ test [esi].RiCsFlags,SEL_TYPE_NP
+ jz pierr
+
+ mov al,0ffh ; succeeded
+ jmp pi180
+
+ ;
+ ; Check handler address
+ ;
+pi175: mov edx,[esi].RiEip
+ cmp edx,[esi].RiCsLimit
+ jnb pierr
+
+ ;
+ ; Turn off the trap flag
+ ;
+pi180: and [esi].RiEFlags,NOT EFLAGS_TF_MASK
+
+ ;
+ ; Turn off virtual interrupts if necessary
+ ;
+ test [ebx].ViFlags,dword ptr VDM_INT_INT_GATE
+ ; n.b. We know al is non-zero, because we succeeded in cstolinear
+ jz pi80
+
+ test _KeI386VirtualIntExtensions,PM_VIRTUAL_INT_EXTENSIONS
+ jz pi75
+
+ and [esi].RiEFlags, NOT (EFLAGS_VIF)
+
+pi75: mov ebx,_VdmFixedStateLinear
+ MPLOCK and [ebx], NOT EFLAGS_INTERRUPT_MASK
+
+pi80: and [esi].RiEFlags,NOT (EFLAGS_IOPL_MASK OR EFLAGS_NT_MASK OR EFLAGS_V86_MASK)
+ or [esi].RiEFlags,EFLAGS_INTERRUPT_MASK
+
+pi90: pop edi
+ pop ebx
+ ret
+
+pierr: xor eax,eax
+ jmp pi90
+
+PushInt endp
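+
+; The two frame shapes built above correspond to the following layouts
+; (a C sketch only; the field names are illustrative and the code above
+; writes the words/dwords by hand at SsBase + new SP, lowest field first):
+;
+;     #pragma pack(1)
+;     typedef struct { USHORT Ip; USHORT Cs; USHORT Flags; } IRET_FRAME16;  // 6 bytes
+;     typedef struct { ULONG Eip; ULONG Cs; ULONG EFlags; } IRET_FRAME32;   // 12 bytes
+;     #pragma pack()
+;
+; SP is decremented by sizeof(frame) first, and the saved flags come from
+; GetVirtualBits so that the app sees its virtual interrupt state.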
+
+ page ,132
+ subttl "Convert CS Segment or selector to linear address"
+;++
+;
+; Routine Description:
+;
+; Convert CS segment or selector to linear address as appropriate
+; for the current user-mode processor mode.
+;
+; Arguments:
+;
+; esi = reg info
+;
+; Returns:
+;
+; reg info updated
+;
+ public CsToLinear
+CsToLinear proc
+
+ test [esi].RiEFlags,EFLAGS_V86_MASK
+ jz ctl10
+
+;;; selector now passed in eax
+;;; movzx eax,word ptr [esi].RiSegCs
+ shl eax,4
+ mov [esi].RiCsBase,eax
+ mov [esi].RiCsLimit,0FFFFh
+ mov [esi].RiCsFlags,0
+ mov eax,1
+ ret
+
+ifdef NOT_USED_ANYMORE
+ctl10: push edx ; WARNING volatile regs!!!
+ lea eax,[esi].RiCsLimit
+ push eax
+ lea eax,[esi].RiCsBase
+ push eax
+ lea eax,[esi].RiCsFlags
+ push eax
+ push [esi].RiSegCs
+
+endif
+
+ctl10:
+ push edx ; WARNING volatile regs!!!
+ lea edx,[esi].RiCsLimit
+ push edx
+ lea edx,[esi].RiCsBase
+ push edx
+ lea edx,[esi].RiCsFlags
+ push edx
+ push eax ; push selector
+
+IFDEF STD_CALL
+ call _Ki386GetSelectorParameters@16
+ELSE
+ call _Ki386GetSelectorParameters
+ add esp,10h
+ENDIF
+ pop edx
+
+ or al,al
+ jz ctlerr
+
+ test [esi].RiCsFlags,SEL_TYPE_EXECUTE
+ jz ctlerr
+
+ test [esi].RiCsFlags,SEL_TYPE_2GIG
+ jz ctl30
+
+ ; Correct limit value for granularity
+ shl [esi].RiCsLimit,12
+ or [esi].RiCsLimit,0FFFh
+ctl30:
+ mov eax,1
+ctl40: ret
+
+ctlerr: xor eax,eax
+ jmp ctl40
+
+CsToLinear endp
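+
+; In C terms the two cases above reduce to roughly the following sketch
+; (assuming the RegInfo fields shown; Ki386GetSelectorParameters fills in
+; Base/Limit/Flags for the protected-mode case, which must also be an
+; executable selector):
+;
+;     if (RiEFlags & EFLAGS_V86_MASK) {          // real/V86 mode segment
+;         RiCsBase  = Selector << 4;             // paragraph number * 16
+;         RiCsLimit = 0xFFFF;                    // 64K segment
+;     } else if (RiCsFlags & SEL_TYPE_2GIG) {    // page-granular descriptor
+;         RiCsLimit = (RiCsLimit << 12) | 0xFFF; // scale limit to bytes
+;     }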
+
+
+ page ,132
+ subttl "Verify that EIP is still valid"
+;++
+;
+; Routine Description:
+;
+; Verify that Eip is still valid and put it into the trap frame
+;
+; Arguments:
+;
+; esi = address of reg info
+;
+; Returns:
+;
+;
+ public CheckEip
+CheckEip proc
+ mov eax,[esi].RiEip
+ test [esi].RiEFlags,EFLAGS_V86_MASK
+ jz ce20
+
+ and eax,[esi].RiCsLimit
+ mov [esi].RiEip,eax
+ jmp ce40
+
+ce20: cmp eax,[esi].RiCsLimit
+ ja ceerr
+ce40: mov eax,1
+ce50: ret
+
+ceerr: xor eax,eax
+ jmp ce50
+
+CheckEip endp
+
+ page ,132
+ subttl "Convert Ss Segment or selector to linear address"
+;++
+;
+; Routine Description:
+;
+; Convert Ss segment or selector to linear address as appropriate
+; for the current user-mode processor mode.
+;
+; Arguments:
+;
+; eax = selector to convert
+; esi = address of reg info
+;
+; Returns:
+;
+; reg info updated
+;
+ public SsToLinear
+SsToLinear proc
+
+ test [esi].RiEFlags,EFLAGS_V86_MASK
+ jz stl10
+
+ shl eax,4
+ mov [esi].RiSsBase,eax
+ mov [esi].RiSsLimit,0FFFFh
+ mov [esi].RiSsFlags,0
+ mov eax,1
+ ret
+
+stl10: push ecx
+ lea ecx,[esi].RiSsLimit
+ push ecx
+ lea ecx,[esi].RiSsBase
+ push ecx
+ lea ecx,[esi].RiSsFlags
+ push ecx
+ push eax ;selector
+
+IFDEF STD_CALL
+ call _Ki386GetSelectorParameters@16
+ELSE
+ call _Ki386GetSelectorParameters
+ add esp,10h
+ENDIF
+ pop ecx
+
+ or al,al
+ jz stlerr
+
+ test [esi].RiSsFlags,SEL_TYPE_WRITE
+ jz stlerr
+
+ test [esi].RiSsFlags,SEL_TYPE_2GIG
+ jz stl30
+
+ ; Correct limit value for granularity
+
+ mov eax,[esi].RiSsLimit
+ shl eax,12
+ or eax,0FFFh
+ mov [esi].RiSsLimit,eax
+stl30:
+ mov eax,1
+stl40: ret
+
+stlerr: xor eax,eax
+ jmp stl40
+
+SsToLinear endp
+
+ page ,132
+ subttl "Verify that Esp is still valid"
+;++
+;
+; Routine Description:
+;
+; Verify that Esp is still valid
+;
+; Arguments:
+;
+; ecx = # of bytes needed for stack frame
+; esi = address of reg info
+;
+; Returns:
+;
+;
+;
+ public CheckEsp
+CheckEsp proc
+ mov eax,[esi].RiEsp
+ test [esi].RiEFlags,EFLAGS_V86_MASK
+ jz cs20
+
+ and eax,[esi].RiSsLimit
+ mov [esi].RiEsp,eax
+ jmp cs40
+
+cs20: test [esi].RiSsFlags,SEL_TYPE_BIG
+ jnz cs25
+
+ and eax,0FFFFh ; only use 16 bit for 16 bit
+cs25:
+ cmp ecx, eax ; StackOffset > SP?
+ ja cserr ; yes error
+ dec eax ; make limit checks work
+ test [esi].RiSsFlags,SEL_TYPE_ED ; Expand down?
+ jz cs30 ; jif no
+
+;
+; Expand Down
+;
+ sub eax, ecx ; New SP
+ cmp eax,[esi].RiSsLimit ; NewSp < Limit?
+ jb cserr
+ jmp cs40
+
+;
+; Not Expand Down
+;
+cs30: cmp eax,[esi].RiSsLimit
+ ja cserr
+
+cs40: mov eax,1
+cs50: ret
+
+
+cserr: xor eax,eax
+ jmp cs50
+
+CheckEsp endp
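+
+; As a rough C sketch of the check above (CheckEspSketch is a hypothetical
+; name; the fields are the RegInfo values used by the code):
+;
+;     BOOLEAN CheckEspSketch(ULONG Esp, ULONG SsLimit, ULONG SsFlags, ULONG Needed)
+;     {
+;         if ((SsFlags & SEL_TYPE_BIG) == 0) {
+;             Esp &= 0xFFFF;                     // 16-bit stack uses only SP
+;         }
+;         if (Needed > Esp) return FALSE;        // frame would wrap below zero
+;         Esp -= 1;                              // make the limit checks inclusive
+;         if (SsFlags & SEL_TYPE_ED) {
+;             return (Esp - Needed) >= SsLimit;  // expand-down: SP stays above limit
+;         }
+;         return Esp <= SsLimit;                 // normal: SP stays below limit
+;     }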
+
+ page ,132
+ subttl "Switch to protected mode interrupt stack"
+;++
+;
+; Routine Description:
+;
+; Switch to protected mode interrupt handler stack
+;
+; Arguments:
+;
+; ecx = interrupt number
+; esi = address of reg info
+; edi = address of PM Stack info
+;
+; Returns:
+;
+; reg info updated
+;
+ public SwitchToHandlerStack
+SwitchToHandlerStack proc
+
+
+ cmp word ptr [edi].VpLockCount, 0 ; already switched?
+ jnz short @f ; yes
+
+ mov eax, [esi].RiEip
+ mov [edi].VpSaveEip, eax
+ mov eax, [esi].RiEsp
+ mov [edi].VpSaveEsp, eax
+ mov eax, [esi].RiSegSs
+ mov [edi].VpSaveSsSelector, ax
+
+ movzx eax,word ptr [edi].VpSsSelector
+ mov [esi].RiSegSs,eax
+ mov dword ptr [esi].RiEsp,1000h ; dpmi stack offset
+
+ movzx eax, word ptr [esi].RiSegSs
+ push ecx
+ call SsToLinear ; compute new base
+ pop ecx
+ test al,0FFh
+ jz shserr
+@@:
+ inc word ptr [edi].VpLockCount ; maintain lock count
+ mov eax,1
+ ret
+
+shserr:
+ xor eax,eax
+ ret
+
+SwitchToHandlerStack endp
+
+
+ page ,132
+ subttl "Get protected mode interrupt handler address"
+;++
+;
+; Routine Description:
+;
+; Get the address of the interrupt handler for the specified interrupt
+;
+; Arguments:
+;
+; ecx = interrupt number
+; esi = address of reg info
+;
+; Returns:
+;
+; reg info updated
+;
+ public GetHandlerAddress
+GetHandlerAddress proc
+
+ push ecx
+ push edx
+ mov eax,VDM_FAULT_HANDLER_SIZE
+ mul ecx
+ mov edi,PCR[PcTeb]
+ mov edi,[edi].TbVdm
+ lea edi,[edi].VtFaultHandlers
+ movzx ecx,word ptr [edi + eax].VfCsSelector
+ mov [esi].RiSegCs,ecx
+ mov ecx,[edi + eax].VfEip
+ mov [esi].RiEip,ecx
+ pop edx
+ pop ecx
+ mov eax,1
+ ret
+GetHandlerAddress endp
+
+ page ,132
+ subttl "Push processor exception"
+;++
+;
+; Routine Description:
+;
+; Update the stack and registers to emulate the specified exception
+;
+; Arguments:
+;
+; ecx = interrupt number
+; esi = address of reg info
+;
+; Returns:
+;
+; reg info updated
+;
+ public PushException
+PushException Proc
+
+ push ebx
+ push edi
+
+ test [esi].RiEflags,EFLAGS_V86_MASK
+ jz pe40
+
+;
+; Push V86 mode exception
+;
+ cmp ecx, 7 ; device not available fault
+ ja peerr ; per win3.1, no exceptions
+ ; above 7 for v86 mode
+ mov edx,[esi].RiEsp
+ mov ebx,[esi].RiSsBase
+ and edx,0FFFFh ; only use a 16 bit sp
+ sub dx,2
+ mov eax,[esi].RiEFlags
+ push ecx
+ call GetVirtualBits
+ pop ecx
+ mov [ebx+edx],ax ; push flags
+ sub dx,2
+ mov ax,word ptr [esi].RiSegCs
+ mov [ebx+edx],ax ; push cs
+ sub dx,2
+ mov ax,word ptr [esi].RiEip
+ mov [ebx+edx],ax ; push ip
+ mov eax,[ecx*4] ; get new cs:ip value
+ push eax
+ movzx eax,ax
+ mov [esi].RiEip,eax
+ pop eax
+ shr eax,16
+ mov [esi].RiSegCs,eax
+ mov word ptr [esi].RiEsp,dx
+ jmp pe60
+
+;
+; Push PM exception
+;
+pe40:
+ push [esi].RiEsp ; save for stack frame
+ push [esi].RiSegSs
+
+ mov edi,PCR[PcTeb]
+ mov edi,[edi].TbVdm
+ lea edi,[edi].VtPmStackInfo
+ call SwitchToHandlerStack
+ test al,0FFh
+ jz peerr1 ; pop off stack and exit
+
+ sub [esi].RiEsp, 20h ; win31 undocumented feature
+
+ mov ebx,[esi].RiSsBase
+ mov edx,[esi].RiEsp
+ test [esi].RiSsFlags,SEL_TYPE_BIG
+ jnz short @f
+ movzx edx,dx ; zero high bits for 64k stack
+@@:
+
+ test word ptr [edi].VpFlags, 1 ; 32 bit app?
+ jnz short pe45 ; yes
+
+;
+; push 16-bit frame
+;
+ push ecx
+ mov ecx, 8*2 ; room for 8 words?
+ call CheckEsp
+ pop ecx
+ test al,0FFh
+ jz peerr1 ; pop off stack and exit
+
+ sub edx,8*2
+ mov [esi].RiEsp,edx
+
+ pop eax ; ss
+ mov [ebx+edx+14], ax
+ pop eax ; esp
+ mov [ebx+edx+12], ax
+
+ mov eax,[esi].RiEFlags
+ push ecx
+ call GetVirtualBits
+ pop ecx
+ mov [ebx+edx+10],ax ; push flags
+ movzx eax,word ptr [esi].RiSegCs
+ mov [ebx+edx+8],ax ; push cs
+ mov eax,[esi].RiEip
+ mov [ebx+edx+6],ax ; push ip
+ mov eax,RI.RiTrapFrame
+ mov eax,[eax].TsErrCode
+ mov [ebx+edx+4],ax ; push error code
+ mov eax,[edi].VpDosxFaultIret
+ mov [ebx+edx],eax ; push iret address
+ jmp pe50
+pe45:
+;
+; push 32-bit frame
+;
+ push ecx
+ mov ecx, 8*4 ; room for 8 dwords?
+ call CheckEsp
+ pop ecx
+ test al,0FFh
+ jz peerr1 ; pop off stack and exit
+
+ sub edx,8*4
+ mov [esi].RiEsp,edx
+
+ pop eax ; ss
+ mov [ebx+edx+28], eax
+ pop eax ; esp
+ mov [ebx+edx+24], eax
+
+ mov eax,[esi].RiEFlags
+ push ecx
+ call GetVirtualBits
+ pop ecx
+ mov [ebx+edx+20],eax ; push flags
+ movzx eax,word ptr [esi].RiSegCs
+ mov [ebx+edx+16],eax ; push cs
+ mov eax,[esi].RiEip
+ mov [ebx+edx+12],eax ; push ip
+ mov eax,RI.RiTrapFrame
+ mov eax,[eax].TsErrCode
+ mov [ebx+edx+8],eax ; push error code
+ mov eax,[edi].VpDosxFaultIretD
+ shr eax, 16
+ mov [ebx+edx+4],eax ; push iret seg
+ mov eax,[edi].VpDosxFaultIretD
+ and eax, 0ffffh
+ mov [ebx+edx],eax ; push iret offset
+pe50:
+ call GetHandlerAddress
+ test al,0FFh
+ jz peerr
+
+pe60: push ecx
+ movzx eax,word ptr [esi].RiSegCs
+ call CsToLinear ; uses eax as selector
+ pop ecx
+ test al,0FFh
+ jz peerr
+
+ mov eax,[esi].RiEip
+ cmp eax,[esi].RiCsLimit
+ ja peerr
+
+ mov eax,VDM_FAULT_HANDLER_SIZE
+ push edx
+ mul ecx
+ pop edx
+ mov edi,PCR[PcTeb]
+ mov edi,[edi].TbVdm
+ lea edi,[edi].VtFaultHandlers
+ add edi,eax
+ mov eax,[esi].RiEFlags ;WARNING 16 vs 32
+ test dword ptr [edi].VfFlags,VDM_INT_INT_GATE
+ jz pe70
+
+ and eax,NOT (EFLAGS_INTERRUPT_MASK OR EFLAGS_TF_MASK)
+ push eax
+ xor ebx, ebx ; clear prefix flags
+ call SetVirtualBits
+ pop eax
+pe70: push ecx
+ mov ecx,eax
+ call CheckVdmFlags
+ and ecx,NOT EFLAGS_TF_MASK
+ mov [esi].RiEFlags,ecx
+ pop ecx
+ mov eax,1 ; success
+pe80: pop edi
+ pop ebx
+ ret
+
+peerr1: add esp, 8 ;throw away esp, ss
+peerr: xor eax,eax
+ jmp pe80
+
+PushException endp
+
+
+ifdef VDMDBG
+ public TraceOpcode
+TraceOpcode Proc
+
+ push eax
+ push edx
+ mov eax, TracePointer
+
+ mov edx,dword ptr [esp+12] ;pushed code
+ mov dword ptr [eax],edx
+ mov edx,dword ptr [esp+8] ;ret addr
+ mov dword ptr [eax+4],edx
+ mov edx,-1
+ mov dword ptr [eax+8],edx
+ mov dword ptr [eax+12],edx
+
+ mov edx,[ebp].TsEax
+ mov dword ptr [eax+16],edx
+ mov edx,[ebp].TsEbx
+ mov dword ptr [eax+20],edx
+ mov edx,[ebp].TsEcx
+ mov dword ptr [eax+24],edx
+ mov edx,[ebp].TsEdx
+ mov dword ptr [eax+28],edx
+
+ mov edx,[ebp].TsEip
+ mov dword ptr [eax+36],edx
+ mov edx,[ebp].TsHardwareEsp
+ mov dword ptr [eax+32],edx
+ mov edx,[ebp].TsEdi
+ mov dword ptr [eax+40],edx
+ mov edx,[ebp].TsEsi
+ mov dword ptr [eax+44],edx
+
+ mov edx,[ebp].TsHardwareSegSs
+ mov dword ptr [eax+48],edx
+ mov edx,[ebp].TsSegCs
+ mov dword ptr [eax+52],edx
+ mov edx,[ebp].TsSegDs
+ mov dword ptr [eax+56],edx
+ mov edx,[ebp].TsSegEs
+ mov dword ptr [eax+60],edx
+
+ add eax, TRACE_ENTRY_SIZE*4
+ cmp eax, Offset OpcodeTrace + NUM_TRACE_ENTRIES*TRACE_ENTRY_SIZE*4
+ jb @f
+ mov eax, Offset OpcodeTrace
+@@:
+ mov TracePointer, eax
+
+ pop edx
+ pop eax
+ ret 4
+TraceOpcode endp
+endif
+
+_PAGE ends
+
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:FLAT, FS:NOTHING, GS:NOTHING
+
+;
+; Non-pagable code
+;
+
+ page ,132
+ subttl "Ipi worker for enabling Pentium extensions"
+;++
+;
+; Routine Description:
+;
+; This routine sets or resets the VME bit in CR4 for each processor
+;
+; Arguments:
+;
+; [esp + 4] -- 1 if VME is to be set, 0 if it is to be reset
+; Returns:
+;
+; 0
+;
+cPublicProc _Ki386VdmEnablePentiumExtentions, 1
+
+Enable equ [ebp + 8]
+ push ebp
+ mov ebp,esp
+;
+; Ensure we do not get an interrupt in here. We may
+; be called at IPI_LEVEL - 1 by KiIpiGenericCall.
+;
+ pushf
+ cli
+
+; mov eax,cr4
+ db 0fh, 020h,0e0h
+
+ test Enable,1
+ je vepe20
+
+ or eax,CR4_VME
+ jmp vepe30
+
+vepe20: and eax,NOT CR4_VME
+
+; mov cr4,eax
+vepe30: db 0fh,022h,0e0h
+
+ popf
+ xor eax,eax
+
+ mov esp,ebp
+ pop ebp
+ stdRET _Ki386VdmEnablePentiumExtentions
+stdENDP _Ki386VdmEnablePentiumExtentions
+
+_TEXT$00 ends
+ end
diff --git a/private/ntos/ke/i386/int.asm b/private/ntos/ke/i386/int.asm
new file mode 100644
index 000000000..d4bf4fa8e
--- /dev/null
+++ b/private/ntos/ke/i386/int.asm
@@ -0,0 +1,132 @@
+ title "Trap Processing"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; int.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to field and process i386
+; interrupts.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 8-Jan-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+.386p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include callconv.inc
+ .list
+
+;
+; Interrupt flag bit mask and shift for EFLAGS
+;
+
+EFLAGS_IF equ 200H
+EFLAGS_SHIFT equ 9
+
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:FLAT, FS:NOTHING, GS:NOTHING
+
+; NOTE This routine is never actually called on standard x86 hardware,
+; because passive level doesn't actually exist. It's here to
+; fill out the portable skeleton.
+;
+; The following code is called when a passive release occurs and there is
+; no interrupt to process.
+;
+
+cPublicProc _KiPassiveRelease ,0
+ stdRET _KiPassiveRelease ; cReturn
+stdENDP _KiPassiveRelease
+
+
+ page ,132
+ subttl "Disable Processor Interrupts"
+;++
+;
+; BOOLEAN
+; KiDisableInterrupts(
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine disables interrupts at the processor level. It does not
+; edit the PICs or adjust IRQL; it is for use in the debugger only.
+;
+; Arguments:
+;
+; None
+;
+; Return Value:
+;
+; (eax) = !0 if interrupts were on, 0 if they were off
+;
+;--
+cPublicProc _KiDisableInterrupts ,0
+cPublicFpo 0, 0
+ pushfd
+ pop eax
+ and eax,EFLAGS_IF ; (eax) = the interrupt bit
+ shr eax,EFLAGS_SHIFT ; low bit of (eax) == interrupt bit
+ cli
+ stdRET _KiDisableInterrupts
+
+stdENDP _KiDisableInterrupts
+
+
+ page ,132
+ subttl "Restore Processor Interrupts"
+;++
+;
+; VOID
+; KiRestoreInterrupts(
+; BOOLEAN Restore
+; )
+;
+; Routine Description:
+;
+; This routine restores interrupts at the processor level. It does not
+; edit the PICs or adjust IRQL; it is for use in the debugger only.
+;
+; Arguments:
+;
+; Restore (esp+4) - a "boolean" returned by KiDisableInterrupts, if
+; !0 interrupts will be turned on, else left off.
+;
+; NOTE: We don't actually test the boolean as such, we just or
+; it directly into the flags!
+;
+; Return Value:
+;
+; none.
+;
+;--
+cPublicProc _KiRestoreInterrupts ,1
+cPublicFpo 1, 0
+ xor eax, eax
+ mov al, byte ptr [esp]+4
+ shl eax,EFLAGS_SHIFT ; (eax) == the interrupt bit
+ pushfd
+ or [esp],eax ; or EI into flags
+ popfd
+ stdRET _KiRestoreInterrupts
+
+stdENDP _KiRestoreInterrupts
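+
+; Taken together the pair behaves like this C sketch (the intrinsics stand
+; in for the pushfd/cli/popfd sequences above; names ending in Sketch are
+; illustrative, not kernel exports):
+;
+;     BOOLEAN KiDisableInterruptsSketch(VOID) {
+;         ULONG Flags = __readeflags();
+;         _disable();                                   // cli
+;         return (Flags & EFLAGS_IF) >> EFLAGS_SHIFT;   // 1 if IF was set
+;     }
+;
+;     VOID KiRestoreInterruptsSketch(BOOLEAN Enable) {
+;         __writeeflags(__readeflags() | ((ULONG)Enable << EFLAGS_SHIFT));
+;     }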
+
+_TEXT ends
+ end
diff --git a/private/ntos/ke/i386/intobj.c b/private/ntos/ke/i386/intobj.c
new file mode 100644
index 000000000..f4da6ca33
--- /dev/null
+++ b/private/ntos/ke/i386/intobj.c
@@ -0,0 +1,767 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ intobj.c
+
+Abstract:
+
+ This module implements the kernel interrupt object. Functions are provided
+ to initialize, connect, and disconnect interrupt objects.
+
+Author:
+
+ David N. Cutler (davec) 30-Jul-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ 23-Jan-1990 shielint
+
+ Modified for NT386 interrupt manager
+
+--*/
+
+#include "ki.h"
+
+//
+// Externs from trap.asm used to compute and set handlers for unexpected
+// hardware interrupts.
+//
+
+extern ULONG KiStartUnexpectedRange(VOID);
+extern ULONG KiEndUnexpectedRange(VOID);
+extern ULONG KiUnexpectedEntrySize;
+
+
+VOID
+KiInterruptDispatch2ndLvl(
+ VOID
+ );
+
+
+VOID
+KiChainedDispatch2ndLvl(
+ VOID
+ );
+
+
+typedef enum {
+ NoConnect,
+ NormalConnect,
+ ChainConnect,
+ UnkownConnect
+} CONNECT_TYPE, *PCONNECT_TYPE;
+
+typedef struct {
+ CONNECT_TYPE Type;
+ PKINTERRUPT Interrupt;
+ PKINTERRUPT_ROUTINE NoDispatch;
+ PKINTERRUPT_ROUTINE InterruptDispatch;
+ PKINTERRUPT_ROUTINE FloatingDispatch;
+ PKINTERRUPT_ROUTINE ChainedDispatch;
+ PKINTERRUPT_ROUTINE *FlatDispatch;
+} DISPATCH_INFO, *PDISPATCH_INFO;
+
+
+VOID
+KiGetVectorInfo (
+ IN ULONG Vector,
+ OUT PDISPATCH_INFO DispatchInfo
+ );
+
+VOID
+KiConnectVectorAndInterruptObject (
+ IN PKINTERRUPT Interrupt,
+ IN CONNECT_TYPE Type
+ );
+
+
+VOID
+KeInitializeInterrupt (
+ IN PKINTERRUPT Interrupt,
+ IN PKSERVICE_ROUTINE ServiceRoutine,
+ IN PVOID ServiceContext,
+ IN PKSPIN_LOCK SpinLock OPTIONAL,
+ IN ULONG Vector,
+ IN KIRQL Irql,
+ IN KIRQL SynchronizeIrql,
+ IN KINTERRUPT_MODE InterruptMode,
+ IN BOOLEAN ShareVector,
+ IN CCHAR ProcessorNumber,
+ IN BOOLEAN FloatingSave
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel interrupt object. The service routine,
+ service context, spin lock, vector, IRQL, SynchronizeIrql, and floating
+ context save flag are initialized.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+ ServiceRoutine - Supplies a pointer to a function that is to be
+ executed when an interrupt occurs via the specified interrupt
+ vector.
+
+ ServiceContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the ServiceRoutine parameter.
+
+ SpinLock - Supplies a pointer to an executive spin lock.
+
+ Vector - Supplies the index of the entry in the Interrupt Dispatch Table
+ that is to be associated with the ServiceRoutine function.
+
+ Irql - Supplies the request priority of the interrupting source.
+
+ SynchronizeIrql - The request priority that the interrupt should be
+ synchronized with.
+
+ InterruptMode - Supplies the mode of the interrupt; LevelSensitive or
+ Latched.
+
+ ShareVector - Supplies a boolean value that specifies whether the
+ vector can be shared with other interrupt objects or not. If FALSE
+ then the vector may not be shared, if TRUE it may be.
+
+ ProcessorNumber - Supplies the number of the processor to which the
+ interrupt will be connected.
+
+ FloatingSave - Supplies a boolean value that determines whether the
+ floating point registers and pipe line are to be saved before calling
+ the ServiceRoutine function.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Index;
+ PULONG pl;
+ PULONG NormalDispatchCode;
+
+ //
+ // Initialize standard control object header.
+ //
+
+ Interrupt->Type = InterruptObject;
+ Interrupt->Size = sizeof(KINTERRUPT);
+
+ //
+ // Initialize the address of the service routine,
+ // the service context, the address of the spin lock, the vector
+ // number, the IRQL of the interrupting source, the Irql used for
+ // synchronize execution, the interrupt mode, the processor
+ // number, and the floating context save flag.
+ //
+
+ Interrupt->ServiceRoutine = ServiceRoutine;
+ Interrupt->ServiceContext = ServiceContext;
+
+ if (ARGUMENT_PRESENT(SpinLock)) {
+ Interrupt->ActualLock = SpinLock;
+ } else {
+ KeInitializeSpinLock (&Interrupt->SpinLock);
+ Interrupt->ActualLock = &Interrupt->SpinLock;
+ }
+
+ Interrupt->Vector = Vector;
+ Interrupt->Irql = Irql;
+ Interrupt->SynchronizeIrql = SynchronizeIrql;
+ Interrupt->Mode = InterruptMode;
+ Interrupt->ShareVector = ShareVector;
+ Interrupt->Number = ProcessorNumber;
+ Interrupt->FloatingSave = FloatingSave;
+
+ //
+ // Copy the interrupt dispatch code template into the interrupt object
+ // and edit the machine code stored in the structure (please see
+ // _KiInterruptTemplate in intsup.asm.) Finally, flush the dcache
+ // on all processors that the current thread can
+ // run on to ensure that the code is actually in memory.
+ //
+
+ NormalDispatchCode = &(Interrupt->DispatchCode[0]);
+
+ pl = NormalDispatchCode;
+
+ for (Index = 0; Index < NORMAL_DISPATCH_LENGTH; Index += 1) {
+ *NormalDispatchCode++ = KiInterruptTemplate[Index];
+ }
+
+ //
+ // The following statements store the address of the current interrupt
+ // object into the NORMAL dispatching code.
+ //
+
+ pl = (PULONG)((PUCHAR)pl + ((PUCHAR)&KiInterruptTemplateObject -
+ (PUCHAR)KiInterruptTemplate));
+ *pl = (ULONG)Interrupt;
+
+ KeSweepDcache(FALSE);
+
+ //
+ // Set the connected state of the interrupt object to FALSE.
+ //
+
+ Interrupt->Connected = FALSE;
+ return;
+}
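+
+//
+// The copy-and-patch above can be stated compactly as the following sketch
+// (illustrative only; the offset is the distance of the patch label inside
+// the template, exactly as computed by the code):
+//
+//     ULONG ObjectOffset = (ULONG)((PUCHAR)&KiInterruptTemplateObject -
+//                                  (PUCHAR)KiInterruptTemplate);
+//
+//     RtlCopyMemory(Interrupt->DispatchCode, KiInterruptTemplate,
+//                   NORMAL_DISPATCH_LENGTH * sizeof(ULONG));
+//
+//     *(PULONG)((PUCHAR)Interrupt->DispatchCode + ObjectOffset) = (ULONG)Interrupt;
+//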
+
+BOOLEAN
+KeConnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function connects an interrupt object to the interrupt vector
+ specified by the interrupt object. If the interrupt object is already
+ connected, or an attempt is made to connect to an interrupt that cannot
+ be connected, then a value of FALSE is returned. Else the specified
+ interrupt object is connected to the interrupt vector, the connected
+ state is set to TRUE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is already connected or an attempt is made to
+ connect to an interrupt vector that cannot be connected, then a value
+ of FALSE is returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+ DISPATCH_INFO DispatchInfo;
+ BOOLEAN Connected;
+ BOOLEAN ConnectError;
+ BOOLEAN Enabled;
+ KIRQL Irql;
+ CCHAR Number;
+ KIRQL OldIrql;
+ ULONG Vector;
+
+ //
+ // If the interrupt object is already connected, the interrupt vector
+ // number is invalid, an attempt is being made to connect to a vector
+ // that cannot be connected, the interrupt request level is invalid, or
+ // the processor number is invalid, then do not connect the interrupt
+ // object. Else connect interrupt object to the specified vector and
+ // establish the proper interrupt dispatcher.
+ //
+
+ Connected = FALSE;
+ ConnectError = FALSE;
+ Irql = Interrupt->Irql;
+ Number = Interrupt->Number;
+ Vector = Interrupt->Vector;
+ if ( !((Irql > HIGH_LEVEL) ||
+ (Number >= KeNumberProcessors) ||
+ (Interrupt->SynchronizeIrql < Irql) ||
+ (Interrupt->FloatingSave) // R0 x87 usage not supported on x86
+ )
+ ) {
+
+ //
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1<<Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Is interrupt object already connected?
+ //
+
+ if (!Interrupt->Connected) {
+
+ //
+ // Determine interrupt dispatch vector
+ //
+
+ KiGetVectorInfo (
+ Vector,
+ &DispatchInfo
+ );
+
+ //
+ // If dispatch vector is not connected, then connect it
+ //
+
+ if (DispatchInfo.Type == NoConnect) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+
+ //
+ // Connect interrupt dispatch to interrupt object dispatch code
+ //
+
+ InitializeListHead(&Interrupt->InterruptListEntry);
+ KiConnectVectorAndInterruptObject (Interrupt, NormalConnect);
+
+ //
+ // Enable the system vector
+ //
+
+ Enabled = HalEnableSystemInterrupt(Vector, Irql, Interrupt->Mode);
+ if (!Enabled) {
+ ConnectError = TRUE;
+ }
+
+
+ } else if (DispatchInfo.Type != UnkownConnect &&
+ Interrupt->ShareVector &&
+ DispatchInfo.Interrupt->ShareVector &&
+ DispatchInfo.Interrupt->Mode == Interrupt->Mode) {
+
+ //
+ // Vector is already connected as sharable. New vector is sharable
+ // and modes match. Chain new vector.
+ //
+
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+
+ ASSERT (Irql <= SYNCH_LEVEL);
+
+ //
+ // If not already using chained dispatch handler, set it up
+ //
+
+ if (DispatchInfo.Type != ChainConnect) {
+ KiConnectVectorAndInterruptObject (DispatchInfo.Interrupt, ChainConnect);
+ }
+
+ //
+ // Add to tail of chained dispatch
+ //
+
+ InsertTailList(
+ &DispatchInfo.Interrupt->InterruptListEntry,
+ &Interrupt->InterruptListEntry
+ );
+
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+ }
+
+ if (Connected && ConnectError) {
+#if DBG
+ DbgPrint ("HalEnableSystemInterrupt failed\n");
+#endif
+ KeDisconnectInterrupt (Interrupt);
+ Connected = FALSE;
+ }
+
+ //
+ // Return whether interrupt was connected to the specified vector.
+ //
+
+ return Connected;
+}
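+
+//
+// A hypothetical caller of the two routines above (the vector, IRQL, ISR
+// and extension names are placeholders, not part of this module):
+//
+//     KeInitializeInterrupt(&MyInterrupt,            // caller-allocated KINTERRUPT
+//                           MyIsr, MyExtension,      // service routine + context
+//                           NULL,                    // use the built-in spin lock
+//                           Vector, Irql, Irql,      // vector, IRQL, synchronize IRQL
+//                           LevelSensitive, TRUE,    // mode, sharable
+//                           0, FALSE);               // processor 0, no FP save
+//
+//     if (!KeConnectInterrupt(&MyInterrupt)) {
+//         // vector invalid, or already owned with incompatible sharing/mode
+//     }
+//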
+
+BOOLEAN
+KeDisconnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function disconnects an interrupt object from the interrupt vector
+ specified by the interrupt object. If the interrupt object is not
+ connected, then a value of FALSE is returned. Else the specified interrupt
+ object is disconnected from the interrupt vector, the connected state is
+ set to FALSE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is not connected, then a value of FALSE is
+ returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ DISPATCH_INFO DispatchInfo;
+ BOOLEAN Connected;
+ PKINTERRUPT Interrupty;
+ KIRQL Irql;
+ KIRQL OldIrql;
+ ULONG Vector;
+
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1<<Interrupt->Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the interrupt object is connected, then disconnect it from the
+ // specified vector.
+ //
+
+ Connected = Interrupt->Connected;
+ if (Connected) {
+ Irql = Interrupt->Irql;
+ Vector = Interrupt->Vector;
+
+ //
+ // If the specified interrupt vector is not connected to the chained
+ // interrupt dispatcher, then disconnect it by setting its dispatch
+ // address to the unexpected interrupt routine. Else remove the
+ // interrupt object from the interrupt chain. If there is only
+ // one entry remaining in the list, then reestablish the dispatch
+ // address.
+ //
+
+ //
+ // Determine interrupt dispatch vector
+ //
+
+ KiGetVectorInfo (
+ Vector,
+ &DispatchInfo
+ );
+
+
+ //
+ // Is dispatch a chained handler?
+ //
+
+ if (DispatchInfo.Type == ChainConnect) {
+
+ ASSERT (Irql <= SYNCH_LEVEL);
+
+ //
+ // Is interrupt being removed from head?
+ //
+
+ if (Interrupt == DispatchInfo.Interrupt) {
+
+ //
+ // Update next interrupt object to be head
+ //
+
+ DispatchInfo.Interrupt = CONTAINING_RECORD(
+ DispatchInfo.Interrupt->InterruptListEntry.Flink,
+ KINTERRUPT,
+ InterruptListEntry
+ );
+
+ KiConnectVectorAndInterruptObject (DispatchInfo.Interrupt, ChainConnect);
+ }
+
+ //
+ // Remove interrupt object
+ //
+
+ RemoveEntryList(&Interrupt->InterruptListEntry);
+
+ //
+ // If there's only one interrupt object left on this vector,
+ // determine proper interrupt dispatcher
+ //
+
+ Interrupty = CONTAINING_RECORD(
+ DispatchInfo.Interrupt->InterruptListEntry.Flink,
+ KINTERRUPT,
+ InterruptListEntry
+ );
+
+ if (DispatchInfo.Interrupt == Interrupty) {
+ KiConnectVectorAndInterruptObject (Interrupty, NormalConnect);
+ }
+
+ } else {
+
+ //
+ // Removing last interrupt object from the vector. Disable the
+ // vector, and set it to unconnected
+ //
+
+ HalDisableSystemInterrupt(Interrupt->Vector, Irql);
+ KiConnectVectorAndInterruptObject (Interrupt, NoConnect);
+ }
+
+
+ KeSweepIcache(TRUE);
+ Interrupt->Connected = FALSE;
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+
+ //
+ // Return whether interrupt was disconnected from the specified vector.
+ //
+
+ return Connected;
+}
+
+VOID
+KiGetVectorInfo (
+ IN ULONG Vector,
+ OUT PDISPATCH_INFO DispatchInfo
+ )
+{
+ PKINTERRUPT_ROUTINE Dispatch;
+ ULONG CurrentDispatch;
+ ULONG DispatchType;
+
+ //
+ // Get second level dispatch point
+ //
+
+
+ DispatchType = HalSystemVectorDispatchEntry (
+ Vector,
+ &DispatchInfo->FlatDispatch,
+ &DispatchInfo->NoDispatch
+ );
+
+ //
+ // Get vector info
+ //
+
+ switch (DispatchType) {
+ case 0:
+ //
+ // Primary dispatch
+ //
+
+ DispatchInfo->NoDispatch = (PKINTERRUPT_ROUTINE) (((ULONG) &KiStartUnexpectedRange) +
+ (Vector - PRIMARY_VECTOR_BASE) * KiUnexpectedEntrySize);
+
+ DispatchInfo->InterruptDispatch = KiInterruptDispatch;
+ DispatchInfo->FloatingDispatch = KiFloatingDispatch;
+ DispatchInfo->ChainedDispatch = KiChainedDispatch;
+ DispatchInfo->FlatDispatch = NULL;
+
+ CurrentDispatch = (ULONG) KiReturnHandlerAddressFromIDT(Vector);
+ DispatchInfo->Interrupt = CONTAINING_RECORD (
+ CurrentDispatch,
+ KINTERRUPT,
+ DispatchCode
+ );
+ break;
+
+ case 1:
+ //
+ // Secondary dispatch.
+ //
+
+ DispatchInfo->InterruptDispatch = KiInterruptDispatch2ndLvl;
+ DispatchInfo->FloatingDispatch = KiInterruptDispatch2ndLvl;
+ DispatchInfo->ChainedDispatch = KiChainedDispatch2ndLvl;
+
+ CurrentDispatch = (ULONG) *DispatchInfo->FlatDispatch;
+ DispatchInfo->Interrupt = (PKINTERRUPT) ( (PUCHAR) CurrentDispatch -
+ (PUCHAR) KiInterruptTemplate +
+ (PUCHAR) &KiInterruptTemplate2ndDispatch
+ );
+ break;
+
+ default:
+ // Other values reserved
+ KeBugCheck (MISMATCHED_HAL);
+ }
+
+
+ //
+ // Determine dispatch type
+ //
+
+ if (((PKINTERRUPT_ROUTINE) CurrentDispatch) == DispatchInfo->NoDispatch) {
+
+ //
+ // Is connected to the NoDispatch function
+ //
+
+ DispatchInfo->Type = NoConnect;
+
+ } else {
+ Dispatch = DispatchInfo->Interrupt->DispatchAddress;
+
+ if (Dispatch == DispatchInfo->ChainedDispatch) {
+ //
+ // Is connected to the chained handler
+ //
+
+ DispatchInfo->Type = ChainConnect;
+
+ } else if (Dispatch == DispatchInfo->InterruptDispatch ||
+ Dispatch == DispatchInfo->FloatingDispatch) {
+ //
+ // Is connected to the non-chained handler
+ //
+
+ DispatchInfo->Type = NormalConnect;
+
+ } else {
+
+ //
+ // Unknown connection
+ //
+
+ DispatchInfo->Type = UnkownConnect;
+#if DBG
+ DbgPrint ("KiGetVectorInfo not understood\n");
+#endif
+ }
+ }
+}
+
+VOID
+KiConnectVectorAndInterruptObject (
+ IN PKINTERRUPT Interrupt,
+ IN CONNECT_TYPE Type
+ )
+{
+ PKINTERRUPT_ROUTINE DispatchAddress;
+ DISPATCH_INFO DispatchInfo;
+ PULONG pl;
+
+ //
+ // Get current connect info
+ //
+
+ KiGetVectorInfo (
+ Interrupt->Vector,
+ &DispatchInfo
+ );
+
+ //
+ // If disconnecting, set vector to NoDispatch
+ //
+
+ if (Type == NoConnect) {
+
+ DispatchAddress = DispatchInfo.NoDispatch;
+
+ } else {
+
+ //
+ // Set interrupt objects dispatch for new type
+ //
+
+ DispatchAddress = DispatchInfo.ChainedDispatch;
+
+ if (Type == NormalConnect) {
+ DispatchAddress = DispatchInfo.InterruptDispatch;
+ if (Interrupt->FloatingSave) {
+ DispatchAddress = DispatchInfo.FloatingDispatch;
+ }
+ }
+
+ Interrupt->DispatchAddress = DispatchAddress;
+
+ //
+ // Set interrupt objects dispatch code to kernel dispatcher
+ //
+
+ pl = &(Interrupt->DispatchCode[0]);
+ pl = (PULONG)((PUCHAR)pl +
+ ((PUCHAR)&KiInterruptTemplateDispatch -
+ (PUCHAR)KiInterruptTemplate));
+
+ *pl = (ULONG)DispatchAddress-(ULONG)((PUCHAR)pl+4);
+
+ //
+ // Set dispatch vector to proper address dispatch code location
+ //
+
+ if (DispatchInfo.FlatDispatch) {
+
+ //
+ // Connect to flat dispatch
+ //
+
+ DispatchAddress = (PKINTERRUPT_ROUTINE)
+ ((PUCHAR) &(Interrupt->DispatchCode[0]) +
+ ((PUCHAR) &KiInterruptTemplate2ndDispatch -
+ (PUCHAR) KiInterruptTemplate));
+
+ } else {
+
+ //
+ // Connect to enter_all dispatch
+ //
+
+ DispatchAddress = (PKINTERRUPT_ROUTINE) &Interrupt->DispatchCode;
+ }
+ }
+
+
+ if (DispatchInfo.FlatDispatch) {
+
+ //
+ // Connect to flat dispatch
+ //
+
+ *DispatchInfo.FlatDispatch = DispatchAddress;
+
+ } else {
+
+ //
+ // Connect to IDT
+ //
+
+ KiSetHandlerAddressToIDT (Interrupt->Vector, DispatchAddress);
+ }
+}
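+
+//
+// The displacement store above, *pl = DispatchAddress - (pl + 4), is the
+// standard rel32 patch for a direct near jmp (opcode 0E9h): the operand is
+// measured from the end of the 5-byte instruction. As a sketch, with a
+// hypothetical helper name:
+//
+//     VOID PatchRel32Jmp(PUCHAR JmpOpcode, PVOID Target)
+//     {
+//         // JmpOpcode[0] == 0xE9; the operand occupies bytes 1..4
+//         *(LONG UNALIGNED *)(JmpOpcode + 1) =
+//             (LONG)((PUCHAR)Target - (JmpOpcode + 5));
+//     }
+//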
diff --git a/private/ntos/ke/i386/intsup.asm b/private/ntos/ke/i386/intsup.asm
new file mode 100644
index 000000000..8b2db6cc3
--- /dev/null
+++ b/private/ntos/ke/i386/intsup.asm
@@ -0,0 +1,774 @@
+ TITLE "Interrupt Object Support Routines"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; intsup.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to support interrupt objects.
+; It contains the interrupt dispatch code and the code template that gets
+; copied into an interrupt object.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 20-Jan-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+.386p
+ .xlist
+KERNELONLY equ 1
+include ks386.inc
+include i386\kimacro.inc
+include mac386.inc
+include callconv.inc
+ .list
+
+ EXTRNP KfRaiseIrql,1,IMPORT,FASTCALL
+ EXTRNP KfLowerIrql,1,IMPORT,FASTCALL
+ EXTRNP _KeBugCheck,1
+ EXTRNP _KiDeliverApc,3
+ EXTRNP _HalBeginSystemInterrupt,3,IMPORT
+ EXTRNP _HalEndSystemInterrupt,2,IMPORT
+ EXTRNP Kei386EoiHelper
+if DBG
+ extrn _DbgPrint:near
+ extrn _MsgISRTimeout:BYTE
+ extrn _MsgISROverflow:BYTE
+ extrn _KeTickCount:DWORD
+ extrn _KiISRTimeout:DWORD
+ extrn _KiISROverflow:DWORD
+endif
+
+MI_MOVEDI EQU 0BFH ; op code for mov edi, constant
+MI_DIRECTJMP EQU 0E9H ; op code for direct near jmp (rel32)
+
+_DATA SEGMENT DWORD PUBLIC 'DATA'
+
+if DBG
+ public KiInterruptCounts
+KiInterruptCounts dd 256*2 dup (0)
+endif
+
+_DATA ends
+
+ page ,132
+ subttl "Synchronize Execution"
+
+_TEXT$00 SEGMENT PARA PUBLIC 'CODE'
+
+;++
+;
+; BOOLEAN
+; KeSynchronizeExecution (
+; IN PKINTERRUPT Interrupt,
+; IN PKSYNCHRONIZE_ROUTINE SynchronizeRoutine,
+; IN PVOID SynchronizeContext
+; )
+;
+; Routine Description:
+;
+; This function synchronizes the execution of the specified routine with the
+; execution of the service routine associated with the specified interrupt
+; object.
+;
+; Arguments:
+;
+; Interrupt - Supplies a pointer to a control object of type interrupt.
+;
+; SynchronizeRoutine - Supplies a pointer to a function whose execution
+; is to be synchronized with the execution of the service routine associated
+; with the specified interrupt object.
+;
+; SynchronizeContext - Supplies a pointer to an arbitrary data structure
+; which is to be passed to the function specified by the SynchronizeRoutine
+; parameter.
+;
+; Return Value:
+;
+; The value returned by the SynchronizeRoutine function is returned as the
+; function value.
+;
+;--
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+cPublicProc _KeSynchronizeExecution ,3
+
+; equates of Local variables
+
+KsePreviousIrql equ [ebp - 4] ; previous IRQL
+KseStackSize = 4 * 1
+
+; equates for arguments
+
+KseInterrupt equ [ebp +8]
+KseSynchronizeRoutine equ [ebp + 12]
+KseSynchronizeContext equ [ebp + 16]
+
+ push ebp ; save ebp
+ mov ebp, esp ; (ebp)-> base of local variable frame
+ sub esp, KseStackSize ; allocate local variables space
+ push ebx ; save ebx
+ push esi ; save esi
+
+; Acquire the associated spin lock and raise IRQL to the interrupting source.
+
+ mov ebx, KseInterrupt ; (ebx)->interrupt object
+
+ mov ecx, InSynchronizeIrql[ebx] ; (ecx) = Synchronize Irql
+ fstCall KfRaiseIrql
+ mov KsePreviousIrql, al
+
+kse10: mov esi,[ebx+InActualLock] ; (esi)->Spin lock variable
+ ACQUIRE_SPINLOCK esi,<short kse20>
+
+; Call specified routine passing the specified context parameter.
+ mov eax,KseSynchronizeContext
+ push eax
+ call KseSynchronizeRoutine
+ mov ebx, eax ; save function returned value
+
+; Unlock spin lock, lower IRQL to its previous level, and return the value
+; returned by the specified routine.
+
+ RELEASE_SPINLOCK esi
+
+ mov ecx, KsePreviousIrql
+ fstCall KfLowerIrql
+
+ mov eax, ebx ; (eax) = returned value
+ pop esi ; restore esi
+ pop ebx ; restore ebx
+ leave ; will clear stack
+ stdRET _KeSynchronizeExecution
+
+; Lock is already owned; spin until free and then attempt to acquire lock
+; again.
+
+ifndef NT_UP
+kse20: SPIN_ON_SPINLOCK esi,<short kse10>,,DbgMp
+endif
+
+stdENDP _KeSynchronizeExecution
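+
+; A sketch of how a driver-level caller would use the routine above (the
+; MY_EXTENSION type and field names are hypothetical; the prototype is the
+; one documented in the header comment):
+;
+;     BOOLEAN SyncedUpdate(PVOID Context)       // runs with the ISR lock held
+;     {
+;         ((MY_EXTENSION *)Context)->SharedWithIsr++;
+;         return TRUE;
+;     }
+;
+;     Ok = KeSynchronizeExecution(MyInterruptObject, SyncedUpdate, MyExtension);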
+
+ page ,132
+ subttl "Chained Dispatch"
+;++
+;
+; Routine Description:
+;
+; This routine is entered as the result of an interrupt being generated
+; via a vector that is connected to more than one interrupt object.
+;
+; Arguments:
+;
+; edi - Supplies a pointer to the interrupt object.
+; esp - Supplies a pointer to the top of trap frame
+; ebp - Supplies a pointer to the top of trap frame
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+
+align 16
+cPublicProc _KiChainedDispatch ,0
+.FPO (2, 0, 0, 0, 0, 1)
+
+;
+; update statistic
+;
+
+ inc dword ptr PCR[PcPrcbData+PbInterruptCount]
+
+;
+; set ebp to the top of trap frame. We don't need to save ebp because
+; it is saved in trap frame already.
+;
+
+ mov ebp, esp ; (ebp)->trap frame
+
+;
+; Save previous IRQL and set new priority level
+;
+
+ mov eax, [edi].InVector ; save vector
+ push eax
+ sub esp, 4 ; make room for OldIrql
+ mov ecx, [edi].InIrql ; Irql
+
+;
+; esp - pointer to OldIrql
+; eax - vector
+; ecx - Irql
+;
+
+ stdCall _HalBeginSystemInterrupt, <ecx, eax, esp>
+ or eax, eax ; check for spurious int.
+ jz kid_spuriousinterrupt
+
+ stdCall _KiChainedDispatch2ndLvl
+
+ INTERRUPT_EXIT ; will do an iret
+
+stdENDP _KiChainedDispatch
+
+
+ page ,132
+ subttl "Chained Dispatch 2nd Level"
+;++
+;
+; Routine Description:
+;
+; This routine is entered as the result of an interrupt being generated
+; via a vector that is either connected to more than one interrupt object,
+; or is being 2nd level dispatched. Its function is to walk the list
+; of connected interrupt objects and call each interrupt service routine.
+; If the mode of the interrupt is latched, then a complete traversal of
+; the chain must be performed. If any of the routines require saving the
+; floating point machine state, then it is only saved once.
+;
+; Arguments:
+;
+; edi - Supplies a pointer to the interrupt object.
+;
+; Return Value:
+;
+; None.
+; Uses all registers
+;
+;--
+
+
+public _KiInterruptDispatch2ndLvl@0
+_KiInterruptDispatch2ndLvl@0:
+ nop
+
+cPublicProc _KiChainedDispatch2ndLvl,0
+cPublicFpo 0, 3
+
+ push ebp
+ sub esp, 8 ; Make room for scratch value
+ xor ebp, ebp ; init (ebp) = Interrupthandled = FALSE
+ lea ebx, [edi].InInterruptListEntry
+ ; (ebx)->Interrupt Head List
+
+;
+; Walk the list of connected interrupt objects and call the appropriate dispatch
+; routine.
+;
+
+kcd40:
+
+;
+; Raise irql level to the SynchronizeIrql level if it is not equal to current
+; irql.
+;
+
+ mov cl, [edi+InIrql] ; [cl] = Current Irql
+ mov esi,[edi+InActualLock]
+ cmp [edi+InSynchronizeIrql], cl ; Is SyncIrql > current IRQL?
+ je short kcd50 ; if e, no, go kcd50
+
+;
+; (ecx) = Irql to raise to; KfRaiseIrql returns the old Irql in (eax),
+; which is saved on the stack below.
+;
+
+ mov ecx, [edi+InSynchronizeIrql] ; (ecx) = Irql to raise to
+ fstCall KfRaiseIrql
+ mov [esp], eax ; Save OldIrql
+
+
+;
+; Acquire the service routine spin lock and call the service routine.
+;
+
+kcd50:
+ ACQUIRE_SPINLOCK esi,kcd110
+if DBG
+ mov eax, _KeTickCount ; Grab ISR start time
+ mov [esp+4], eax ; save to local variable
+endif
+ mov eax, InServiceContext[edi] ; set parameter value
+ push eax
+ push edi ; pointer to interrupt object
+ call InServiceRoutine[edi] ; call specified routine
+
+if DBG
+ mov ecx, [esp+4] ; (ecx) = time isr started
+ add ecx, _KiISRTimeout ; adjust for timeout
+ cmp _KeTickCount, ecx ; Did ISR timeout?
+ jnc kcd200
+kcd51:
+endif
+
+;
+; Release the service routine spin lock and check to determine if end of loop.
+;
+
+ RELEASE_SPINLOCK esi
+
+;
+; Lower IRQL to earlier level if we raised it to SynchronizedLevel.
+;
+
+ mov cl, [edi+InIrql]
+ cmp [edi+InSynchronizeIrql], cl ; Is SyncIrql > current IRQL?
+ je short kcd55 ; if e, no, go kcd55
+
+ mov esi, eax ; save ISR returned value
+
+;
+; Arg1 : Irql to Lower to
+;
+
+ mov ecx, [esp]
+ fstCall KfLowerIrql
+
+ mov eax, esi ; [eax] = ISR returned value
+kcd55:
+ or al,al ; Is interrupt handled?
+ je short kcd60 ; if eq, interrupt not handled
+ cmp word ptr InMode[edi], InLevelSensitive
+ je short kcd70 ; if eq, level sensitive interrupt
+
+ mov ebp, eax ; else edge shared int is handled. Remember it.
+kcd60: mov edi, [edi].InInterruptListEntry
+ ; (edi)->next obj's addr of listentry
+ cmp ebx, edi ; Are we at end of interrupt list?
+ je short kcd65 ; if eq, reach end of list
+ sub edi, InInterruptListEntry; (edi)->addr of next interrupt object
+ jmp kcd40
+
+kcd65:
+;
+; If this is a shared edge-triggered interrupt, we need to loop until no one
+; handles the interrupt. In theory only shared edge-triggered interrupts come here.
+;
+
+ sub edi, InInterruptListEntry; (edi)->addr of next interrupt object
+ cmp word ptr InMode[edi], InLevelSensitive
+ je short kcd70 ; if level sensitive, exit
+
+ test ebp, 0fh ; did anyone handle the interrupt?
+ je short kcd70 ; if e, no one, we can exit.
+
+ xor ebp, ebp ; init local var to no one handle the int
+ jmp kcd40 ; restart the loop.
+
+;
+; Either the interrupt is level sensitive and has been handled or the end of
+; the interrupt object chain has been reached.
+;
+
+; restore frame pointer, and deallocate trap frame.
+
+kcd70:
+ add esp, 8 ; clear local variable space
+ pop ebp
+ stdRet _KiChainedDispatch2ndLvl
+
+
+; Service routine Lock is currently owned, spin until free and then
+; attempt to acquire lock again.
+
+ifndef NT_UP
+kcd110: SPIN_ON_SPINLOCK esi, kcd50,,DbgMp
+endif
+
+;
+; ISR took a long time to complete, abort to debugger
+;
+
+if DBG
+kcd200: push eax ; save return code
+ push InServiceRoutine[edi]
+ push offset FLAT:_MsgISRTimeout
+ call _DbgPrint
+ add esp,8
+ pop eax
+ int 3
+ jmp kcd51 ; continue
+endif
+
+stdENDP _KiChainedDispatch2ndLvl
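+
+; The walk above is, in C terms, roughly the following sketch (it omits the
+; per-object spin locks and the SynchronizeIrql raise/lower, and the real
+; code also stops after one pass when the chain is level sensitive):
+;
+;     do {
+;         Handled = FALSE;
+;         Entry = &First->InterruptListEntry;
+;         do {
+;             PKINTERRUPT I = CONTAINING_RECORD(Entry, KINTERRUPT, InterruptListEntry);
+;             if (I->ServiceRoutine(I, I->ServiceContext)) {
+;                 if (I->Mode == LevelSensitive) return;   // one taker is enough
+;                 Handled = TRUE;                          // shared edge: remember it
+;             }
+;             Entry = Entry->Flink;
+;         } while (Entry != &First->InterruptListEntry);
+;     } while (Handled);                                   // shared edge: repeat until quiet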
+
+
+ page ,132
+ subttl "Floating Dispatch"
+;++
+;
+; Routine Description:
+;
+; This routine is entered as the result of an interrupt being generated
+; via a vector that is connected to an interrupt object. Its function is
+; to save the machine state and floating state and then call the specified
+; interrupt service routine.
+;
+; Arguments:
+;
+; edi - Supplies a pointer to the interrupt object.
+; esp - Supplies a pointer to the top of trap frame
+; ebp - Supplies a pointer to the top of trap frame
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+align 16
+cPublicProc _KiFloatingDispatch ,0
+.FPO (2, 0, 0, 0, 0, 1)
+
+;
+; update statistic
+;
+ inc dword ptr PCR[PcPrcbData+PbInterruptCount]
+
+; set ebp to the top of trap frame. We don't need to save ebp because
+; it is saved in trap frame already.
+;
+
+ mov ebp, esp ; (ebp)->trap frame
+
+;
+; Save previous IRQL and set new priority level to interrupt obj's SyncIrql
+;
+ mov eax, [edi].InVector
+ mov ecx, [edi].InSynchronizeIrql ; Irql
+ push eax ; save vector
+ sub esp, 4 ; make room for OldIrql
+
+; arg3 - ptr to OldIrql
+; arg2 - vector
+; arg1 - Irql
+ stdCall _HalBeginSystemInterrupt, <ecx, eax, esp>
+
+ or eax, eax ; check for spurious int.
+ jz kid_spuriousinterrupt
+
+;
+; Acquire the service routine spin lock and call the service routine.
+;
+
+kfd30: mov esi,[edi+InActualLock]
+ ACQUIRE_SPINLOCK esi,kfd100
+
+if DBG
+ mov ebx, _KeTickCount ; Grab current tick time
+endif
+ mov eax, InServiceContext[edi] ; set parameter value
+ push eax
+ push edi ; pointer to interrupt object
+ call InServiceRoutine[edi] ; call specified routine
+if DBG
+ add ebx, _KiISRTimeout ; adjust for ISR timeout
+ cmp _KeTickCount, ebx ; Did ISR timeout?
+ jnc kfd200
+kfd31:
+endif
+
+;
+; Release the service routine spin lock.
+;
+
+ RELEASE_SPINLOCK esi
+
+;
+; Do interrupt exit processing
+;
+ INTERRUPT_EXIT ; will do an iret
+
+;
+; Service routine Lock is currently owned; spin until free and
+; then attempt to acquire lock again.
+;
+
+ifndef NT_UP
+kfd100: SPIN_ON_SPINLOCK esi,kfd30,,DbgMp
+endif
+
+;
+; ISR took a long time to complete, abort to debugger
+;
+
+if DBG
+kfd200: push InServiceRoutine[edi] ; timed out
+ push offset FLAT:_MsgISRTimeout
+ call _DbgPrint
+ add esp,8
+ int 3
+ jmp kfd31 ; continue
+endif
+
+stdENDP _KiFloatingDispatch
+
+ page ,132
+ subttl "Interrupt Dispatch"
+;++
+;
+; Routine Description:
+;
+; This routine is entered as the result of an interrupt being generated
+; via a vector that is connected to an interrupt object. Its function is
+; to directly call the specified interrupt service routine.
+;
+; Arguments:
+;
+; edi - Supplies a pointer to the interrupt object.
+; esp - Supplies a pointer to the top of trap frame
+; ebp - Supplies a pointer to the top of trap frame
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+align 16
+cPublicProc _KiInterruptDispatch ,0
+.FPO (2, 0, 0, 0, 0, 1)
+
+;
+; update statistic
+;
+ inc dword ptr PCR[PcPrcbData+PbInterruptCount]
+
+;
+; set ebp to the top of trap frame. We don't need to save ebp because
+; it is saved in trap frame already.
+;
+
+ mov ebp, esp ; (ebp)->trap frame
+
+;
+; Save previous IRQL and set new priority level
+;
+ mov eax, [edi].InVector ; save vector
+ mov ecx, [edi].InSynchronizeIrql ; Irql to raise to
+ push eax
+ sub esp, 4 ; make room for OldIrql
+
+ stdCall _HalBeginSystemInterrupt,<ecx, eax, esp>
+
+ or eax, eax ; check for spurious int.
+ jz kid_spuriousinterrupt
+
+;
+; Acquire the service routine spin lock and call the service routine.
+;
+
+kid30: mov esi,[edi+InActualLock]
+ ACQUIRE_SPINLOCK esi,kid100
+if DBG
+ mov ebx, [edi].InVector ; this vector
+ mov eax, _KeTickCount ; current time
+ and eax, NOT 31 ; mask to closest 1/2 second
+ shl ebx, 3 ; eax = eax * 8
+ cmp eax, [KiInterruptCounts+ebx] ; in same 1/2 range?
+ jne kid_overflowreset
+
+ mov eax, _KiISROverflow
+ inc [KiInterruptCounts+ebx+4]
+ cmp [KiInterruptCounts+ebx+4], eax
+ jnc kid_interruptoverflow
+kid_dbg2:
+ mov ebx, _KeTickCount
+endif
+ mov eax, InServiceContext[edi] ; set parameter value
+ push eax
+ push edi ; pointer to interrupt object
+ call InServiceRoutine[edi] ; call specified routine
+
+if DBG
+ add ebx, _KiISRTimeout ; adjust for ISR timeout
+ cmp _KeTickCount, ebx ; Did ISR timeout?
+ jnc kid200
+kid31:
+endif
+
+;
+; Release the service routine spin lock, retrieve the return address,
+; deallocate stack storage, and return.
+;
+
+ RELEASE_SPINLOCK esi
+
+;
+; Do interrupt exit processing
+;
+
+ INTERRUPT_EXIT ; will do an iret
+
+ add esp, 8 ; clean stack
+
+kid_spuriousinterrupt:
+ add esp, 8 ; Irql wasn't raised, exit interrupt
+ SPURIOUS_INTERRUPT_EXIT ; without eoi or lower irql
+
+;
+; Lock is currently owned; spin until free and then attempt to acquire
+; lock again.
+;
+
+ifndef NT_UP
+kid100: SPIN_ON_SPINLOCK esi,kid30,,DbgMp
+endif
+
+;
+; ISR took a long time to complete, abort to debugger
+;
+
+if DBG
+kid200: push InServiceRoutine[edi] ; timed out
+ push offset FLAT:_MsgISRTimeout
+ call _DbgPrint
+ add esp,8
+ int 3
+ jmp kid31 ; continue
+
+kid_interruptoverflow:
+ push [KiInterruptCounts+ebx+4]
+ push InServiceRoutine[edi]
+ push offset FLAT:_MsgISROverflow
+ call _DbgPrint
+ add esp,12
+ int 3
+
+ mov eax, _KeTickCount ; current time
+ and eax, NOT 31 ; mask to closest 1/2 second
+
+kid_overflowreset:
+ mov [KiInterruptCounts+ebx], eax ; initialize time
+ mov [KiInterruptCounts+ebx+4], 0 ; reset count
+ jmp kid_dbg2
+endif
+
+
+
+stdENDP _KiInterruptDispatch
+
+ page ,132
+ subttl "Interrupt Template"
+;++
+;
+; Routine Description:
+;
+; This routine is a template that is copied into each interrupt object. Its
+; function is to save machine state and pass the address of the respective
+; interrupt object and transfer control to the appropriate interrupt
+; dispatcher.
+;
+; Control comes here through i386 interrupt gate and, upon entry, the
+; interrupt is disabled.
+;
+; Note: If the length of this template is changed, the corresponding constant
+; defined in Ki.h needs to be updated accordingly.
+;
+; Arguments:
+;
+; None
+;
+; Return Value:
+;
+; edi - addr of interrupt object
+; esp - top of trap frame
+; interrupts are disabled
+;
+;--
+
+_KiShutUpAssembler proc
+
+ public _KiInterruptTemplate
+_KiInterruptTemplate label byte
+
+; Save machine state on trap frame
+
+ ENTER_INTERRUPT kit_a, kit_t
+
+;
+; the following instruction gets the addr of associated interrupt object.
+; the value ? will be replaced by REAL interrupt object address at
+; interrupt object initialization time.
+; mov edi, addr of interrupt object
+
+ public _KiInterruptTemplate2ndDispatch
+_KiInterruptTemplate2ndDispatch equ this dword
+ db MI_MOVEDI ; MOV EDI opcode
+
+ public _KiInterruptTemplateObject
+_KiInterruptTemplateObject equ this dword
+ dd ? ; addr of interrupt object
+
+; the following instruction transfers control to the appropriate dispatcher
+; code. The value ? will be replaced at interrupt initialization time by the
+; rel32 displacement that reaches the real InterruptObj.DispatchAddr. The
+; dispatcher routine will be any one of _KiInterruptDispatch,
+; _KiFloatingDispatch, or _KiChainedDispatch.
+; jmp IntObj.DispatchAddr
+
+ db MI_DIRECTJMP ; direct near jump opcode (rel32)
+
+ public _KiInterruptTemplateDispatch
+_KiInterruptTemplateDispatch equ this dword
+ dd -5 ; addr of IntObj.DispatchAddr
+
+ ENTER_DR_ASSIST kit_a, kit_t
+
+; end of _KiInterruptTemplate
+
+if ($ - _KiInterruptTemplate) GT DISPATCH_LENGTH
+ .err
+ %out <InterruptTemplate greater than dispatch_length>
+endif
+
+_KiShutUpAssembler endp
+
+ page ,132
+ subttl "Unexpected Interrupt"
+;++
+;
+; Routine Description:
+;
+; This routine is entered as the result of an interrupt being generated
+; via a vector that is not connected to an interrupt object.
+;
+; For any unconnected vector, its associated 8259 irq is masked out at
+; Initialization time. So, this routine should NEVER be called.
+; If somehow this routine gets control, we simply raise a BugCheck and
+; stop the system.
+;
+; Arguments:
+;
+; None
+; Interrupt is disabled
+;
+; Return Value:
+;
+; None.
+;
+;--
+ public _KiUnexpectedInterrupt
+_KiUnexpectedInterrupt proc
+cPublicFpo 0,0
+
+; stop the system
+ stdCall _KeBugCheck, <TRAP_CAUSE_UNKNOWN>
+ nop
+
+_KiUnexpectedInterrupt endp
+
+_TEXT$00 ends
+ end
diff --git a/private/ntos/ke/i386/iopm.c b/private/ntos/ke/i386/iopm.c
new file mode 100644
index 000000000..c6e7260e2
--- /dev/null
+++ b/private/ntos/ke/i386/iopm.c
@@ -0,0 +1,529 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ iopm.c
+
+Abstract:
+
+ This module implements interfaces that support manipulation of i386
+ i/o access maps (IOPMs).
+
+ These entry points only exist on i386 machines.
+
+Author:
+
+ Bryan M. Willman (bryanwi) 18-Sep-91
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Our notion of alignment is different, so force use of ours
+//
+
+#undef ALIGN_UP
+#undef ALIGN_DOWN
+#define ALIGN_DOWN(address,amt) ((ULONG)(address) & ~(( amt ) - 1))
+#define ALIGN_UP(address,amt) (ALIGN_DOWN( ((address) + (amt) - 1), (amt) ))
+
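For a power-of-two amt these macros mask an address down to, or round it up to, the next amt boundary. A small standalone sketch of the same arithmetic (user mode, illustrative only):

    #include <stdio.h>

    #define ALIGN_DOWN(address,amt) ((unsigned long)(address) & ~((amt) - 1))
    #define ALIGN_UP(address,amt)   (ALIGN_DOWN((address) + (amt) - 1, (amt)))

    int main(void)
    {
        /* 0x1234 on a 16-byte boundary: down -> 0x1230, up -> 0x1240 */
        printf("%lx %lx\n", ALIGN_DOWN(0x1234, 16), ALIGN_UP(0x1234, 16));
        return 0;
    }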
+//
+// Note on synchronization:
+//
+// IOPM edits are always done by code running at synchronization level on
+// the processor whose TSS (map) is being edited.
+//
+// IOPM only affects user mode code. User mode code can never interrupt
+// synchronization level code, therefore, edits and user code never race.
+//
+// Likewise, switching from one map to another occurs on the processor
+// for which the switch is being done by IPI_LEVEL code. The active
+// map could be switched in the middle of an edit of some map, but
+// the edit will always complete before any user code gets run on that
+// processor, therefore, there is no race.
+//
+// Multiple simultaneous calls to Ke386SetIoAccessMap *could* produce
+// weird mixes. Therefore, KiIopmLock must be acquired to
+// globally serialize edits.
+//
+
+//
+// Define forward referenced function prototypes.
+//
+
+VOID
+KiSetIoMap(
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID MapSource,
+ IN PVOID MapNumber,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiLoadIopmOffset(
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+BOOLEAN
+Ke386SetIoAccessMap (
+ ULONG MapNumber,
+ PKIO_ACCESS_MAP IoAccessMap
+ )
+
+/*++
+
+Routine Description:
+
+ The specified i/o access map will be set to match the
+ definition specified by IoAccessMap (i.e. enable/disable
+ those ports) before the call returns. The change will take
+ effect on all processors.
+
+ Ke386SetIoAccessMap does not give any process enhanced I/O
+ access, it merely defines a particular access map.
+
+Arguments:
+
+ MapNumber - Number of access map to set. Map 0 is fixed.
+
+ IoAccessMap - Pointer to bitvector (64K bits, 8K bytes) which
+ defines the specified access map. Must be in
+ non-paged pool.
+
+Return Value:
+
+ TRUE if successful. FALSE if failure (attempt to set a map
+ which does not exist, attempt to set map 0)
+
+--*/
+
+{
+
+ PKPROCESS CurrentProcess;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ PVOID pt;
+ KAFFINITY TargetProcessors;
+
+ //
+ // Reject illegal requests
+ //
+
+ if ((MapNumber > IOPM_COUNT) || (MapNumber == IO_ACCESS_MAP_NONE)) {
+ return FALSE;
+ }
+
+ //
+ // Acquire the context swap lock so a context switch will not occur.
+ //
+
+ KiLockContextSwap(&OldIrql);
+
+ //
+ // Compute set of active processors other than this one, if non-empty
+ // IPI them to set their maps.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+
+#if !defined(NT_UP)
+
+ TargetProcessors = KeActiveProcessors & ~Prcb->SetMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSetIoMap,
+ IoAccessMap,
+ (PVOID)MapNumber,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Copy the IOPM map and load the map for the current process.
+ //
+
+ pt = &(KiPcr()->TSS->IoMaps[MapNumber-1].IoMap);
+ RtlMoveMemory(pt, (PVOID)IoAccessMap, IOPM_SIZE);
+ CurrentProcess = Prcb->CurrentThread->ApcState.Process;
+ KiPcr()->TSS->IoMapBase = CurrentProcess->IopmOffset;
+
+ //
+ // Wait until all of the target processors have finished copying the
+ // new map.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Restore IRQL and unlock the context swap lock.
+ //
+
+ KiUnlockContextSwap(OldIrql);
+ return TRUE;
+}
+
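The map itself is a 64K-bit vector, one bit per I/O port, where a clear bit permits user-mode access and a set bit denies it (which is why the "no access" map is all ones). A minimal sketch of how a caller might fill such a buffer before handing it to Ke386SetIoAccessMap; the 8192-byte size constant and the chosen port range are illustrative assumptions:

    #define EXAMPLE_IOPM_SIZE 8192              /* 64K ports, 1 bit each */

    static void
    BuildExampleMap (unsigned char *Map)
    {
        unsigned int port;

        /* start with every port denied (all bits set) */
        for (port = 0; port < EXAMPLE_IOPM_SIZE; port++) {
            Map[port] = 0xFF;
        }

        /* clear the bits for ports 0x3B0 through 0x3DF to permit them */
        for (port = 0x3B0; port <= 0x3DF; port++) {
            Map[port >> 3] &= (unsigned char)~(1 << (port & 7));
        }
    }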
+#if !defined(NT_UP)
+
+
+VOID
+KiSetIoMap(
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID MapSource,
+ IN PVOID MapNumber,
+ IN PVOID Parameter3
+ )
+/*++
+
+Routine Description:
+
+ copy the specified map into this processor's TSS.
+ This procedure runs at IPI level.
+
+Arguments:
+
+    SignalDone - IPI packet context to signal once the copy is complete
+
+    MapSource - pointer to the new map contents (IOPM_SIZE bytes)
+
+    MapNumber - number of the map being set
+
+    Parameter3 - not used
+
+Return Value:
+
+ none
+
+--*/
+
+{
+
+ PKPROCESS CurrentProcess;
+ PKPRCB Prcb;
+ PVOID pt;
+
+ //
+ // Copy the IOPM map and load the map for the current process.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ pt = &(KiPcr()->TSS->IoMaps[((ULONG) MapNumber)-1].IoMap);
+ RtlMoveMemory(pt, MapSource, IOPM_SIZE);
+ CurrentProcess = Prcb->CurrentThread->ApcState.Process;
+ KiPcr()->TSS->IoMapBase = CurrentProcess->IopmOffset;
+ KiIpiSignalPacketDone(SignalDone);
+ return;
+}
+
+#endif
+
+
+BOOLEAN
+Ke386QueryIoAccessMap (
+ ULONG MapNumber,
+ PKIO_ACCESS_MAP IoAccessMap
+ )
+
+/*++
+
+Routine Description:
+
+ The specified i/o access map will be dumped into the buffer.
+ map 0 is a constant, but will be dumped anyway.
+
+Arguments:
+
+    MapNumber - Number of access map to query.  Map 0 is fixed.
+
+ IoAccessMap - Pointer to buffer (64K bits, 8K bytes) which
+ is to receive the definition of the access map.
+ Must be in non-paged pool.
+
+Return Value:
+
+ TRUE if successful. FALSE if failure (attempt to query a map
+ which does not exist)
+
+--*/
+
+{
+
+ ULONG i;
+ PVOID Map;
+ KIRQL OldIrql;
+ PUCHAR p;
+
+ //
+ // Reject illegal requests
+ //
+
+ if (MapNumber > IOPM_COUNT) {
+ return FALSE;
+ }
+
+ //
+ // Acquire the context swap lock so a context switch will not occur.
+ //
+
+ KiLockContextSwap(&OldIrql);
+
+ //
+ // Copy out the map
+ //
+
+ if (MapNumber == IO_ACCESS_MAP_NONE) {
+
+ //
+ // no access case, simply return a map of all 1s
+ //
+
+ p = (PUCHAR)IoAccessMap;
+ for (i = 0; i < IOPM_SIZE; i++) {
+ p[i] = (UCHAR)-1;
+ }
+
+ } else {
+
+ //
+ // normal case, just copy the bits
+ //
+
+ Map = (PVOID)&(KiPcr()->TSS->IoMaps[MapNumber-1].IoMap);
+ RtlMoveMemory((PVOID)IoAccessMap, Map, IOPM_SIZE);
+ }
+
+ //
+ // Restore IRQL and unlock the context swap lock.
+ //
+
+ KiUnlockContextSwap(OldIrql);
+ return TRUE;
+}
+
+
+BOOLEAN
+Ke386IoSetAccessProcess (
+ PKPROCESS Process,
+ ULONG MapNumber
+ )
+/*++
+
+Routine Description:
+
+ Set the i/o access map which controls user mode i/o access
+ for a particular process.
+
+Arguments:
+
+    Process - Pointer to the kernel process object describing the
+        process for which a map is to be set.
+
+    MapNumber - Number of the map to set.  The contents of the map
+        are defined by Ke386SetIoAccessMap.  Setting MapNumber
+        to IO_ACCESS_MAP_NONE will disallow any user mode i/o
+        access from the process.
+
+Return Value:
+
+ TRUE if success, FALSE if failure (illegal MapNumber)
+
+--*/
+
+{
+
+ USHORT MapOffset;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ KAFFINITY TargetProcessors;
+
+ //
+ // Reject illegal requests
+ //
+
+ if (MapNumber > IOPM_COUNT) {
+ return FALSE;
+ }
+
+ MapOffset = KiComputeIopmOffset(MapNumber);
+
+ //
+ // Acquire the context swap lock so a context switch will not occur.
+ //
+
+ KiLockContextSwap(&OldIrql);
+
+ //
+ // Store new offset in process object, compute current set of
+ // active processors for process, if this cpu is one, set IOPM.
+ //
+
+ Process->IopmOffset = MapOffset;
+
+ TargetProcessors = Process->ActiveProcessors;
+ Prcb = KeGetCurrentPrcb();
+ if (TargetProcessors & Prcb->SetMember) {
+ KiPcr()->TSS->IoMapBase = MapOffset;
+ }
+
+ //
+ // Compute set of active processors other than this one, if non-empty
+ // IPI them to load their IOPMs, wait for them.
+ //
+
+#if !defined(NT_UP)
+
+ TargetProcessors = TargetProcessors & ~Prcb->SetMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiLoadIopmOffset,
+ NULL,
+ NULL,
+ NULL);
+
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Restore IRQL and unlock the context swap lock.
+ //
+
+ KiUnlockContextSwap(OldIrql);
+ return TRUE;
+}
+
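Taken together, Ke386SetIoAccessMap defines the contents of a numbered map and Ke386IoSetAccessProcess attaches that map number to a process. A hedged sketch of the combined calling pattern; the wrapper name, the choice of map 1, and the assumption that MyMap points at a non-paged 8 KB buffer are all illustrative:

    VOID
    GrantProcessPortAccess (
        PKPROCESS Process,
        PKIO_ACCESS_MAP MyMap       // hypothetical non-paged 8 KB buffer
        )
    {
        //
        // Define map #1 on every processor, then make the process use it.
        //

        if (Ke386SetIoAccessMap(1, MyMap)) {
            Ke386IoSetAccessProcess(Process, 1);
        }
    }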
+#if !defined(NT_UP)
+
+
+VOID
+KiLoadIopmOffset(
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ Edit IopmBase of Tss to match that of currently running process.
+
+Arguments:
+
+    SignalDone - IPI packet context to signal once the offset has been loaded
+
+    Parameter1 - Parameter3 - not used
+
+Return Value:
+
+ none
+
+--*/
+
+{
+
+ PKPROCESS CurrentProcess;
+ PKPRCB Prcb;
+
+ //
+ // Update IOPM field in TSS from current process
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ CurrentProcess = Prcb->CurrentThread->ApcState.Process;
+ KiPcr()->TSS->IoMapBase = CurrentProcess->IopmOffset;
+ KiIpiSignalPacketDone(SignalDone);
+ return;
+}
+
+#endif
+
+
+VOID
+Ke386SetIOPL(
+ IN PKPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ Gives IOPL to the specified process.
+
+    All threads created from this point on will get IOPL.  The current
+    process will get IOPL.  Must be called from the context of the thread
+    and process that are to have IOPL.
+
+    The Iopl field (to be made a boolean) in KPROCESS indicates that all
+    new threads are to get IOPL.
+
+    The Iopl field (to be made a boolean) in KTHREAD indicates that the
+    given thread is to get IOPL.
+
+    N.B.    If a kernel mode only thread calls this procedure, the
+            call is (a) pointless and (b) will break the system.
+
+Arguments:
+
+ Process - Pointer to the process == IGNORED!!!
+
+Return Value:
+
+ none
+
+--*/
+
+{
+
+ PKTHREAD Thread;
+ PKPROCESS Process2;
+ PKTRAP_FRAME TrapFrame;
+ CONTEXT Context;
+
+ //
+ // get current thread and Process2, set flag for IOPL in both of them
+ //
+
+ Thread = KeGetCurrentThread();
+ Process2 = Thread->ApcState.Process;
+
+ Process2->Iopl = 1;
+ Thread->Iopl = 1;
+
+ //
+ // Force IOPL to be on for current thread
+ //
+
+ TrapFrame = (PKTRAP_FRAME)((PUCHAR)Thread->InitialStack -
+ ALIGN_UP(sizeof(KTRAP_FRAME),KTRAP_FRAME_ALIGN) -
+ sizeof(FLOATING_SAVE_AREA));
+
+ Context.ContextFlags = CONTEXT_CONTROL;
+ KeContextFromKframes(TrapFrame,
+ NULL,
+ &Context);
+
+ Context.EFlags |= (EFLAGS_IOPL_MASK & -1); // IOPL == 3
+
+ KeContextToKframes(TrapFrame,
+ NULL,
+ &Context,
+ CONTEXT_CONTROL,
+ UserMode);
+
+ return;
+}
diff --git a/private/ntos/ke/i386/kernlini.c b/private/ntos/ke/i386/kernlini.c
new file mode 100644
index 000000000..cce9b5248
--- /dev/null
+++ b/private/ntos/ke/i386/kernlini.c
@@ -0,0 +1,1581 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ kernlini.c
+
+Abstract:
+
+ This module contains the code to initialize the kernel data structures
+ and to initialize the idle thread, its process, and the processor control
+ block.
+
+ For the i386, it also contains code to initialize the PCR.
+
+Author:
+
+ David N. Cutler (davec) 21-Apr-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ 24-Jan-1990 shielin
+
+ Changed for NT386
+
+ 20-Mar-1990 bryanwi
+
+ Added KiInitializePcr
+
+--*/
+
+#include "ki.h"
+#include "ki386.h"
+
+#define TRAP332_GATE 0xEF00
+
+VOID
+KiSetProcessorType(
+ VOID
+ );
+
+VOID
+KiSetCR0Bits(
+ VOID
+ );
+
+BOOLEAN
+KiIsNpxPresent(
+ VOID
+ );
+
+VOID
+KiInitializeDblFaultTSS(
+ IN PKTSS Tss,
+ IN ULONG Stack,
+ IN PKGDTENTRY TssDescriptor
+ );
+
+VOID
+KiInitializeTSS2 (
+ IN PKTSS Tss,
+ IN PKGDTENTRY TssDescriptor
+ );
+
+VOID
+KiSwapIDT (
+ VOID
+ );
+
+VOID
+KeSetup80387OrEmulate (
+ IN PVOID *R3EmulatorTable
+ );
+
+ULONG
+KiGetFeatureBits (
+ VOID
+ );
+
+NTSTATUS
+KiMoveRegTree(
+ HANDLE Source,
+ HANDLE Dest
+ );
+
+VOID
+Ki386EnableGlobalPage (
+ IN volatile PLONG Number
+ );
+
+BOOLEAN
+KiInitMachineDependent (
+ VOID
+ );
+
+VOID
+KiInitializeMTRR (
+ IN BOOLEAN LastProcessor
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,KiInitializeKernel)
+#pragma alloc_text(INIT,KiInitializePcr)
+#pragma alloc_text(INIT,KiInitializeDblFaultTSS)
+#pragma alloc_text(INIT,KiInitializeTSS2)
+#pragma alloc_text(INIT,KiSwapIDT)
+#pragma alloc_text(INIT,KeSetup80387OrEmulate)
+#pragma alloc_text(INIT,KiGetFeatureBits)
+#pragma alloc_text(INIT,KiMoveRegTree)
+#pragma alloc_text(INIT,KiInitMachineDependent)
+#endif
+
+
+#if 0
+PVOID KiTrap08;
+#endif
+
+extern PVOID Ki387RoundModeTable;
+extern PVOID Ki386IopmSaveArea;
+extern ULONG KeI386ForceNpxEmulation;
+extern WCHAR CmDisabledFloatingPointProcessor[];
+extern UCHAR CmpCyrixID[];
+
+#define CPU_NONE 0
+#define CPU_INTEL 1
+#define CPU_AMD 2
+#define CPU_CYRIX 3
+
+
+
+
+//
+// Profile vars
+//
+
+extern KIDTENTRY IDT[];
+
+VOID
+KiInitializeKernel (
+ IN PKPROCESS Process,
+ IN PKTHREAD Thread,
+ IN PVOID IdleStack,
+ IN PKPRCB Prcb,
+ IN CCHAR Number,
+ PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function gains control after the system has been bootstrapped and
+ before the system has been initialized. Its function is to initialize
+ the kernel data structures, initialize the idle thread and process objects,
+ initialize the processor control block, call the executive initialization
+ routine, and then return to the system startup routine. This routine is
+ also called to initialize the processor specific structures when a new
+ processor is brought on line.
+
+Arguments:
+
+ Process - Supplies a pointer to a control object of type process for
+ the specified processor.
+
+ Thread - Supplies a pointer to a dispatcher object of type thread for
+ the specified processor.
+
+    IdleStack - Supplies a pointer to the base of the real kernel stack for
+        the idle thread on the specified processor.
+
+ Prcb - Supplies a pointer to a processor control block for the specified
+ processor.
+
+ Number - Supplies the number of the processor that is being
+ initialized.
+
+ LoaderBlock - Supplies a pointer to the loader parameter block.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+#define INITIAL_KERNEL_STACK_SIZE (((sizeof(FLOATING_SAVE_AREA)+KTRAP_FRAME_LENGTH+KTRAP_FRAME_ROUND) & ~KTRAP_FRAME_ROUND)/sizeof(ULONG))+1
+
+ ULONG KernelStack[INITIAL_KERNEL_STACK_SIZE];
+ LONG Index;
+ ULONG DirectoryTableBase[2];
+ KIRQL OldIrql;
+ PKPCR Pcr;
+ BOOLEAN NpxFlag;
+ ULONG FeatureBits;
+
+ KiSetProcessorType();
+ KiSetCR0Bits();
+ NpxFlag = KiIsNpxPresent();
+
+ Pcr = KeGetPcr();
+
+ //
+ // Initialize DPC listhead and lock.
+ //
+
+ InitializeListHead(&Prcb->DpcListHead);
+ KeInitializeSpinLock(&Prcb->DpcLock);
+ Prcb->DpcRoutineActive = 0;
+ Prcb->DpcQueueDepth = 0;
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+
+ //
+ // Check for unsupported processor revision
+ //
+
+ if (Prcb->CpuType == 3) {
+ KeBugCheckEx(UNSUPPORTED_PROCESSOR,0x386,0,0,0);
+ }
+
+ //
+ // If the initial processor is being initialized, then initialize the
+ // per system data structures.
+ //
+
+ if (Number == 0) {
+
+ //
+ // Initial setting for global Cpu & Stepping levels
+ //
+
+ KeI386NpxPresent = NpxFlag;
+ KeI386CpuType = Prcb->CpuType;
+ KeI386CpuStep = Prcb->CpuStep;
+
+ KeProcessorArchitecture = PROCESSOR_ARCHITECTURE_INTEL;
+ KeProcessorLevel = (USHORT)Prcb->CpuType;
+ if (Prcb->CpuID == 0) {
+ KeProcessorRevision = 0xFF00 |
+ (((Prcb->CpuStep >> 4) + 0xa0 ) & 0x0F0) |
+ (Prcb->CpuStep & 0xf);
+ } else {
+ KeProcessorRevision = Prcb->CpuStep;
+ }
+
+ KeFeatureBits = KiGetFeatureBits();
+
+ //
+        // If cmpxchg8b was available at boot, verify it's still available
+ //
+
+ if ((KiBootFeatureBits & KF_CMPXCHG8B) && !(KeFeatureBits & KF_CMPXCHG8B)) {
+ KeBugCheckEx (MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED, KF_CMPXCHG8B, 0, 0, 0);
+ }
+
+ //
+ // Lower IRQL to APC level.
+ //
+
+ KeLowerIrql(APC_LEVEL);
+
+
+ //
+ // Initialize kernel internal spinlocks
+ //
+
+ KeInitializeSpinLock(&KiContextSwapLock);
+ KeInitializeSpinLock(&KiDispatcherLock);
+ KeInitializeSpinLock(&KiFreezeExecutionLock);
+
+
+ //
+ // Performance architecture independent initialization.
+ //
+
+ KiInitSystem();
+
+ //
+ // Initialize idle thread process object and then set:
+ //
+ // 1. all the quantum values to the maximum possible.
+ // 2. the process in the balance set.
+ // 3. the active processor mask to the specified process.
+ //
+
+ DirectoryTableBase[0] = 0;
+ DirectoryTableBase[1] = 0;
+ KeInitializeProcess(Process,
+ (KPRIORITY)0,
+ (KAFFINITY)(0xffffffff),
+ &DirectoryTableBase[0],
+ FALSE);
+
+ Process->ThreadQuantum = MAXCHAR;
+
+ } else {
+
+ FeatureBits = KiGetFeatureBits();
+
+ //
+ // Adjust global cpu setting to represent lowest of all processors
+ //
+
+ if (NpxFlag != KeI386NpxPresent) {
+ //
+ // NPX support must be available on all processors or on none
+ //
+
+ KeBugCheckEx (MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED, 0x387, 0, 0, 0);
+ }
+
+ if ((ULONG)(Prcb->CpuType) != KeI386CpuType) {
+
+ if ((ULONG)(Prcb->CpuType) < KeI386CpuType) {
+
+ //
+                // Track the lowest CPU type across all processors
+ //
+
+ KeI386CpuType = (ULONG)Prcb->CpuType;
+ KeProcessorLevel = (USHORT)Prcb->CpuType;
+ }
+ }
+
+ if ((KiBootFeatureBits & KF_CMPXCHG8B) && !(FeatureBits & KF_CMPXCHG8B)) {
+ //
+ // cmpxchg8b must be available on all processors, if installed at boot
+ //
+
+ KeBugCheckEx (MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED, KF_CMPXCHG8B, 0, 0, 0);
+ }
+
+ if ((KeFeatureBits & KF_GLOBAL_PAGE) && !(FeatureBits & KF_GLOBAL_PAGE)) {
+ //
+            // Global page support must be available on all processors if it is present on the boot processor
+ //
+
+ KeBugCheckEx (MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED, KF_GLOBAL_PAGE, 0, 0, 0);
+ }
+
+ //
+ // Use lowest stepping value
+ //
+
+ if (Prcb->CpuStep < KeI386CpuStep) {
+ KeI386CpuStep = Prcb->CpuStep;
+ if (Prcb->CpuID == 0) {
+ KeProcessorRevision = 0xFF00 |
+ ((Prcb->CpuStep >> 8) + 'A') |
+ (Prcb->CpuStep & 0xf);
+ } else {
+ KeProcessorRevision = Prcb->CpuStep;
+ }
+ }
+
+ //
+ // Use subset of all NT feature bits available on each processor
+ //
+
+ KeFeatureBits &= FeatureBits;
+
+ //
+ // Lower IRQL to DISPATCH level.
+ //
+
+ KeLowerIrql(DISPATCH_LEVEL);
+
+ }
+
+ //
+ // Update processor features
+ //
+
+ SharedUserData->ProcessorFeatures[PF_MMX_INSTRUCTIONS_AVAILABLE] =
+ (KeFeatureBits & KF_MMX) ? TRUE : FALSE;
+
+ SharedUserData->ProcessorFeatures[PF_COMPARE_EXCHANGE_DOUBLE] =
+ (KeFeatureBits & KF_CMPXCHG8B) ? TRUE : FALSE;
+
+ //
+ // Initialize idle thread object and then set:
+ //
+ // 1. the initial kernel stack to the specified idle stack.
+ // 2. the next processor number to the specified processor.
+ // 3. the thread priority to the highest possible value.
+ // 4. the state of the thread to running.
+ // 5. the thread affinity to the specified processor.
+ // 6. the specified processor member in the process active processors
+ // set.
+ //
+
+ KeInitializeThread(Thread, (PVOID)&KernelStack[INITIAL_KERNEL_STACK_SIZE],
+ (PKSYSTEM_ROUTINE)NULL, (PKSTART_ROUTINE)NULL,
+ (PVOID)NULL, (PCONTEXT)NULL, (PVOID)NULL, Process);
+ Thread->InitialStack = (PVOID)(((ULONG)IdleStack) &0xfffffff0);
+ Thread->StackBase = Thread->InitialStack;
+ Thread->StackLimit = (PVOID)((ULONG)Thread->InitialStack - KERNEL_STACK_SIZE);
+ Thread->NextProcessor = Number;
+ Thread->Priority = HIGH_PRIORITY;
+ Thread->State = Running;
+ Thread->Affinity = (KAFFINITY)(1<<Number);
+ Thread->WaitIrql = DISPATCH_LEVEL;
+ SetMember(Number, Process->ActiveProcessors);
+
+ //
+    // Initialize the processor block.  (Note that some fields have been
+    // initialized in KiInitializePcr().)
+ //
+
+ Prcb->CurrentThread = Thread;
+ Prcb->NextThread = (PKTHREAD)NULL;
+ Prcb->IdleThread = Thread;
+ Pcr->NtTib.StackBase = Thread->InitialStack;
+
+ //
+ // The following operations need to be done atomically. So we
+ // grab the DispatcherDatabase.
+ //
+
+ KiAcquireSpinLock(&KiDispatcherLock);
+
+ //
+ // Release DispatcherDatabase
+ //
+
+ KiReleaseSpinLock(&KiDispatcherLock);
+
+ //
+ // call the executive initialization routine.
+ //
+
+ try {
+ ExpInitializeExecutive(Number, LoaderBlock);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ KeBugCheck (PHASE0_EXCEPTION);
+ }
+
+ //
+ // If the initial processor is being initialized, then compute the
+ // timer table reciprocal value and reset the PRCB values for the
+ // controllable DPC behavior in order to reflect any registry
+ // overrides.
+ //
+
+ if (Number == 0) {
+ KiTimeIncrementReciprocal = KiComputeReciprocal((LONG)KeMaximumIncrement,
+ &KiTimeIncrementShiftCount);
+
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+ }
+
+ //
+ // Allocate 8k IOPM bit map saved area to allow BiosCall swap
+ // bit maps.
+ //
+
+ if (Number == 0) {
+ Ki386IopmSaveArea = ExAllocatePool(PagedPool, PAGE_SIZE * 2);
+ if (Ki386IopmSaveArea == NULL) {
+ KeBugCheck(NO_PAGES_AVAILABLE);
+ }
+ }
+
+ //
+ // Set the priority of the specified idle thread to zero, set appropriate
+ // member in KiIdleSummary and return to the system start up routine.
+ //
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeSetPriorityThread(Thread, (KPRIORITY)0);
+
+ //
+    // If a thread has not been selected to run on the current processor,
+    // check to see if there are any ready threads; otherwise add this
+    // processor to the IdleSummary.
+ //
+
+ KiAcquireSpinLock(&KiDispatcherLock);
+ if (Prcb->NextThread == (PKTHREAD)NULL) {
+ SetMember(Number, KiIdleSummary);
+ }
+ KiReleaseSpinLock(&KiDispatcherLock);
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ //
+ // This processor has initialized
+ //
+
+ LoaderBlock->Prcb = (ULONG)NULL;
+
+ return;
+}
+
+VOID
+KiInitializePcr (
+ IN ULONG Processor,
+ IN PKPCR Pcr,
+ IN PKIDTENTRY Idt,
+ IN PKGDTENTRY Gdt,
+ IN PKTSS Tss,
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize the PCR for a processor. It
+ simply stuffs values into the PCR. (The PCR is not inited statically
+ because the number varies with the number of processors.)
+
+ Note that each processor has its own IDT, GDT, and TSS as well as PCR!
+
+Arguments:
+
+    Processor - Processor whose Pcr to initialize.
+
+ Pcr - Linear address of PCR.
+
+ Idt - Linear address of i386 IDT.
+
+ Gdt - Linear address of i386 GDT.
+
+ Tss - Linear address (NOT SELECTOR!) of the i386 TSS.
+
+ Thread - Dummy thread object to use very early on.
+
+Return Value:
+
+ None.
+
+--*/
+{
+ // set version values
+
+ Pcr->MajorVersion = PCR_MAJOR_VERSION;
+ Pcr->MinorVersion = PCR_MINOR_VERSION;
+
+ Pcr->PrcbData.MajorVersion = PRCB_MAJOR_VERSION;
+ Pcr->PrcbData.MinorVersion = PRCB_MINOR_VERSION;
+
+ Pcr->PrcbData.BuildType = 0;
+
+#if DBG
+ Pcr->PrcbData.BuildType |= PRCB_BUILD_DEBUG;
+#endif
+
+#ifdef NT_UP
+ Pcr->PrcbData.BuildType |= PRCB_BUILD_UNIPROCESSOR;
+#endif
+
+ // Basic addressing fields
+
+ Pcr->SelfPcr = Pcr;
+ Pcr->Prcb = &(Pcr->PrcbData);
+
+ // Thread control fields
+
+ Pcr->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
+ Pcr->NtTib.StackBase = 0;
+ Pcr->NtTib.StackLimit = 0;
+ Pcr->NtTib.Self = 0;
+
+ Pcr->PrcbData.CurrentThread = Thread;
+
+ //
+ // Init Prcb.Number and ProcessorBlock such that Ipi will work
+ // as early as possible.
+ //
+
+ Pcr->PrcbData.Number = (UCHAR)Processor;
+ Pcr->PrcbData.SetMember = 1 << Processor;
+ KiProcessorBlock[Processor] = Pcr->Prcb;
+
+ Pcr->Irql = 0;
+
+ // Machine structure addresses
+
+ Pcr->GDT = Gdt;
+ Pcr->IDT = Idt;
+ Pcr->TSS = Tss;
+
+ return;
+}
+
+#if 0
+VOID
+KiInitializeDblFaultTSS(
+ IN PKTSS Tss,
+ IN ULONG Stack,
+ IN PKGDTENTRY TssDescriptor
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize the double-fault TSS for a
+ processor. It will set the static fields of the TSS to point to
+ the double-fault handler and the appropriate double-fault stack.
+
+ Note that the IOPM for the double-fault TSS grants access to all
+ ports. This is so the standard HAL's V86-mode callback to reset
+ the display to text mode will work.
+
+Arguments:
+
+ Tss - Supplies a pointer to the double-fault TSS
+
+ Stack - Supplies a pointer to the double-fault stack.
+
+ TssDescriptor - Linear address of the descriptor for the TSS.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PUCHAR p;
+ ULONG i;
+ ULONG j;
+
+ //
+ // Set limit for TSS
+ //
+
+ if (TssDescriptor != NULL) {
+ TssDescriptor->LimitLow = sizeof(KTSS) - 1;
+ TssDescriptor->HighWord.Bits.LimitHi = 0;
+ }
+
+ //
+ // Initialize IOPMs
+ //
+
+ for (i = 0; i < IOPM_COUNT; i++) {
+ p = (PUCHAR)(Tss->IoMaps[i]);
+
+ for (j = 0; j < PIOPM_SIZE; j++) {
+ p[j] = 0;
+ }
+ }
+
+ // Set IO Map base address to indicate no IO map present.
+
+ // N.B. -1 does not seem to be a valid value for the map base. If this
+    //      value is used, byte immediate in's and out's will actually go to
+    //      the hardware when executed in V86 mode.
+
+ Tss->IoMapBase = KiComputeIopmOffset(IO_ACCESS_MAP_NONE);
+
+    // Set flags to 0, which in particular disables traps on task switches.
+
+ Tss->Flags = 0;
+
+
+ // Set LDT and Ss0 to constants used by NT.
+
+ Tss->LDT = 0;
+ Tss->Ss0 = KGDT_R0_DATA;
+ Tss->Esp0 = Stack;
+ Tss->Eip = (ULONG)KiTrap08;
+    Tss->Cs = KGDT_R0_CODE | RPL_MASK;
+ Tss->Ds = KGDT_R0_DATA;
+ Tss->Es = KGDT_R0_DATA;
+ Tss->Fs = KGDT_R0_DATA;
+
+
+ return;
+
+}
+#endif
+
+
+VOID
+KiInitializeTSS (
+ IN PKTSS Tss
+ )
+
+/*++
+
+Routine Description:
+
+    This function is called to initialize the TSS for a processor.
+    It will set the static fields of the TSS.  (i.e. those fields that
+    the part reads, and for which NT uses constant values.)
+
+    The dynamic fields (Esp0 and CR3) are set in the context swap
+ code.
+
+Arguments:
+
+ Tss - Linear address of the Task State Segment.
+
+Return Value:
+
+ None.
+
+--*/
+{
+
+ // Set IO Map base address to indicate no IO map present.
+
+ // N.B. -1 does not seem to be a valid value for the map base. If this
+    //      value is used, byte immediate in's and out's will actually go to
+    //      the hardware when executed in V86 mode.
+
+ Tss->IoMapBase = KiComputeIopmOffset(IO_ACCESS_MAP_NONE);
+
+    // Set flags to 0, which in particular disables traps on task switches.
+
+ Tss->Flags = 0;
+
+
+ // Set LDT and Ss0 to constants used by NT.
+
+ Tss->LDT = 0;
+ Tss->Ss0 = KGDT_R0_DATA;
+
+ return;
+}
+
+VOID
+KiInitializeTSS2 (
+ IN PKTSS Tss,
+ IN PKGDTENTRY TssDescriptor
+ )
+
+/*++
+
+Routine Description:
+
+ Do part of TSS init we do only once.
+
+Arguments:
+
+ Tss - Linear address of the Task State Segment.
+
+ TssDescriptor - Linear address of the descriptor for the TSS.
+
+Return Value:
+
+ None.
+
+--*/
+{
+ PUCHAR p;
+ ULONG i;
+ ULONG j;
+
+ //
+ // Set limit for TSS
+ //
+
+ if (TssDescriptor != NULL) {
+ TssDescriptor->LimitLow = sizeof(KTSS) - 1;
+ TssDescriptor->HighWord.Bits.LimitHi = 0;
+ }
+
+ //
+ // Initialize IOPMs
+ //
+
+ for (i = 0; i < IOPM_COUNT; i++) {
+ p = (PUCHAR)(Tss->IoMaps[i].IoMap);
+
+ for (j = 0; j < PIOPM_SIZE; j++) {
+ p[j] = (UCHAR)-1;
+ }
+ }
+
+ //
+ // Initialize Software Interrupt Direction Maps
+ //
+
+ for (i = 0; i < IOPM_COUNT; i++) {
+ p = (PUCHAR)(Tss->IoMaps[i].DirectionMap);
+ for (j = 0; j < INT_DIRECTION_MAP_SIZE; j++) {
+ p[j] = 0;
+ }
+ }
+
+ //
+ // Initialize the map for IO_ACCESS_MAP_NONE
+ //
+ p = (PUCHAR)(Tss->IntDirectionMap);
+ for (j = 0; j < INT_DIRECTION_MAP_SIZE; j++) {
+ p[j] = 0;
+ }
+
+ return;
+}
+
+VOID
+KiSwapIDT (
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to edit the IDT. It swaps words of the address
+ and access fields around into the format the part actually needs.
+ This allows for easy static init of the IDT.
+
+ Note that this procedure edits the current IDT.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+{
+ LONG Index;
+ USHORT Temp;
+
+ //
+ // Rearrange the entries of IDT to match i386 interrupt gate structure
+ //
+
+ for (Index = 0; Index <= MAXIMUM_IDTVECTOR; Index += 1) {
+ Temp = IDT[Index].Selector;
+ IDT[Index].Selector = IDT[Index].ExtendedOffset;
+ IDT[Index].ExtendedOffset = Temp;
+ }
+}
+
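The swap makes sense given the hardware layout of an i386 interrupt gate, where the 32-bit handler offset is split around the selector and access words. Exchanging the second and fourth words therefore turns a statically built table whose first two words hold the full offset into the format below. The structure and field names are illustrative; the field order is the processor-defined one:

    typedef struct _EXAMPLE_IDTENTRY {
        USHORT OffsetLow;           // handler offset bits 0..15
        USHORT Selector;            // code segment selector
        USHORT Access;              // present / DPL / gate type
        USHORT ExtendedOffset;      // handler offset bits 16..31
    } EXAMPLE_IDTENTRY;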
+ULONG
+KiGetFeatureBits ()
+/*++
+
+    Return the NT feature bits supported by this processor
+
+--*/
+{
+ UCHAR Buffer[50];
+ ULONG Junk, ProcessorFeatures, NtBits;
+ ULONG CpuVendor;
+ PKPRCB Prcb;
+
+ NtBits = 0;
+
+ Prcb = KeGetCurrentPrcb();
+ Prcb->VendorString[0] = 0;
+
+ if (!Prcb->CpuID) {
+ return NtBits;
+ }
+
+ //
+ // Determine the processor type
+ //
+
+ CPUID (0, &Junk, (PULONG) Buffer+0, (PULONG) Buffer+2, (PULONG) Buffer+1);
+ Buffer[12] = 0;
+
+ //
+ // Copy vendor string to Prcb for debugging
+ //
+
+ strcpy (Prcb->VendorString, Buffer);
+
+ //
+ // Determine OEM type
+ //
+
+ CpuVendor = CPU_NONE;
+ if (strcmp (Buffer, "GenuineIntel") == 0) {
+ CpuVendor = CPU_INTEL;
+ } else if (strcmp (Buffer, "AuthenticAMD") == 0) {
+ CpuVendor = CPU_AMD;
+ } else if (strcmp (Buffer, CmpCyrixID) == 0) {
+ CpuVendor = CPU_CYRIX;
+ }
+
+ //
+ // Determine which NT compatible features are present
+ //
+
+ CPUID (1, &Junk, &Junk, &Junk, &ProcessorFeatures);
+
+ if (CpuVendor == CPU_INTEL || CpuVendor == CPU_AMD || CpuVendor == CPU_CYRIX) {
+ if (ProcessorFeatures & 0x100) {
+ NtBits |= KF_CMPXCHG8B;
+ }
+
+ if (ProcessorFeatures & 0x10) {
+ NtBits |= KF_RDTSC;
+ }
+
+ if (ProcessorFeatures & 0x02) {
+ NtBits |= KF_V86_VIS | KF_CR4;
+ }
+
+ if (ProcessorFeatures & 0x00800000) {
+ NtBits |= KF_MMX;
+ }
+ }
+
+
+ if (CpuVendor == CPU_INTEL || CpuVendor == CPU_CYRIX) {
+
+ if (ProcessorFeatures & 0x08) {
+ NtBits |= KF_LARGE_PAGE | KF_CR4;
+ }
+
+ if (ProcessorFeatures & 0x2000) {
+ NtBits |= KF_GLOBAL_PAGE | KF_CR4;
+ }
+
+ if (ProcessorFeatures & 0x8000) {
+ NtBits |= KF_CMOV;
+ }
+ }
+
+ //
+ // Intel specific stuff
+ //
+
+ if (CpuVendor == CPU_INTEL) {
+ if (ProcessorFeatures & 0x1000) {
+ NtBits |= KF_MTRR;
+ }
+
+ if (Prcb->CpuType == 6) {
+ WRMSR (0x8B, 0);
+ CPUID (1, &Junk, &Junk, &Junk, &ProcessorFeatures);
+ Prcb->UpdateSignature.QuadPart = RDMSR (0x8B);
+ }
+ }
+
+ return NtBits;
+}
+
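The masks tested above correspond to the architecturally defined CPUID function 1 EDX feature bits. A small reference sketch of that mapping (the macro names are illustrative, not from ki.h):

    #define CPUID_VME   0x00000002  // bit 1  - V86 mode extensions  -> KF_V86_VIS | KF_CR4
    #define CPUID_PSE   0x00000008  // bit 3  - 4MB pages            -> KF_LARGE_PAGE | KF_CR4
    #define CPUID_TSC   0x00000010  // bit 4  - RDTSC                -> KF_RDTSC
    #define CPUID_CX8   0x00000100  // bit 8  - CMPXCHG8B            -> KF_CMPXCHG8B
    #define CPUID_MTRR  0x00001000  // bit 12 - memory type ranges   -> KF_MTRR
    #define CPUID_PGE   0x00002000  // bit 13 - global pages         -> KF_GLOBAL_PAGE | KF_CR4
    #define CPUID_CMOV  0x00008000  // bit 15 - conditional move     -> KF_CMOV
    #define CPUID_MMX   0x00800000  // bit 23 - MMX instructions     -> KF_MMX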
+#define MAX_ATTEMPTS 10
+
+BOOLEAN
+KiInitMachineDependent (
+ VOID
+ )
+{
+ KAFFINITY ActiveProcessors, CurrentAffinity;
+ ULONG NumberProcessors;
+ IDENTITY_MAP IdentityMap;
+ ULONG Index;
+ ULONG Average;
+ ULONG Junk;
+ struct {
+ LARGE_INTEGER PerfStart;
+ LARGE_INTEGER PerfEnd;
+ LONGLONG PerfDelta;
+ LARGE_INTEGER PerfFreq;
+ LONGLONG TSCStart;
+ LONGLONG TSCEnd;
+ LONGLONG TSCDelta;
+ ULONG MHz;
+ } Samples[MAX_ATTEMPTS], *pSamp;
+
+ //
+ // If PDE large page is supported, enable it.
+ //
+ // We enable large pages before global pages to make TLB invalidation
+ // easier while turning on large pages.
+ //
+
+ if (KeFeatureBits & KF_LARGE_PAGE) {
+ if (Ki386CreateIdentityMap(&IdentityMap)) {
+
+ KiIpiGenericCall (
+ (PKIPI_BROADCAST_WORKER) Ki386EnableTargetLargePage,
+ (ULONG)(&IdentityMap)
+ );
+ }
+
+ //
+ // Always call Ki386ClearIdentityMap() to free any memory allocated
+ //
+
+ Ki386ClearIdentityMap(&IdentityMap);
+ }
+
+ //
+ // If PDE/PTE global page is supported, enable it
+ //
+
+ if (KeFeatureBits & KF_GLOBAL_PAGE) {
+ NumberProcessors = KeNumberProcessors;
+ KiIpiGenericCall (
+ (PKIPI_BROADCAST_WORKER) Ki386EnableGlobalPage,
+ (ULONG)(&NumberProcessors)
+ );
+ }
+
+ ActiveProcessors = KeActiveProcessors;
+ for (CurrentAffinity=1; ActiveProcessors; CurrentAffinity <<= 1) {
+
+ if (ActiveProcessors & CurrentAffinity) {
+
+ //
+ // Switch to that processor, and remove it from the
+ // remaining set of processors
+ //
+
+ ActiveProcessors &= ~CurrentAffinity;
+ KeSetSystemAffinityThread(CurrentAffinity);
+
+ //
+ // Determine the MHz for the processor
+ //
+
+ KeGetCurrentPrcb()->MHz = 0;
+
+ if (KeFeatureBits & KF_RDTSC) {
+
+ Index = 0;
+ pSamp = Samples;
+
+ for (; ;) {
+
+ //
+ // Collect a new sample
+ // Delay the thread a "long" amount and time it with
+ // a time source and RDTSC.
+ //
+
+ CPUID (0, &Junk, &Junk, &Junk, &Junk);
+ pSamp->PerfStart = KeQueryPerformanceCounter (NULL);
+ pSamp->TSCStart = RDTSC();
+ pSamp->PerfFreq.QuadPart = -50000;
+
+ KeDelayExecutionThread (KernelMode, FALSE, &pSamp->PerfFreq);
+
+ CPUID (0, &Junk, &Junk, &Junk, &Junk);
+ pSamp->PerfEnd = KeQueryPerformanceCounter (&pSamp->PerfFreq);
+ pSamp->TSCEnd = RDTSC();
+
+ //
+                    // Calculate the processor's MHz
+ //
+
+ pSamp->PerfDelta = pSamp->PerfEnd.QuadPart - pSamp->PerfStart.QuadPart;
+ pSamp->TSCDelta = pSamp->TSCEnd - pSamp->TSCStart;
+
+ pSamp->MHz = (ULONG) ((pSamp->TSCDelta * pSamp->PerfFreq.QuadPart + 500000L) /
+ (pSamp->PerfDelta * 1000000L));
+
+
+ //
+ // If last 2 samples matched, done
+ //
+
+ if (Index && pSamp->MHz == pSamp[-1].MHz) {
+ break;
+ }
+
+ //
+ // Advance to next sample
+ //
+
+ pSamp += 1;
+ Index += 1;
+
+ //
+ // If too many samples, then something is wrong
+ //
+
+ if (Index >= MAX_ATTEMPTS) {
+
+#if DBG
+ //
+ // Temp breakpoint to see where this is failing
+ // and why
+ //
+
+ DbgBreakPoint();
+#endif
+
+ Average = 0;
+ for (Index = 0; Index < MAX_ATTEMPTS; Index++) {
+ Average += Samples[Index].MHz;
+ }
+ pSamp[-1].MHz = Average / MAX_ATTEMPTS;
+ break;
+ }
+
+ }
+
+ KeGetCurrentPrcb()->MHz = (USHORT) pSamp[-1].MHz;
+ }
+
+ //
+ // If MTRR is supported, initialize per processor
+ //
+
+ if (KeFeatureBits & KF_MTRR) {
+ KiInitializeMTRR ( (BOOLEAN) (ActiveProcessors ? FALSE : TRUE));
+ }
+ }
+ }
+
+ KeRevertToUserAffinityThread();
+ return TRUE;
+}
+
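The MHz value above is computed as (TSCDelta * PerfFreq + 500000) / (PerfDelta * 10^6). A standalone worked instance with hypothetical counter values (a 1 MHz performance counter and a roughly 5 ms delay on a 200 MHz part):

    #include <stdio.h>

    int main(void)
    {
        long long TSCDelta  = 1000000;      /* cycles counted during the delay */
        long long PerfFreq  = 1000000;      /* performance counter frequency   */
        long long PerfDelta = 5000;         /* counter ticks in the same delay */

        long long MHz = (TSCDelta * PerfFreq + 500000LL) /
                        (PerfDelta * 1000000LL);

        printf("%lld MHz\n", MHz);          /* prints 200 */
        return 0;
    }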
+
+VOID
+KeOptimizeProcessorControlState (
+ VOID
+ )
+{
+ Ke386ConfigureCyrixProcessor ();
+}
+
+
+
+VOID
+KeSetup80387OrEmulate (
+ IN PVOID *R3EmulatorTable
+ )
+
+/*++
+
+Routine Description:
+
+ This routine is called by PS initialization after loading UDLL.
+
+ If this is a 386 system without 387s (all processors must be
+ symmetrical) then this function will set the trap 07 vector on all
+ processors to point to the address passed in (which should be the
+ entry point of the 80387 emulator in UDLL, NPXNPHandler).
+
+Arguments:
+
+    R3EmulatorTable - Supplies a pointer to a two entry table: the address
+        of the user mode trap07 handler (NPXNPHandler) and the emulator's
+        rounding mode table.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PKINTERRUPT_ROUTINE HandlerAddress;
+ KAFFINITY ActiveProcessors, CurrentAffinity;
+ KIRQL OldIrql;
+ ULONG disposition;
+ HANDLE SystemHandle, SourceHandle, DestHandle;
+ NTSTATUS Status;
+ UNICODE_STRING unicodeString;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ double Dividend, Divisor;
+    BOOLEAN PrecisionErrata = FALSE;
+
+ if (KeI386NpxPresent) {
+
+ //
+ // A coprocessor is present - check to see if the precision errata exists
+ //
+
+ PrecisionErrata = FALSE;
+
+ ActiveProcessors = KeActiveProcessors;
+ for (CurrentAffinity = 1; ActiveProcessors; CurrentAffinity <<= 1) {
+
+ if (ActiveProcessors & CurrentAffinity) {
+ ActiveProcessors &= ~CurrentAffinity;
+
+ //
+ // Run calculation on each processor.
+ //
+
+ KeSetSystemAffinityThread(CurrentAffinity);
+ _asm {
+
+ ;
+                ; This is going to destroy the state in the coprocessor,
+ ; but we know that there's no state currently in it.
+ ;
+
+ cli
+ mov eax, cr0
+ mov ecx, eax ; hold original cr0 value
+ and eax, not (CR0_TS+CR0_MP+CR0_EM)
+ mov cr0, eax
+
+ fninit ; to known state
+ }
+
+ Dividend = 4195835.0;
+ Divisor = 3145727.0;
+
+ _asm {
+ fld Dividend
+                    fdiv    Divisor         ; test known faulty division
+                    fmul    Divisor         ; Multiply quotient by divisor
+ fcomp Dividend ; Compare product and dividend
+ fstsw ax ; Move float conditions to ax
+ sahf ; move to eflags
+
+ mov cr0, ecx ; restore cr0
+ sti
+
+ jc short em10
+ jz short em20
+em10: mov PrecisionErrata, TRUE
+em20:
+ }
+ }
+ }
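    //
    // Note (worked example, approximate figures): on an affected Pentium,
    // 4195835.0 / 3145727.0 evaluates to about 1.3337391 instead of the
    // correct 1.3338204, so multiplying the quotient back by the divisor
    // yields roughly 4195579 rather than 4195835, the fcomp above reports
    // "not equal", and PrecisionErrata is set.
    //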
+
+
+ //
+ // Check to see if the emulator should be used anyway
+ //
+
+ switch (KeI386ForceNpxEmulation) {
+ case 0:
+ //
+ // Use the emulator based on the value in KeI386NpxPresent
+ //
+
+ break;
+
+ case 1:
+ //
+ // Only use the emulator if any processor has the known
+ // Pentium floating point division problem.
+ //
+
+ if (PrecisionErrata) {
+ KeI386NpxPresent = FALSE;
+ }
+ break;
+
+ default:
+
+ //
+        // Unknown setting - use the emulator
+ //
+
+ KeI386NpxPresent = FALSE;
+ break;
+ }
+ }
+
+ //
+ // Setup processor features, and install emulator if needed
+ //
+
+ SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_EMULATED] = KeI386NpxPresent;
+ SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] = PrecisionErrata;
+
+ if (!KeI386NpxPresent) {
+
+ //
+        // MMX not available when the emulator is used
+ //
+
+ KeFeatureBits &= ~KF_MMX;
+ SharedUserData->ProcessorFeatures[PF_MMX_INSTRUCTIONS_AVAILABLE] = FALSE;
+
+ //
+ // Errata not present when using emulator
+ //
+
+ SharedUserData->ProcessorFeatures[PF_FLOATING_POINT_PRECISION_ERRATA] = FALSE;
+
+ //
+ // Use the user mode floating point emulator
+ //
+
+ HandlerAddress = (PKINTERRUPT_ROUTINE) ((PULONG) R3EmulatorTable)[0];
+ Ki387RoundModeTable = (PVOID) ((PULONG) R3EmulatorTable)[1];
+
+ ActiveProcessors = KeActiveProcessors;
+ for (CurrentAffinity = 1; ActiveProcessors; CurrentAffinity <<= 1) {
+
+ if (ActiveProcessors & CurrentAffinity) {
+ ActiveProcessors &= ~CurrentAffinity;
+
+ //
+ // Run this code on each processor.
+ //
+
+ KeSetSystemAffinityThread(CurrentAffinity);
+
+ //
+ // Raise IRQL and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Make the trap 07 IDT entry point at the passed-in handler
+ //
+
+ KiSetHandlerAddressToIDT(I386_80387_NP_VECTOR, HandlerAddress);
+ KeGetPcr()->IDT[I386_80387_NP_VECTOR].Selector = KGDT_R3_CODE;
+ KeGetPcr()->IDT[I386_80387_NP_VECTOR].Access = TRAP332_GATE;
+
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+ }
+
+ //
+ // Move any entries from ..\System\FloatingPointProcessor to
+ // ..\System\DisabledFloatingPointProcessor.
+ //
+
+ //
+ // Open system tree
+ //
+
+ InitializeObjectAttributes(
+ &ObjectAttributes,
+ &CmRegistryMachineHardwareDescriptionSystemName,
+ OBJ_CASE_INSENSITIVE,
+ NULL,
+ NULL
+ );
+
+ Status = ZwOpenKey( &SystemHandle,
+ KEY_ALL_ACCESS,
+ &ObjectAttributes
+ );
+
+ if (NT_SUCCESS(Status)) {
+
+ //
+ // Open FloatingPointProcessor key
+ //
+
+ InitializeObjectAttributes(
+ &ObjectAttributes,
+ &CmTypeName[FloatingPointProcessor],
+ OBJ_CASE_INSENSITIVE,
+ SystemHandle,
+ NULL
+ );
+
+ Status = ZwOpenKey ( &SourceHandle,
+ KEY_ALL_ACCESS,
+ &ObjectAttributes
+ );
+
+ if (NT_SUCCESS(Status)) {
+
+ //
+ // Create DisabledFloatingPointProcessor key
+ //
+
+ RtlInitUnicodeString (
+ &unicodeString,
+ CmDisabledFloatingPointProcessor
+ );
+
+ InitializeObjectAttributes(
+ &ObjectAttributes,
+ &unicodeString,
+ OBJ_CASE_INSENSITIVE,
+ SystemHandle,
+ NULL
+ );
+
+ Status = ZwCreateKey( &DestHandle,
+ KEY_ALL_ACCESS,
+ &ObjectAttributes,
+ 0,
+ NULL,
+ REG_OPTION_VOLATILE,
+ &disposition
+ );
+
+ if (NT_SUCCESS(Status)) {
+
+ //
+ // Move it
+ //
+
+ KiMoveRegTree (SourceHandle, DestHandle);
+ ZwClose (DestHandle);
+ }
+ ZwClose (SourceHandle);
+ }
+ ZwClose (SystemHandle);
+ }
+ }
+
+ //
+ // Set affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+}
+
+
+
+NTSTATUS
+KiMoveRegTree(
+ HANDLE Source,
+ HANDLE Dest
+ )
+{
+ NTSTATUS Status;
+ PKEY_BASIC_INFORMATION KeyInformation;
+ PKEY_VALUE_FULL_INFORMATION KeyValue;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ HANDLE SourceChild;
+ HANDLE DestChild;
+ ULONG ResultLength;
+ UCHAR buffer[1024]; // hmm....
+ UNICODE_STRING ValueName;
+ UNICODE_STRING KeyName;
+
+
+ KeyValue = (PKEY_VALUE_FULL_INFORMATION)buffer;
+
+ //
+ // Move values from source node to dest node
+ //
+
+ for (; ;) {
+ //
+ // Get first value
+ //
+
+ Status = ZwEnumerateValueKey(Source,
+ 0,
+ KeyValueFullInformation,
+ buffer,
+ sizeof (buffer),
+ &ResultLength);
+
+ if (!NT_SUCCESS(Status)) {
+ break;
+ }
+
+
+ //
+ // Write value to dest node
+ //
+
+ ValueName.Buffer = KeyValue->Name;
+ ValueName.Length = (USHORT) KeyValue->NameLength;
+ ZwSetValueKey( Dest,
+ &ValueName,
+ KeyValue->TitleIndex,
+ KeyValue->Type,
+ buffer+KeyValue->DataOffset,
+ KeyValue->DataLength
+ );
+
+ //
+ // Delete value and get first value again
+ //
+
+ Status = ZwDeleteValueKey (Source, &ValueName);
+ if (!NT_SUCCESS(Status)) {
+ break;
+ }
+ }
+
+
+ //
+ // Enumerate node's children and apply ourselves to each one
+ //
+
+ KeyInformation = (PKEY_BASIC_INFORMATION)buffer;
+ for (; ;) {
+
+ //
+ // Open node's first key
+ //
+
+ Status = ZwEnumerateKey(
+ Source,
+ 0,
+ KeyBasicInformation,
+ KeyInformation,
+ sizeof (buffer),
+ &ResultLength
+ );
+
+ if (!NT_SUCCESS(Status)) {
+ break;
+ }
+
+ KeyName.Buffer = KeyInformation->Name;
+ KeyName.Length = (USHORT) KeyInformation->NameLength;
+
+ InitializeObjectAttributes(
+ &ObjectAttributes,
+ &KeyName,
+ OBJ_CASE_INSENSITIVE,
+ Source,
+ NULL
+ );
+
+ Status = ZwOpenKey(
+ &SourceChild,
+ KEY_ALL_ACCESS,
+ &ObjectAttributes
+ );
+
+ if (!NT_SUCCESS(Status)) {
+ break;
+ }
+
+ //
+ // Create key in dest tree
+ //
+
+ InitializeObjectAttributes(
+ &ObjectAttributes,
+ &KeyName,
+ OBJ_CASE_INSENSITIVE,
+ Dest,
+ NULL
+ );
+
+ Status = ZwCreateKey(
+ &DestChild,
+ KEY_ALL_ACCESS,
+ &ObjectAttributes,
+ 0,
+ NULL,
+ REG_OPTION_VOLATILE,
+ NULL
+ );
+
+ if (!NT_SUCCESS(Status)) {
+ break;
+ }
+
+ //
+ // Move subtree
+ //
+
+ Status = KiMoveRegTree(SourceChild, DestChild);
+
+ ZwClose(DestChild);
+ ZwClose(SourceChild);
+
+ if (!NT_SUCCESS(Status)) {
+ break;
+ }
+
+ //
+        // Loop and get the first key.  (The old first key was deleted by the
+        // call to KiMoveRegTree.)
+ //
+ }
+
+ //
+ // Remove source node
+ //
+
+ return NtDeleteKey (Source);
+}
diff --git a/private/ntos/ke/i386/ki386.h b/private/ntos/ke/i386/ki386.h
new file mode 100644
index 000000000..9819abd0f
--- /dev/null
+++ b/private/ntos/ke/i386/ki386.h
@@ -0,0 +1,34 @@
+
+
+typedef struct _IDENTITY_MAP {
+ unsigned long IdentityCR3;
+ unsigned long IdentityLabel;
+ PHARDWARE_PTE PageDirectory;
+ PHARDWARE_PTE IdentityMapPT;
+ PHARDWARE_PTE CurrentMapPT;
+} IDENTITY_MAP, *PIDENTITY_MAP;
+
+
+VOID
+Ki386ClearIdentityMap(
+ PIDENTITY_MAP IdentityMap
+ );
+
+VOID
+Ki386EnableTargetLargePage(
+ PIDENTITY_MAP IdentityMap
+ );
+
+BOOLEAN
+Ki386CreateIdentityMap(
+ PIDENTITY_MAP IdentityMap
+ );
+
+VOID
+Ki386EnableCurrentLargePage (
+ IN ULONG IdentityAddr,
+ IN ULONG IdentityCr3
+ );
+
+#define KiGetPdeOffset(va) (((ULONG)(va)) >> 22)
+#define KiGetPteOffset(va) ((((ULONG)(va)) << 10) >> 22)
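For 4 KB x86 paging these macros extract the page-directory index (top 10 bits) and the page-table index (next 10 bits) of a virtual address. A standalone sketch with a worked value, using a 32-bit type to stand in for ULONG:

    #include <stdio.h>
    #include <stdint.h>

    #define ExamplePdeOffset(va) (((uint32_t)(va)) >> 22)
    #define ExamplePteOffset(va) ((((uint32_t)(va)) << 10) >> 22)

    int main(void)
    {
        uint32_t va = 0x80123456u;

        /* page-directory index 0x200, page-table index 0x123 */
        printf("pde=%x pte=%x\n", ExamplePdeOffset(va), ExamplePteOffset(va));
        return 0;
    }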
diff --git a/private/ntos/ke/i386/kimacro.inc b/private/ntos/ke/i386/kimacro.inc
new file mode 100644
index 000000000..1618578e7
--- /dev/null
+++ b/private/ntos/ke/i386/kimacro.inc
@@ -0,0 +1,1288 @@
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; kimacro.inc
+;
+; Abstract:
+;
+; This module contains the macros used by kernel assembler code.
+; It includes macros to manipulate interrupts, support system
+; entry and exit for syscalls, faults, and interrupts, and
+; manipulate floating point state.
+;
+; Author:
+;
+; Shie-Lin (shielint) 24-Jan-1990
+;
+; Revision History:
+;
+; BryanWi 17-Aug-90
+; Replace GENERATE_MACHINE... and RESTORE... with ENTER_...
+; and EXIT_ALL macros.
+;
+;--
+
+;++
+;
+; These constants are used by the fpo directives in this file.
+; This directive causes the assembler to output a .debug$f segment
+; in the obj file. The segment will contain 1 fpo record for each
+; directive present during assembly.
+;
+; Although the assembler will accept all valid values, the value of 7
+; in the FPO_REGS field indicates to the debugger that a trap frame is
+; generated by the function. The value of 7 can be used because the
+; C/C++ compiler puts a maximum value of 3 in the field.
+;
+FPO_LOCALS equ 0 ; 32 bits, size of locals in dwords
+FPO_PARAMS equ 0 ; 32 bits, size of parameters in dwords
+FPO_PROLOG equ 0 ; 12 bits, 0-4095, # of bytes in prolog
+FPO_REGS equ 0 ; 3 bits, 0-7, # regs saved in prolog
+FPO_USE_EBP equ 0 ; 1 bit, 0-1, is ebp used?
+FPO_TRAPFRAME equ 1 ; 2 bits, 0=fpo, 1=trap frame, 2=tss
+;
+;--
+
+
+;++
+;
+; POLL_DEBUGGER
+;
+; Macro Description:
+;
+; Call the debugger so it can check for control-c. If it finds
+; it, it will report our iret address as address of break-in.
+;
+; N.B. This macro should be used when all the caller's registers
+; have been restored. (Otherwise, the kernel debugger register
+; dump will not have correct state.) The only exception is
+; fs. This is because Kd may need to access PCR or PRCB.
+;
+; Arguments:
+;
+; There MUST be an iret frame on the stack when this macro
+; is invoked.
+;
+; Exit:
+;
+; Debugger will iret for us, so we don't usually return from
+; this macro, but remember that it generates nothing for non-DEVL
+; kernels.
+;--
+
+POLL_DEBUGGER macro
+local a, b, c_
+
+if DEVL
+ EXTRNP _DbgBreakPointWithStatus,1
+ stdCall _KdPollBreakIn
+ or al,al
+ jz short c_
+ stdCall _DbgBreakPointWithStatus,<DBG_STATUS_CONTROL_C>
+c_:
+endif ; DEVL
+endm
+
+;++
+;
+; ASSERT_FS
+;
+; Try to catch funky condition wherein we get FS=r3 value while
+; running in kernel mode.
+;
+;--
+
+ASSERT_FS macro
+local a,b
+
+if DBG
+ EXTRNP _KeBugCheck,1
+
+ mov bx,fs
+ cmp bx,KGDT_R0_PCR
+ jnz short a
+
+ cmp dword ptr fs:[0], 0
+ jne short b
+
+a:
+ stdCall _KeBugCheck,<-1>
+align 4
+b:
+endif
+endm
+
+
+
+;++
+;
+;
+; Copy data from various places into base of TrapFrame, net effect
+; Copy data from various places into the base of the TrapFrame; the net
+; effect is to allow the debugger kb command to trace across the trap
+; frame, and to allow the user to find arguments to system calls.
+;
+; Uses ebx and edi.
+
+SET_DEBUG_DATA macro
+
+ife FPO
+
+;
+; This macro is used by ENTER_SYSTEM_CALL, ENTER_TRAP and ENTER_INTERRUPT
+; and is used at the end of above macros. It is safe to destroy ebx, edi.
+;
+
+ mov ebx,[ebp]+TsEbp
+ mov edi,[ebp]+TsEip
+ mov [ebp]+TsDbgArgPointer,edx
+ mov [ebp]+TsDbgArgMark,0BADB0D00h
+ mov [ebp]+TsDbgEbp,ebx
+ mov [ebp]+TsDbgEip,edi
+endif
+
+endm
+
+
+;++
+;
+; ENTER_DR_ASSIST EnterLabel, ExitLabel, NoAbiosAssist, NoV86Assist
+;
+; Macro Description:
+;
+; Jumped to by ENTER_ macros to deal with DR register work,
+; abios work and v86 work. The main purpose of this macro is
+; that interrupt/trap/systemCall EnterMacros can jump here to
+; deal with some special cases such that most of the times the
+; main ENTER_ execution flow can proceed without being branched.
+;
+; If (previousmode == usermode) {
+; save DR* in trapframe
+; load DR* from Prcb
+; }
+;
+; Arguments:
+; EnterLabel - label to emit
+; ExitLabel - label to branch to when done
+;
+; Entry-conditions:
+; Dr work:
+; DebugActive == TRUE
+; (esi)->Thread object
+; (esp)->base of trap frame
+; (ebp)->base of trap frame
+;
+; Abios work:
+; v86 work:
+;
+; Exit-conditions:
+; Dr work:
+; Interrupts match input state (this routine doesn't change IEF)
+; (esp)->base of trap frame
+; (ebp)->base of trap frame
+; Preserves entry eax, edx
+; Abios work:
+; v86 work:
+;
+;--
+
+ENTER_DR_ASSIST macro EnterLabel, ExitLabel, NoAbiosAssist, NoV86Assist, V86R
+ local a,b
+
+ public Dr_&EnterLabel
+align 4
+Dr_&EnterLabel:
+
+;
+; Test if we came from user-mode. If not, do nothing.
+;
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz short a
+
+ test dword ptr [ebp]+TsSegCs,MODE_MASK
+ jz Dr_&ExitLabel ; called from kmode, go continue
+
+
+;
+; Save user-mode Dr* regs in TrapFrame
+;
+; We are safe to destroy ebx, ecx, edi because in ENTER_INTERRUPT and
+; ENTER_TRAP these registers are saved already.  In ENTER_SYSCALL
+; ebx and edi are saved and ecx is a don't-care.
+;
+
+a: mov ebx,dr0
+ mov ecx,dr1
+ mov edi,dr2
+ mov [ebp]+TsDr0,ebx
+ mov [ebp]+TsDr1,ecx
+ mov [ebp]+TsDr2,edi
+ mov ebx,dr3
+ mov ecx,dr6
+ mov edi,dr7
+ mov [ebp]+TsDr3,ebx
+ mov [ebp]+TsDr6,ecx
+ mov [ebp]+TsDr7,edi
+
+;
+; Load KernelDr* into processor
+;
+
+ mov edi,dword ptr fs:[PcPrcb]
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr0
+ mov ecx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr1
+ mov dr0,ebx
+ mov dr1,ecx
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr2
+ mov ecx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr3
+ mov dr2,ebx
+ mov dr3,ecx
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr6
+ mov ecx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr7
+ mov dr6,ebx
+ mov dr7,ecx
+
+ifnb <V86R>
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jz short b
+ jmp Dr_&V86R
+endif
+b:
+ jmp Dr_&ExitLabel
+
+
+ifb <NoAbiosAssist>
+
+ public Abios_&EnterLabel
+align 4
+Abios_&EnterLabel:
+
+;
+; INTERRUPT_STACK16_TO_STACK32
+;
+; This macro remaps current 32bit stack to 16bit stack at interrupt
+; time.
+;
+; Arguments:
+;
+; (esp)->trap frame.
+; (eax)->Entry Esp.
+;
+
+ mov eax, [esp].TsErrCode ; (eax) = Entry Esp
+ mov ecx, KGDT_R0_DATA
+ mov edx, esp
+ shl eax, 16
+ add edx, fs:[PcstackLimit]
+ mov [esp].TsErrCode, eax
+ mov ss, cx
+ mov esp, edx ; Interrupts are off
+ mov ebp, edx
+ jmp Abios_&ExitLabel
+
+endif ; NoAbiosAssist
+
+ifb <NoV86Assist>
+
+ public V86_&EnterLabel
+align 4
+V86_&EnterLabel:
+
+;
+; Move the V86 segment registers to the correct place in the frame
+;
+ mov eax,dword ptr [ebp].TsV86Fs
+ mov ebx,dword ptr [ebp].TsV86Gs
+ mov ecx,dword ptr [ebp].TsV86Es
+ mov edx,dword ptr [ebp].TsV86Ds
+ mov [ebp].TsSegFs,ax
+ mov [ebp].TsSegGs,bx
+ mov [ebp].TsSegEs,cx
+ mov [ebp].TsSegDs,dx
+ jmp V86_&ExitLabel
+
+endif ; NoV86Assist
+
+ endm
+
+;++
+;
+; ENTER_SYSCALL AssistLabel, TargetLabel
+;
+; Macro Description:
+;
+; Build the frame and set registers needed by a system call.
+;
+; Save:
+; Errorpad,
+; Non-volatile regs,
+; FS,
+; ExceptionList,
+; PreviousMode
+;
+; Don't Save:
+; Volatile regs
+; Seg regs
+; Floating point state
+;
+; Set:
+; FS,
+; ExceptionList,
+; PreviousMode,
+; Direction
+;
+; Arguments:
+; AssistLabel - label ENTER_ASSIST macro is at
+; TargetLabel - label to emit for ENTER_ASSIST to jump to
+;
+; Exit-conditions:
+; Interrupts match input state (this routine doesn't change IEF)
+; (esp)->base of trap frame
+; (ebp)->base of trap frame
+; Preserves entry eax, edx
+;
+; Note:
+; The DS: reference to PreviousMode is *required* for correct
+; functioning of lazy selector loads. If you remove this use
+; of DS:, put a DS: override on something.
+;
+;--
+
+ENTER_SYSCALL macro AssistLabel, TargetLabel
+
+
+.FPO ( FPO_LOCALS, FPO_PARAMS, FPO_PROLOG, FPO_REGS, FPO_USE_EBP, FPO_TRAPFRAME )
+
+ifdef KERNELONLY
+
+;
+; Construct trap frame.
+;
+; N.B. The initial part of the trap frame is constructed by pushing values
+; on the stack. If the format of the trap frame is changed, then the
+;      following code must also be changed.
+;
+
+ push 0 ; put pad dword for error on stack
+ push ebp ; save the non-volatile registers
+ push ebx ;
+ push esi ;
+ push edi ;
+ push fs ; save and set FS to PCR.
+ mov ebx,KGDT_R0_PCR ; set PCR segment number
+ mov fs,bx ;
+
+;
+; Save the old exception list in trap frame and initialize a new empty
+; exception list.
+;
+
+ push PCR[PcExceptionList] ; save old exception list
+ mov PCR[PcExceptionList],EXCEPTION_CHAIN_END ; set new empty list
+
+;
+; Save the old previous mode in trap frame, allocate remainder of trap frame,
+; and set the new previous mode.
+;
+
+ mov esi,PCR[PcPrcbData+PbCurrentThread] ; get current thread address
+ push [esi]+ThPreviousMode ; save old previous mode
+ sub esp,TsPreviousPreviousMode ; allocate remainder of trap frame
+ mov ebx,[esp+TsSegCS] ; compute new previous mode
+ and ebx,MODE_MASK ;
+ mov [esi]+ThPreviousMode,bl ; set new previous mode
+
+;
+; Save the old trap frame address and set the new trap frame address.
+;
+
+ mov ebp,esp ; set trap frame address
+ mov ebx,[esi].ThTrapFrame ; save current trap frame address
+ mov [ebp].TsEdx,ebx ;
+ mov [esi].ThTrapFrame,ebp ; set new trap frame address
+ cld ; make sure direction is forward
+
+ SET_DEBUG_DATA ; Note this destroys edi
+
+ test byte ptr [esi]+ThDebugActive,-1 ; test if debugging active
+ jnz Dr_&AssistLabel ; if nz, debugging is active on thread
+
+Dr_&TargetLabel: ;
+ sti ; enable interrupts
+
+else
+    %out ENTER_SYSCALL outside of kernel
+ .err
+endif
+ endm
+
+;++
+;
+; ENTER_INTERRUPT AssistLabel, TargetLabel
+;
+; Macro Description:
+;
+; Build the frame and set registers needed by an interrupt.
+;
+; Save:
+; Errorpad,
+; Non-volatile regs,
+; FS,
+; ExceptionList,
+; PreviousMode
+; Volatile regs
+; Seg regs from V86 mode
+; DS, ES, GS
+;
+; Don't Save:
+; Floating point state
+;
+; Set:
+; FS,
+; ExceptionList,
+; Direction,
+; DS, ES
+;
+; Don't Set:
+; PreviousMode
+;
+; Arguments:
+; AssistLabel - label ENTER_ASSIST macro is at
+; TargetLabel - label to emit for ENTER_ASSIST to jump to
+;
+; Exit-conditions:
+; Interrupts match input state (this routine doesn't change IEF)
+; (esp)->base of trap frame
+; (ebp)->base of trap frame
+; Preserves entry eax, ecx, edx
+;
+;--
+
+ENTER_INTERRUPT macro AssistLabel, TargetLabel, PassParm
+ local b
+
+.FPO ( FPO_LOCALS+2, FPO_PARAMS, FPO_PROLOG, FPO_REGS, FPO_USE_EBP, FPO_TRAPFRAME )
+
+;
+; Fill in parts of frame we care about
+;
+
+ifb <PassParm>
+ push esp ; Use Error code field to save 16bit esp
+endif
+ push ebp ; Save the non-volatile registers
+ push ebx
+ push esi
+ push edi
+
+ sub esp, TsEdi
+ mov ebp,esp
+
+ mov [esp]+TsEax, eax ; Save volatile registers
+ mov [esp]+TsEcx, ecx
+ mov [esp]+TsEdx, edx
+if DBG
+ mov dword ptr [esp]+TsPreviousPreviousMode, -1 ; ThPreviousMode not pushed on interrupt
+endif
+
+ test dword ptr [esp].TsEflags,EFLAGS_V86_MASK
+ jnz V86_&AssistLabel
+
+ cmp word ptr [esp]+TsSegCs, KGDT_R0_CODE
+ jz short @f
+
+ mov [esp]+TsSegFs, fs ; Save and set FS to PCR.
+ mov [esp]+TsSegDs, ds
+ mov [esp]+TsSegEs, es
+ mov [esp]+TsSegGs, gs
+
+V86_&TargetLabel:
+ mov ebx,KGDT_R0_PCR
+ mov eax,KGDT_R3_DATA OR RPL_MASK
+ mov fs, bx
+ mov ds, ax
+ mov es, ax
+@@:
+ mov ebx, fs:[PcExceptionList] ;Save, set ExceptionList
+ mov fs:[PcExceptionList],EXCEPTION_CHAIN_END
+ mov [esp]+TsExceptionList, ebx
+
+ifnb <PassParm>
+ lea eax, [esp].TsErrCode
+ lea ecx, [esp].TsEip ; Move eax to EIP field
+ mov ebx, ss:[eax] ; (ebx) = parameter to pass
+ mov ss:[eax], ecx ; save 16bit esp
+endif
+
+;
+; Remap ABIOS 16 bit stack to 32 bit stack, if necessary.
+;
+
+ cmp esp, 10000h
+ jb Abios_&AssistLabel
+
+ mov dword ptr [esp].TsErrCode, 0 ; Indicate no remapping.
+Abios_&TargetLabel:
+
+;
+; end of Abios stack checking
+;
+
+ cld
+
+ifnb <PassParm>
+ push ebx ; push parameter as argument
+endif
+
+
+ SET_DEBUG_DATA
+
+ test byte ptr PCR[PcDebugActive], -1
+ jnz Dr_&AssistLabel
+
+Dr_&TargetLabel:
+
+ endm
+
+
+;++
+;
+; ENTER_TRAP AssistLabel, TargetLabel
+;
+; Macro Description:
+;
+; Build the frame and set registers needed by a trap or exception.
+;
+; Save:
+; Non-volatile regs,
+; FS,
+; ExceptionList,
+; PreviousMode,
+; Volatile regs
+; Seg Regs from V86 mode
+; DS, ES, GS
+;
+; Don't Save:
+; Floating point state
+;
+; Set:
+; FS,
+; Direction,
+; DS, ES
+;
+; Don't Set:
+; PreviousMode,
+; ExceptionList
+;
+; Arguments:
+; AssistLabel - label ENTER_ASSIST macro is at
+; TargetLabel - label to emit for ENTER_ASSIST to jump to
+;
+; Exit-conditions:
+; Interrupts match input state (this routine doesn't change IEF)
+; (esp)->base of trap frame
+; (ebp)->base of trap frame
+; Preserves entry eax
+;
+;--
+
+ENTER_TRAP macro AssistLabel, TargetLabel
+ local b
+
+.FPO ( FPO_LOCALS, FPO_PARAMS, FPO_PROLOG, FPO_REGS, FPO_USE_EBP, FPO_TRAPFRAME )
+
+;
+; Fill in parts of frame we care about
+;
+
+if DBG
+ifndef _Ki16BitStackException
+ EXTRNP _Ki16BitStackException
+endif
+endif ; DBG
+
+ mov word ptr [esp+2], 0 ; Clear upper word of ErrorCode
+
+ push ebp ; Save the non-volatile registers
+ push ebx
+ push esi
+ push edi
+
+ push fs ; Save and set FS to PCR.
+ mov ebx,KGDT_R0_PCR
+ mov fs,bx
+ mov ebx, fs:[PcExceptionList] ;Save ExceptionList
+ push ebx
+if DBG
+ push -1 ; Don't need to save ThPreviousMode from trap
+else
+ sub esp, 4 ; pad dword
+endif
+ push eax ; Save the volatile registers
+ push ecx
+ push edx
+
+ push ds ; Save segments
+ push es
+ push gs
+
+;
+; Skip allocating the rest of the trap frame and set up DS/ES, which may be trash
+;
+
+ mov ax,KGDT_R3_DATA OR RPL_MASK
+ sub esp,TsSegGs
+ mov ds,ax
+ mov es,ax
+
+if DBG
+;
+; The code here checks if the exception occurred in ring 0
+; ABIOS code.  If so, this is a fatal condition.  We will
+; put out a message and bugcheck.
+;
+
+ cmp esp, 10000h ; Is the trap in abios?
+ jb _Ki16BitStackException ; if b, yes, switch stack and bugcheck.
+
+endif ; DBG
+
+ mov ebp,esp
+ test dword ptr [esp].TsEflags,EFLAGS_V86_MASK
+ jnz V86_&AssistLabel
+
+V86_&TargetLabel:
+
+ cld
+ SET_DEBUG_DATA
+
+ test byte ptr PCR[PcDebugActive], -1
+ jnz Dr_&AssistLabel
+
+Dr_&TargetLabel:
+
+ endm
+;++
+;
+; EXIT_ALL NoRestoreSegs, NoRestoreVolatiles, NoPreviousMode
+;
+; Macro Description:
+;
+; Load a syscall frame back into the machine.
+;
+; Restore:
+; Volatile regs, IF NoRestoreVolatiles blank
+; NoPreviousMode,
+; ExceptionList,
+; FS,
+; Non-volatile regs
+;
+; If the frame is a kernel mode frame, AND esp has been edited,
+; then TsSegCs will have a special value. Test for that value
+; and execute special code for that case.
+;
+; N.B. This macro generates an IRET! (i.e. It exits!)
+;
+; Arguments:
+;
+; NoRestoreSegs - non-blank if DS, ES, GS are NOT to be restored
+;
+; NoRestoreVolatiles - non-blank if Volatile regs are NOT to be restored
+;
+; NoPreviousMode - if nb pop ThPreviousMode
+;
+; Entry-conditions:
+;
+; (esp)->base of trap frame
+; (ebp)->Base of trap frame
+;
+; Exit-conditions:
+;
+; Does not exit, returns.
+; Preserves eax, ecx, edx, IFF NoRestoreVolatiles is set
+;
+;--
+
+?adjesp = 0
+?RestoreAll = 1
+
+EXIT_ALL macro NoRestoreSegs, NoRestoreVolatiles, NoPreviousMode
+local a, b, f, x
+local Abios_ExitHelp, Abios_ExitHelp_Target1, Abios_ExitHelp_Target2
+local Dr_ExitHelp, Dr_ExitHelp_Target, V86_ExitHelp, V86_ExitHelp_Target
+local Db_NotATrapFrame, Db_A, Db_NotValidEntry, NonFlatPm_Target
+
+;
+; Sanity check some values and setup globals for macro
+;
+
+?adjesp = TsSegGs
+?RestoreAll = 1
+
+ifnb <NoRestoreSegs>
+ ?RestoreAll = 0
+ ?adjesp = ?adjesp + 12
+endif
+
+ifnb <NoRestoreVolatiles>
+ if ?RestoreAll eq 1
+ %out "EXIT_ALL NoRestoreVolatiles requires NoRestoreSegs"
+ .err
+ endif
+ ?adjesp = ?adjesp + 12
+endif
+
+ifb <NoPreviousMode>
+ifndef KERNELONLY
+ %out EXIT_ALL can not restore previousmode outside kernel
+ .err
+endif
+endif
+
+; All callers are responsible for getting here with interrupts disabled.
+
+if DBG
+ pushfd
+ pop edx
+
+ test edx, EFLAGS_INTERRUPT_MASK
+ jnz Db_NotValidEntry
+
+ cmp esp, ebp ; make sure esp = ebp
+ jne Db_NotValidEntry
+
+; Make sure BADB0D00 sig is present. If not this isn't a trap frame!
+Db_A: sub [esp]+TsDbgArgMark,0BADB0D00h
+ jne Db_NotATrapFrame
+endif
+
+ ASSERT_FS
+
+ mov edx, [esp]+TsExceptionList
+if DBG
+ or edx, edx
+ jnz short @f
+ int 3
+@@:
+endif
+ mov ebx, fs:[PcDebugActive] ; (ebx) = DebugActive flag
+ mov fs:[PcExceptionList], edx ; Restore ExceptionList
+
+ifb <NoPreviousMode>
+ mov ecx, [esp]+TsPreviousPreviousMode ; Restore PreviousMode
+if DBG
+ cmp ecx, -1 ; temporary debugging code
+ jne @f ; to make sure no one tries to pop ThPreviousMode
+ int 3 ; when it wasn't saved
+@@:
+endif
+ mov esi,fs:[PcPrcbData+PbCurrentThread]
+ mov [esi]+ThPreviousMode,cl
+else
+if DBG
+ mov ecx, [esp]+TsPreviousPreviousMode
+ cmp ecx, -1 ; temporary debugging code
+ je @f ; to make sure no one pushed ThPreviousMode and
+    int 3                           ; is now exiting without restoring it
+@@:
+endif
+endif
+
+ test ebx, 0fh
+ jnz Dr_ExitHelp
+
+Dr_ExitHelp_Target:
+
+ test dword ptr [esp].TsEflags,EFLAGS_V86_MASK
+ jnz V86_ExitHelp
+
+ test word ptr [esp]+TsSegCs,FRAME_EDITED
+ jz b ; Edited frame pop out.
+
+
+if ?RestoreAll eq 0
+.errnz MODE_MASK-1
+ cmp word ptr [esp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK ; set/clear ZF
+ bt word ptr [esp]+TsSegCs,0 ; test MODE_MASK set/clear CF
+ cmc ; (CF=1 and ZF=0)
+ ja f ; jmp if CF=0 and ZF=0
+endif
+ifb <NoRestoreVolatiles>
+ifb <NoRestoreSegs> ; must restore eax before any
+ mov eax, [esp].TsEax ; selectors! (see trap0e handler)
+endif
+endif
+
+ifb <NoRestoreVolatiles>
+        mov     edx, [ebp]+TsEdx                ; Restore volatiles
+ mov ecx, [ebp]+TsEcx
+ifb <NoRestoreSegs>
+else
+ mov eax, [ebp]+TsEax
+endif
+endif ; NoRestoreVolatiles
+
+ cmp word ptr [ebp]+TsSegCs, KGDT_R0_CODE
+ jz short @f
+
+ifb <NoRestoreSegs>
+ lea esp, [ebp]+TsSegGs
+ pop gs ; Restore Segs
+ pop es
+ pop ds
+endif
+NonFlatPm_Target:
+ lea esp, [ebp]+TsSegFs
+ pop fs
+@@:
+V86_ExitHelp_Target:
+
+ lea esp, [ebp]+TsEdi ; Skip PreMode, ExceptList and fs
+
+ pop edi ; restore non-volatiles
+ pop esi
+ pop ebx
+ pop ebp
+
+;
+; Esp MUST point to the error code on the stack, because we use it to
+; store the entering esp.
+;
+
+ cmp word ptr [esp+8], 80h ; check for abios code segment?
+ ja Abios_ExitHelp
+
+Abios_ExitHelp_Target1:
+
+ add esp, 4 ; remove error code from trap frame
+
+Abios_ExitHelp_Target2:
+
+;
+; End of ABIOS stack check
+;
+
+ iretd ; return
+
+if DBG
+Db_NotATrapFrame:
+ add [esp]+TsDbgArgMark,0BADB0D00h ; put back the orig value
+Db_NotValidEntry:
+ int 3
+ jmp Db_A
+endif
+
+;
+; EXIT_HELPER
+;
+; if (PreviousMode == UserMode) {
+; DR* regs = TF.Dr* regs
+; }
+;
+; Entry-Conditions:
+;
+; DebugActive == TRUE
+; (ebp)->TrapFrame
+;
+;--
+
+align dword
+Dr_ExitHelp:
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz short x
+
+ test dword ptr [ebp]+TsSegCs,MODE_MASK
+ jz Dr_ExitHelp_Target
+
+x: mov esi,[ebp]+TsDr0
+ mov edi,[ebp]+TsDr1
+ mov ebx,[ebp]+TsDr2
+ mov dr0,esi
+ mov dr1,edi
+ mov dr2,ebx
+ mov esi,[ebp]+TsDr3
+ mov edi,[ebp]+TsDr6
+ mov ebx,[ebp]+TsDr7
+ mov dr3,esi
+ mov dr6,edi
+ mov dr7,ebx
+
+ jmp Dr_ExitHelp_Target
+
+align dword
+Abios_ExitHelp:
+
+;
+; INTERRUPT_STACK32_TO_STACK16
+;
+; This macro remaps current 32bit stack to 16bit stack at interrupt
+; time.
+;
+; Arguments:
+;
+; (esp)->TsEip.
+;
+;
+; PERFNOTE shielint We should check if there is any other H/W interrupt
+; pending. If yes, don't switch back to 16 bit stack. This way
+; we can get better performance.
+;
+
+ cmp word ptr [esp+2], 0 ; (esp+2) = Low word of error code
+ jz short Abios_ExitHelp_Target1
+ cmp word ptr [esp], 0 ; (esp) = High word of error code
+ jnz short Abios_ExitHelp_Target1
+
+ shr dword ptr [esp], 16
+ mov word ptr [esp + 2], KGDT_STACK16
+ lss sp, dword ptr [esp]
+ movzx esp, sp
+ jmp short Abios_ExitHelp_Target2
+
+;
+; Restore volatiles for V86 mode, and move seg regs
+;
+
+align dword
+V86_ExitHelp:
+
+ add esp,TsEdx
+ pop edx
+ pop ecx
+ pop eax
+ jmp V86_ExitHelp_Target
+
+;
+if ?RestoreAll eq 0
+;
+; Restore segs and volatiles for non-flat R3 PM (VDM in PM)
+;
+
+f: mov eax,[esp].TsEax ; restore eax before any selectors
+ ; (see trap0e handler)
+ add esp,TsSegGs
+
+ pop gs
+ pop es
+ pop ds
+
+ pop edx
+ pop ecx
+ jmp NonFlatPm_Target
+
+endif ; not ?RestoreAll
+
+
+;
+; TsSegCs contains the special value that means the frame was edited
+; in a way that affected esp, AND it's a kernel mode frame.
+; (Special value is null selector except for RPL.)
+;
+; Put back the real CS.
+; push eflags, eip onto target stack
+; restore
+; switch to target stack
+; iret
+;
+
+b: mov ebx,[esp]+TsTempSegCs
+ mov [esp]+TsSegCs,ebx
+
+;
+; There is no instruction that will load esp with an arbitrary value
+; (i.e. one out of a frame) and do a return, if no privledge transition
+; is occuring. Therefore, if we are returning to kernel mode, and
+; esp has been edited, we must "emulate" a kind of iretd.
+;
+; We do this by logically pushing the eip,cs,eflags onto the new
+; logical stack, loading that stack, and doing an iretd. This
+; requires that the new logical stack is at least 1 dword higher
+; than the unedited esp would have been. (i.e. It is not legal
+; to edit esp to have a new value < the old value.)
+;
+; KeContextToKframes enforces this rule.
+;
+
+;
+; Compute new logical stack address
+;
+
+ mov ebx,[esp]+TsTempEsp
+ sub ebx,12
+ mov [esp]+TsErrCode,ebx
+
+;
+; Copy eip,cs,eflags to new stack. note we do this high to low
+;
+
+ mov esi,[esp]+TsEflags
+ mov [ebx+8],esi
+ mov esi,[esp]+TsSegCs
+ mov [ebx+4],esi
+ mov esi,[esp]+TsEip
+ mov [ebx],esi
+
+;
+; Do a standard restore sequence.
+;
+; Observe that NoRestoreVolatiles is honored.  Editing a volatile
+; register has no effect when returning from a system call.
+;
+ifb <NoRestoreVolatiles>
+ mov eax,[esp].TsEax
+endif
+; add esp,TsSegGs
+;
+;ifb <NoRestoreSegs>
+; pop gs
+; pop es
+; pop ds
+;else
+; add esp,12
+;endif
+
+ifb <NoRestoreVolatiles>
+ mov edx, [esp]+TsEdx
+ mov ecx, [esp]+TsEcx
+endif
+
+;ifnb <NoPreviousMode>
+; add esp, 4 ; Skip previous mode
+;else
+; pop ebx ; Restore PreviousMode
+; mov esi,fs:[PcPrcbData+PbCurrentThread]
+; mov ss:[esi]+ThPreviousMode,bl
+;endif
+;
+; pop ebx
+;
+; mov fs:[PcExceptionList], ebx ;Restore ExceptionList
+; pop fs
+
+ add esp, TsEdi
+ pop edi ; restore non-volatiles
+ pop esi
+ pop ebx
+ pop ebp
+
+;
+; (esp)->TsErrCode, where we saved the new esp
+;
+
+ mov esp,[esp] ; Do move not push to avoid increment
+ iretd
+
+ endm
+
+
+;++
+;
+; INTERRUPT_EXIT
+;
+; Macro Description:
+;
+;    This macro is executed on return from an interrupt vector
+;    service routine.  Its function is to restore privileged processor
+;    state, and continue thread execution.  If control is returning to
+;    user mode and there is a user APC pending, then an APC level interrupt
+;    will be requested and control is transferred to the user APC delivery
+;    routine, if no higher level interrupt is pending.
+;
+; Arguments:
+;
+; (TOS) = previous irql
+; (TOS+4) = irq vector to eoi
+; (TOS+8 ...) = machine_state frame
+; (ebp)-> machine state frame (trap frame)
+;
+;--
+
+INTERRUPT_EXIT macro DebugCheck
+local a
+
+ifnb <DebugCheck>
+ POLL_DEBUGGER
+endif
+if DBG ; save current eip for
+a: mov esi, offset a ; debugging bad trap frames
+endif
+
+ifdef __imp_Kei386EoiHelper@0
+ cli
+ call _HalEndSystemInterrupt@8
+ jmp dword ptr [__imp_Kei386EoiHelper@0]
+
+else
+ cli
+ call dword ptr [__imp__HalEndSystemInterrupt@8]
+ jmp Kei386EoiHelper@0
+endif
+endm
+
+
+;++
+;
+; SPURIOUS_INTERRUPT_EXIT
+;
+; Macro Description:
+;
+; To exit an interrupt without performing the EOI.
+;
+; Arguments:
+;
+; (TOS) = machine_state frame
+; (ebp)-> machine state frame (trap frame)
+;
+;--
+
+SPURIOUS_INTERRUPT_EXIT macro
+local a
+if DBG ; save current eip for
+a: mov esi, offset a ; debugging bad trap frames
+endif
+ifdef __imp_Kei386EoiHelper@0
+ jmp dword ptr [__imp_Kei386EoiHelper@0]
+else
+ jmp Kei386EoiHelper@0
+endif
+endm
+
+;++
+;
+; ENTER_TRAPV86
+;
+; Macro Description:
+;
+; Construct trap frame for v86 mode traps.
+;
+;--
+
+ENTER_TRAPV86 macro DRENTER,V86ENTER,LOADES
+ sub esp, TsErrCode
+ mov word ptr [esp].TsErrCode + 2, 0
+ mov [esp].TsEbx, ebx
+ mov [esp].TsEax, eax
+ mov [esp].TsEbp, ebp
+ mov [esp].TsEsi, esi
+ mov [esp].TsEdi, edi
+ mov ebx, KGDT_R0_PCR
+ mov eax, KGDT_R3_DATA OR RPL_MASK
+ mov [esp].TsEcx, ecx
+ mov [esp].TsEdx, edx
+if DBG
+ mov [esp].TsPreviousPreviousMode, -1
+ mov [esp]+TsDbgArgMark, 0BADB0D00h
+endif
+ mov fs, bx
+ mov ds, ax
+ifnb <LOADES>
+ mov es, ax
+endif
+ mov ebp, esp
+ cld ; CHECKIT_SUDEEP ; do we really need it
+ test byte ptr PCR[PcDebugActive], -1
+ jnz Dr_&DRENTER
+
+Dr_&V86ENTER:
+endm
+
+
+;
+; Taken from ntos\vdm\i386\vdmtb.inc
+;
+
+FIXED_NTVDMSTATE_LINEAR_PC_AT equ 0714H
+FIXED_NTVDMSTATE_LINEAR_PC_98 equ 0614H
+MACHINE_TYPE_MASK equ 0ff00H
+VDM_VIRTUAL_INTERRUPTS equ 0200H
+
+;++
+;
+; EXIT_TRAPV86
+;
+; Macro Description:
+;
+; if UserApc is pending deliver it
+; if User Context is v86 mode
+; Exit from kernel (does not return)
+; else
+; return (expected to execute EXIT_ALL)
+;--
+
+EXIT_TRAPV86 macro
+ local w, x, y, z
+
+z: mov ebx, PCR[PcPrcbData+PbCurrentThread]
+ mov byte ptr [ebx]+ThAlerted, 0
+ cmp byte ptr [ebx]+ThApcState.AsUserApcPending, 0
+ jne short w
+
+ ;
+ ; Kernel exit to V86 mode
+ ;
+
+ add esp,TsEdx
+ pop edx
+ pop ecx
+ pop eax
+ test byte ptr PCR[PcDebugActive], -1
+ jnz short x
+y:
+ add esp,12 ; unused fields
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+ add esp,4 ; clear error code
+ iretd
+
+x: mov esi,[ebp]+TsDr0
+ mov edi,[ebp]+TsDr1
+ mov ebx,[ebp]+TsDr2
+ mov dr0,esi
+ mov dr1,edi
+ mov dr2,ebx
+ mov esi,[ebp]+TsDr3
+ mov edi,[ebp]+TsDr6
+ mov ebx,[ebp]+TsDr7
+ mov dr3,esi
+ mov dr6,edi
+ mov dr7,ebx
+ jmp short y
+
+w:
+ ;
+ ; Dispatch user mode APC
+ ; The APC routine runs with interrupts on and at APC level
+ ;
+
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+ sti
+
+ stdCall _KiDeliverApc, <1, 0, ebp> ; ebp - Trap frame
+ ; 0 - Null exception frame
+ ; 1 - Previous mode
+
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+
+ cli
+
+ ;
+        ; The user APC may have changed the context to the vdm Monitor
+        ; context (user flat 32).  If it has, we cannot use the v86-only kernel exit.
+ ;
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz short z
+
+ ; Exit to do EXIT_ALL
+endm
+
diff --git a/private/ntos/ke/i386/largepag.c b/private/ntos/ke/i386/largepag.c
new file mode 100644
index 000000000..22760cb6f
--- /dev/null
+++ b/private/ntos/ke/i386/largepag.c
@@ -0,0 +1,179 @@
+#include "ki.h"
+#include "ki386.h"
+
+#ifdef ALLOC_PRAGMA
+
+#pragma alloc_text(INIT,Ki386CreateIdentityMap)
+#pragma alloc_text(INIT,Ki386ClearIdentityMap)
+#pragma alloc_text(INIT,Ki386EnableTargetLargePage)
+
+#endif
+
+extern PVOID Ki386LargePageIdentityLabel;
+
+
+BOOLEAN
+Ki386CreateIdentityMap(
+ IN OUT PIDENTITY_MAP IdentityMap
+ )
+{
+/*++
+
+ This function creates a page directory and page tables such that the
+ address "Ki386LargePageIdentityLabel" is mapped with 2 different mappings.
+ The first mapping is the current kernel mapping being used for the label.
+ The second mapping is an identity mapping, such that the physical address
+ of "Ki386LargePageIdentityLabel" is also its linear address.
+ Both mappings are created for 2 code pages.
+
+ This function assumes that the mapping does not require 2 PDE entries.
+    This could happen only if the 2 pages mapped from
+    "Ki386LargePageIdentityLabel" cross a 4 meg boundary.
+
+Arguments:
+ IdentityMap - Pointer to the structure which will be filled with the newly
+ created Page Directory address and physical = linear address
+ for the label "Ki386LargePageIdentityLabel". It also provides
+ storage for the pointers used in allocating and freeing the
+ memory.
+Return Value:
+
+ TRUE if the function succeeds, FALSE otherwise.
+
+ Note - Ki386ClearIdentityMap() should be called even on FALSE return to
+ free any memory allocated.
+
+--*/
+
+ PHYSICAL_ADDRESS PageDirPhysical, CurrentMapPTPhysical,
+ IdentityMapPTPhysical, IdentityLabelPhysical;
+ PHARDWARE_PTE Pte;
+ ULONG Index;
+
+    IdentityMap->IdentityMapPT = NULL; // set in case of failure
+    IdentityMap->CurrentMapPT = NULL;  // set in case of failure
+
+ IdentityMap->PageDirectory = ExAllocatePool(NonPagedPool, PAGE_SIZE);
+ if (IdentityMap->PageDirectory == NULL ) {
+ return(FALSE);
+ }
+
+ // The Page Directory and page tables must be aligned to page boundaries.
+ ASSERT((((ULONG) IdentityMap->PageDirectory) & (PAGE_SIZE-1)) == 0);
+
+ IdentityMap->IdentityMapPT = ExAllocatePool(NonPagedPool, PAGE_SIZE);
+ if (IdentityMap->IdentityMapPT == NULL ) {
+ return(FALSE);
+ }
+
+ ASSERT((((ULONG) IdentityMap->IdentityMapPT) & (PAGE_SIZE-1)) == 0);
+
+ IdentityMap->CurrentMapPT = ExAllocatePool(NonPagedPool, PAGE_SIZE);
+ if (IdentityMap->CurrentMapPT == NULL ) {
+ return(FALSE);
+ }
+
+ ASSERT((((ULONG) IdentityMap->CurrentMapPT) & (PAGE_SIZE-1)) == 0);
+
+ PageDirPhysical = MmGetPhysicalAddress(IdentityMap->PageDirectory);
+ IdentityMapPTPhysical = MmGetPhysicalAddress(IdentityMap->IdentityMapPT);
+ CurrentMapPTPhysical = MmGetPhysicalAddress(IdentityMap->CurrentMapPT);
+ IdentityLabelPhysical = MmGetPhysicalAddress(&Ki386LargePageIdentityLabel);
+
+ if ( (PageDirPhysical.LowPart == 0) ||
+ (IdentityMapPTPhysical.LowPart == 0) ||
+ (CurrentMapPTPhysical.LowPart == 0) ||
+ (IdentityLabelPhysical.LowPart == 0) ) {
+ return(FALSE);
+ }
+
+ // Write the pfn address of current map for Ki386LargePageIdentityLabel in PDE
+ Index = KiGetPdeOffset(&Ki386LargePageIdentityLabel);
+ Pte = &IdentityMap->PageDirectory[Index];
+ *(PULONG)Pte = 0;
+ Pte->PageFrameNumber = (CurrentMapPTPhysical.LowPart >> PAGE_SHIFT);
+ Pte->Valid = 1;
+
+ // Write the pfn address of current map for Ki386LargePageIdentityLabel in PTE
+ Index = KiGetPteOffset(&Ki386LargePageIdentityLabel);
+ Pte = &IdentityMap->CurrentMapPT[Index];
+ *(PULONG)Pte = 0;
+ Pte->PageFrameNumber = (IdentityLabelPhysical.LowPart >> PAGE_SHIFT);
+ Pte->Valid = 1;
+
+ // Map a second page, just in case the code crosses a page boundary
+ Pte = &IdentityMap->CurrentMapPT[Index+1];
+ *(PULONG)Pte = 0;
+ Pte->PageFrameNumber = ((IdentityLabelPhysical.LowPart >> PAGE_SHIFT) + 1);
+ Pte->Valid = 1;
+
+ // Write the pfn address of identity map for Ki386LargePageIdentityLabel in PDE
+ Index = KiGetPdeOffset(IdentityLabelPhysical.LowPart);
+ Pte = &IdentityMap->PageDirectory[Index];
+ *(PULONG)Pte = 0;
+ Pte->PageFrameNumber = (IdentityMapPTPhysical.LowPart >> PAGE_SHIFT);
+ Pte->Valid = 1;
+
+ // Write the pfn address of identity map for Ki386LargePageIdentityLabel in PTE
+ Index = KiGetPteOffset(IdentityLabelPhysical.LowPart);
+ Pte = &IdentityMap->IdentityMapPT[Index];
+ *(PULONG)Pte = 0;
+ Pte->PageFrameNumber = (IdentityLabelPhysical.LowPart >> PAGE_SHIFT);
+ Pte->Valid = 1;
+
+ // Map a second page, just in case the code crosses a page boundary
+ Pte = &IdentityMap->IdentityMapPT[Index+1];
+ *(PULONG)Pte = 0;
+ Pte->PageFrameNumber = ((IdentityLabelPhysical.LowPart >> PAGE_SHIFT) + 1);
+ Pte->Valid = 1;
+
+ IdentityMap->IdentityCR3 = PageDirPhysical.LowPart;
+ IdentityMap->IdentityLabel = IdentityLabelPhysical.LowPart;
+
+ return(TRUE);
+
+}
+
+
+VOID
+Ki386ClearIdentityMap(
+ IN PIDENTITY_MAP IdentityMap
+ )
+{
+/*++
+
+ This function just frees the page directory and page tables created in
+ Ki386CreateIdentityMap().
+
+--*/
+
+ if (IdentityMap->PageDirectory != NULL ) {
+
+ ExFreePool(IdentityMap->PageDirectory);
+ }
+
+ if (IdentityMap->IdentityMapPT != NULL ) {
+
+ ExFreePool(IdentityMap->IdentityMapPT);
+ }
+
+ if (IdentityMap->CurrentMapPT != NULL ) {
+
+ ExFreePool(IdentityMap->CurrentMapPT);
+ }
+}
+
+VOID
+Ki386EnableTargetLargePage(
+ IN PIDENTITY_MAP IdentityMap
+ )
+{
+/*++
+
+ This function just passes info on to the assembly routine
+    Ki386EnableCurrentLargePage().
+
+--*/
+
+ Ki386EnableCurrentLargePage(IdentityMap->IdentityLabel, IdentityMap->IdentityCR3);
+}
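The routines above are meant to be used as a sequence: build the identity map, switch to it while enabling large pages, then free it. Below is a minimal, hedged usage sketch; the wrapper name is hypothetical, and only the three entry points defined in this file plus the IDENTITY_MAP type (assumed to come from the kernel headers) are used.

VOID
ExampleEnableLargePages (VOID)          // hypothetical wrapper, not in this module
{
    IDENTITY_MAP IdentityMap;

    if (Ki386CreateIdentityMap (&IdentityMap)) {

        //
        // Ki386LargePageIdentityLabel is now mapped at its physical
        // address as well, so CR3 can be reloaded safely while large
        // pages are enabled.
        //

        Ki386EnableTargetLargePage (&IdentityMap);
    }

    //
    // Per the note in Ki386CreateIdentityMap, the cleanup routine is
    // called even when the create step returned FALSE.
    //

    Ki386ClearIdentityMap (&IdentityMap);
}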
diff --git a/private/ntos/ke/i386/ldtsup.c b/private/ntos/ke/i386/ldtsup.c
new file mode 100644
index 000000000..3df09295f
--- /dev/null
+++ b/private/ntos/ke/i386/ldtsup.c
@@ -0,0 +1,392 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ ldtsup.c
+
+Abstract:
+
+ This module implements interfaces that support manipulation of i386 Ldts.
+ These entry points only exist on i386 machines.
+
+Author:
+
+ Bryan M. Willman (bryanwi) 14-May-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Low level assembler support procedures
+//
+
+VOID
+KiLoadLdtr(
+ VOID
+ );
+
+VOID
+KiFlushDescriptors(
+ VOID
+ );
+
+//
+// Local service procedures
+//
+
+VOID
+Ki386LoadTargetLdtr (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+Ki386FlushTargetDescriptors (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+Ke386SetLdtProcess (
+ IN PKPROCESS Process,
+ IN PLDT_ENTRY Ldt,
+ IN ULONG Limit
+ )
+/*++
+
+Routine Description:
+
+ The specified LDT (which may be null) will be made the active Ldt of
+ the specified process, for all threads thereof, on whichever
+ processors they are running. The change will take effect before the
+ call returns.
+
+ An Ldt address of NULL or a Limit of 0 will cause the process to
+ receive the NULL Ldt.
+
+ This function only exists on i386 and i386 compatible processors.
+
+ No checking is done on the validity of Ldt entries.
+
+
+ N.B.
+
+    While a single Ldt structure can be shared among processes, any
+ edits to the Ldt of one of those processes will only be synchronized
+ for that process. Thus, processes other than the one the change is
+ applied to may not see the change correctly.
+
+Arguments:
+
+ Process - Pointer to KPROCESS object describing the process for
+ which the Ldt is to be set.
+
+ Ldt - Pointer to an array of LDT_ENTRYs (that is, a pointer to an
+ Ldt.)
+
+ Limit - Ldt limit (must be 0 mod 8)
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KGDTENTRY LdtDescriptor;
+ BOOLEAN LocalProcessor;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ KAFFINITY TargetProcessors;
+
+ //
+ // Compute the contents of the Ldt descriptor
+ //
+
+ if ((Ldt == NULL) || (Limit == 0)) {
+
+ //
+ // Set up an empty descriptor
+ //
+
+ LdtDescriptor.LimitLow = 0;
+ LdtDescriptor.BaseLow = 0;
+ LdtDescriptor.HighWord.Bytes.BaseMid = 0;
+ LdtDescriptor.HighWord.Bytes.Flags1 = 0;
+ LdtDescriptor.HighWord.Bytes.Flags2 = 0;
+ LdtDescriptor.HighWord.Bytes.BaseHi = 0;
+
+ } else {
+
+ //
+        // Ensure that the unfilled fields of the selector are zero
+ // N.B. If this is not done, random values appear in the high
+ // portion of the Ldt limit.
+ //
+
+ LdtDescriptor.HighWord.Bytes.Flags1 = 0;
+ LdtDescriptor.HighWord.Bytes.Flags2 = 0;
+
+ //
+ // Set the limit and base
+ //
+
+ LdtDescriptor.LimitLow = (USHORT) ((ULONG) Limit - 1);
+ LdtDescriptor.BaseLow = (USHORT) ((ULONG) Ldt & 0xffff);
+ LdtDescriptor.HighWord.Bytes.BaseMid = (UCHAR) (((ULONG)Ldt & 0xff0000) >> 16);
+ LdtDescriptor.HighWord.Bytes.BaseHi = (UCHAR) (((ULONG)Ldt & 0xff000000) >> 24);
+
+ //
+ // Type is LDT, DPL = 0
+ //
+
+ LdtDescriptor.HighWord.Bits.Type = TYPE_LDT;
+ LdtDescriptor.HighWord.Bits.Dpl = DPL_SYSTEM;
+
+ //
+ // Make it present
+ //
+
+ LdtDescriptor.HighWord.Bits.Pres = 1;
+
+ }
+
+ //
+ // Acquire the context swap lock so a context switch cannot occur.
+ //
+
+ KiLockContextSwap(&OldIrql);
+
+ //
+ // Set the Ldt fields in the process object.
+ //
+
+ Process->LdtDescriptor = LdtDescriptor;
+
+ //
+ // Tell all processors active for this process to reload their LDTs
+ //
+
+#ifdef NT_UP
+
+ KiLoadLdtr();
+
+#else
+
+ Prcb = KeGetCurrentPrcb();
+ TargetProcessors = Process->ActiveProcessors & ~Prcb->SetMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ Ki386LoadTargetLdtr,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+ KiLoadLdtr();
+ if (TargetProcessors != 0) {
+
+ //
+ // Stall until target processor(s) release us
+ //
+
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Restore IRQL and release the context swap lock.
+ //
+
+ KiUnlockContextSwap(OldIrql);
+ return;
+}
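A short usage sketch of the interface documented above; Process and MyLdt are placeholders supplied by a hypothetical caller, and the only routine assumed is Ke386SetLdtProcess itself.

VOID
ExampleSetAndClearLdt (
    IN PKPROCESS Process,
    IN PLDT_ENTRY MyLdt                 // caller-built two-entry descriptor table
    )
{
    //
    // Install the LDT; the limit is in bytes and must be 0 mod 8
    // (two 8-byte entries here).
    //

    Ke386SetLdtProcess (Process, MyLdt, 2 * sizeof (LDT_ENTRY));

    //
    // An Ldt of NULL or a limit of 0 gives the process back the null LDT.
    //

    Ke386SetLdtProcess (Process, NULL, 0);
}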
+
+VOID
+Ki386LoadTargetLdtr (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+/*++
+
+Routine Description:
+
+ Reload local Ldt register and clear signal bit in TargetProcessor mask
+
+Arguments:
+
+    Argument - pointer to an IPI packet structure.
+ ReadyFlag - Pointer to flag to be set once LDTR has been reloaded
+
+Return Value:
+
+ none.
+
+--*/
+{
+
+ //
+ // Reload the LDTR register from currently active process object
+ //
+
+ KiLoadLdtr();
+ KiIpiSignalPacketDone(SignalDone);
+ return;
+}
+
+VOID
+Ke386SetDescriptorProcess (
+ IN PKPROCESS Process,
+ IN ULONG Offset,
+ IN LDT_ENTRY LdtEntry
+ )
+/*++
+
+Routine Description:
+
+ The specified LdtEntry (which could be 0, not present, etc) will be
+ edited into the specified Offset in the Ldt of the specified Process.
+    This will be synchronized across all the processors executing the
+    process.  The edit will take effect on all processors before the call
+ returns.
+
+ N.B.
+
+ Editing an Ldt descriptor requires stalling all processors active
+ for the process, to prevent accidental loading of descriptors in
+ an inconsistent state.
+
+Arguments:
+
+ Process - Pointer to KPROCESS object describing the process for
+ which the descriptor edit is to be performed.
+
+ Offset - Byte offset into the Ldt of the descriptor to edit.
+ Must be 0 mod 8.
+
+ LdtEntry - Value to edit into the descriptor in hardware format.
+ No checking is done on the validity of this item.
+
+Return Value:
+
+ none.
+
+--*/
+
+{
+
+ PLDT_ENTRY Ldt;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ KAFFINITY TargetProcessors;
+
+ //
+ // Compute address of descriptor to edit.
+ //
+
+ Ldt =
+ (PLDT_ENTRY)
+ ((Process->LdtDescriptor.HighWord.Bytes.BaseHi << 24) |
+ ((Process->LdtDescriptor.HighWord.Bytes.BaseMid << 16) & 0xff0000) |
+ (Process->LdtDescriptor.BaseLow & 0xffff));
+ Offset = Offset / 8;
+ MmLockPagedPool(&Ldt[Offset], sizeof(LDT_ENTRY));
+ KiLockContextSwap(&OldIrql);
+
+#ifdef NT_UP
+
+ //
+ // Edit the Ldt.
+ //
+
+ Ldt[Offset] = LdtEntry;
+
+#else
+
+ Prcb = KeGetCurrentPrcb();
+ TargetProcessors = Process->ActiveProcessors & ~Prcb->SetMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ Ki386FlushTargetDescriptors,
+ (PVOID)&Prcb->ReverseStall,
+ NULL,
+ NULL);
+
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // All target processors have flushed the segment descriptors and
+ // are waiting to proceed. Edit the ldt on the current processor,
+ // then continue the execution of target processors.
+ //
+
+ Ldt[Offset] = LdtEntry;
+ if (TargetProcessors != 0) {
+ Prcb->ReverseStall += 1;
+ }
+
+#endif
+
+ //
+ // Restore IRQL and release the context swap lock.
+ //
+
+ KiUnlockContextSwap(OldIrql);
+ MmUnlockPagedPool(&Ldt[Offset], sizeof(LDT_ENTRY));
+ return;
+}
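As a hedged illustration of the offset convention described above (slot N lives at byte offset N * 8), a caller editing a single descriptor might look like the sketch below; the wrapper and its arguments are hypothetical.

VOID
ExampleEditLdtSlot (
    IN PKPROCESS Process,
    IN LDT_ENTRY NewEntry               // contents are the caller's responsibility
    )
{
    //
    // Edit descriptor slot 1; no validity checking is performed by
    // Ke386SetDescriptorProcess, and the edit is synchronized across
    // all processors running the process before the call returns.
    //

    Ke386SetDescriptorProcess (Process, 1 * sizeof (LDT_ENTRY), NewEntry);
}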
+
+VOID
+Ki386FlushTargetDescriptors (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Proceed,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the segment descriptors on the current processor.
+
+Arguments:
+
+ Argument - pointer to a _KIPI_FLUSH_DESCRIPTOR structure.
+
+    ReadyFlag - pointer to flag to synchronize with
+
+Return Value:
+
+ none.
+
+--*/
+
+{
+ //
+ // Flush the segment descriptors on the current processor and signal that
+ // the descriptors have been flushed.
+ //
+
+ KiFlushDescriptors();
+ KiIpiSignalPacketDoneAndStall (SignalDone, Proceed);
+ return;
+}
diff --git a/private/ntos/ke/i386/ldtsup2.asm b/private/ntos/ke/i386/ldtsup2.asm
new file mode 100644
index 000000000..5ef9fcd43
--- /dev/null
+++ b/private/ntos/ke/i386/ldtsup2.asm
@@ -0,0 +1,164 @@
+ title "Ldt Support 2 - Low Level"
+;++
+;
+; Copyright (c) 1991 Microsoft Corporation
+;
+; Module Name:
+;
+; ldtsup2.asm
+;
+; Abstract:
+;
+; This module implements procedures to load a new ldt and to flush
+; segment descriptors.
+;
+; Author:
+;
+; Bryan M. Willman (bryanwi) 14-May-1991
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+.386p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include mac386.inc
+include callconv.inc
+ .list
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+;++
+;
+; VOID
+; KiLoadLdtr(
+; VOID
+; )
+;
+; Routine Description:
+;
+; This routine copies the Ldt descriptor image out of the currently
+;    executing process object into the Ldt descriptor, and reloads
+;    the Ldt descriptor into the Ldtr.  The effect of this is to provide
+; a new Ldt.
+;
+; If the Ldt descriptor image has a base or limit of 0, then NULL will
+; be loaded into the Ldtr, and no copy to the Gdt will be done.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiLoadLdtr, 0
+
+ push esi
+ push edi
+
+ mov eax,fs:PcPrcbData+PbCurrentThread ; (eax)->CurrentThread
+ mov eax,[eax]+(ThApcState+AsProcess) ; (eax)->CurrentProcess
+
+ lea esi,[eax]+PrLdtDescriptor ; (esi)->Ldt value
+ xor dx,dx ; assume null value
+ cmp word ptr [esi],0 ; limit == 0?
+ jz kill10 ; yes limit 0, go load null
+
+;
+; We have a non-null Ldt Descriptor, copy it into the Gdt
+;
+
+ mov edi,fs:PcGdt
+ add edi,KGDT_LDT ; (edi)->Ldt descriptor
+
+ movsd
+ movsd ; descrip. now matches value
+
+ mov dx,KGDT_LDT
+
+kill10: lldt dx
+
+ pop edi
+ pop esi
+
+ stdCall _KiFlushDescriptors
+
+ stdRET _KiLoadLdtr
+
+stdENDP _KiLoadLdtr
+
+
+
+;++
+;
+; VOID
+; KiFlushDescriptors(
+; VOID
+; )
+;
+; Routine Description:
+;
+; Flush the in-processor descriptor registers for the segment registers.
+; We do this by reloading each segment register.
+;
+; N.B.
+;
+; This procedure is only intended to support Ldt operations.
+; It does not support operations on the Gdt. In particular,
+; neither it nor Ke386SetDescriptorProcess are appropriate for
+; editing descriptors used by 16bit kernel code (i.e. ABIOS.)
+;
+; Since we are in kernel mode, we know that CS and SS do NOT
+;       contain Ldt selectors; any such selectors will be saved/restored
+; by the interrupt that brought us here from user space.
+;
+; Since we are in kernel mode, DS must contain a flat GDT descriptor,
+; since all entry sequences would have forced a reference to it.
+;
+; Since we are in kernel mode, FS points to the PCR, since all
+; entry sequences force it to.
+;
+; Therefore, only ES and GS need to be flushed.
+;
+; Since no inline kernel code ever uses GS, we know it will be
+; restored from a frame of some caller, or nobody cares. Therefore,
+; we load null into GS. (Fastest possible load.)
+;
+; ES is restored to KGDT_R3_DATA, because kernel exit will not restore
+; it for us. If we do not put the correct value in ES, we may wind
+; up with zero in ES in user mode.
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiFlushDescriptors ,0
+
+ xor ax,ax
+ mov gs,ax
+ push ds
+ pop es
+ stdRET _KiFlushDescriptors
+
+stdENDP _KiFlushDescriptors
+
+
+_TEXT$00 ends
+ end
+
diff --git a/private/ntos/ke/i386/mi.inc b/private/ntos/ke/i386/mi.inc
new file mode 100644
index 000000000..93678d6f9
--- /dev/null
+++ b/private/ntos/ke/i386/mi.inc
@@ -0,0 +1,43 @@
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; MI.INC
+;
+; Abstract:
+;
+; This module contains equates for x86 machine instructions
+;
+; Author:
+;
+; Dave Hastings 2 May 1991
+;
+; Notes:
+;
+; This information used to reside in Trap.asm, but is now needed in
+; multiple source files.
+;
+; Revision History:
+;--
+
+
+MAX_INSTRUCTION_LENGTH EQU 15
+MAX_INSTRUCTION_PREFIX_LENGTH EQU 4
+MI_LOCK_PREFIX EQU 0F0H
+MI_ADDR_PREFIX EQU 067H
+MI_TWO_BYTE EQU 0FH
+MI_HLT EQU 0F4H
+MI_LTR_LLDT EQU 0
+MI_LGDT_LIDT_LMSW EQU 01H
+MI_MODRM_MASK EQU 38H
+MI_LLDT_MASK EQU 10h
+MI_LTR_MASK EQU 18H
+MI_LGDT_MASK EQU 10H
+MI_LIDT_MASK EQU 18H
+MI_LMSW_MASK EQU 30H
+MI_SPECIAL_MOV_MASK EQU 20H
+MI_REP_INS_OUTS EQU 0F3H
+MI_MIN_INS_OUTS EQU 06CH
+MI_MAX_INS_OUTS EQU 06FH
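These equates encode the two-byte opcode groups (0F 00 for LLDT/LTR, 0F 01 for LGDT/LIDT/LMSW) and the /reg field of the ModRM byte. The hedged C sketch below shows how they combine; it mirrors the values above, ignores instruction prefixes, and is illustrative only rather than the kernel's actual decoder.

#define EX_MI_TWO_BYTE          0x0F    /* values copied from the equates above */
#define EX_MI_LTR_LLDT          0x00
#define EX_MI_LGDT_LIDT_LMSW    0x01
#define EX_MI_MODRM_MASK        0x38
#define EX_MI_LLDT_MASK         0x10
#define EX_MI_LTR_MASK          0x18
#define EX_MI_LGDT_MASK         0x10
#define EX_MI_LIDT_MASK         0x18
#define EX_MI_LMSW_MASK         0x30

/* Returns 1 if the (prefix-free) byte stream is LLDT, LTR, LGDT, LIDT or LMSW. */
static int
ExampleIsPrivilegedSystemInstruction (const unsigned char *Instr)
{
    unsigned char Reg;

    if (Instr[0] != EX_MI_TWO_BYTE) {
        return 0;
    }

    Reg = Instr[2] & EX_MI_MODRM_MASK;          /* /reg field of the ModRM byte */

    if (Instr[1] == EX_MI_LTR_LLDT) {
        return Reg == EX_MI_LLDT_MASK || Reg == EX_MI_LTR_MASK;
    }

    if (Instr[1] == EX_MI_LGDT_LIDT_LMSW) {
        return Reg == EX_MI_LGDT_MASK || Reg == EX_MI_LIDT_MASK ||
               Reg == EX_MI_LMSW_MASK;
    }

    return 0;
}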
diff --git a/private/ntos/ke/i386/misc.c b/private/ntos/ke/i386/misc.c
new file mode 100644
index 000000000..7449e7a74
--- /dev/null
+++ b/private/ntos/ke/i386/misc.c
@@ -0,0 +1,164 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ misc.c
+
+Abstract:
+
+ This module implements machine dependent miscellaneous kernel functions.
+
+Author:
+
+ Ken Reneris 7-5-95
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE,KeSaveFloatingPointState)
+#pragma alloc_text(PAGE,KeRestoreFloatingPointState)
+#endif
+
+NTSTATUS
+KeSaveFloatingPointState (
+ OUT PKFLOATING_SAVE FloatSave
+ )
+/*++
+
+Routine Description:
+
+ This routine saves the thread's current non-volatile NPX state,
+ and sets a new initial floating point state for the caller.
+
+Arguments:
+
+ FloatSave - receives the current non-volatile npx state for the thread
+
+Return Value:
+
+--*/
+{
+ PKTHREAD Thread;
+ PFLOATING_SAVE_AREA NpxFrame;
+
+ PAGED_CODE ();
+
+ Thread = KeGetCurrentThread();
+ NpxFrame = (PFLOATING_SAVE_AREA)(((ULONG)(Thread->InitialStack) -
+ sizeof(FLOATING_SAVE_AREA)));
+
+ //
+ // If the system is using floating point emulation, then
+ // return an error
+ //
+
+ if (!KeI386NpxPresent) {
+ return STATUS_ILLEGAL_FLOAT_CONTEXT;
+ }
+
+ //
+ // Ensure the thread's current NPX state is in memory
+ //
+
+ KiFlushNPXState ();
+
+ //
+ // Save the non-volatile portion of the thread's NPX state
+ //
+
+ FloatSave->ControlWord = NpxFrame->ControlWord;
+ FloatSave->StatusWord = NpxFrame->StatusWord;
+ FloatSave->ErrorOffset = NpxFrame->ErrorOffset;
+ FloatSave->ErrorSelector = NpxFrame->ErrorSelector;
+ FloatSave->DataOffset = NpxFrame->DataOffset;
+ FloatSave->DataSelector = NpxFrame->DataSelector;
+ FloatSave->Cr0NpxState = NpxFrame->Cr0NpxState;
+
+ //
+ // Load new initial floating point state
+ //
+
+ NpxFrame->ControlWord = 0x27f; // like fpinit but 64bit mode
+ NpxFrame->StatusWord = 0;
+ NpxFrame->TagWord = 0xffff;
+ NpxFrame->ErrorOffset = 0;
+ NpxFrame->ErrorSelector = 0;
+ NpxFrame->DataOffset = 0;
+ NpxFrame->DataSelector = 0;
+ NpxFrame->Cr0NpxState = 0;
+
+ return STATUS_SUCCESS;
+}
+
+
+NTSTATUS
+KeRestoreFloatingPointState (
+ IN PKFLOATING_SAVE FloatSave
+ )
+/*++
+
+Routine Description:
+
+    This routine restores the thread's current non-volatile NPX state
+    to the passed-in state.
+
+Arguments:
+
+ FloatSave - the non-volatile npx state for the thread to restore
+
+Return Value:
+
+--*/
+{
+ PKTHREAD Thread;
+ PFLOATING_SAVE_AREA NpxFrame;
+
+ PAGED_CODE ();
+ ASSERT (KeI386NpxPresent);
+
+ Thread = KeGetCurrentThread();
+ NpxFrame = (PFLOATING_SAVE_AREA)(((ULONG)(Thread->InitialStack) -
+ sizeof(FLOATING_SAVE_AREA)));
+
+ if (FloatSave->Cr0NpxState & ~(CR0_PE|CR0_MP|CR0_EM|CR0_TS)) {
+ ASSERT (FALSE);
+ return STATUS_UNSUCCESSFUL;
+ }
+
+ //
+ // Ensure the thread's current NPX state is in memory
+ //
+
+ KiFlushNPXState ();
+
+ //
+ // Restore the non-volatile portion of the thread's NPX state
+ //
+
+ NpxFrame->ControlWord = FloatSave->ControlWord;
+ NpxFrame->StatusWord = FloatSave->StatusWord;
+ NpxFrame->ErrorOffset = FloatSave->ErrorOffset;
+ NpxFrame->ErrorSelector = FloatSave->ErrorSelector;
+ NpxFrame->DataOffset = FloatSave->DataOffset;
+ NpxFrame->DataSelector = FloatSave->DataSelector;
+ NpxFrame->Cr0NpxState = FloatSave->Cr0NpxState;
+ FloatSave->Cr0NpxState = 0xffffffff;
+
+ //
+ // Clear the volatile floating point state
+ //
+
+ NpxFrame->TagWord = 0xffff;
+
+ return STATUS_SUCCESS;
+}
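The intended call pattern for the pair of routines above is save, use the FPU, restore. A minimal sketch follows, assuming only these two exported routines and a hypothetical caller running at IRQL below DISPATCH_LEVEL (both routines are paged).

NTSTATUS
ExampleUseFpuInKernel (VOID)            // hypothetical caller, not in this module
{
    KFLOATING_SAVE FloatSave;
    NTSTATUS Status;

    Status = KeSaveFloatingPointState (&FloatSave);
    if (!NT_SUCCESS (Status)) {
        return Status;                  // e.g. STATUS_ILLEGAL_FLOAT_CONTEXT
    }

    //
    // Floating point may be used here with the fresh initial NPX state
    // that KeSaveFloatingPointState loaded.
    //

    KeRestoreFloatingPointState (&FloatSave);
    return STATUS_SUCCESS;
}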
diff --git a/private/ntos/ke/i386/mpipia.asm b/private/ntos/ke/i386/mpipia.asm
new file mode 100644
index 000000000..8d86d86a2
--- /dev/null
+++ b/private/ntos/ke/i386/mpipia.asm
@@ -0,0 +1,435 @@
+ title "mpipia"
+;++
+;
+; Copyright (c) 1989-1995 Microsoft Corporation
+;
+; Module Name:
+;
+; mpipia.asm
+;
+; Abstract:
+;
+;    This module implements the x86 specific functions required to
+; support multiprocessor systems.
+;
+; Author:
+;
+; David N. Cutler (davec) 5-Feb-1995
+;
+; Environment:
+;
+;    Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+.486p
+ .xlist
+include ks386.inc
+include mac386.inc
+include callconv.inc
+ .list
+
+        EXTRNP  HalRequestSoftwareInterrupt,1,IMPORT,FASTCALL
+ EXTRNP _HalRequestIpi,1,IMPORT
+ EXTRNP _KiFreezeTargetExecution, 2
+ifdef DBGMP
+ EXTRNP _KiPollDebugger
+endif
+ extrn _KiProcessorBlock:DWORD
+
+
+DELAYCOUNT equ 2000h
+
+
+
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+;++
+;
+; BOOLEAN
+; KiIpiServiceRoutine (
+; IN PKTRAP_FRAME TrapFrame,
+; IN PKEXCEPTION_FRAME ExceptionFrame
+; )
+;
+; Routine Description:
+;
+; This routine is called at IPI level to process any outstanding
+;    interprocessor requests for the current processor.
+;
+; Arguments:
+;
+; TrapFrame - Supplies a pointer to a trap frame.
+;
+; ExceptionFrame - Not used.
+;
+; Return Value:
+;
+;    A value of TRUE is returned if one or more requests were serviced.
+; Otherwise, FALSE is returned.
+;
+;--
+
+cPublicProc _KiIpiServiceRoutine, 2
+
+ifndef NT_UP
+cPublicFpo 2, 1
+ push ebx
+
+ mov ecx, PCR[PcPrcb] ; get current processor block address
+
+ xor ebx, ebx ; get request summary flags
+ cmp [ecx].PbRequestSummary, ebx
+ jz short isr10
+
+ xchg [ecx].PbRequestSummary, ebx
+
+;
+; Check for freeze request.
+;
+
+ test bl, IPI_FREEZE ; check if freeze requested
+ jnz short isr50 ; if nz, freeze requested
+
+;
+; For RequestSummary values other than IPI_FREEZE, set the return value to TRUE
+;
+
+ mov bh, 1
+
+;
+; Check for Packet ready.
+;
+; If a packet is ready, then get the address of the requested function
+; and call the function passing the address of the packet address as a
+; parameter.
+;
+
+isr10: mov eax, [ecx].PbSignalDone ; get source processor block address
+ or eax, eax ; check if packet ready
+ jz short isr20 ; if z set, no packet ready
+
+ mov edx, [esp + 8] ; Current trap frame
+
+ push [eax].PbCurrentPacket + 8 ; push parameters on stack
+ push [eax].PbCurrentPacket + 4 ;
+ push [eax].PbCurrentPacket + 0 ;
+ push eax ; push source processor block address
+ mov eax, [eax]+PbWorkerRoutine ; get worker routine address
+ mov [ecx].PbSignalDone, 0 ; clear packet address
+ mov [ecx].PbIpiFrame, edx ; Save frame address
+ call eax ; call worker routine
+ mov bh, 1 ; return TRUE
+
+;
+; Check for APC interrupt request.
+;
+
+isr20: test bl, IPI_APC ; check if APC interrupt requested
+ jz short isr30 ; if z, APC interrupt not requested
+
+ mov ecx, APC_LEVEL ; request APC interrupt
+ fstCall HalRequestSoftwareInterrupt
+
+;
+; Check for DPC interrupt request.
+;
+
+isr30: test bl, IPI_DPC ; check if DPC interrupt requested
+ jz short isr40 ; if z, DPC interrupt not requested
+
+ mov ecx, DISPATCH_LEVEL ; request DPC interrupt
+ fstCall HalRequestSoftwareInterrupt ;
+
+isr40: mov al, bh ; return status
+ pop ebx
+ stdRET _KiIpiServiceRoutine
+
+;
+; Freeze request is requested
+;
+
+isr50: mov ecx, [esp] + 12 ; get exception frame address
+ mov edx, [esp] + 8 ; get trap frame address
+ stdCall _KiFreezeTargetExecution, <edx, ecx> ; freeze execution
+ mov ecx, PCR[PcPrcb] ; get current processor block address
+ test bl, not IPI_FREEZE ; Any other IPI RequestSummary?
+ setnz bh ; Set return code accordingly
+ jmp short isr10
+else
+ xor eax, eax ; return FALSE
+ stdRET _KiIpiServiceRoutine
+endif
+
+stdENDP _KiIpiServiceRoutine
+
+;++
+;
+; VOID
+; FASTCALL
+; KiIpiSend (
+; IN KAFFINITY TargetProcessors,
+; IN KIPI_REQUEST Request
+; )
+;
+; Routine Description:
+;
+;    This function requests the specified operation on the target set of
+; processors.
+;
+; Arguments:
+;
+; TargetProcessors (ecx) - Supplies the set of processors on which the
+; specified operation is to be executed.
+;
+; IpiRequest (edx) - Supplies the request operation code.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicFastCall KiIpiSend, 2
+
+ifndef NT_UP
+
+cPublicFpo 0, 2
+ push esi ; save registers
+ push edi ;
+ mov esi, ecx ; save target processor set
+
+ shr ecx, 1 ; shift out first bit
+ lea edi, _KiProcessorBlock ; get processor block array address
+ jnc short is20 ; if nc, not in target set
+
+is10: mov eax, [edi] ; get processor block address
+ lock or [eax].PbRequestSummary, edx ; set request summary bit
+
+is20: shr ecx, 1 ; shift out next bit
+ lea edi, [edi+4] ; advance to next processor
+ jc short is10 ; if target, go set summary bit
+ jnz short is20 ; if more, check next
+
+ stdCall _HalRequestIpi, <esi> ; request IPI interrupts on targets
+
+ pop edi ; restore registers
+ pop esi ;
+endif
+ fstRet KiIpiSend
+
+fstENDP KiIpiSend
+
+;++
+;
+; VOID
+; KiIpiSendPacket (
+; IN KAFFINITY TargetProcessors,
+; IN PKIPI_WORKER WorkerFunction,
+; IN PVOID Parameter1,
+; IN PVOID Parameter2,
+; IN PVOID Parameter3
+; )
+;
+; Routine Description:
+;
+; This routine executes the specified worker function on the specified
+; set of processors.
+;
+; Arguments:
+;
+; TargetProcessors [esp + 4] - Supplies the set of processors on which the
+; specfied operation is to be executed.
+;
+; WorkerFunction [esp + 8] - Supplies the address of the worker function.
+;
+; Parameter1 - Parameter3 [esp + 12] - Supplies worker function specific
+;        parameters.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiIpiSendPacket, 5
+
+ifndef NT_UP
+
+cPublicFpo 5, 2
+ push esi ; save registers
+ push edi ;
+
+;
+; Store function address and parameters in the packet area of the PRCB on
+; the current processor.
+;
+
+ mov edx, PCR[PcPrcb] ; get current processor block address
+ mov ecx, [esp] + 12 ; set target processor set
+ mov eax, [esp] + 16 ; set worker function address
+ mov edi, [esp] + 20 ; store worker function parameters
+ mov esi, [esp] + 24 ;
+
+ mov [edx].PbTargetSet, ecx
+ mov [edx].PbWorkerRoutine, eax
+
+ mov eax, [esp] + 28
+ mov [edx].PbCurrentPacket, edi
+ mov [edx].PbCurrentPacket + 4, esi
+ mov [edx].PbCurrentPacket + 8, eax
+
+;
+; Loop through the target processors and send the packet to the specified
+; recipients.
+;
+
+ shr ecx, 1 ; shift out first bit
+ lea edi, _KiProcessorBlock ; get processor block array address
+ jnc short isp30 ; if nc, not in target set
+isp10: mov esi, [edi] ; get processor block address
+isp20: mov eax, [esi].PbSignalDone ; check if packet being processed
+ or eax, eax ;
+ jne short isp20 ; if ne, packet being processed
+
+ lock cmpxchg [esi].PbSignalDone, edx ; compare and exchange
+
+ jnz short isp20 ; if nz, exchange failed
+
+isp30: shr ecx, 1 ; shift out next bit
+ lea edi, [edi+4] ; advance to next processor
+ jc short isp10 ; if c, in target set
+ jnz short isp30 ; if nz, more target processors
+
+ mov ecx, [esp] + 12 ; set target processor set
+ stdCall _HalRequestIpi, <ecx> ; send IPI to targets
+
+ pop edi ; restore register
+ pop esi ;
+endif
+
+ stdRet _KiIpiSendPacket
+
+stdENDP _KiIpiSendPacket
+
+;++
+;
+; VOID
+; FASTCALL
+; KiIpiSignalPacketDone (
+; IN PKIPI_CONTEXT Signaldone
+; )
+;
+; Routine Description:
+;
+; This routine signals that a processor has completed a packet by
+; clearing the calling processor's set member of the requesting
+; processor's packet.
+;
+; Arguments:
+;
+; SignalDone (ecx) - Supplies a pointer to the processor block of the
+; sending processor.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicFastCall KiIpiSignalPacketDone, 1
+
+ifndef NT_UP
+
+ mov edx, PCR[PcPrcb] ; get current processor block address
+ mov eax, [edx].PbSetMember ; get processor bit
+
+ lock xor [ecx].PbTargetSet, eax ; clear processor set member
+endif
+ fstRET KiIpiSignalPacketDone
+
+fstENDP KiIpiSignalPacketDone
+
+
+;++
+;
+; VOID
+; FASTCALL
+; KiIpiSignalPacketDoneAndStall (
+;       IN PKIPI_CONTEXT SignalDone,
+; IN PULONG ReverseStall
+; )
+;
+; Routine Description:
+;
+; This routine signals that a processor has completed a packet by
+; clearing the calling processor's set member of the requesting
+;    processor's packet, and then stalls on the reverse stall value
+;
+; Arguments:
+;
+; SignalDone (ecx) - Supplies a pointer to the processor block of the
+; sending processor.
+;
+; ReverseStall (edx) - Supplies a pointer to the reverse stall barrier
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicFastCall KiIpiSignalPacketDoneAndStall, 2
+cPublicFpo 0, 2
+
+ifndef NT_UP
+ push ebx
+ push esi
+
+ mov esi, PCR[PcPrcb] ; get current processor block address
+ mov eax, [esi].PbSetMember ; get processor bit
+ mov ebx, dword ptr [edx] ; get current value of barrier
+
+ lock xor [ecx].PbTargetSet, eax ; clear processor set member
+
+sps10: mov eax, DELAYCOUNT
+sps20: cmp ebx, dword ptr [edx] ; barrier set?
+ jne short sps90 ; yes, all done
+
+ dec eax ; P54C pre C2 workaround
+ jnz short sps20 ; if eax = 0, generate bus cycle
+
+ifdef DBGMP
+ stdCall _KiPollDebugger ; Check for debugger ^C
+endif
+
+;
+; There could be a freeze execution outstanding. Check and clear
+; freeze flag.
+;
+
+.errnz IPI_FREEZE - 4
+ lock btr [esi].PbRequestSummary, 2 ; Generate bus cycle
+ jnc short sps10 ; Freeze pending?
+
+cPublicFpo 0,4
+ push ecx ; save TargetSet address
+ push edx
+ stdCall _KiFreezeTargetExecution, <[esi].PbIpiFrame, 0>
+ pop edx
+ pop ecx
+ jmp short sps10
+
+sps90: pop esi
+ pop ebx
+endif
+ fstRET KiIpiSignalPacketDoneAndStall
+
+fstENDP KiIpiSignalPacketDoneAndStall
+
+_TEXT ends
+ end
diff --git a/private/ntos/ke/i386/mtrr.c b/private/ntos/ke/i386/mtrr.c
new file mode 100644
index 000000000..0156cf908
--- /dev/null
+++ b/private/ntos/ke/i386/mtrr.c
@@ -0,0 +1,1887 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ mtrr.c
+
+Abstract:
+
+ This module implements interfaces that support manipulation of
+    memory type range registers (MTRRs).
+
+ These entry points only exist on i386 machines.
+
+Author:
+
+ Ken Reneris (kenr) 11-Oct-95
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#include "mtrr.h"
+
+#define STATIC
+#define IDBG DBG
+
+#if DBG
+#define DBGMSG(a) DbgPrint(a)
+#else
+#define DBGMSG(a)
+#endif
+
+//
+// Internal declarations
+//
+
+//
+// Range in generic terms
+//
+
+typedef struct _ONE_RANGE {
+ ULONGLONG Base;
+ ULONGLONG Limit;
+ UCHAR Type;
+} ONE_RANGE, *PONE_RANGE;
+
+#define GROW_RANGE_TABLE 4
+
+//
+// Range in specific mtrr terms
+//
+
+typedef struct _MTRR_RANGE {
+ MTRR_VARIABLE_BASE Base;
+ MTRR_VARIABLE_MASK Mask;
+} MTRR_RANGE, *PMTRR_RANGE;
+
+//
+// System static information concerning cached range types
+//
+
+typedef struct _RANGE_INFO {
+
+ //
+ // Global MTRR info
+ //
+
+ MTRR_DEFAULT Default; // h/w mtrr default
+ MTRR_CAPABILITIES Capabilities; // h/w mtrr Capabilities
+ UCHAR DefaultCachedType; // default type for MmCached
+
+ //
+ // Variable MTRR information
+ //
+
+ BOOLEAN RangesValid; // Ranges initialized and valid.
+    BOOLEAN     MtrrWorkaround;     // Workaround needed or not.
+    UCHAR       NoRange;            // Number of ranges currently in Ranges
+ UCHAR MaxRange; // Max size of Ranges
+ PONE_RANGE Ranges; // Current ranges as set into h/w
+
+} RANGE_INFO, *PRANGE_INFO;
+
+
+//
+// Structure used while processing range database
+//
+
+typedef struct _NEW_RANGE {
+ //
+ // Current Status
+ //
+
+ NTSTATUS Status;
+
+ //
+ // Generic info on new range
+ //
+
+ ULONGLONG Base;
+ ULONGLONG Limit;
+ UCHAR Type;
+
+ //
+ // MTRR image to be set into h/w
+ //
+
+ PMTRR_RANGE MTRR;
+
+ //
+ // RangeDatabase before edits were started
+ //
+
+ UCHAR NoRange;
+ PONE_RANGE Ranges;
+
+ //
+ // IPI context to coordinate concurrent processor update
+ //
+
+ ULONG NoMTRR;
+ ULONG Processor;
+ volatile ULONG TargetCount;
+ volatile ULONG *TargetPhase;
+
+} NEW_RANGE, *PNEW_RANGE;
+
+//
+// Prototypes
+//
+
+VOID
+KiInitializeMTRR (
+ IN BOOLEAN LastProcessor
+ );
+
+BOOLEAN
+KiRemoveRange (
+ IN PNEW_RANGE NewRange,
+ IN ULONGLONG Base,
+ IN ULONGLONG Limit,
+ IN PBOOLEAN RemoveThisType
+ );
+
+VOID
+KiAddRange (
+ IN PNEW_RANGE NewRange,
+ IN ULONGLONG Base,
+ IN ULONGLONG Limit,
+ IN UCHAR Type
+ );
+
+VOID
+KiStartEffectiveRangeChange (
+ IN PNEW_RANGE NewRange
+ );
+
+VOID
+KiCompleteEffectiveRangeChange (
+ IN PNEW_RANGE NewRange
+ );
+
+STATIC ULONG
+KiRangeWeight (
+ IN PONE_RANGE Range
+ );
+
+STATIC ULONG
+KiFindFirstSetLeftBit (
+ IN ULONGLONG Set
+ );
+
+STATIC ULONG
+KiFindFirstSetRightBit (
+ IN ULONGLONG Set
+ );
+
+
+VOID
+KiLoadMTRRTarget (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Context,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+NTSTATUS
+KiLoadMTRR (
+ IN PNEW_RANGE Context
+ );
+
+VOID
+KiSynchronizeMTRRLoad (
+ IN PNEW_RANGE Context
+ );
+
+ULONGLONG
+KiMaskToLength (
+ IN ULONGLONG Mask
+ );
+
+ULONGLONG
+KiLengthToMask (
+ IN ULONGLONG Length
+ );
+
+#if IDBG
+VOID
+KiDumpMTRR (
+ PUCHAR DebugString,
+ PMTRR_RANGE MTRR
+ );
+#endif
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(INIT,KiInitializeMTRR)
+#pragma alloc_text(PAGELK,KiRemoveRange)
+#pragma alloc_text(PAGELK,KiAddRange)
+#pragma alloc_text(PAGELK,KiStartEffectiveRangeChange)
+#pragma alloc_text(PAGELK,KiCompleteEffectiveRangeChange)
+#pragma alloc_text(PAGELK,KiRangeWeight)
+#pragma alloc_text(PAGELK,KiFindFirstSetLeftBit)
+#pragma alloc_text(PAGELK,KiFindFirstSetRightBit)
+#pragma alloc_text(PAGELK,KiLoadMTRR)
+#pragma alloc_text(PAGELK,KiLoadMTRRTarget)
+#pragma alloc_text(PAGELK,KiSynchronizeMTRRLoad)
+#pragma alloc_text(PAGELK,KiLengthToMask)
+#pragma alloc_text(PAGELK,KiMaskToLength)
+
+#if IDBG
+#pragma alloc_text(PAGELK,KiDumpMTRR)
+#endif
+
+#endif
+
+//
+// KiRangeLock - Used to synchronize accesses to KiRangeInfo
+//
+
+KSPIN_LOCK KiRangeLock;
+
+//
+// KiRangeInfo - Range type mapping information.  Details specific h/w support
+// and contains the current range database of how physical
+// addresses have been set
+
+RANGE_INFO KiRangeInfo;
+
+
+BOOLEAN bUseFrameBufferCaching;
+
+VOID
+KiInitializeMTRR (
+ IN BOOLEAN LastProcessor
+ )
+/*++
+
+Routine Description:
+
+ Called to incrementally initialize the physical range
+ database feature. First processor's MTRR set is read into the
+ physical range database.
+
+Arguments:
+
+ LastProcessor - If set this is the last processor to execute this routine
+ such that when this processor finishes, the initialization is complete.
+
+Return Value:
+
+    None - if there was a problem, support for the function
+    KeSetPhysicalCacheTypeRange is disabled.
+
+--*/
+{
+ BOOLEAN Status;
+ ULONG Index, Size;
+ MTRR_DEFAULT Default;
+ MTRR_CAPABILITIES Capabilities;
+ NEW_RANGE NewRange;
+ MTRR_VARIABLE_BASE MtrrBase;
+ MTRR_VARIABLE_MASK MtrrMask;
+ ULONGLONG Base, Mask, Length;
+ BOOLEAN RemoveThisType[MTRR_TYPE_MAX];
+ NTSTATUS NtStatus;
+ PKPRCB Prcb;
+
+ Status = TRUE;
+ RtlZeroMemory (&NewRange, sizeof (NewRange));
+ NewRange.Status = STATUS_UNSUCCESSFUL;
+
+ //
+ // If this is the first processor, initialize some fields
+ //
+
+ if (KeGetPcr()->Number == 0) {
+ KeInitializeSpinLock (&KiRangeLock);
+
+ KiRangeInfo.Capabilities.u.QuadPart = RDMSR(MTRR_MSR_CAPABILITIES);
+ KiRangeInfo.Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT);
+ KiRangeInfo.DefaultCachedType = MTRR_TYPE_MAX;
+
+ //
+ // If h/w mtrr support is not enabled, disable OS support
+ //
+
+ if (!KiRangeInfo.Default.u.hw.MtrrEnabled ||
+ KiRangeInfo.Capabilities.u.hw.VarCnt == 0) {
+ Status = FALSE;
+ } else {
+ if (KiRangeInfo.Capabilities.u.hw.UswcSupported) {
+ //
+ // If USWC type is supported by the hardware, check the HAL
+ // to see if we are on a "Shared Memory Cluster" machine and
+ // do not want USWC supported.
+ //
+ NtStatus = HalQuerySystemInformation(
+ HalFrameBufferCachingInformation,
+ sizeof (BOOLEAN),
+ &bUseFrameBufferCaching,
+ &Size
+ );
+
+ if (NT_SUCCESS(NtStatus)) {
+
+ if (bUseFrameBufferCaching == FALSE) {
+ DBGMSG ("KiInitializeMTRR: HAL set UseFrameBufferCaching FALSE\n");
+ KiRangeInfo.Capabilities.u.hw.UswcSupported = 0;
+ }
+ }
+ }
+ }
+
+ //
+ // Allocate initial range type database
+ //
+
+ KiRangeInfo.NoRange = 0;
+ KiRangeInfo.MaxRange = (UCHAR) KiRangeInfo.Capabilities.u.hw.VarCnt + GROW_RANGE_TABLE;
+ KiRangeInfo.Ranges = ExAllocatePool (NonPagedPool,
+ sizeof(ONE_RANGE) * KiRangeInfo.MaxRange);
+ RtlZeroMemory (KiRangeInfo.Ranges, sizeof(ONE_RANGE) * KiRangeInfo.MaxRange);
+ }
+
+ //
+ // Workaround for cpu signatures 611, 612, 616 and 617
+ // - if the request for setting a variable MTRR specifies
+    //    an address which is not 4M aligned, or a length which is not
+    //    a multiple of 4M, then there is a possible problem with the
+    //    INVLPG instruction.  Detect whether the workaround is required.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ if (Prcb->CpuType == 6 &&
+ (Prcb->CpuStep == 0x0101 || Prcb->CpuStep == 0x0102 ||
+ Prcb->CpuStep == 0x0106 || Prcb->CpuStep == 0x0107 )) {
+
+ KiRangeInfo.MtrrWorkaround = TRUE;
+ }
+
+ //
+ // If MTRR support disabled on first processor or if
+ // buffer not allocated then fall through
+ //
+
+ if (!KiRangeInfo.Ranges){
+ Status = FALSE;
+ } else {
+
+ //
+ // Verify MTRR support is symmetric
+ //
+
+ Capabilities.u.QuadPart = RDMSR(MTRR_MSR_CAPABILITIES);
+
+ if ((Capabilities.u.hw.UswcSupported) &&
+ (bUseFrameBufferCaching == FALSE)) {
+ DBGMSG ("KiInitializeMTRR: setting UswcSupported FALSE\n");
+ Capabilities.u.hw.UswcSupported = 0;
+ }
+
+ Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT);
+
+ if (Default.u.QuadPart != KiRangeInfo.Default.u.QuadPart ||
+ Capabilities.u.QuadPart != KiRangeInfo.Capabilities.u.QuadPart) {
+ DBGMSG ("KiInitializeMTRR: asymmtric mtrr support\n");
+ Status = FALSE;
+ }
+ }
+
+ NewRange.Status = STATUS_SUCCESS;
+
+ //
+ // MTRR registers should be identically set on each processor.
+ // Ranges should be added to the range database only for one
+ // processor.
+ //
+
+ if (Status && (KeGetPcr()->Number == 0)) {
+#if IDBG
+ KiDumpMTRR ("Processor MTRR:", NULL);
+#endif
+
+ //
+ // Read current MTRR settings for various cached range types
+ // and add them to the range database
+ //
+
+ for (Index=0; Index < Capabilities.u.hw.VarCnt; Index++) {
+
+ MtrrBase.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_BASE+Index*2);
+ MtrrMask.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_MASK+Index*2);
+
+ Mask = MtrrMask.u.QuadPart & MTRR_MASK_MASK;
+ Base = MtrrBase.u.QuadPart & MTRR_MASK_BASE;
+
+ //
+ // Note - the variable MTRR Mask does NOT contain the length
+ // spanned by the variable MTRR. Thus just checking the Valid
+ // Bit should be sufficient for identifying a valid MTRR.
+ //
+
+ if (MtrrMask.u.hw.Valid) {
+
+ Length = KiMaskToLength(Mask);
+
+ //
+                // Check for non-contiguous MTRR mask.
+ //
+
+ if ((Mask + Length) & MASK_OVERFLOW_MASK) {
+ DBGMSG ("KiInitializeMTRR: Found non-contiguous MTRR mask!\n");
+ Status = FALSE;
+ }
+
+ //
+ // If this is an uncachable range, handle it in the next pass
+ //
+
+ if (MtrrBase.u.hw.Type == MTRR_TYPE_UC) {
+ continue;
+ }
+
+ //
+ // Add this MTRR to the range database
+ //
+
+ Base &= Mask;
+ KiAddRange (
+ &NewRange,
+ Base,
+ Base + Length - 1,
+ (UCHAR) MtrrBase.u.hw.Type
+ );
+
+ //
+ // Check for default cache type
+ //
+
+ if (MtrrBase.u.hw.Type == MTRR_TYPE_WB) {
+ KiRangeInfo.DefaultCachedType = MTRR_TYPE_WB;
+ }
+
+ if (KiRangeInfo.DefaultCachedType == MTRR_TYPE_MAX &&
+ MtrrBase.u.hw.Type == MTRR_TYPE_WT) {
+ KiRangeInfo.DefaultCachedType = MTRR_TYPE_WT;
+ }
+ }
+ }
+
+ //
+ // Read current MTRR settings, for uncachable ranges and remove
+ // them from the range database
+ //
+
+ memset (RemoveThisType, TRUE, MTRR_TYPE_MAX);
+ for (Index=0; Index < Capabilities.u.hw.VarCnt; Index++) {
+
+ MtrrBase.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_BASE+Index*2);
+ MtrrMask.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_MASK+Index*2);
+
+ Mask = MtrrMask.u.QuadPart & MTRR_MASK_MASK;
+ Base = MtrrBase.u.QuadPart & MTRR_MASK_BASE;
+
+ if (MtrrMask.u.hw.Valid && MtrrBase.u.hw.Type == MTRR_TYPE_UC) {
+
+ //
+ // Remove uncachable region from range database
+ //
+
+ Base &= Mask;
+ Length = KiMaskToLength(Mask);
+
+ KiRemoveRange (
+ &NewRange,
+ Base,
+ Base + Length - 1,
+ RemoveThisType
+ );
+ }
+ }
+
+ //
+ // If a default type for "cached" was not found, assume write-back
+ //
+
+ if (KiRangeInfo.DefaultCachedType == MTRR_TYPE_MAX) {
+ DBGMSG ("KiInitalizeMTRR: assume write-back\n");
+ KiRangeInfo.DefaultCachedType = MTRR_TYPE_WB;
+ }
+ }
+
+ //
+ // Done
+ //
+
+ if (!NT_SUCCESS(NewRange.Status)) {
+ Status = FALSE;
+ }
+
+ if (!Status) {
+ DBGMSG ("KiInitializeMTRR: OS support for MTRRs disabled\n");
+ if (KiRangeInfo.Ranges != NULL) {
+ ExFreePool (KiRangeInfo.Ranges);
+ KiRangeInfo.Ranges = NULL;
+ }
+ } else {
+
+ // if last processor indicate initialization complete
+ if (LastProcessor) {
+ KiRangeInfo.RangesValid = TRUE;
+ }
+ }
+}
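The variable MTRR handling above relies on the base/mask convention: an address hits a variable MTRR when the masked address equals the masked base, and for a contiguous mask the span equals the value of the lowest set mask bit (which is what KiMaskToLength is assumed to compute). A hedged, self-contained sketch of that arithmetic:

typedef unsigned __int64 EXAMPLE_ULONGLONG;     /* stand-in for ULONGLONG */

/* Length spanned by a contiguous variable MTRR mask: its lowest set bit. */
static EXAMPLE_ULONGLONG
ExampleMaskToLength (EXAMPLE_ULONGLONG Mask)
{
    return Mask & (0 - Mask);
}

/* Hardware range test: (Address & Mask) == (Base & Mask).  For example,
   Base = 0xF8000000 with Mask = 0xFFF800000 (an 8MB span) matches
   0xF8400000 but not 0xF0000000. */
static int
ExampleMtrrHit (EXAMPLE_ULONGLONG Address,
                EXAMPLE_ULONGLONG Base,
                EXAMPLE_ULONGLONG Mask)
{
    return (Address & Mask) == (Base & Mask);
}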
+
+NTSTATUS
+KeSetPhysicalCacheTypeRange (
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN ULONG NumberOfBytes,
+ IN MEMORY_CACHING_TYPE CacheType
+ )
+/*++
+
+Routine Description:
+
+ This function sets a physical range to a particular cache type.
+ If the system does not support setting cache policies based on
+ physical ranges, no action is taken.
+
+Arguments:
+
+ PhysicalAddress - The starting address of the range being set
+
+ NumberOfBytes - The length, in bytes, of the range being set
+
+    CacheType - The caching type to which the physical range is
+        to be set.
+
+ NonCached:
+ Setting ranges to be NonCached is done for
+ book keeping reasons. A return of SUCCESS when
+ setting a range NonCached does not mean it has
+ been physically set to as NonCached. The caller
+ must use a cache-disabled virtual pointer for
+ any NonCached range.
+
+ Cached:
+            A successful return indicates that the physical
+            range has been set to be cached.  This mode requires
+ the caller to be at irql < dispatch_level.
+
+ FrameBuffer:
+            A successful return indicates that the physical
+ range has been set to be framebuffer cached.
+ This mode requires the caller to be at irql <
+ dispatch_level.
+
+Return Value:
+
+ STATUS_SUCCESS - if success, the cache attributes of the physical range
+ have been set (or feature not supported or not yet
+ initialized).
+
+ STATUS_NO_SUCH_DEVICE - if FrameBuffer type is requested and is not
+ supported.
+
+ STATUS_UNSUCCESSFUL/STATUS_INTERNAL_ERROR - Requested Physical Range is
+ below 1M or other error.
+
+--*/
+{
+ KIRQL OldIrql;
+ NEW_RANGE NewRange;
+ BOOLEAN RemoveThisType[MTRR_TYPE_MAX];
+ BOOLEAN EffectRangeChange, AddToRangeDatabase;
+
+ //
+ // If processor doesn't have the memory type range feature, or
+ // if request isn't supported return not supported.
+ //
+
+ if (!KiRangeInfo.RangesValid ||
+ PhysicalAddress.LowPart < 1 * 1024 * 1024) {
+
+ return STATUS_NOT_SUPPORTED;
+ }
+
+ //
+ // Workaround for cpu signatures 611, 612, 616 and 617
+ // - if the request for setting a variable MTRR specifies
+ // an address which is not 4M aligned or length is not
+ // a multiple of 4M then return status not supported
+ //
+
+ if ((KiRangeInfo.MtrrWorkaround) &&
+ ((PhysicalAddress.LowPart & 0x3fffff) ||
+ (NumberOfBytes & 0x3fffff))) {
+
+ return STATUS_NOT_SUPPORTED;
+ }
+
+
+ ASSERT (NumberOfBytes != 0);
+
+ RtlZeroMemory (&NewRange, sizeof (NewRange));
+ NewRange.Base = PhysicalAddress.QuadPart;
+ NewRange.Limit = NewRange.Base + NumberOfBytes - 1;
+
+ //
+ // Determine what the new mtrr range type is. If setting NonCached then
+ // the database need not be updated to reflect the virtual change. This
+ // is because non-cached virtual pointers are mapped as cache disabled.
+ //
+
+ EffectRangeChange = TRUE;
+ AddToRangeDatabase = TRUE;
+ switch (CacheType) {
+ case MmNonCached:
+ NewRange.Type = MTRR_TYPE_UC;
+
+ //
+ // NonCached ranges do not need to be reflected into the h/w state
+ // as all non-cached ranges are mapped with cache-disabled pointers.
+ // This also means that cache-disabled ranges do not need to
+ // be put into mtrrs, or held in the range, regardless of the default
+ // range type.
+ //
+
+ EffectRangeChange = FALSE;
+ AddToRangeDatabase = FALSE;
+ break;
+
+ case MmCached:
+ NewRange.Type = KiRangeInfo.DefaultCachedType;
+ break;
+
+ case MmFrameBufferCached:
+ NewRange.Type = MTRR_TYPE_USWC;
+
+ //
+ // If USWC type isn't supported, then request can not be honored
+ //
+
+ if (!KiRangeInfo.Capabilities.u.hw.UswcSupported) {
+ DBGMSG ("KeSetPhysicalCacheTypeRange: USWC not supported\n");
+ return STATUS_NOT_SUPPORTED;
+ }
+ break;
+
+ default:
+ DBGMSG ("KeSetPhysicalCacheTypeRange: no such cache type\n");
+ return STATUS_INVALID_PARAMETER;
+ break;
+ }
+
+ NewRange.Status = STATUS_SUCCESS;
+
+ ASSERT(KiRangeInfo.Default.u.hw.Type == MTRR_TYPE_UC);
+
+ //
+    // The default type is UC, thus the range is still mapped using
+    // a cache-disabled virtual pointer and hence it need not be added.
+ //
+
+ //
+    // If the h/w needs to be updated, lock down the code required to effect the change
+ //
+
+ if (EffectRangeChange) {
+ if (KeGetCurrentIrql() == DISPATCH_LEVEL) {
+
+ //
+            // Code cannot be locked down.  Supplying a new range type
+            // requires that the caller call at irql < dispatch_level.
+ //
+
+ DBGMSG ("KeSetPhysicalCacheTypeRange failed due to calling IRQL == DISPATCH_LEVEL\n");
+ return STATUS_UNSUCCESSFUL;
+ }
+
+ MmLockPagableSectionByHandle(ExPageLockHandle);
+ }
+
+ //
+ // Serialize the range type database
+ //
+
+ KeAcquireSpinLock (&KiRangeLock, &OldIrql);
+
+ //
+    // If the h/w is going to need to be updated, then start an effective range change
+ //
+
+ if (EffectRangeChange) {
+ KiStartEffectiveRangeChange (&NewRange);
+ }
+
+ if (NT_SUCCESS (NewRange.Status)) {
+
+ //
+        // If the new range is NonCached, then don't remove standard memory
+        // caching types
+ //
+
+ memset (RemoveThisType, TRUE, MTRR_TYPE_MAX);
+ if (NewRange.Type != MTRR_TYPE_UC) {
+ //
+ // If the requested type is uncached then the physical
+ // memory region is mapped using a cache disabled virtual pointer.
+ // The effective memory type for that region will be the lowest
+ // common denominator of the MTRR type and the cache type in the
+ // PTE. Therefore for a request of type UC, the effective type
+ // will be UC irrespective of the MTRR settings in that range.
+ // Hence it is not necessary to remove the existing MTRR settings
+ // (if any) for that range.
+ //
+
+ //
+ // Clip/remove any ranges in the target area
+ //
+
+ KiRemoveRange (&NewRange, NewRange.Base, NewRange.Limit, RemoveThisType);
+ }
+
+ //
+ // If needed, add new range type
+ //
+
+ if (AddToRangeDatabase) {
+ ASSERT (EffectRangeChange == TRUE);
+ KiAddRange (&NewRange, NewRange.Base, NewRange.Limit, NewRange.Type);
+ }
+
+ //
+ // If this is an effect range change, then complete it
+ //
+
+ if (EffectRangeChange) {
+ KiCompleteEffectiveRangeChange (&NewRange);
+ }
+ }
+
+ KeReleaseSpinLock (&KiRangeLock, OldIrql);
+ if (EffectRangeChange) {
+ MmUnlockPagableImageSection(ExPageLockHandle);
+ }
+
+ return NewRange.Status;
+}
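+
+//
+// Illustrative usage sketch (hypothetical values): a display driver that
+// wants write-combining on its frame buffer could request it roughly as
+// follows, falling back to a cache-disabled mapping if the call returns
+// STATUS_NOT_SUPPORTED.  The call must be made at irql < dispatch_level.
+//
+//      PHYSICAL_ADDRESS FrameBufferPhys;
+//      NTSTATUS Status;
+//
+//      FrameBufferPhys.QuadPart = 0xF8000000;      // hypothetical base
+//      Status = KeSetPhysicalCacheTypeRange (
+//                   FrameBufferPhys,
+//                   0x400000,                      // hypothetical length (4MB)
+//                   MmFrameBufferCached
+//                   );
+//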
+
+BOOLEAN
+KiRemoveRange (
+ IN PNEW_RANGE NewRange,
+ IN ULONGLONG Base,
+ IN ULONGLONG Limit,
+ IN PBOOLEAN RemoveThisType
+ )
+/*++
+
+Routine Description:
+
+ This function removes any range overlapping with the passed range, of
+ type supplied in RemoveThisType from the global range database.
+
+Arguments:
+
+ NewRange - Context information
+
+ Base - Base & Limit signify the first & last address of a range
+ Limit - which is to be removed from the range database
+
+    RemoveThisType - A TRUE flag for each type which cannot overlap the
+                     target range
+
+
+Return Value:
+
+ TRUE - if the range database was altered such that it may no longer
+ be sorted.
+
+--*/
+{
+ ULONG i;
+ PONE_RANGE Range;
+ BOOLEAN DatabaseNeedsSorted;
+
+
+ DatabaseNeedsSorted = FALSE;
+
+ //
+ // Check each range
+ //
+
+ for (i=0, Range=KiRangeInfo.Ranges; i < KiRangeInfo.NoRange; i++, Range++) {
+
+ //
+ // If this range type doesn't need to be altered, skip it
+ //
+
+ if (!RemoveThisType[Range->Type]) {
+ continue;
+ }
+
+ //
+ // Check range to see if it overlaps with range being removed
+ //
+
+ if (Range->Base < Base) {
+
+ if (Range->Limit >= Base && Range->Limit <= Limit) {
+
+ //
+ // Truncate range to not overlap with area being removed
+ //
+
+ Range->Limit = Base - 1;
+ }
+
+ if (Range->Limit > Limit) {
+
+ //
+ // Target area is contained totally within this area.
+ // Split into two ranges
+ //
+
+ //
+ // Add range at end
+ //
+
+ DatabaseNeedsSorted = TRUE;
+ KiAddRange (
+ NewRange,
+ Limit+1,
+ Range->Limit,
+ Range->Type
+ );
+
+ //
+                // Turn current range into range at beginning
+ //
+
+ Range->Limit = Base - 1;
+ }
+
+ } else {
+
+ // Range->Base >= Base
+
+ if (Range->Base <= Limit) {
+ if (Range->Limit <= Limit) {
+ //
+ // This range is totally within the target area. Remove it.
+ //
+
+ DatabaseNeedsSorted = TRUE;
+ KiRangeInfo.NoRange -= 1;
+ Range->Base = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base;
+ Range->Limit = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit;
+ Range->Type = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type;
+
+ //
+ // recheck at current location
+ //
+
+ i -= 1;
+ Range -= 1;
+
+ } else {
+
+ //
+                    // Bump beginning past area being removed
+ //
+
+ Range->Base = Limit + 1;
+ }
+ }
+ }
+ }
+
+ if (!NT_SUCCESS (NewRange->Status)) {
+ DBGMSG ("KiRemoveRange: failure\n");
+ }
+
+ return DatabaseNeedsSorted;
+}
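+
+//
+// For illustration, using the inclusive limits above: removing
+// 0x300000-0x4FFFFF from an existing WB entry 0x200000-0x7FFFFF hits the
+// "split" case: the existing entry is truncated to 0x200000-0x2FFFFF and
+// a new WB entry 0x500000-0x7FFFFF is appended with KiAddRange, which is
+// why the function reports that the database may no longer be sorted.
+//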
+
+
+VOID
+KiAddRange (
+ IN PNEW_RANGE NewRange,
+ IN ULONGLONG Base,
+ IN ULONGLONG Limit,
+ IN UCHAR Type
+ )
+/*++
+
+Routine Description:
+
+ This function adds the passed range to the global range database.
+
+Arguments:
+
+ NewRange - Context information
+
+ Base - Base & Limit signify the first & last address of a range
+ Limit - which is to be added to the range database
+
+ Type - Type of caching required for this range
+
+Return Value:
+
+ None - Context is updated with an error if the table has overflowed
+
+--*/
+{
+ PONE_RANGE Range, OldRange;
+ ULONG size;
+
+ if (KiRangeInfo.NoRange >= KiRangeInfo.MaxRange) {
+
+ //
+ // Table is out of space, get a bigger one
+ //
+
+ OldRange = KiRangeInfo.Ranges;
+ size = sizeof(ONE_RANGE) * (KiRangeInfo.MaxRange + GROW_RANGE_TABLE);
+ Range = ExAllocatePool (NonPagedPool, size);
+
+ if (!Range) {
+ NewRange->Status = STATUS_INSUFFICIENT_RESOURCES;
+ return ;
+ }
+
+ //
+ // Grow table
+ //
+
+ RtlZeroMemory (Range, size);
+ RtlCopyMemory (Range, OldRange, sizeof(ONE_RANGE) * KiRangeInfo.MaxRange);
+ KiRangeInfo.Ranges = Range;
+ KiRangeInfo.MaxRange += GROW_RANGE_TABLE;
+ ExFreePool (OldRange);
+ }
+
+ //
+ // Add new entry to table
+ //
+
+ KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base = Base;
+ KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit = Limit;
+ KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type = Type;
+ KiRangeInfo.NoRange += 1;
+}
+
+
+VOID
+KiStartEffectiveRangeChange (
+ IN PNEW_RANGE NewRange
+ )
+/*++
+
+Routine Description:
+
+    This function sets up the context information required to
+    track and later effect a range change in hardware.
+
+Arguments:
+
+ NewRange - Context information
+
+Return Value:
+
+ None
+
+--*/
+{
+ ULONG size;
+
+ //
+ // Allocate working space for MTRR image
+ //
+
+ size = sizeof(MTRR_RANGE) * ((ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt + 1);
+ NewRange->MTRR = ExAllocatePool (NonPagedPool, size);
+ if (!NewRange->MTRR) {
+ NewRange->Status = STATUS_INSUFFICIENT_RESOURCES;
+ return ;
+ }
+
+ RtlZeroMemory (NewRange->MTRR, size);
+
+ //
+ // Save current range information in case of an error
+ //
+
+ size = sizeof(ONE_RANGE) * KiRangeInfo.NoRange;
+ NewRange->NoRange = KiRangeInfo.NoRange;
+ NewRange->Ranges = ExAllocatePool (NonPagedPool, size);
+ if (!NewRange->Ranges) {
+ NewRange->Status = STATUS_INSUFFICIENT_RESOURCES;
+ return ;
+ }
+
+ RtlCopyMemory (NewRange->Ranges, KiRangeInfo.Ranges, size);
+}
+
+
+VOID
+KiCompleteEffectiveRangeChange (
+ IN PNEW_RANGE NewRange
+ )
+/*++
+
+Routine Description:
+
+    This function commits the range database to hardware, or backs
+    out the current changes to it.
+
+Arguments:
+
+ NewRange - Context information
+
+Return Value:
+
+ None
+
+--*/
+{
+ BOOLEAN Restart;
+ ULONG Index, Index2, NoMTRR;
+ ULONGLONG BestLength, WhichMtrr;
+ ULONGLONG CurrLength;
+ ULONGLONG l, Base, Length, MLength;
+ PONE_RANGE Range;
+ ONE_RANGE OneRange;
+ PMTRR_RANGE MTRR;
+ BOOLEAN RoundDown;
+ BOOLEAN RemoveThisType[MTRR_TYPE_MAX];
+ PKPRCB Prcb;
+ KIRQL OldIrql, OldIrql2;
+ KAFFINITY TargetProcessors;
+
+
+ ASSERT (KeGetCurrentIrql() == DISPATCH_LEVEL);
+ Prcb = KeGetCurrentPrcb();
+
+ //
+ // Round all ranges, according to type, to match what h/w can support
+ //
+
+ for (Index=0; Index < KiRangeInfo.NoRange; Index++) {
+ Range = &KiRangeInfo.Ranges[Index];
+
+ //
+ // Determine rounding for this range type
+ //
+
+ RoundDown = TRUE;
+ if (Range->Type == MTRR_TYPE_UC) {
+ RoundDown = FALSE;
+ }
+
+ //
+ // Apply rounding
+ //
+
+ if (RoundDown) {
+ Range->Base = (Range->Base + MTRR_PAGE_SIZE - 1) & MTRR_PAGE_MASK;
+ Range->Limit = ((Range->Limit+1) & MTRR_PAGE_MASK)-1;
+ } else {
+ Range->Base = (Range->Base & MTRR_PAGE_MASK);
+ Range->Limit = ((Range->Limit + MTRR_PAGE_SIZE) & MTRR_PAGE_MASK)-1;
+ }
+ }
+
+ do {
+ Restart = FALSE;
+
+ //
+ // Sort the ranges by base address
+ //
+
+ for (Index=0; Index < KiRangeInfo.NoRange; Index++) {
+ Range = &KiRangeInfo.Ranges[Index];
+
+ for (Index2=Index+1; Index2 < KiRangeInfo.NoRange; Index2++) {
+
+ if (KiRangeInfo.Ranges[Index2].Base < Range->Base) {
+
+ //
+ // Swap KiRangeInfo.Ranges[Index] with KiRangeInfo.Ranges[Index2]
+ //
+
+ OneRange = *Range;
+ *Range = KiRangeInfo.Ranges[Index2];
+ KiRangeInfo.Ranges[Index2] = OneRange;
+ }
+ }
+ }
+
+ //
+ // Check for adjacent/overlapping ranges
+ //
+
+ for (Index=0; Index < (ULONG) KiRangeInfo.NoRange-1 && !Restart; Index++) {
+ Range = &KiRangeInfo.Ranges[Index];
+
+ l = Range[0].Limit + 1;
+ if (l < Range[0].Limit) {
+ l = Range[0].Limit;
+ }
+
+ //
+ // If ranges overlap or are adjacent and are of the same type, combine them
+ //
+
+ if (l >= Range[1].Base && Range[0].Type == Range[1].Type) {
+
+ //
+ // Increase Range[0] limit to cover Range[1]
+ //
+
+ if (Range[1].Limit > Range[0].Limit) {
+ Range[0].Limit = Range[1].Limit;
+ }
+
+ //
+ // Remove Range[1]
+ //
+
+ if (Index+2 < KiRangeInfo.NoRange) {
+
+ //
+ // Copy everything from current index till end
+ // of range list.
+ //
+
+ RtlCopyMemory(
+ &(Range[1]),
+ &(Range[2]),
+ sizeof(ONE_RANGE) * (KiRangeInfo.NoRange-Index-2)
+ );
+ }
+
+ KiRangeInfo.NoRange -= 1;
+
+ //
+ // Recheck current location
+ //
+
+ Index -= 1;
+ continue;
+ }
+
+ //
+ // If ranges overlap and are not of same type,
+ // then carve them to the best cache type available.
+ //
+
+ if (l > Range[1].Base) {
+
+ //
+ // Pick range which has the cache type which should be used for
+ // the overlapped area
+ //
+
+ Index2 = KiRangeWeight(&Range[0]) > KiRangeWeight(&Range[1]) ? 0 : 1;
+
+ //
+ // Remove ranges of type which do not belong in the overlapped area
+ //
+
+ RtlZeroMemory (RemoveThisType, MTRR_TYPE_MAX);
+ RemoveThisType[Range[Index2 ^ 1].Type] = TRUE;
+
+ //
+ // Remove just the overlapped portion of the range.
+ //
+
+ Restart = KiRemoveRange (
+ NewRange,
+ Range[1].Base,
+ (Range[0].Limit < Range[1].Limit ? Range[0].Limit : Range[1].Limit),
+ RemoveThisType
+ );
+
+ }
+ }
+ } while (Restart);
+
+ //
+ // The range database is now rounded to fit in the h/w and sorted.
+ // Attempt to build MTRR settings which exactly describe the ranges
+ //
+
+ MTRR = NewRange->MTRR;
+ NoMTRR = 0;
+    for (Index=0; NT_SUCCESS(NewRange->Status) && Index < KiRangeInfo.NoRange; Index++) {
+ Range = &KiRangeInfo.Ranges[Index];
+
+ //
+ // Build MTRRs to fit this range
+ //
+
+ Base = Range->Base;
+ Length = Range->Limit - Base + 1;
+
+ while (Length) {
+
+ //
+ // Compute MTRR length for current range base & length
+ //
+
+ if (Base == 0) {
+ MLength = Length;
+ } else {
+ MLength = (ULONGLONG) 1 << KiFindFirstSetRightBit(Base);
+ }
+ if (MLength > Length) {
+ MLength = Length;
+ }
+
+ l = (ULONGLONG) 1 << KiFindFirstSetLeftBit (MLength);
+ if (MLength > l) {
+ MLength = l;
+ }
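+
+            //
+            // For example (illustration only): with Base = 0xA00000 (10MB)
+            // and Length = 0x600000 (6MB), the lowest set bit of Base gives
+            // MLength = 2MB, so a 2MB MTRR is emitted at 10MB; the next
+            // pass has Base = 0xC00000 and Length = 0x400000 and emits a
+            // single 4MB MTRR, covering the range with two registers.
+            //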
+
+ //
+ // Store it in the next MTRR
+ //
+
+ MTRR[NoMTRR].Base.u.QuadPart = Base;
+ MTRR[NoMTRR].Base.u.hw.Type = Range->Type;
+ MTRR[NoMTRR].Mask.u.QuadPart = KiLengthToMask(MLength);
+ MTRR[NoMTRR].Mask.u.hw.Valid = 1;
+ NoMTRR += 1;
+
+ //
+            // Adjust off the amount of data covered by the last MTRR
+ //
+
+ Base += MLength;
+ Length -= MLength;
+
+ //
+            // If there are too many MTRRs, try removing a USWC MTRR.
+            // (i.e., convert some MmFrameBufferCached to MmNonCached).
+ //
+
+ if (NoMTRR > (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt) {
+
+ //
+ // Find smallest USWC type and drop it
+ //
+                // This is okay only if the default type is UC.
+ // Default type should always be UC unless BIOS changes
+ // it. Still ASSERT!
+ //
+
+ ASSERT(KiRangeInfo.Default.u.hw.Type == MTRR_TYPE_UC);
+
+ BestLength = (ULONGLONG) 1 << (MTRR_MAX_RANGE_SHIFT + 1);
+ for (Index2=0; Index2 < NoMTRR; Index2++) {
+
+ if (MTRR[Index2].Base.u.hw.Type == MTRR_TYPE_USWC) {
+
+ CurrLength = KiMaskToLength(MTRR[Index2].Mask.u.QuadPart &
+ MTRR_MASK_MASK);
+
+ if (CurrLength < BestLength) {
+ WhichMtrr = Index2;
+                            BestLength = CurrLength;
+ }
+ }
+ }
+
+ if (BestLength == ((ULONGLONG) 1 << (MTRR_MAX_RANGE_SHIFT + 1))) {
+ //
+ // Range was not found which could be dropped. Abort process
+ //
+
+ NewRange->Status = STATUS_INSUFFICIENT_RESOURCES;
+ Length = 0;
+
+ } else {
+ //
+ // Remove WhichMtrr
+ //
+
+                    MTRR[WhichMtrr] = MTRR[NoMTRR - 1];
+ NoMTRR -= 1;
+ }
+ }
+ }
+ }
+
+ //
+ // Done building new MTRRs
+ //
+
+ if (NT_SUCCESS(NewRange->Status)) {
+
+ //
+ // Update the MTRRs on all processors
+ //
+
+#if IDBG
+ KiDumpMTRR ("Loading the following MTRR:", NewRange->MTRR);
+#endif
+
+ NewRange->TargetCount = 0;
+ NewRange->TargetPhase = &Prcb->ReverseStall;
+ NewRange->Processor = Prcb->Number;
+
+        //
+        // Previously enabled MTRRs with index >= NoMTRR, which could
+        // conflict with the new settings, should be disabled.  This is
+        // taken care of by setting NewRange->NoMTRR to the total number
+        // of variable MTRRs.
+        //
+
+ NewRange->NoMTRR = (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt;
+
+ //
+ // Synchronize with other IPI functions which may stall
+ //
+
+ KiLockContextSwap(&OldIrql);
+
+#if !defined(NT_UP)
+ //
+ // Collect all the (other) processors
+ //
+
+ TargetProcessors = KeActiveProcessors & ~Prcb->SetMember;
+ if (TargetProcessors != 0) {
+
+ KiIpiSendPacket (
+ TargetProcessors,
+ KiLoadMTRRTarget,
+ (PVOID) NewRange,
+ NULL,
+ NULL
+ );
+
+ //
+ // Wait for all processors to be collected
+ //
+
+ KiIpiStallOnPacketTargets();
+
+ //
+            // All processors are now waiting.  Raise to high level to
+ // ensure this processor doesn't enter the debugger due to
+ // some interrupt service routine.
+ //
+
+ KeRaiseIrql (HIGH_LEVEL, &OldIrql2);
+
+ //
+ // There's no reason for any debug events now, so signal
+ // the other processors that they can all disable interrupts
+            // and begin the MTRR update
+ //
+
+ Prcb->ReverseStall += 1;
+ }
+#endif
+
+ //
+ // Update MTRRs
+ //
+
+ KiLoadMTRR (NewRange);
+
+ //
+ // Release ContextSwap lock
+ //
+
+ KiUnlockContextSwap(OldIrql);
+
+
+#if IDBG
+ KiDumpMTRR ("Processor MTRR:", NewRange->MTRR);
+#endif
+
+ } else {
+
+ //
+ // There was an error, put original range database back
+ //
+
+ DBGMSG ("KiCompleteEffectiveRangeChange: mtrr update did not occur\n");
+
+ if (NewRange->Ranges) {
+ KiRangeInfo.NoRange = NewRange->NoRange;
+
+ RtlCopyMemory (
+ KiRangeInfo.Ranges,
+ NewRange->Ranges,
+ sizeof (ONE_RANGE) * KiRangeInfo.NoRange
+ );
+ }
+ }
+
+ //
+ // Cleanup
+ //
+
+ ExFreePool (NewRange->Ranges);
+ ExFreePool (NewRange->MTRR);
+}
+
+
+STATIC ULONG
+KiRangeWeight (
+ IN PONE_RANGE Range
+ )
+/*++
+
+Routine Description:
+
+    This function returns a weighting of the passed-in range's cache
+    type.  When two or more regions collide within the same h/w region,
+    the types are weighted and the cache type with the higher weight
+    is used for the collision area.
+
+Arguments:
+
+ Range - Range to obtain weighting for
+
+Return Value:
+
+ The weight of the particular cache type
+
+--*/
+{
+ ULONG Weight;
+
+ switch (Range->Type) {
+ case MTRR_TYPE_UC: Weight = 5; break;
+ case MTRR_TYPE_USWC: Weight = 4; break;
+ case MTRR_TYPE_WP: Weight = 3; break;
+ case MTRR_TYPE_WT: Weight = 2; break;
+ case MTRR_TYPE_WB: Weight = 1; break;
+ default: Weight = 0; break;
+ }
+
+ return Weight;
+}
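+
+//
+// For example, if a requested USWC frame buffer range (weight 4) overlaps
+// an existing WB range (weight 1), the overlapped portion is resolved in
+// favor of USWC and the WB entry is clipped out of that area by
+// KiRemoveRange.
+//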
+
+
+STATIC ULONGLONG
+KiMaskToLength (
+ IN ULONGLONG Mask
+ )
+/*++
+
+Routine Description:
+
+ This function returns the length specified by a particular
+ mtrr variable register mask.
+
+--*/
+{
+ if (Mask == 0) {
+ // Zero Mask signifies a length of 2**36
+ return(((ULONGLONG) 1 << MTRR_MAX_RANGE_SHIFT));
+ } else {
+ return(((ULONGLONG) 1 << KiFindFirstSetRightBit(Mask)));
+ }
+}
+
+STATIC ULONGLONG
+KiLengthToMask (
+ IN ULONGLONG Length
+ )
+/*++
+
+Routine Description:
+
+ This function constructs the mask corresponding to the input length
+ to be set in a variable MTRR register. The length is assumed to be
+ a multiple of 4K.
+
+--*/
+{
+ ULONGLONG FullMask = 0xffffff;
+
+ if (Length == ((ULONGLONG) 1 << MTRR_MAX_RANGE_SHIFT)) {
+ return(0);
+ } else {
+ return(((FullMask << KiFindFirstSetRightBit(Length)) &
+ MTRR_RESVBIT_MASK));
+ }
+}
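+
+//
+// Worked example (illustration only): for a 4MB length the lowest set bit
+// is bit 22, so KiLengthToMask(0x400000) yields
+// (0xffffff << 22) & MTRR_RESVBIT_MASK == 0xFFFC00000, a mask covering
+// physical address bits 22 through 35.  KiMaskToLength(0xFFFC00000) finds
+// bit 22 as the lowest set bit and returns 0x400000 again.
+//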
+
+STATIC ULONG
+KiFindFirstSetRightBit (
+ IN ULONGLONG Set
+ )
+/*++
+
+Routine Description:
+
+ This function returns a bit position of the least significant
+ bit set in the passed ULONGLONG parameter. Passed parameter
+ must be non-zero.
+
+--*/
+{
+ ULONG bitno;
+
+ ASSERT(Set != 0);
+ for (bitno=0; !(Set & 0xFF); bitno += 8, Set >>= 8) ;
+ return KiFindFirstSetRight[Set & 0xFF] + bitno;
+}
+
+STATIC ULONG
+KiFindFirstSetLeftBit (
+ IN ULONGLONG Set
+ )
+/*++
+
+Routine Description:
+
+ This function returns a bit position of the most significant
+ bit set in the passed ULONGLONG parameter. Passed parameter
+ must be non-zero.
+
+--*/
+{
+ ULONG bitno;
+
+ ASSERT(Set != 0);
+    for (bitno=56; !(Set & 0xFF00000000000000); bitno -= 8, Set <<= 8) ;
+ return KiFindFirstSetLeft[Set >> 56] + bitno;
+}
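+
+//
+// For example, KiFindFirstSetRightBit(0xA00000) skips the two low-order
+// zero bytes (bitno = 16) and indexes KiFindFirstSetRight with 0xA0,
+// giving 5 + 16 = 21; KiFindFirstSetLeftBit(0xA00000) similarly returns
+// 7 + 16 = 23.
+//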
+
+#if IDBG
+VOID
+KiDumpMTRR (
+ PUCHAR DebugString,
+ PMTRR_RANGE MTRR
+ )
+/*++
+
+Routine Description:
+
+ This function dumps the MTRR information to the debugger
+
+--*/
+{
+ static PUCHAR Type[] = {
+ // 0 1 2 3 4 5 6
+ "UC ", "USWC", "????", "????", "WT ", "WP ", "WB " };
+ MTRR_VARIABLE_BASE Base;
+ MTRR_VARIABLE_MASK Mask;
+ ULONG Index;
+ ULONG i;
+ PUCHAR p;
+
+ DbgPrint ("%s\n", DebugString);
+ for (Index=0; Index < (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt; Index++) {
+ if (MTRR) {
+ Base = MTRR[Index].Base;
+ Mask = MTRR[Index].Mask;
+ } else {
+ Base.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_BASE+2*Index);
+ Mask.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_MASK+2*Index);
+ }
+
+ DbgPrint (" %d. ", Index);
+ if (Mask.u.hw.Valid) {
+ p = "????";
+ if (Base.u.hw.Type < 7) {
+ p = Type[Base.u.hw.Type];
+ }
+
+ DbgPrint ("%s %08x:%08x %08x:%08x",
+ p,
+ (ULONG) (Base.u.QuadPart >> 32),
+ ((ULONG) (Base.u.QuadPart & MTRR_MASK_BASE)),
+ (ULONG) (Mask.u.QuadPart >> 32),
+ ((ULONG) (Mask.u.QuadPart & MTRR_MASK_MASK))
+ );
+
+ }
+ DbgPrint ("\n");
+ }
+}
+#endif
+
+
+VOID
+KiLoadMTRRTarget (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID NewRange,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
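+/*++
+
+Routine Description:
+
+    This is the IPI target routine for an MTRR update.  Each target
+    processor signals the requesting processor that it has received the
+    packet and then stalls on the requestor's reverse stall count; once
+    released, it joins the lock-step MTRR load in KiLoadMTRR.
+
+Arguments:
+
+    SignalDone - IPI context used to signal packet completion
+
+    NewRange - Context information describing the MTRR update
+
+    Parameter2, Parameter3 - Not used
+
+Return Value:
+
+    None
+
+--*/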
+{
+ PNEW_RANGE Context;
+
+ Context = (PNEW_RANGE) NewRange;
+
+ //
+ // Wait for all processors to be ready
+ //
+
+ KiIpiSignalPacketDoneAndStall (SignalDone, Context->TargetPhase);
+
+ //
+ // Update MTRRs
+ //
+
+ KiLoadMTRR (Context);
+}
+
+
+
+#define MOV_EAX_CR4 _emit { 0Fh, 20h, E0h }
+#define MOV_CR4_EAX _emit { 0Fh, 22h, E0h }
+
+NTSTATUS
+KiLoadMTRR (
+ IN PNEW_RANGE Context
+ )
+/*++
+
+Routine Description:
+
+ This function loads the memory type range registers into all processors
+
+Arguments:
+
+ Context - Context which include the MTRRs to load
+
+Return Value:
+
+ All processors are set into the new state
+
+--*/
+{
+ MTRR_DEFAULT Default;
+ BOOLEAN Enable;
+ ULONG HldCr0, HldCr4;
+ ULONG Index;
+
+ //
+ // Disable interrupts
+ //
+
+ Enable = KiDisableInterrupts();
+
+ //
+ // Synchronize all processors
+ //
+
+ KiSynchronizeMTRRLoad (Context);
+
+ _asm {
+ ;
+ ; Get current CR0
+ ;
+
+ mov eax, cr0
+ mov HldCr0, eax
+
+ ;
+ ; Disable caching & line fill
+ ;
+
+ and eax, not CR0_NW
+ or eax, CR0_CD
+ mov cr0, eax
+
+ ;
+ ; Flush caches
+ ;
+
+ ;
+ ; wbinvd
+ ;
+
+ _emit 0Fh
+ _emit 09h
+
+ ;
+ ; Get current cr4
+ ;
+
+ _emit 0Fh
+ _emit 20h
+ _emit 0E0h ; mov eax, cr4
+ mov HldCr4, eax
+
+ ;
+ ; Disable global page
+ ;
+
+ and eax, not CR4_PGE
+ _emit 0Fh
+ _emit 22h
+ _emit 0E0h ; mov cr4, eax
+
+ ;
+ ; Flush TLB
+ ;
+
+ mov eax, cr3
+ mov cr3, eax
+ }
+
+ //
+ // Disable MTRRs
+ //
+
+ Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT);
+ Default.u.hw.MtrrEnabled = 0;
+ WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart);
+
+ //
+ // Synchronize all processors
+ //
+
+ KiSynchronizeMTRRLoad (Context);
+
+ //
+ // Load new MTRRs
+ //
+
+ for (Index=0; Index < Context->NoMTRR; Index++) {
+ WRMSR (MTRR_MSR_VARIABLE_BASE+2*Index, Context->MTRR[Index].Base.u.QuadPart);
+ WRMSR (MTRR_MSR_VARIABLE_MASK+2*Index, Context->MTRR[Index].Mask.u.QuadPart);
+ }
+
+ //
+ // Synchronize all processors
+ //
+
+ KiSynchronizeMTRRLoad (Context);
+
+ _asm {
+
+ ;
+ ; Flush caches (this should be a "nop", but it was in the Intel reference algorithm)
+ ; This is required because of aggressive prefetch of both instr + data
+ ;
+
+ ;
+ ; wbinvd
+ ;
+
+ _emit 0Fh
+ _emit 09h
+
+ ;
+        ; Flush TLBs (required for the same reason as the cache flush above)
+ ;
+
+ mov eax, cr3
+ mov cr3, eax
+ }
+
+ //
+ // Enable MTRRs
+ //
+
+ Default.u.hw.MtrrEnabled = 1;
+ WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart);
+
+ //
+ // Synchronize all processors
+ //
+
+ KiSynchronizeMTRRLoad (Context);
+
+ _asm {
+ ;
+ ; Restore CR4 (global page enable)
+ ;
+
+ mov eax, HldCr4
+ _emit 0Fh
+ _emit 22h
+ _emit 0E0h ; mov cr4, eax
+
+ ;
+ ; Restore CR0 (cache enable)
+ ;
+
+ mov eax, HldCr0
+ mov cr0, eax
+ }
+
+ //
+ // Restore interrupts and return
+ //
+
+ KiRestoreInterrupts (Enable);
+ return STATUS_SUCCESS;
+}
+
+
+VOID
+KiSynchronizeMTRRLoad (
+ IN PNEW_RANGE Context
+ )
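+/*++
+
+Routine Description:
+
+    This function implements the barrier used to keep all processors in
+    lock step while the MTRRs are reloaded.  The initiating processor
+    waits for every other processor to increment TargetCount, resets the
+    count, and then advances TargetPhase to release them; each other
+    processor increments TargetCount and spins until TargetPhase changes.
+
+Arguments:
+
+    Context - Context information for the MTRR update
+
+Return Value:
+
+    None
+
+--*/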
+{
+ ULONG CurrentPhase;
+ volatile ULONG *TargetPhase;
+ PKPRCB Prcb;
+
+ TargetPhase = Context->TargetPhase;
+ Prcb = KeGetCurrentPrcb();
+
+ if (Prcb->Number == (CCHAR) Context->Processor) {
+
+ //
+ // Wait for all processors to signal
+ //
+
+ while (Context->TargetCount != (ULONG) KeNumberProcessors - 1) ;
+
+ //
+ // Reset count for next time
+ //
+
+ Context->TargetCount = 0;
+
+ //
+        // Let waiting processors go to the next synchronization point
+ //
+
+ InterlockedIncrement ((PULONG) TargetPhase);
+
+
+ } else {
+
+ //
+ // Get current phase
+ //
+
+ CurrentPhase = *TargetPhase;
+
+ //
+ // Signal that we have completed the current phase
+ //
+
+ InterlockedIncrement ((PULONG) &Context->TargetCount);
+
+ //
+ // Wait for new phase to begin
+ //
+
+ while (*TargetPhase == CurrentPhase) ;
+ }
+}
diff --git a/private/ntos/ke/i386/mtrr.h b/private/ntos/ke/i386/mtrr.h
new file mode 100644
index 000000000..f7cce0557
--- /dev/null
+++ b/private/ntos/ke/i386/mtrr.h
@@ -0,0 +1,124 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ mtrr.h
+
+Abstract:
+
+ This module contains the i386 specific mtrr register
+ hardware definitions.
+
+Author:
+
+ Ken Reneris (kenr) 11-Oct-95
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+//
+// MTRR MSR architecture definitions
+//
+
+#define MTRR_MSR_CAPABILITIES 0x0fe
+#define MTRR_MSR_DEFAULT 0x2ff
+#define MTRR_MSR_VARIABLE_BASE 0x200
+#define MTRR_MSR_VARIABLE_MASK (MTRR_MSR_VARIABLE_BASE+1)
+
+#define MTRR_PAGE_SIZE 4096
+#define MTRR_PAGE_MASK (~(MTRR_PAGE_SIZE-1))
+
+//
+// Memory range types
+//
+
+#define MTRR_TYPE_UC 0
+#define MTRR_TYPE_USWC 1
+#define MTRR_TYPE_WT 4
+#define MTRR_TYPE_WP 5
+#define MTRR_TYPE_WB 6
+#define MTRR_TYPE_MAX 7
+
+//
+// MTRR specific registers - capability register, default
+// register, and variable mask and base register
+//
+
+#include "pshpack1.h"
+
+typedef struct _MTRR_CAPABILITIES {
+ union {
+ struct {
+ ULONG VarCnt:8;
+ ULONG FixSupported:1;
+ ULONG Reserved_0:1;
+ ULONG UswcSupported:1;
+ ULONG Reserved_1:21;
+ ULONG Reserved_2;
+ } hw;
+ ULONGLONG QuadPart;
+ } u;
+} MTRR_CAPABILITIES, *PMTRR_CAPABILITIES;
+
+typedef struct _MTRR_DEFAULT {
+ union {
+ struct {
+ ULONG Type:8;
+ ULONG Reserved_0:2;
+ ULONG FixedEnabled:1;
+ ULONG MtrrEnabled:1;
+ ULONG Reserved_1:20;
+ ULONG Reserved_2;
+ } hw;
+ ULONGLONG QuadPart;
+ } u;
+} MTRR_DEFAULT, *PMTRR_DEFAULT;
+
+typedef struct _MTRR_VARIABLE_BASE {
+ union {
+ struct {
+ ULONG Type:8;
+ ULONG Reserved_0:4;
+ ULONG PhysBase_1:20;
+ ULONG PhysBase_2:4;
+ ULONG Reserved_1:28;
+ } hw;
+ ULONGLONG QuadPart;
+ } u;
+} MTRR_VARIABLE_BASE, *PMTRR_VARIABLE_BASE;
+
+#define MTRR_MASK_BASE 0x0000000ffffff000
+
+typedef struct _MTRR_VARIABLE_MASK {
+ union {
+ struct {
+ ULONG Reserved_0:11;
+ ULONG Valid:1;
+ ULONG PhysMask_1:20;
+ ULONG PhysMask_2:4;
+ ULONG Reserved_1:28;
+ } hw;
+ ULONGLONG QuadPart;
+ } u;
+} MTRR_VARIABLE_MASK, *PMTRR_VARIABLE_MASK;
+
+#define MTRR_MASK_MASK 0x0000000ffffff000
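+
+//
+// Example (illustration only): a variable MTRR describing a 4MB USWC range
+// at physical address 0xC00000 holds 0xC00000 | MTRR_TYPE_USWC in the base
+// register and 0xFFFC00000 | (1 << 11) in the mask register (mask bits
+// 22-35 set, Valid bit set); see KiLengthToMask in mtrr.c.
+//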
+
+//
+// Masks/constants to check for non-contiguous masks,
+// mask out reserved bits of variable MTRR's,
+// and construct MTRR variable register masks
+//
+
+#define MASK_OVERFLOW_MASK (~0x1000000000)
+#define MTRR_RESVBIT_MASK 0xfffffffff
+#define MTRR_MAX_RANGE_SHIFT 36
+
+#include "poppack.h"
diff --git a/private/ntos/ke/i386/newsysbg.asm b/private/ntos/ke/i386/newsysbg.asm
new file mode 100644
index 000000000..4256f096d
--- /dev/null
+++ b/private/ntos/ke/i386/newsysbg.asm
@@ -0,0 +1,1150 @@
+ title "System Startup"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; systembg.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to initially startup the
+; NT system.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 07-Mar-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+; John Vert (jvert) 25-Jun-1991
+; Major overhaul in order to move into new osloader architecture
+; Removed old debugger hacks
+;
+;--
+.386p
+ .xlist
+include i386\cpu.inc
+include ks386.inc
+include i386\kimacro.inc
+include mac386.inc
+include callconv.inc
+ .list
+
+ option segment:flat
+
+ extrn @ExfInterlockedPopEntrySList@8:DWORD
+ extrn @ExfInterlockedPushEntrySList@12:DWORD
+ extrn @ExInterlockedCompareExchange64@16:DWORD
+ extrn @ExInterlockedPopEntrySList@8:DWORD
+ extrn @ExInterlockedPushEntrySList@12:DWORD
+ extrn @ExpInterlockedCompareExchange64@16:DWORD
+ extrn _ExInterlockedAddLargeInteger@16:DWORD
+ extrn _ExInterlockedExchangeAddLargeInteger@16:DWORD
+ extrn _KiBootFeatureBits:DWORD
+ EXTRNP _KdInitSystem,2
+ EXTRNP KfRaiseIrql,1,IMPORT,FASTCALL
+ EXTRNP KfLowerIrql,1,IMPORT,FASTCALL
+ EXTRNP _KiInitializeKernel,6
+ extrn SwapContext:PROC
+ EXTRNP GetMachineBootPointers
+ EXTRNP _KiInitializePcr,6
+ EXTRNP _KiSwapIDT
+ EXTRNP _KiInitializeTSS,1
+ EXTRNP _KiInitializeTSS2,2
+ EXTRNP _KiInitializeGdtEntry,6
+ extrn _KiTrap08:PROC
+ extrn _KiTrap02:PROC
+ EXTRNP _HalDisplayString,1,IMPORT
+ EXTRNP _KiInitializeAbios,1
+ EXTRNP _KiInitializeMachineType
+ EXTRNP _KeGetCurrentIrql,0,IMPORT
+ EXTRNP _KeBugCheck, 1
+ EXTRNP _KeBugCheckEx, 5
+ EXTRNP _HalInitializeProcessor,1,IMPORT
+ EXTRNP _HalProcessorIdle,0,IMPORT
+ EXTRNP HalClearSoftwareInterrupt,1,IMPORT,FASTCALL
+ EXTRNP _ZwAcceptConnectPort,6
+ EXTRNP _ZwUnmapViewOfSection,2
+
+if NT_INST
+ EXTRNP _KiAcquireSpinLock, 1
+ EXTRNP _KiReleaseSpinLock, 1
+endif
+ extrn _KiFreezeExecutionLock:DWORD
+ extrn _KiDispatcherLock:DWORD
+
+ extrn _IDT:BYTE
+ extrn _IDTLEN:BYTE ; NOTE - really an ABS, linker problems
+
+ extrn _KeNumberProcessors:BYTE
+ extrn _KeActiveProcessors:DWORD
+ extrn _KiIdleSummary:DWORD
+ extrn _KiProcessorBlock:DWORD
+ extrn _KiFindFirstSetRight:BYTE
+
+ EXTRNP _KdPollBreakIn,0
+ extrn _KeLoaderBlock:DWORD
+ extrn _KeI386NpxPresent:DWORD
+ extrn _KeI386CpuType:DWORD
+ extrn _KeI386CpuStep:DWORD
+ extrn _KeTickCount:DWORD
+
+ifndef NT_UP
+ extrn _KiBarrierWait:DWORD
+endif
+
+if DBG
+ extrn _KdDebuggerEnabled:BYTE
+ EXTRNP _DbgBreakPoint,0
+ extrn _DbgPrint:near
+ extrn _KiDPCTimeout:DWORD
+ extrn _MsgDpcTrashedEsp:BYTE
+ extrn _MsgDpcTimeout:BYTE
+endif
+
+;
+; Constants for various variables
+;
+
+_DATA SEGMENT PARA PUBLIC 'DATA'
+
+;
+; Idle thread process object
+;
+
+ align 4
+
+ public _KiIdleProcess
+_KiIdleProcess label byte
+ db ExtendedProcessObjectLength dup(?) ; sizeof (EPROCESS)
+
+;
+; Statically allocated structures for Bootstrap processor
+; idle thread object for P0
+; idle thread stack for P0
+;
+ align 4
+ public P0BootThread
+P0BootThread label byte
+ db ExtendedThreadObjectLength dup(?) ; sizeof (ETHREAD)
+
+;
+; I don't think it is safe to overlap P0 stack and NMI/DoubleFault stack.
+; The NMI handler may decide to continue. We need to make sure the original
+; stack content is complete.
+; [shielint]
+;
+ align 16
+if DBG
+ public _KiDoubleFaultStack
+ db DOUBLE_FAULT_STACK_SIZE dup (?)
+_KiDoubleFaultStack label byte
+endif
+
+ public P0BootStack
+ db KERNEL_STACK_SIZE dup (?)
+P0BootStack label byte
+
+
+;
+; Double fault task stack
+;
+
+MINIMUM_TSS_SIZE EQU TssIoMaps
+
+ align 16
+
+ public _KiDoubleFaultTSS
+_KiDoubleFaultTSS label byte
+ db MINIMUM_TSS_SIZE dup(0)
+
+ public _KiNMITSS
+_KiNMITSS label byte
+ db MINIMUM_TSS_SIZE dup(0)
+
+;
+; Abios specific definitions
+;
+
+ public _KiCommonDataArea, _KiAbiosPresent
+_KiCommonDataArea dd 0
+_KiAbiosPresent dd 0
+
+_DATA ends
+
+ page ,132
+ subttl "System Startup"
+INIT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+DFInternalError db 'DF Stack internal error', 0
+
+
+;++
+;
+; For processor 0, Routine Description:
+;
+; This routine is called when the NT system begins execution.
+; Its function is to initialize system hardware state, call the
+; kernel initialization routine, and then fall into code that
+; represents the idle thread for all processors.
+;
+; Entry state created by the boot loader:
+; A short-form IDT (0-1f) exists and is active.
+; A complete GDT is set up and loaded.
+; A complete TSS is set up and loaded.
+; Page map is set up with minimal start pages loaded
+; The lower 4Mb of virtual memory are directly mapped into
+; physical memory.
+;
+; The system code (ntoskrnl.exe) is mapped into virtual memory
+; as described by its memory descriptor.
+; DS=ES=SS = flat
+; ESP->a useable boot stack
+; Interrupts OFF
+;
+; For processor > 0, Routine Description:
+;
+; This routine is called when each additional processor begins execution.
+; The entry state for the processor is:
+; IDT, GDT, TSS, stack, selectors, PCR = all valid
+;       Page directory is set to the current running directory
+; LoaderBlock - parameters for this processors
+;
+; Arguments:
+;
+; PLOADER_PARAMETER_BLOCK LoaderBlock
+;
+; Return Value:
+;
+; None.
+;
+;--
+;
+; Arguments for KiSystemStartupPx
+;
+
+
+KissLoaderBlock equ [ebp+8]
+
+;
+; Local variables
+;
+
+KissGdt equ [ebp-4]
+KissPcr equ [ebp-8]
+KissTss equ [ebp-12]
+KissIdt equ [ebp-16]
+KissIrql equ [ebp-20]
+KissPbNumber equ [ebp-24]
+KissIdleStack equ [ebp-28]
+KissIdleThread equ [ebp-32]
+
+cPublicProc _KiSystemStartup ,1
+
+ push ebp
+ mov ebp, esp
+ sub esp, 32 ; Reserve space for local variables
+
+ mov ebx, dword ptr KissLoaderBlock
+ mov _KeLoaderBlock, ebx ; Get loader block param
+
+ movzx ecx, _KeNumberProcessors ; get number of processors
+ mov KissPbNumber, ecx
+        or      ecx, ecx                ; Is this the boot processor?
+ jnz @f ; no
+
+ ; P0 uses static memory for these
+ mov dword ptr [ebx].LpbThread, offset P0BootThread
+ mov dword ptr [ebx].LpbKernelStack, offset P0BootStack
+
+ push KGDT_R0_PCR ; P0 needs FS set
+ pop fs
+
+        ; Save processor number in Prcb
+ mov byte ptr fs:PcPrcbData+PbNumber, cl
+@@:
+ mov eax, dword ptr [ebx].LpbThread
+ mov dword ptr KissIdleThread, eax
+
+ mov eax, dword ptr [ebx].LpbKernelStack
+ mov dword ptr KissIdleStack, eax
+
+ stdCall _KiInitializeMachineType
+ cmp byte ptr KissPbNumber, 0 ; if not p0, then
+ jne kiss_notp0 ; skip a bunch
+
+;
+;+++++++++++++++++++++++
+;
+; Initialize the PCR
+;
+
+ stdCall GetMachineBootPointers
+;
+; Upon return:
+; (edi) -> gdt
+; (esi) -> pcr
+; (edx) -> tss
+; (eax) -> idt
+; Now, save them in our local variables
+;
+
+
+ mov KissGdt, edi
+ mov KissPcr, esi
+ mov KissTss, edx
+ mov KissIdt, eax
+
+;
+; edit TSS to be 32bits. loader gives us a tss, but it's 16bits!
+;
+ lea ecx,[edi]+KGDT_TSS ; (ecx) -> TSS descriptor
+ mov byte ptr [ecx+5],089h ; 32bit, dpl=0, present, TSS32, not busy
+
+; KiInitializeTSS2(
+; Linear address of TSS
+; Linear address of TSS descriptor
+; );
+ stdCall _KiInitializeTSS2, <KissTss, ecx>
+
+ stdCall _KiInitializeTSS, <KissTss>
+
+ mov cx,KGDT_TSS
+ ltr cx
+
+
+;
+; set up 32bit double fault task gate so we can catch double faults.
+;
+
+ mov eax,KissIdt
+ lea ecx,[eax]+40h ; Descriptor 8
+ mov byte ptr [ecx+5],085h ; dpl=0, present, taskgate
+
+ mov word ptr [ecx+2],KGDT_DF_TSS
+
+ lea ecx,[edi]+KGDT_DF_TSS
+ mov byte ptr [ecx+5],089h ; 32bit, dpl=0, present, TSS32, not busy
+
+ mov edx,offset FLAT:_KiDoubleFaultTSS
+ mov eax,edx
+ mov [ecx+KgdtBaseLow],ax
+ shr eax,16
+ mov [ecx+KgdtBaseHi],ah
+ mov [ecx+KgdtBaseMid],al
+ mov eax, MINIMUM_TSS_SIZE
+ mov [ecx+KgdtLimitLow],ax
+
+; KiInitializeTSS(
+; address of double fault TSS
+; );
+ stdCall _KiInitializeTSS, <edx>
+
+ mov eax,cr3
+ mov [edx+TssCr3],eax
+
+if DBG
+ mov eax, offset FLAT:_KiDoubleFaultStack
+else
+; on a retail build we overload the double fault stack to overlay
+; part of the kernel's image. (we overlay the ZW thunks)
+ mov eax, offset FLAT:_ZwUnmapViewOfSection@8 - 4
+ and eax, not 3
+ push eax
+
+ sub eax, offset FLAT:_ZwAcceptConnectPort@24
+ cmp eax, 0a00h ; make sure there's enough stack
+ jnc short @f ; space available
+
+ pushad
+ stdCall _HalDisplayString, <offset FLAT:DFInternalError>
+ popad
+@@:
+ pop eax
+endif
+ mov dword ptr [edx+038h],eax
+ mov dword ptr [edx+TssEsp0],eax
+
+ mov dword ptr [edx+020h],offset FLAT:_KiTrap08
+ mov dword ptr [edx+024h],0 ; eflags
+ mov word ptr [edx+04ch],KGDT_R0_CODE ; set value for CS
+ mov word ptr [edx+058h],KGDT_R0_PCR ; set value for FS
+ mov [edx+050h],ss
+ mov word ptr [edx+048h],KGDT_R3_DATA OR RPL_MASK ; Es
+ mov word ptr [edx+054h],KGDT_R3_DATA OR RPL_MASK ; Ds
+
+;
+; set up 32bit NMI task gate so we can catch NMI faults.
+;
+
+ mov eax,KissIdt
+ lea ecx,[eax]+10h ; Descriptor 2
+ mov byte ptr [ecx+5],085h ; dpl=0, present, taskgate
+
+ mov word ptr [ecx+2],KGDT_NMI_TSS
+
+ lea ecx,[edi]+KGDT_NMI_TSS
+ mov byte ptr [ecx+5],089h ; 32bit, dpl=0, present, TSS32, not busy
+
+ mov edx,offset FLAT:_KiNMITSS
+ mov eax,edx
+ mov [ecx+KgdtBaseLow],ax
+ shr eax,16
+ mov [ecx+KgdtBaseHi],ah
+ mov [ecx+KgdtBaseMid],al
+ mov eax, MINIMUM_TSS_SIZE
+ mov [ecx+KgdtLimitLow],ax
+
+ push edx
+ stdCall _KiInitializeTSS,<edx> ; KiInitializeTSS(
+ ; address TSS
+ ; );
+
+        ; We are using the DoubleFault stack as both the DoubleFault stack
+        ; and the NMI Task Gate stack
+
+
+ mov eax,cr3
+ mov [edx+TssCr3],eax
+
+ mov eax, offset FLAT:_KiDoubleFaultTSS
+ mov eax, dword ptr [eax+038h] ; get DF stack
+ mov dword ptr [edx+TssEsp0],eax ; use it for NMI stack
+ mov dword ptr [edx+038h],eax
+
+ mov dword ptr [edx+020h],offset FLAT:_KiTrap02
+ mov dword ptr [edx+024h],0 ; eflags
+ mov word ptr [edx+04ch],KGDT_R0_CODE ; set value for CS
+ mov word ptr [edx+058h],KGDT_R0_PCR ; set value for FS
+ mov [edx+050h],ss
+ mov word ptr [edx+048h],KGDT_R3_DATA OR RPL_MASK ; Es
+ mov word ptr [edx+054h],KGDT_R3_DATA OR RPL_MASK ; Ds
+
+ stdCall _KiInitializePcr, <KissPbNumber,KissPcr,KissIdt,KissGdt,KissTss,KissIdleThread>
+
+;
+; set current process pointer in current thread object
+;
+ mov edx, KissIdleThread
+ mov ecx, offset FLAT:_KiIdleProcess ; (ecx)-> idle process obj
+ mov [edx]+ThApcState+AsProcess, ecx ; set addr of thread's process
+
+
+;
+; set up PCR: Teb, Prcb pointers. The PCR:InitialStack, and various fields
+; of Prcb will be set up in _KiInitializeKernel
+;
+
+ mov dword ptr fs:PcTeb, 0 ; PCR->Teb = 0
+
+;
+; Initialize KernelDr7 and KernelDr6 to 0. This must be done before
+; the debugger is called.
+;
+
+ mov dword ptr fs:PcPrcbData+PbProcessorState+PsSpecialRegisters+SrKernelDr6,0
+ mov dword ptr fs:PcPrcbData+PbProcessorState+PsSpecialRegisters+SrKernelDr7,0
+
+;
+; Since the entries of Kernel IDT have their Selector and Extended Offset
+; fields set up in the wrong order, we need to swap them back to the order
+; which i386 recognizes.
+; This is only done by the bootup processor.
+;
+
+ stdCall _KiSwapIDT ; otherwise, do the work
+
+;
+; Switch to R3 flat selectors that we want loaded so lazy segment
+; loading will work.
+;
+ mov eax,KGDT_R3_DATA OR RPL_MASK ; Set RPL = ring 3
+ mov ds,ax
+ mov es,ax
+
+;
+; Now copy our trap handlers to replace kernel debugger's handlers.
+;
+
+ mov eax, KissIdt ; (eax)-> Idt
+ push dword ptr [eax+40h] ; save double fault's descriptor
+ push dword ptr [eax+44h]
+ push dword ptr [eax+10h] ; save nmi fault's descriptor
+ push dword ptr [eax+14h]
+
+ mov edi,KissIdt
+ mov esi,offset FLAT:_IDT
+ mov ecx,offset FLAT:_IDTLEN ; _IDTLEN is really an abs, we use
+ shr ecx,2
+
+ rep movsd
+ pop dword ptr [eax+14h] ; restore nmi fault's descriptor
+ pop dword ptr [eax+10h]
+ pop dword ptr [eax+44h] ; restore double fault's descriptor
+ pop dword ptr [eax+40h]
+
+kiss_notp0:
+ ;
+ ; A new processor can't come online while execution is frozen
+ ; Take freezelock while adding a processor to the system
+ ; NOTE: don't use SPINLOCK macro - it has debugger stuff in it
+ ;
+
+if NT_INST
+ lea eax, _KiFreezeExecutionLock
+ stdCall _KiAcquireSpinLock, <eax>
+else
+@@: test _KiFreezeExecutionLock, 1
+ jnz short @b
+
+ lock bts _KiFreezeExecutionLock, 0
+ jc short @b
+endif
+
+
+;
+; Add processor to active summary, and update BroadcastMasks
+;
+ mov ecx, dword ptr KissPbNumber ; mark this processor as active
+ mov byte ptr fs:PcNumber, cl
+ mov eax, 1
+ shl eax, cl ; our affinity bit
+ or _KeActiveProcessors, eax ; New affinity of active processors
+
+ mov fs:PcSetMember, eax
+ mov fs:PcPrcbData.PbSetMember, eax
+
+;
+; Initialize the interprocessor interrupt vector and increment ready
+; processor count to enable kernel debugger.
+;
+ stdCall _HalInitializeProcessor, <dword ptr KissPbNumber>
+
+;
+; Initialize ABIOS data structure if present.
+; Note, the KiInitializeAbios MUST be called after the KeLoaderBlock is
+; initialized.
+;
+ stdCall _KiInitializeAbios, <dword ptr KissPbNumber>
+
+ inc _KeNumberProcessors ; One more processor now active
+
+if NT_INST
+ lea eax, _KiFreezeExecutionLock
+ stdCall _KiReleaseSpinLock, <eax>
+else
+ xor eax, eax ; release the executionlock
+ mov _KiFreezeExecutionLock, eax
+endif
+
+ cmp byte ptr KissPbNumber, 0
+ jnz @f
+
+; don't stop in debugger
+ stdCall _KdInitSystem, <_KeLoaderBlock,0>
+
+if DEVL
+;
+; Give debugger an opportunity to gain control.
+;
+
+ POLL_DEBUGGER
+endif ; DEVL
+@@:
+ nop ; leave a spot for int-3 patch
+;
+; Set initial IRQL = HIGH_LEVEL for init
+;
+ mov ecx, HIGH_LEVEL
+ fstCall KfRaiseIrql
+ mov KissIrql, al
+
+;
+; If the target machine does not implement the cmpxchg8b instruction,
+; then patch the routines that use this instruction to simply jump
+; to the corresponding routines that use spinlocks.
+;
+ pushfd ; Save flags
+
+ cmp byte ptr KissPbNumber, 0
+ jnz cx8done ; only test on boot processor
+
+        pop     ebx                     ; Get flags into ebx
+ push ebx ; Save original flags
+
+ mov ecx, ebx
+ xor ecx, EFLAGS_ID ; flip ID bit
+ push ecx
+ popfd ; load it into flags
+ pushfd ; re-save flags
+        pop     ecx                     ; get flags into ecx
+ cmp ebx, ecx ; did bit stay flipped?
+ je short nocx8 ; No, don't try CPUID
+
+ or ebx, EFLAGS_ID
+ push ebx
+ popfd ; Make sure ID bit is set
+.586p
+ mov eax, 1 ; Get feature bits
+ cpuid ; Uses eax, ebx, ecx, edx
+.386p
+ test edx, 100h
+ jz short nocx8
+ or _KiBootFeatureBits, KF_CMPXCHG8B ; We're committed to using
+ jmp short cx8done ; this feature
+
+nocx8:
+ lea eax, @ExInterlockedCompareExchange64@16 ; get target address
+ lea ecx, @ExpInterlockedCompareExchange64@16 ; get source address
+ mov byte ptr [eax], 0e9H ; set jump opcode value
+ lea edx, [eax] + 5 ; get simulated eip value
+ sub ecx, edx ; compute displacement
+ mov [eax] + 1, ecx ; set jump displacement value
+ lea eax, @ExInterlockedPopEntrySList@8 ; get target address
+ lea ecx, @ExfInterlockedPopEntrySList@8 ; get source address
+ mov byte ptr [eax], 0e9H ; set jump opcode value
+ lea edx, [eax] + 5 ; get simulated eip value
+ sub ecx, edx ; compute displacement
+ mov [eax] + 1, ecx ; set jump displacement value
+ lea eax, @ExInterlockedPushEntrySList@12 ; get target address
+ lea ecx, @ExfInterlockedPushEntrySList@12 ; get source address
+ mov byte ptr [eax], 0e9H ; set jump opcode value
+ lea edx, [eax] + 5 ; get simulated eip value
+ sub ecx, edx ; compute displacement
+ mov [eax] + 1, ecx ; set jump displacement value
+ lea eax, _ExInterlockedExchangeAddLargeInteger@16 ; get target address
+ lea ecx, _ExInterlockedAddLargeInteger@16 ; get source address
+ mov byte ptr [eax], 0e9H ; set jump opcode value
+ lea edx, [eax] + 5 ; get simulated eip value
+ sub ecx, edx ; compute displacement
+ mov [eax] + 1, ecx ; set jump displacement value
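+
+; For example, patching ExInterlockedCompareExchange64 stores the one-byte
+; opcode 0E9h (jmp rel32) at the start of the routine, followed by a 32-bit
+; displacement computed as target - (patch address + 5), so the patched
+; routine tail-jumps to the spinlock-based ExpInterlockedCompareExchange64.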
+
+cx8done:
+ popfd
+
+;
+; Initialize ebp, esp, and argument registers for initializing the kernel.
+;
+ mov ebx, KissIdleThread
+ mov edx, KissIdleStack
+ mov eax, KissPbNumber
+ and edx, NOT 3h ; align stack to 4 byte boundary
+
+ xor ebp, ebp ; (ebp) = 0. No more stack frame
+ mov esp, edx
+ push CR0_EM+CR0_TS+CR0_MP ; make space for Cr0NpxState
+
+; arg6 - LoaderBlock
+; arg5 - processor number
+; arg4 - addr of prcb
+; arg3 - idle thread's stack
+; arg2 - addr of current thread obj
+; arg1 - addr of current process obj
+
+; initialize system data structures
+; and HAL.
+
+ stdCall _KiInitializeKernel,<offset _KiIdleProcess,ebx,edx,dword ptr fs:PcPrcb,eax,_KeLoaderBlock>
+
+
+;
+; Set "shadow" priority value for Idle thread. This will keep the Mutex
+; priority boost/drop code from dropping priority on the Idle thread, and
+; thus avoids leaving a bit set in the ActiveMatrix for the Idle thread when
+; there should never be any such bit.
+;
+
+        mov     ebx,fs:PcPrcbData+PbCurrentThread ; (ebx)->Thread
+ mov byte ptr [ebx]+ThPriority,LOW_REALTIME_PRIORITY ; set pri.
+
+;
+; Control is returned to the idle thread with IRQL at HIGH_LEVEL. Lower IRQL
+; to DISPATCH_LEVEL and set wait IRQL of idle thread.
+;
+
+ sti
+ mov ecx, DISPATCH_LEVEL
+ fstCall KfLowerIrql
+ mov byte ptr [ebx]+ThWaitIrql, DISPATCH_LEVEL
+
+;
+; The following code represents the idle thread for a processor. The idle
+; thread executes at IRQL DISPATCH_LEVEL and continually polls for work to
+; do. Control may be given to this loop either as a result of a return from
+; the system initialize routine or as the result of starting up another
+; processor in a multiprocessor configuration.
+;
+
+ mov ebx, PCR[PcSelfPcr] ; get address of PCR
+
+;
+; In a multiprocessor system the boot processor proceeds directly into
+; the idle loop. As other processors start executing, however, they do
+; not directly enter the idle loop and spin until all processors have
+; been started and the boot master allows them to proceed.
+;
+
+ifndef NT_UP
+
+@@: cmp _KiBarrierWait, 0 ; check if barrier set
+ jnz short @b ; if nz, barrier set
+
+endif
+
+ jmp KiIdleLoop ; enter idle loop
+
+stdENDP _KiSystemStartup
+
+INIT ends
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE' ; Put IdleLoop in text section
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ page ,132
+ subttl "Idle Loop"
+;++
+;
+; Routine Description:
+;
+; This routine continuously executes the idle loop and never returns.
+;
+; Arguments:
+;
+; ebx - Address of the current processor PCR.
+;
+; Return value:
+;
+; None - routine never returns.
+;
+;--
+
+ public KiIdleLoop
+KiIdleLoop proc
+
+ lea ebp, [ebx].PcPrcbData.PbDpcListHead ; set DPC listhead address
+
+if DBG
+
+ xor edi, edi ; reset poll breakin counter
+
+endif
+
+ jmp short kid20 ; Skip HalIdleProcessor on first iteration
+
+;
+; There are no entries in the DPC list and a thread has not been selected
+; for execution on this processor. Call the HAL so power management can be
+; performed.
+;
+; N.B. The HAL is called with interrupts disabled. The HAL will return
+; with interrupts enabled.
+;
+; N.B. Use a call instruction instead of a push-jmp, as the call instruction
+;      executes faster and won't invalidate the processor's call-return stack
+; cache.
+;
+
+kid10: stdCall _HalProcessorIdle ;
+
+
+;
+; Give debugger an opportunity to gain control on debug systems.
+;
+; N.B. On an MP system the lowest numbered idle processor is the only
+; processor that polls for a breakin request.
+;
+
+kid20:
+
+if DBG
+ifndef NT_UP
+
+ mov eax, _KiIdleSummary ; get idle summary
+ mov ecx, [ebx].PcSetMember ; get set member
+ dec ecx ; compute right bit mask
+ and eax, ecx ; check if any lower bits set
+ jnz short CheckDpcList ; if nz, not lowest numbered
+
+endif
+
+ dec edi ; decrement poll counter
+ jg short CheckDpcList ; if g, not time to poll
+
+ POLL_DEBUGGER ; check if break in requested
+endif
+
+kid30:
+
+if DBG
+ifndef NT_UP
+
+ mov edi, 20 * 1000 ; set breakin poll interval
+
+else
+
+ mov edi, 100 ; UP idle loop has a HLT in it
+
+endif
+endif
+
+;
+; Disable interrupts and check if there is any work in the DPC list
+; of the current processor or a target processor.
+;
+
+CheckDpcList: ;
+
+;
+; N.B. The following code enables interrupts for a few cycles, then
+; disables them again for the subsequent DPC and next thread
+; checks.
+;
+
+ sti ; enable interrupts
+ nop ;
+ nop ;
+ cli ; disable interrupts
+
+;
+; Process the deferred procedure call list for the current processor.
+;
+
+ cmp ebp, [ebp].LsFlink ; check if DPC list is empty
+ je short CheckNextThread ; if eq, DPC list is empty
+ mov cl, DISPATCH_LEVEL ; set interrupt level
+ fstCall HalClearSoftwareInterrupt ; clear software interrupt
+ call KiRetireDpcList ; process the current DPC list
+
+if DBG
+
+ xor edi, edi ; clear breakin poll interval
+
+endif
+
+;
+; Check if a thread has been selected to run on the current processor.
+;
+
+CheckNextThread: ;
+ cmp dword ptr [ebx].PcPrcbData.PbNextThread, 0 ; thread selected?
+ je short kid10 ; if eq, no thread selected
+
+;
+; A thread has been selected for execution on this processor. Acquire
+; dispatcher database lock, get the thread address again (it may have
+; changed), clear the address of the next thread in the processor block,
+; and call swap context to start execution of the selected thread.
+;
+; N.B. If the dispatcher database lock cannot be obtained immediately,
+; then attempt to process another DPC rather than spinning on the
+; dispatcher database lock.
+;
+; N.B. This polls for the spinlock by first using a non-interlocked
+; instruction. This way if the lock is busy, and the code is
+; spinning, this processor won't be generating lots of locked cycles
+;
+
+ifndef NT_UP
+
+ lea eax, _KiDispatcherLock ; get address of dispatcher lock
+ TEST_SPINLOCK eax, <short CheckDpcList>
+ ACQUIRE_SPINLOCK eax, <short CheckDpcList>, NoChecking ; acquire dispatcher database lock
+
+endif
+
+;
+; Raise IRQL to synchronization level and enable interrupts.
+;
+
+
+ mov ecx, SYNCH_LEVEL ; raise IRQL to synchronization level
+ fstCall KfRaiseIrql ;
+ sti ; enable interrupts
+ mov esi, [ebx].PcPrcbData.PbNextThread ; get next thread address
+ mov edi, [ebx].PcPrcbData.PbCurrentThread ; set current thread address
+ mov dword ptr [ebx].PcPrcbData.PbNextThread, 0 ; clear next thread address
+ mov [ebx].PcPrcbData.PbCurrentThread, esi ; set current thread address
+
+ mov cl, 1 ; set APC interrupt bypass disable
+ call SwapContext ;
+ mov ecx, DISPATCH_LEVEL ; lower IRQL to dispatch level
+ fstCall KfLowerIrql ;
+
+ lea ebp, [ebx].PcPrcbData.PbDpcListHead ; set DPC listhead address
+ jmp kid30 ;
+
+KiIdleLoop endp
+
+ page ,132
+ subttl "Retire Deferred Procedure Call List"
+;++
+;
+; Routine Description:
+;
+; This routine is called to retire the specified deferred procedure
+; call list. DPC routines are called using the idle thread (current)
+; stack.
+;
+; N.B. Interrupts must be disabled and the DPC list lock held on entry
+; to this routine. Control is returned to the caller with the same
+; conditions true.
+;
+; N.B. The registers ebx and ebp are preserved across the call.
+;
+; Arguments:
+;
+; ebx - Address of the target processor PCR.
+; ebp - Address of the target DPC listhead.
+;
+; Return value:
+;
+; None.
+;
+;--
+
+ public KiRetireDpcList
+KiRetireDpcList proc
+
+ifndef NT_UP
+
+ push esi ; save register
+ lea esi, [ebx].PcPrcbData.PbDpcLock ; get DPC lock address
+
+endif
+
+rdl5: mov PCR[PcPrcbData.PbDpcRoutineActive], esp ; set DPC routine active
+
+
+;
+; Process the DPC List.
+;
+
+
+rdl10: ;
+
+ifndef NT_UP
+
+ ACQUIRE_SPINLOCK esi, rdl50, NoChecking ; acquire DPC lock
+ cmp ebp, [ebp].LsFlink ; check if DPC list is empty
+ je rdl45 ; if eq, DPC list is empty
+
+endif
+
+ mov edx, [ebp].LsFlink ; get address of next entry
+ mov ecx, [edx].LsFlink ; get address of next entry
+ mov [ebp].LsFlink, ecx ; set address of next in header
+ mov [ecx].LsBlink, ebp ; set address of previous in next
+        sub     edx, DpDpcListEntry     ; compute address of DPC object
+ mov ecx, [edx].DpDeferredRoutine ; get DPC routine address
+
+if DBG
+
+.fpo (5, 0, 0, 0, 0, 0)
+
+ push edi ; save register
+ push ecx ; save DPC routine address
+ push dword ptr PCR[PcPrcbData.PbInterruptCount] ; save interrupt count
+ push dword ptr PCR[PcPrcbData.PbInterruptTime] ; save interrupt time
+ push _KeTickCount ; save current tick count
+ mov edi, esp ; save current stack pointer
+
+endif
+
+.fpo (4, 0, 0, 0, 0, 0)
+
+ push [edx].DpSystemArgument2 ; second system argument
+ push [edx].DpSystemArgument1 ; first system argument
+ push [edx].DpDeferredContext ; get deferred context argument
+ push edx ; address of DPC object
+ mov dword ptr [edx]+DpLock, 0 ; clear DPC inserted state
+ dec dword ptr [ebx].PcPrcbData.PbDpcQueueDepth ; decrement depth
+
+ifndef NT_UP
+
+ RELEASE_SPINLOCK esi, NoChecking ; release DPC lock
+
+endif
+
+ sti ; enable interrupts
+ call ecx ; call DPC routine
+
+if DBG
+
+ stdCall _KeGetCurrentIrql ; get current IRQL
+ cmp al, DISPATCH_LEVEL ; check if still at dispatch level
+ jne rdl55 ; if ne, not at dispatch level
+ cmp esp, edi ; check if stack pointer is correct
+ jne rdl60 ; if ne, stack pointer is not correct
+ mov edi, [esp] ; get starting tick count
+ add edi, _KiDPCTimeout ; adjust for max dpc time allowed
+ cmp _KeTickCount, edi ; check if DPC executed too long
+ jae rdl70 ; if ae, DPC executed too long
+rdl30: add esp, 4 * 4 ; remove parameters from stack
+ pop edi ; restore register
+
+endif
+
+rdl35: cli ; disable interrupts
+ cmp ebp, [ebp].LsFlink ; check if DPC list is empty
+ jne rdl10 ; if ne, DPC list not empty
+rdl40: mov PCR[PcPrcbData.PbDpcRoutineActive], 0 ; clear DPC routine active
+ mov [ebx].PcPrcbData.PbDpcInterruptRequested, 0 ; clear DPC requested
+
+;
+; Check one last time that the DPC list is empty. This is required to
+; close a race condition with the DPC queuing code where it appears that
+; a DPC routine is active (and thus an interrupt is not requested), but
+; this code has decided the DPC list is empty and is clearing the DPC
+; active flag.
+;
+
+ cmp ebp, [ebp].LsFlink ; check if DPC list is empty
+ jne rdl5 ; if ne, DPC list not empty
+
+ifndef NT_UP
+
+        pop     esi                     ; restore register
+
+endif
+
+ ret ; return
+
+;
+; Unlock DPC list and clear DPC active.
+;
+
+rdl45: ;
+
+ifndef NT_UP
+
+ RELEASE_SPINLOCK esi, NoChecking ; release DPC lock
+ jmp short rdl40 ;
+
+endif
+
+ifndef NT_UP
+
+rdl50: sti ; enable interrupts
+        SPIN_ON_SPINLOCK esi, <short rdl35> ; spin until lock is free
+
+endif
+
+if DBG
+
+rdl55: stdCall _KeBugCheckEx, <IRQL_NOT_GREATER_OR_EQUAL, ebx, eax, 0, 0> ;
+
+rdl60: push dword ptr [edi+12] ; push address of DPC function
+ push offset FLAT:_MsgDpcTrashedEsp ; push message address
+ call _DbgPrint ; print debug message
+ add esp, 8 ; remove arguments from stack
+ int 3 ; break into debugger
+ mov esp, edi ; reset stack pointer
+ jmp rdl30 ;
+
+rdl70:  mov     edx, PCR[PcPrcbData.PbInterruptTime] ; get starting interrupt time
+ sub edx, [esp+4] ; compute time in DPC routine
+ jc rdl30 ; if c, interrupt time wrapped
+ mov ecx, PCR[PcPrcbData.PbInterruptCount] ; get starting interrupt count
+        sub     ecx, [esp+8]            ; compute interrupts taken while in DPC
+ mov eax, [esp+12] ; get address of DPC function
+ push edx ; push interrupt time
+ push ecx ; push interrupts count
+ push eax ; push address of DPC function
+ push offset FLAT:_MsgDpcTimeout ; push message address
+ call _DbgPrint ; print debug message
+ add esp, 4 * 4 ; remove arguments from stack
+ cmp _KdDebuggerEnabled, 0 ; check if debugger enabled
+ je rdl30 ; if eq, debugger not enabled
+ call _DbgBreakPoint@0 ; break into debugger
+ jmp rdl30 ;
+
+endif
+
+KiRetireDpcList endp
+
+_TEXT$00 ends
+
+_TEXT SEGMENT DWORD PUBLIC 'CODE' ; Put IdleLoop in text section
+
+ page ,132
+ subttl "Set up 80387, or allow for emulation"
+;++
+;
+; Routine Description:
+;
+; This routine is called during kernel initialization once for each
+; processor. It sets EM+TS+MP whether we are emulating or not.
+;
+; If the 387 hardware exists, EM+TS+MP will all be cleared on the
+; first trap 07. Thereafter, EM will never be seen for this thread.
+; MP+TS will only be set when an error is detected (via IRQ 13), and
+; it will be cleared by the trap 07 that will occur on the next FP
+; instruction.
+;
+; If we're emulating, EM+TS+MP are all always set to ensure that all
+; FP instructions trap to the emulator (the trap 07 handler is edited
+; to point to the emulator, rather than KiTrap07).
+;
+; Arguments:
+;
+; None.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiSetCR0Bits ,0
+
+ mov eax, cr0
+;
+; There are two useful bits in CR0 that we want to turn on if the processor
+; is a 486 or above. (They don't exist on the 386)
+;
+; CR0_AM - Alignment mask (so we can turn on alignment faults)
+;
+; CR0_WP - Write protect (so we get page faults if we write to a
+; write-protected page from kernel mode)
+;
+ cmp byte ptr fs:PcPrcbData.PbCpuType, 3h
+ jbe @f
+;
+; The processor is not a 386, (486 or greater) so we assume it is ok to
+; turn on these bits.
+;
+
+ or eax, CR0_WP
+
+@@:
+ mov cr0, eax
+ stdRET _KiSetCR0Bits
+
+stdENDP _KiSetCR0Bits
+
+
+ifdef DBGMP
+cPublicProc _KiPollDebugger,0
+cPublicFpo 0,3
+ push eax
+ push ecx
+ push edx
+ POLL_DEBUGGER
+ pop edx
+ pop ecx
+ pop eax
+ stdRET _KiPollDebugger
+stdENDP _KiPollDebugger
+
+endif
+
+_TEXT ends
+
+ end
diff --git a/private/ntos/ke/i386/p2w.asm b/private/ntos/ke/i386/p2w.asm
new file mode 100644
index 000000000..4f8f356dd
--- /dev/null
+++ b/private/ntos/ke/i386/p2w.asm
@@ -0,0 +1,69 @@
+ .286P
+_TEXT SEGMENT WORD PUBLIC 'CODE'
+_TEXT ENDS
+_DATA SEGMENT WORD PUBLIC 'DATA'
+_DATA ENDS
+CONST SEGMENT WORD PUBLIC 'CONST'
+CONST ENDS
+_BSS SEGMENT WORD PUBLIC 'BSS'
+_BSS ENDS
+ DGROUP GROUP _DATA, CONST, _BSS
+ ASSUME CS:_TEXT, DS:DGROUP, ES:DGROUP, SS:DGROUP
+PUBLIC _p2w
+EXTRN _printf:NEAR
+
+include callconv.inc ; calling convention macros
+
+_DATA SEGMENT
+s1 db ' equ 0',0
+s2 db '%hX%04hXH',0ah,0
+s3 db '%hXH',0ah,0
+_DATA ends
+
+_TEXT segment
+
+;
+; p2w(&ULONG which is value to print)
+;
+; if ([bx+2] != 0)
+; printf(bx+2, bx, %x, %04x)
+; else
+; printf(bx, %x)
+
+_p2w PROC NEAR
+; Line 688
+ push bp
+ mov bp, sp
+ push bx
+ push di
+ push si
+
+ push offset DGROUP:s1
+ call _printf
+ add sp,2
+
+ mov bx,[bp+4]
+ cmp word ptr [bx+2],0
+ jz p2w10
+
+ push [bx]
+ push [bx+2]
+ push offset DGROUP:s2
+ call _printf
+ add sp,6
+ jmp p2w20
+
+p2w10: push [bx]
+ push offset DGROUP:s3
+ call _printf
+ add sp,4
+
+p2w20: pop si
+ pop di
+ pop bx
+ leave
+ stdRET _p2w
+_p2w ENDP
+
+_TEXT ENDS
+END
diff --git a/private/ntos/ke/i386/procstat.asm b/private/ntos/ke/i386/procstat.asm
new file mode 100644
index 000000000..7024ee501
--- /dev/null
+++ b/private/ntos/ke/i386/procstat.asm
@@ -0,0 +1,323 @@
+ title "Processor State Save Restore"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; procstat.asm
+;
+; Abstract:
+;
+; This module implements procedures for saving and restoring
+; processor control state, and processor run&control state.
+; These procedures support debugging of UP and MP systems.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 30-Aug-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+.386p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include callconv.inc
+ .list
+
+ EXTRNP _KeContextToKframes,5
+ EXTRNP _KeContextFromKframes,3
+ extrn _KeFeatureBits:DWORD
+
+ page ,132
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ subttl "Save Processor State"
+;++
+;
+; KiSaveProcessorState(
+; PKTRAP_FRAME TrapFrame,
+; PKEXCEPTION_FRAME ExceptionFrame
+; );
+;
+; Routine Description:
+;
+; This routine saves the processor state for the debugger. When the current
+; processor receives an IPI_FREEZE request, it saves all the registers
+; in a save area in the PRCB so the debugger can get access to them.
+;
+; Arguments:
+;
+; TrapFrame (esp+4) - Pointer to machine trap frame
+;
+; ExceptionFrame (esp+8) - Pointer to exception frame
+; (IGNORED on the x86!)
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiSaveProcessorState ,2
+
+ mov eax, [esp+4] ; (eax) -> TrapFrame
+
+ mov edx, PCR[PcPrcb] ; (edx)->PrcbData
+ add edx, PbProcessorState ; (edx)->ProcessorState
+ push edx
+;
+; Copy the whole TrapFrame to our ProcessorState
+;
+
+ lea ecx, [edx].PsContextFrame
+ mov dword ptr [ecx].CsContextFlags, CONTEXT_FULL OR CONTEXT_DEBUG_REGISTERS
+
+; ecx - ContextFrame
+; 0 - ExceptionFrame == NULL
+; eax - TrapFrame
+ stdCall _KeContextFromKframes, <eax, 0, ecx>
+
+;
+; Save special registers for debugger
+;
+
+ ; TOS = PKPROCESSOR_STATE
+ call _KiSaveProcessorControlState@4
+
+ stdRET _KiSaveProcessorState
+
+stdENDP _KiSaveProcessorState
+
+
+ page ,132
+ subttl "Save Processor Control State"
+;++
+;
+; KiSaveProcessorControlState(
+; PKPROCESSOR_STATE ProcessorState
+; );
+;
+; Routine Description:
+;
+; This routine saves the control subset of the processor state.
+; (Saves the same information as KiSaveProcessorState EXCEPT that the
+; data in the TrapFrame/ExceptionFrame, i.e. the Context record, is NOT saved.)
+; Called by the debug subsystem and by KiSaveProcessorState().
+;
+; N.B. This procedure will save Dr7, and then 0 it. This prevents
+; recursive hardware trace breakpoints and allows debuggers
+; to work.
+;
+; Arguments:
+;
+; ProcessorState (esp+4) - Supplies a pointer to the processor state
+; save area (PKPROCESSOR_STATE).
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiSaveProcessorControlState ,1
+
+ mov edx, [esp+4] ; ProcessorState
+
+;
+; Save special registers for debugger
+;
+ xor ecx,ecx
+
+ mov eax, cr0
+ mov [edx].PsSpecialRegisters.SrCr0, eax
+ mov eax, cr2
+ mov [edx].PsSpecialRegisters.SrCr2, eax
+ mov eax, cr3
+ mov [edx].PsSpecialRegisters.SrCr3, eax
+
+ mov [edx].PsSpecialRegisters.SrCr4, ecx
+
+ test _KeFeatureBits, KF_CR4
+ jz short @f
+
+.586p
+ mov eax, cr4
+ mov [edx].PsSpecialRegisters.SrCr4, eax
+.486p
+
+@@:
+ mov eax,dr0
+ mov [edx].PsSpecialRegisters.SrKernelDr0,eax
+ mov eax,dr1
+ mov [edx].PsSpecialRegisters.SrKernelDr1,eax
+ mov eax,dr2
+ mov [edx].PsSpecialRegisters.SrKernelDr2,eax
+ mov eax,dr3
+ mov [edx].PsSpecialRegisters.SrKernelDr3,eax
+ mov eax,dr6
+ mov [edx].PsSpecialRegisters.SrKernelDr6,eax
+
+ mov eax,dr7
+ mov dr7,ecx
+ mov [edx].PsSpecialRegisters.SrKernelDr7,eax
+
+ sgdt fword ptr [edx].PsSpecialRegisters.SrGdtr
+ sidt fword ptr [edx].PsSpecialRegisters.SrIdtr
+
+ str word ptr [edx].PsSpecialRegisters.SrTr
+ sldt word ptr [edx].PsSpecialRegisters.SrLdtr
+
+ stdRET _KiSaveProcessorControlState
+
+stdENDP _KiSaveProcessorControlState
+
+ page ,132
+ subttl "Restore Processor State"
+;++
+;
+; KiRestoreProcessorState(
+; PKTRAP_FRAME TrapFrame,
+; PKEXCEPTION_FRAME ExceptionFrame
+; );
+;
+; Routine Description:
+;
+; This routine restores the processor state for the debugger. When
+; control returns from the debugger (UnFreezeExecution), this function
+; restores the entire processor state.
+;
+; Arguments:
+;
+; TrapFrame (esp+4) - Pointer to machine trap frame
+;
+; ExceptionFrame (esp+8) - Pointer to exception frame
+; (IGNORED on the x86!)
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiRestoreProcessorState ,2
+
+ mov eax, [esp+4] ; (eax) -> TrapFrame
+
+ mov edx, PCR[PcPrcb] ; (edx)->PrcbData
+ add edx, PbProcessorState ; (edx)->ProcessorState
+ push edx
+
+;
+; Copy the whole ContextFrame to TrapFrame
+;
+
+ lea ecx, [edx].PsContextFrame
+ mov edx, [edx].PsContextFrame.CsSegCs
+ and edx, MODE_MASK
+
+; edx - Previous mode
+; ecx - ContextFrame
+; 0 - ExceptionFrame == NULL
+; eax - TrapFrame
+ stdCall _KeContextToKframes, <eax,0,ecx,[ecx].CsContextFlags,edx>
+
+;
+; Save special registers for debugger
+;
+
+ ; TOS = PKPROCESSOR_STATE
+ call _KiRestoreProcessorControlState@4
+
+ stdRET _KiRestoreProcessorState
+
+stdENDP _KiRestoreProcessorState
+
+
+ page ,132
+ subttl "Restore Processor Control State"
+;++
+;
+; KiRestoreProcessorControlState(
+; PKPROCESSOR_STATE ProcessorState
+; );
+;
+; Routine Description:
+;
+; This routine restores the control subset of the processor state.
+; (Restores the same information as KiRestoreProcessorState EXCEPT that the
+; data in the TrapFrame/ExceptionFrame, i.e. the Context record, is NOT restored.)
+; Called by the debug subsystem and by KiRestoreProcessorState().
+;
+; Arguments:
+;
+; ProcessorState (esp+4) - Supplies a pointer to the processor state
+; save area (PKPROCESSOR_STATE).
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiRestoreProcessorControlState,1
+
+ mov edx, [esp+4] ; (edx)->ProcessorState
+
+;
+; Restore special registers for debugger
+;
+
+ mov eax, [edx].PsSpecialRegisters.SrCr0
+ mov cr0, eax
+ mov eax, [edx].PsSpecialRegisters.SrCr2
+ mov cr2, eax
+ mov eax, [edx].PsSpecialRegisters.SrCr3
+ mov cr3, eax
+
+ test _KeFeatureBits, KF_CR4
+ jz short @f
+
+.586p
+ mov eax, [edx].PsSpecialRegisters.SrCr4
+ mov cr4, eax
+.486p
+@@:
+ mov eax, [edx].PsSpecialRegisters.SrKernelDr0
+ mov dr0, eax
+ mov eax, [edx].PsSpecialRegisters.SrKernelDr1
+ mov dr1, eax
+ mov eax, [edx].PsSpecialRegisters.SrKernelDr2
+ mov dr2, eax
+ mov eax, [edx].PsSpecialRegisters.SrKernelDr3
+ mov dr3, eax
+ mov eax, [edx].PsSpecialRegisters.SrKernelDr6
+ mov dr6, eax
+ mov eax, [edx].PsSpecialRegisters.SrKernelDr7
+ mov dr7, eax
+
+ lgdt fword ptr [edx].PsSpecialRegisters.SrGdtr
+ lidt fword ptr [edx].PsSpecialRegisters.SrIdtr
+
+;
+; Force the TSS descriptor into a non-busy state, so we don't fault
+; when we load the TR.
+;
+
+ mov eax, [edx].PsSpecialRegisters.SrGdtr+2 ; (eax)->GDT base
+ xor ecx, ecx
+ mov cx, word ptr [edx].PsSpecialRegisters.SrTr
+ add eax, 5
+ add eax, ecx ; (eax)->TSS Desc. Byte
+ and byte ptr [eax],NOT 2
+ ltr word ptr [edx].PsSpecialRegisters.SrTr
+
+ lldt word ptr [edx].PsSpecialRegisters.SrLdtr
+
+ stdRET _KiRestoreProcessorControlState
+
+stdENDP _KiRestoreProcessorControlState
+
+_TEXT ENDS
+ END
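
The TSS fix-up just before the ltr above is worth spelling out; a minimal C sketch, with illustrative parameter names (only the descriptor layout, the type byte at offset 5 with busy flag 02h, is architectural):

    /* Clear the busy bit in the TSS descriptor so that reloading TR
       with ltr does not fault on an already-busy TSS.               */
    void ClearTssBusyBit(unsigned char *GdtBase, unsigned short TrSelector)
    {
        /* The selector's low 3 bits are TI/RPL; for the kernel TR they are
           zero, matching the straight add done in the assembly above.     */
        GdtBase[(TrSelector & ~7u) + 5] &= (unsigned char)~0x02;
    }
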
diff --git a/private/ntos/ke/i386/services.nap b/private/ntos/ke/i386/services.nap
new file mode 100644
index 000000000..1cfec241d
--- /dev/null
+++ b/private/ntos/ke/i386/services.nap
@@ -0,0 +1,123 @@
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; services.nap
+;
+; Abstract:
+;
+; This module implements the system service dispatch stub procedures.
+; It also creates a "profile" of each service by counting and
+; timing calls.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 6-Feb-1990
+; Russ Blake (russbl) 22-Apr-1991
+;
+; Environment:
+;
+; User or kernel mode.
+;
+; Revision History:
+;
+;--
+
+include ks386.inc
+
+.386
+STUBS_BEGIN1 macro t
+ TITLE t
+endm
+STUBS_BEGIN2 macro t
+endm
+STUBS_BEGIN3 macro t
+_TEXT SEGMENT DWORD USE32 PUBLIC 'CODE'
+endm
+STUBS_BEGIN4 macro t
+ ASSUME CS:FLAT
+endm
+STUBS_BEGIN5 macro t
+ align 4
+endm
+STUBS_BEGIN6 macro t
+endm
+STUBS_BEGIN7 macro t
+endm
+STUBS_BEGIN8 macro t
+endm
+
+STUBS_END macro t
+_TEXT ENDS
+ end
+endm
+
+SYSSTUBS_ENTRY1 macro ServiceNumber, Name
+ public _Zw&Name
+_Zw&Name proc near
+ mov eax, ServiceNumber ; (eax) = service number
+ lea edx, [esp]+4 ; (edx) -> arguments
+ INT 2Eh ; invoke system service
+ ret
+_Zw&Name endp
+endm
+
+SYSSTUBS_ENTRY2 macro ServiceNumber, Name
+endm
+SYSSTUBS_ENTRY3 macro ServiceNumber, Name
+endm
+SYSSTUBS_ENTRY4 macro ServiceNumber, Name
+endm
+SYSSTUBS_ENTRY5 macro ServiceNumber, Name
+endm
+SYSSTUBS_ENTRY6 macro ServiceNumber, Name
+endm
+SYSSTUBS_ENTRY7 macro ServiceNumber, Name
+endm
+SYSSTUBS_ENTRY8 macro ServiceNumber, Name
+endm
+
+
+USRSTUBS_ENTRY1 macro ServiceNumber, Name
+ public _Zw&Name, _Nt&Name
+_Zw&Name proc near
+_Nt&Name proc near
+
+ mov eax, ServiceNumber ; (eax) = service number
+ lea edx, [esp]+4 ; (edx) -> arguments
+
+
+ call _NapProfileDispatch ; invoke profiled system service
+
+ ret
+_Nt&Name endp
+_Zw&Name endp
+endm
+
+USRSTUBS_ENTRY2 macro ServiceNumber, Name
+endm
+USRSTUBS_ENTRY3 macro ServiceNumber, Name
+endm
+USRSTUBS_ENTRY4 macro ServiceNumber, Name
+endm
+USRSTUBS_ENTRY5 macro ServiceNumber, Name
+endm
+USRSTUBS_ENTRY6 macro ServiceNumber, Name
+endm
+USRSTUBS_ENTRY7 macro ServiceNumber, Name
+endm
+USRSTUBS_ENTRY8 macro ServiceNumber, Name
+endm
+
+ STUBS_BEGIN1 <"System Service Stub Procedures">
+ STUBS_BEGIN2 <"System Service Stub Procedures">
+ STUBS_BEGIN3 <"System Service Stub Procedures">
+ STUBS_BEGIN4 <"System Service Stub Procedures">
+ STUBS_BEGIN5 <"System Service Stub Procedures">
+ STUBS_BEGIN6 <"System Service Stub Procedures">
+ STUBS_BEGIN7 <"System Service Stub Procedures">
+ STUBS_BEGIN8 <"System Service Stub Procedures">
+
+EXTRN _NapProfileDispatch:NEAR
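
Conceptually, every profiled stub above loads a service number and an argument pointer and funnels through a single dispatcher that counts and times the call. A rough C sketch of such a dispatcher (all names and the timer source are assumptions, and the real stubs pass their operands in eax/edx rather than as C arguments):

    typedef long (*SERVICE_ROUTINE)(void *Arguments);

    typedef struct _SERVICE_PROFILE {
        SERVICE_ROUTINE Routine;              /* real service entry point */
        unsigned long Calls;                  /* number of invocations    */
        unsigned long long Ticks;             /* accumulated time         */
    } SERVICE_PROFILE;

    extern SERVICE_PROFILE NapServiceTable[];
    extern unsigned long long NapReadTimer(void);

    long NapProfileDispatch(unsigned long ServiceNumber, void *Arguments)
    {
        SERVICE_PROFILE *entry = &NapServiceTable[ServiceNumber];
        unsigned long long start = NapReadTimer();
        long status = entry->Routine(Arguments);

        entry->Calls += 1;                        /* count the call */
        entry->Ticks += NapReadTimer() - start;   /* and time it    */
        return status;
    }
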
diff --git a/private/ntos/ke/i386/services.stb b/private/ntos/ke/i386/services.stb
new file mode 100644
index 000000000..eae2aaa9e
--- /dev/null
+++ b/private/ntos/ke/i386/services.stb
@@ -0,0 +1,131 @@
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; sysstubs.asm
+;
+; Abstract:
+;
+; This module implements the system service dispatch stub procedures.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 6-Feb-1990
+;
+; Environment:
+;
+; User or kernel mode.
+;
+; Revision History:
+;
+;--
+
+include ks386.inc
+include callconv.inc
+
+.386
+STUBS_BEGIN1 macro t
+ TITLE t
+endm
+STUBS_BEGIN2 macro t
+endm
+STUBS_BEGIN3 macro t
+_TEXT SEGMENT DWORD PUBLIC 'CODE'
+endm
+STUBS_BEGIN4 macro t
+endm
+STUBS_BEGIN5 macro t
+ align 4
+endm
+STUBS_BEGIN6 macro t
+endm
+STUBS_BEGIN7 macro t
+endm
+STUBS_BEGIN8 macro t
+endm
+
+STUBS_END macro t
+_TEXT ENDS
+ end
+endm
+
+SYSSTUBS_ENTRY1 macro ServiceNumber, Name, NumArgs
+cPublicProc _Zw&Name,NumArgs
+.FPO ( 0, NumArgs, 0, 0, 0, 0 )
+IFIDN <Name>, <SetHighWaitLowThread>
+ int 2Bh
+ELSE
+IFIDN <Name>, <SetLowWaitHighThread>
+ int 2Ch
+ELSE
+ mov eax, ServiceNumber ; (eax) = service number
+ lea edx, [esp]+4 ; (edx) -> arguments
+ INT 2Eh ; invoke system service
+ENDIF
+ENDIF
+ stdRET _Zw&Name
+stdENDP _Zw&Name
+endm
+
+SYSSTUBS_ENTRY2 macro ServiceNumber, Name, NumArgs
+endm
+SYSSTUBS_ENTRY3 macro ServiceNumber, Name, NumArgs
+endm
+SYSSTUBS_ENTRY4 macro ServiceNumber, Name, NumArgs
+endm
+SYSSTUBS_ENTRY5 macro ServiceNumber, Name, NumArgs
+endm
+SYSSTUBS_ENTRY6 macro ServiceNumber, Name, NumArgs
+endm
+SYSSTUBS_ENTRY7 macro ServiceNumber, Name, NumArgs
+endm
+SYSSTUBS_ENTRY8 macro ServiceNumber, Name, NumArgs
+endm
+
+
+USRSTUBS_ENTRY1 macro ServiceNumber, Name, NumArgs
+local c
+cPublicProc _Zw&Name, NumArgs
+PUBLICP _Nt&Name, NumArgs
+LABELP _Nt&Name, NumArgs
+.FPO ( 0, NumArgs, 0, 0, 0, 0 )
+IFIDN <Name>, <SetHighWaitLowThread>
+ int 2Bh
+ELSE
+IFIDN <Name>, <SetLowWaitHighThread>
+ int 2Ch
+ELSE
+ mov eax, ServiceNumber ; (eax) = service number
+ lea edx, [esp]+4 ; (edx) -> arguments
+ INT 2Eh ; invoke system service
+ENDIF
+ENDIF
+ stdRET _Zw&Name
+stdENDP _Zw&Name
+endm
+
+USRSTUBS_ENTRY2 macro ServiceNumber, Name, NumArgs
+endm
+USRSTUBS_ENTRY3 macro ServiceNumber, Name, NumArgs
+endm
+USRSTUBS_ENTRY4 macro ServiceNumber, Name, NumArgs
+endm
+USRSTUBS_ENTRY5 macro ServiceNumber, Name, NumArgs
+endm
+USRSTUBS_ENTRY6 macro ServiceNumber, Name, NumArgs
+endm
+USRSTUBS_ENTRY7 macro ServiceNumber, Name, NumArgs
+endm
+USRSTUBS_ENTRY8 macro ServiceNumber, Name, NumArgs
+endm
+
+ STUBS_BEGIN1 <"System Service Stub Procedures">
+ STUBS_BEGIN2 <"System Service Stub Procedures">
+ STUBS_BEGIN3 <"System Service Stub Procedures">
+ STUBS_BEGIN4 <"System Service Stub Procedures">
+ STUBS_BEGIN5 <"System Service Stub Procedures">
+ STUBS_BEGIN6 <"System Service Stub Procedures">
+ STUBS_BEGIN7 <"System Service Stub Procedures">
+ STUBS_BEGIN8 <"System Service Stub Procedures">
diff --git a/private/ntos/ke/i386/sources b/private/ntos/ke/i386/sources
new file mode 100644
index 000000000..d8b2f8a9b
--- /dev/null
+++ b/private/ntos/ke/i386/sources
@@ -0,0 +1,47 @@
+i386_SOURCES=..\i386\mpipia.asm \
+ ..\i386\abiosa.asm \
+ ..\i386\abiosc.c \
+ ..\i386\allproc.c \
+ ..\i386\apcuser.c \
+ ..\i386\biosa.asm \
+ ..\i386\biosc.c \
+ ..\i386\callback.c \
+ ..\i386\callout.asm \
+ ..\i386\clockint.asm \
+ ..\i386\ctxswap.asm \
+ ..\i386\cpu.asm \
+ ..\i386\cyrix.c \
+ ..\i386\dmpstate.c \
+ ..\i386\emv86.asm \
+ ..\i386\emxcptn.asm \
+ ..\i386\exceptn.c \
+ ..\i386\flush.c \
+ ..\i386\flushtb.c \
+ ..\i386\gdtsup.c \
+ ..\i386\int.asm \
+ ..\i386\intobj.c \
+ ..\i386\intsup.asm \
+ ..\i386\iopm.c \
+ ..\i386\i386init.c \
+ ..\i386\i386pcr.asm \
+ ..\i386\instemul.asm \
+ ..\i386\kernlini.c \
+ ..\i386\largepag.c \
+ ..\i386\ldtsup.c \
+ ..\i386\ldtsup2.asm \
+ ..\i386\newsysbg.asm \
+ ..\i386\misc.c \
+ ..\i386\mtrr.c \
+ ..\i386\procstat.asm \
+ ..\i386\spindbg.asm \
+ ..\i386\spinlock.asm \
+ ..\i386\spininst.asm \
+ ..\i386\sysstubs.asm \
+ ..\i386\systable.asm \
+ ..\i386\threadbg.asm \
+ ..\i386\thredini.c \
+ ..\i386\timindex.asm \
+ ..\i386\trap.asm \
+ ..\i386\trapc.c \
+ ..\i386\vdm.c \
+ ..\i386\vdmint21.c
diff --git a/private/ntos/ke/i386/spindbg.asm b/private/ntos/ke/i386/spindbg.asm
new file mode 100644
index 000000000..bfc7784b4
--- /dev/null
+++ b/private/ntos/ke/i386/spindbg.asm
@@ -0,0 +1,162 @@
+if NT_INST
+else
+ TITLE "Spin Locks"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; spindbg.asm
+;
+; Abstract:
+;
+; Author:
+;
+; Bryan Willman (bryanwi) 13 Dec 89
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+ PAGE
+
+.386p
+
+include ks386.inc
+include callconv.inc ; calling convention macros
+include i386\kimacro.inc
+
+
+if DBG
+ EXTRNP _KeBugCheck,1
+ EXTRNP _KeGetCurrentIrql,0,IMPORT
+ifdef DBGMP
+ EXTRNP _KiPollDebugger,0
+endif
+ extrn _KeTickCount:DWORD
+ extrn _KiSpinlockTimeout:DWORD
+endif
+
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+;++
+;
+; VOID
+; Kii386SpinOnSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; IN ULONG Flag
+; )
+;
+; Routine Description:
+;
+; This function is called on a debug build to spin on a spinlock.
+; It is invoked by the DEBUG version of the SPIN_ON_SPINLOCK macro.
+;
+; Warning:
+;
+; Not called with C calling conventions.
+; Does not destroy any registers.
+;
+;--
+
+cPublicProc Kii386SpinOnSpinLock,2
+
+if DBG
+cPublicFpo 2,2
+ push eax
+ push ebx
+
+ mov eax, [esp+12] ; (eax) = LockAddress
+
+ mov ebx, PCR[PcPrcbData.PbCurrentThread]
+ or ebx, 1 ; or on busy bit
+ cmp ebx, [eax] ; current thread the owner?
+ je short ssl_sameid ; Yes, go abort
+
+ssl_10:
+ mov ebx, _KeTickCount ; Current time
+ add ebx, _KiSpinlockTimeout ; wait n ticks
+
+ifdef DBGMP
+ test byte ptr [esp+16], 2 ; poll debugger while waiting?
+ jnz short ssl_30
+endif
+
+;
+; Spin while watching KeTickCount
+;
+
+ssl_20: cmp _KeTickCount, ebx ; check current time
+ jnc short ssl_timeout ; NC, too many ticks have gone by
+
+ test dword ptr [eax], 1
+ jnz short ssl_20
+
+ssl_exit:
+ pop ebx ; Spinlock is not busy, return
+ pop eax
+ stdRET Kii386SpinOnSpinLock
+
+ifdef DBGMP
+;
+; Spin while watching KeTickCount & poll debugger
+;
+
+ssl_30: cmp _KeTickCount, ebx ; check current time
+ jnc short ssl_timeout ; overflowed
+
+ stdCall _KiPollDebugger
+
+ test dword ptr [eax], 1
+ jnz short ssl_30
+
+ pop ebx ; Spinlock is not busy, return
+ pop eax
+ stdRET Kii386SpinOnSpinLock
+endif
+
+;
+; Out of line exception conditions
+;
+
+ssl_sameid:
+ test byte ptr [esp+16], 1 ; ID check enabled?
+ jz short ssl_10 ; no, continue
+
+ stdCall _KeBugCheck,<eax> ; recursed on lock, abort
+
+ssl_timeout:
+ test byte ptr [esp+16], 4 ; Timeout check enabled?
+ jz short ssl_10 ; no, continue
+
+ stdCall _KeGetCurrentIrql ; Check to see what level we're spinning at
+ cmp al, DISPATCH_LEVEL
+ mov eax, [esp+12] ; restore eax
+ jc short ssl_10 ; if < dispatch_level, don't timeout
+
+ test dword ptr [eax], 1 ; Check to see if spinlock was freed
+ jz short ssl_exit
+
+ public SpinLockSpinningForTooLong
+SpinLockSpinningForTooLong:
+
+ int 3 ; Stop here
+ jmp short ssl_10 ; re-wait
+
+else ; DBG
+ stdRET Kii386SpinOnSpinLock
+endif
+stdENDP Kii386SpinOnSpinLock,2
+
+_TEXT$00 ends
+
+endif ; NT_INST
+ end
+
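
The checked-build spin loop above reduces to the following C sketch: bugcheck on a recursive acquire, otherwise spin and break into the debugger if the lock stays busy too long at DISPATCH_LEVEL or above (helper names are illustrative and the tick-count wraparound handling is simplified):

    typedef unsigned long ULONG;

    extern volatile ULONG KeTickCount;
    extern ULONG KiSpinlockTimeout;
    extern void *KeGetCurrentThread(void);
    extern unsigned char KeGetCurrentIrql(void);
    extern void KeBugCheck(ULONG Code);
    extern void DbgBreakPoint(void);

    #define DISPATCH_LEVEL 2

    void Kii386SpinOnSpinLock(volatile ULONG *SpinLock, ULONG Flags)
    {
        ULONG deadline;

        if ((Flags & 1) && *SpinLock == ((ULONG)KeGetCurrentThread() | 1)) {
            KeBugCheck((ULONG)SpinLock);        /* recursive acquisition */
        }

        deadline = KeTickCount + KiSpinlockTimeout;
        while (*SpinLock & 1) {                 /* spin while the lock looks busy */
            if ((Flags & 4) &&
                KeTickCount >= deadline &&
                KeGetCurrentIrql() >= DISPATCH_LEVEL) {
                DbgBreakPoint();                /* SpinLockSpinningForTooLong */
                deadline = KeTickCount + KiSpinlockTimeout;
            }
        }
        /* Lock appears free; the caller's ACQUIRE_SPINLOCK retries the
           interlocked set and comes back here if it loses the race.   */
    }
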
diff --git a/private/ntos/ke/i386/spininst.asm b/private/ntos/ke/i386/spininst.asm
new file mode 100644
index 000000000..d364c35e5
--- /dev/null
+++ b/private/ntos/ke/i386/spininst.asm
@@ -0,0 +1,943 @@
+if NT_INST
+ TITLE "Spin Locks"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; spininst.asm
+;
+; Abstract:
+;
+; This module implements the instrumentation versions of the routines
+; for acquiring and releasing spin locks.
+;
+; Author:
+;
+; Ken Reneris
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;--
+
+ PAGE
+
+.386p
+
+include ks386.inc
+include callconv.inc ; calling convention macros
+include i386\kimacro.inc
+include mac386.inc
+
+ EXTRNP _KeRaiseIrql,2,IMPORT
+ EXTRNP _KeLowerIrql,1,IMPORT
+ EXTRNP _KeBugCheckEx,5
+
+ifdef NT_UP
+ .err SpinLock instrumentation requires MP build
+endif
+
+s_SpinLock struc
+ SpinLock dd ? ; Back pointer to spinlock
+ InitAddr dd ? ; Address of KeInitializeSpinLock caller
+ LockValue db ? ; Actual lock variable
+ LockFlags db ? ; Various flags
+ dw ?
+ NoAcquires dd ? ; # of times acquired
+ NoCollides dd ? ; # of times busy on acquire attempt
+ TotalSpinHigh dd ? ; number of spins spent waiting on this spinlock
+ TotalSpinLow dd ?
+ HighestSpin dd ? ; max spin ever waited for on this spinlock
+s_SpinLock ends
+
+LOCK_LAZYINIT equ 1h
+LOCK_NOTTRACED equ 2h
+
+
+
+_DATA SEGMENT DWORD PUBLIC 'DATA'
+
+MAXSPINLOCKS equ 1000h
+SYSTEM_ADDR equ 80000000h
+
+ public _KiNoOfSpinLocks, _KiSpinLockBogus, _KiSpinLockArray
+ public _KiSpinLockFreeList
+_KiNoOfSpinLocks dd 1 ; skip first one
+_KiSpinLockBogus dd 0
+_KiSpinLockLock dd 0
+
+_KiSpinLockArray db ((size s_SpinLock) * MAXSPINLOCKS) dup (0)
+
+_KiSpinLockFreeList dd 0
+
+_DATA ends
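
For readability, the s_SpinLock record above corresponds to the following C layout (32 bytes, which is what the .errnz check and the shl-by-5 indexing in KeInitializeSpinLock depend on):

    typedef struct _S_SPINLOCK {
        unsigned long  SpinLock;        /* back pointer to the caller's KSPIN_LOCK            */
        unsigned long  InitAddr;        /* return address of the KeInitializeSpinLock caller  */
        unsigned char  LockValue;       /* the actual lock byte                               */
        unsigned char  LockFlags;       /* LOCK_LAZYINIT / LOCK_NOTTRACED                     */
        unsigned short Reserved;        /* pad to a dword boundary                            */
        unsigned long  NoAcquires;      /* number of successful acquisitions                  */
        unsigned long  NoCollides;      /* acquire attempts that found the lock busy          */
        unsigned long  TotalSpinHigh;   /* 64-bit spin-wait total, high dword                 */
        unsigned long  TotalSpinLow;    /* 64-bit spin-wait total, low dword                  */
        unsigned long  HighestSpin;     /* longest single wait observed                       */
    } S_SPINLOCK;
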
+
+
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ PAGE
+ SUBTTL "Acquire Kernel Spin Lock"
+;++
+;
+; VOID
+; KeInitializeSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function initializes a SpinLock
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+cPublicProc _KeInitializeSpinLock ,1
+ pushfd
+ cli
+@@: lock bts _KiSpinLockLock, 0
+ jc short @b
+
+ mov eax, _KiSpinLockFreeList
+ or eax, eax
+ jz short isl10
+
+ mov ecx, [eax].InitAddr
+ mov _KiSpinLockFreeList, ecx
+ jmp short isl20
+
+isl10:
+ mov eax, _KiNoOfSpinLocks
+ cmp eax, MAXSPINLOCKS
+ jnc isl_overflow
+
+ inc _KiNoOfSpinLocks
+
+.errnz (size s_SpinLock - (8*4))
+ shl eax, 5
+ add eax, offset _KiSpinLockArray
+
+isl20:
+; (eax) = address of spinlock structure
+ mov ecx, [esp+8]
+ mov [ecx], eax
+
+ mov [eax].SpinLock, ecx
+ mov ecx, [esp+4]
+ mov [eax].InitAddr, ecx
+
+ mov _KiSpinLockLock, 0
+ popfd
+
+ stdRET _KeInitializeSpinLock
+
+isl_overflow:
+ ; Just use non-tracing locks from now on
+ mov eax, [esp+4]
+ mov dword ptr [eax], LOCK_NOTTRACED
+ popfd
+ stdRET _KeInitializeSpinLock
+
+stdENDP _KeInitializeSpinLock
+
+;++
+; VOID
+; SpinLockLazyInit (
+; IN PKSPIN_LOCK SpinLock,
+; )
+;
+; Routine Description:
+;
+; Used internally to initialize a spinlock which is being used without
+; first being initialized (bad! bad!)
+;
+;--
+
+cPublicProc SpinLockLazyInit,1
+ push eax
+ mov eax, [esp+8] ; Get SpinLock addr
+ test dword ptr [eax], SYSTEM_ADDR
+ jnz slz_10
+
+ push ecx
+ push edx
+ inc _KiSpinLockBogus
+ stdCall _KeInitializeSpinLock, <eax>
+ pop edx
+ pop ecx
+
+ mov eax, [esp+8] ; Get SpinLock addr
+ mov eax, [eax]
+ or [eax].LockFlags, LOCK_LAZYINIT
+ pop eax
+ stdRet SpinLockLazyInit
+
+slz_10:
+ stdCall _KeBugCheckEx,<SPIN_LOCK_INIT_FAILURE,eax,0,0,0>
+
+stdENDP SpinLockLazyInit
+
+;++
+; VOID
+; SpinLockInit (VOID)
+;
+cPublicProc SpinLockInit,0
+ pushad
+ pushf
+ cli
+
+ mov ecx, MAXSPINLOCKS-1
+ mov eax, offset FLAT:_KiSpinLockArray
+ xor edx, edx
+
+@@: mov [eax].NoAcquires, edx
+ mov [eax].NoCollides, edx
+ mov [eax].TotalSpinHigh, edx
+ mov [eax].TotalSpinLow, edx
+ mov [eax].HighestSpin, edx
+
+ add eax, size s_SpinLock
+ dec ecx
+ jnz short @b
+
+ popf
+ popad
+@@: int 3
+ jmp short @b
+
+stdENDP SpinLockInit
+
+
+
+;++
+;
+; VOID
+; KeFreeSpinLock (
+; )
+;
+; Routine Description:
+; Used in the instrumentation build to allow spinlocks to be
+; de-allocated if needed.
+;
+;--
+
+cPublicProc _KeFreeSpinLock,1
+ pushfd
+ cli
+@@: lock bts _KiSpinLockLock, 0
+ jc short @b
+
+ mov eax, [esp+8]
+ mov edx, [eax]
+ test edx, SYSTEM_ADDR
+ jz short @f
+
+ mov dword ptr [eax], 0
+
+;
+; Accumulate the old SpinLock's totals into the misc bucket
+;
+ mov eax, [edx].NoAcquires
+ add _KiSpinLockArray.NoAcquires, eax
+
+ mov eax, [edx].NoCollides
+ add _KiSpinLockArray.NoCollides, eax
+
+ mov eax, [edx].TotalSpinLow
+ add _KiSpinLockArray.TotalSpinLow, eax
+ mov eax, [edx].TotalSpinHigh
+ adc _KiSpinLockArray.TotalSpinHigh, eax ; carry into the high dword
+
+ mov eax, [edx].HighestSpin
+ cmp _KiSpinLockArray.HighestSpin, eax
+ jnc @f
+ mov _KiSpinLockArray.HighestSpin, eax
+@@:
+ push edi
+ mov edi, edx
+ mov ecx, size s_SpinLock / 4
+ xor eax, eax
+ rep stosd
+ pop edi
+
+ mov ecx, _KiSpinLockFreeList
+ mov [edx].InitAddr, ecx
+ mov _KiSpinLockFreeList, edx
+
+@@:
+ mov _KiSpinLockLock, 0
+ popfd
+ stdRET _KeFreeSpinLock
+stdENDP _KeFreeSpinLock
+
+;++
+;
+; VOID
+; KeInitializeSpinLock2 (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function initializes a non-tracing SpinLock.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+cPublicProc _KeInitializeSpinLock2,1
+ mov eax, [esp+4]
+ mov dword ptr [eax], LOCK_NOTTRACED
+ stdRET _KeInitializeSpinLock2
+stdENDP _KeInitializeSpinLock2,1
+
+
+ PAGE
+ SUBTTL "Acquire Kernel Spin Lock"
+;++
+;
+; VOID
+; KeAcquireSpinLock (
+; IN PKSPIN_LOCK SpinLock,
+; OUT PKIRQL OldIrql
+; )
+;
+; Routine Description:
+;
+; This function raises IRQL to DISPATCH_LEVEL and then acquires the
+; kernel spin lock.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+; OldIrql (TOS+8) - Supplies a pointer to where the old irql is returned.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+align 16
+cPublicProc _KeAcquireSpinLock ,2
+ sub esp, 4 ; Make room for OldIrql
+ stdCall _KeRaiseIrql, <DISPATCH_LEVEL, esp>
+
+sl00: mov eax,[esp+8] ; (eax) -> ptr -> spinlock
+ mov eax,[eax] ; (eax) -> Spin structure
+ test eax, SYSTEM_ADDR
+ jz short sl_bogus
+
+
+ xor ecx, ecx ; Initialize spin count
+ xor edx, edx ; Initialize collide count
+
+;
+; Attempt to obtain the lock
+;
+
+sl10: lock bts [eax].LockValue, 0
+ jc short sl30 ; If lock is busy, go wait
+
+;
+; SpinLock is now owned
+;
+ inc [eax].NoAcquires ; accumulate statistic
+ add [eax].NoCollides, edx
+ lock add [eax].TotalSpinLow, ecx
+ adc [eax].TotalSpinHigh, 0
+
+ cmp [eax].HighestSpin, ecx
+ jc short sl20
+
+sl15: mov eax, [esp+12] ; pOldIrql
+ pop ecx ; OldIrql
+ mov byte ptr [eax], cl
+
+ stdRet _KeAcquireSpinLock
+
+align 4
+sl20: mov [eax].HighestSpin, ecx ; set new highest spin mark
+ jmp short sl15
+
+sl30: inc edx ; one more collide
+
+;
+; The spin loop is kept small in order to get counts based on the processor stall count
+;
+align 4
+sl50: inc ecx ; one more spin
+ test [eax].LockValue, 1 ; is it free?
+ jnz short sl50 ; no, loop
+
+ jmp short sl10 ; Go try again
+
+;
+; SpinLock was bogus - it's either a lock being used without being
+; initialized, or it's a lock we don't care to trace
+;
+
+sl_bogus:
+ mov eax, [esp+8]
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz short sl_lazyinit
+
+sl60: lock bts dword ptr [eax], 0 ; attempt to acquire non-traced lock
+ jnc short sl15 ; if got it, return
+
+ xor ecx, ecx
+sl65: inc ecx
+ test dword ptr [eax], 1 ; wait for lock to be un-busy
+ jnz short sl65
+
+ lock add _KiSpinLockArray.TotalSpinLow, ecx
+ adc _KiSpinLockArray.TotalSpinHigh, 0
+ jmp short sl60
+
+;
+; Someone is using a lock which was not properly initialized, go do it now
+;
+
+sl_lazyinit:
+ stdCall SpinLockLazyInit,<eax>
+ jmp short sl00
+
+stdENDP _KeAcquireSpinLock
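
The statistics bookkeeping in the acquire path above amounts to the following C sketch (it assumes the S_SPINLOCK layout shown earlier and a hypothetical lock-bts style primitive; the interlocking of the counters themselves is simplified):

    extern int TryToSetBusyBit(volatile unsigned char *LockValue);  /* returns the previous bit */

    void InstAcquireSpinLock(S_SPINLOCK *Lock)
    {
        unsigned long spins = 0, collides = 0;

        while (TryToSetBusyBit(&Lock->LockValue)) {
            collides += 1;                                   /* found the lock busy            */
            while (*(volatile unsigned char *)&Lock->LockValue & 1) {
                spins += 1;                                  /* count iterations while waiting */
            }
        }

        Lock->NoAcquires += 1;
        Lock->NoCollides += collides;
        Lock->TotalSpinLow += spins;                         /* add/adc pair in the assembly   */
        if (Lock->TotalSpinLow < spins) {
            Lock->TotalSpinHigh += 1;                        /* propagate the carry            */
        }
        if (spins > Lock->HighestSpin) {
            Lock->HighestSpin = spins;                       /* record the worst wait          */
        }
    }
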
+
+
+ PAGE
+ SUBTTL "Release Kernel Spin Lock"
+;++
+;
+; VOID
+; KeReleaseSpinLock (
+; IN PKSPIN_LOCK SpinLock,
+; IN KIRQL NewIrql
+; )
+;
+; Routine Description:
+;
+; This function releases a kernel spin lock and lowers to the new irql
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to an executive spin lock.
+; NewIrql (TOS+8) - New irql value to set
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+align 16
+cPublicProc _KeReleaseSpinLock ,2
+
+ mov eax,[esp+4] ; (eax) -> ptr -> spinlock
+ mov eax,[eax] ; SpinLock structure
+ test eax, SYSTEM_ADDR
+ jz short rsl_bogus
+
+ mov [eax].LockValue, 0 ; clear busy bit
+
+rsl10: pop eax ; (eax) = ret. address
+ mov [esp],eax ; set stack so we can jump directly
+ jmp _KeLowerIrql@4 ; to KeLowerIrql
+
+rsl_bogus:
+ mov eax, [esp+4]
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz short rsl_lazyinit
+
+ btr dword ptr [eax], 0 ; clear lock bit on non-tracing lock
+ jmp short rsl10
+
+rsl_lazyinit: ; go initialize lock now
+ stdCall SpinLockLazyInit, <eax>
+ jmp short _KeReleaseSpinLock
+stdENDP _KeReleaseSpinLock
+
+ PAGE
+ SUBTTL "Ki Acquire Kernel Spin Lock"
+
+;++
+;
+; VOID
+; KiAcquireSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function acquires a kernel spin lock.
+;
+; N.B. This function assumes that the current IRQL is set properly.
+; It neither raises nor lowers IRQL.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+align 16
+cPublicProc _KiAcquireSpinLock ,1
+ mov eax,[esp+4] ; (eax) -> ptr -> spinlock
+ mov eax,[eax] ; (eax) -> Spin structure
+ test eax, SYSTEM_ADDR
+ jz short asl_bogus
+
+ xor ecx, ecx ; Initialize spin count
+ xor edx, edx ; Initialize collide count
+
+;
+; Attempt to obtain the lock
+;
+
+asl10: lock bts [eax].LockValue, 0
+ jc short asl40 ; If lock is busy, go wait
+
+;
+; SpinLock is owned
+;
+ inc [eax].NoAcquires ; accumulate statistics
+ add [eax].NoCollides, edx
+ lock add [eax].TotalSpinLow, ecx
+ adc [eax].TotalSpinHigh, 0
+
+ cmp [eax].HighestSpin, ecx
+ jc short asl20
+
+ stdRet _KiAcquireSpinLock
+
+align 4
+asl20: mov [eax].HighestSpin, ecx ; set new highest spin mark
+asl30: stdRet _KiAcquireSpinLock
+
+asl40: inc edx ; one more collide
+
+;
+; The spin loop is kept small in order to get counts based on the processor stall count
+;
+align 4
+asl50: inc ecx ; one more spin
+ test [eax].LockValue, 1 ; is it free?
+ jnz short asl50 ; no, loop
+ jmp short asl10 ; Go try again
+
+;
+; This is a non-initialized lock.
+;
+asl_bogus:
+ mov eax, [esp+4]
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz asl_lazyinit
+
+asl60: lock bts dword ptr [eax], 0 ; attempt to acquire non-traced lock
+ jnc short asl30 ; if got it, return
+
+ xor ecx, ecx
+asl65: inc ecx
+ test dword ptr [eax], 1 ; wait for lock to be un-busy
+ jnz short asl65
+
+ lock add _KiSpinLockArray.TotalSpinLow, ecx ; accumulate spin count
+ adc _KiSpinLockArray.TotalSpinHigh, 0
+ jmp short asl60
+
+asl_lazyinit:
+ stdCall SpinLockLazyInit, <eax>
+ jmp short _KiAcquireSpinLock
+stdENDP _KiAcquireSpinLock
+
+ PAGE
+ SUBTTL "Ki Release Kernel Spin Lock"
+;++
+;
+; VOID
+; KiReleaseSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function releases a kernel spin lock.
+;
+; N.B. This function assumes that the current IRQL is set properly.
+; It neither raises nor lowers IRQL.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to an executive spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+align 16
+cPublicProc _KiReleaseSpinLock ,1
+ mov eax,[esp+4] ; (eax) -> ptr -> spinlock
+ mov eax,[eax]
+ test eax, SYSTEM_ADDR
+ jz short irl_bogus
+
+ mov [eax].LockValue, 0
+ stdRET _KiReleaseSpinLock
+
+irl_bogus:
+ mov eax,[esp+4]
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz short irl_lazyinit
+
+ btr dword ptr [eax], 0 ; clear busy bit on non-traced lock
+ stdRET _KiReleaseSpinLock
+
+irl_lazyinit:
+ stdCall SpinLockLazyInit, <eax>
+ stdRet _KiReleaseSpinLock
+stdENDP _KiReleaseSpinLock
+
+ PAGE
+ SUBTTL "Try to acquire Kernel Spin Lock"
+;++
+;
+; BOOLEAN
+; KeTryToAcquireSpinLock (
+; IN PKSPIN_LOCK SpinLock,
+; OUT PKIRQL OldIrql
+; )
+;
+; Routine Description:
+;
+; This function attempts to acquire a kernel spin lock. If the
+; spinlock is busy, it is not acquired and FALSE is returned.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+; OldIrql (TOS+8) - Supplies the location to store the old irql.
+;
+; Return Value:
+; TRUE - Spinlock was acquired & irql was raised
+; FALSE - SpinLock was not acquired - irql is unchanged.
+;
+;--
+
+align dword
+cPublicProc _KeTryToAcquireSpinLock ,2
+
+;
+; This function is currently only used by the debugger, so we don't
+; keep stats on it
+;
+
+ mov eax,[esp+4] ; (eax) -> ptr -> spinlock
+ mov eax,[eax]
+ test eax, SYSTEM_ADDR
+ jz short tts_bogus
+
+
+;
+; First check the spinlock without asserting a lock
+;
+
+ test [eax].LockValue, 1
+ jnz short ttsl10
+
+;
+; Spinlock looks free, raise irql & try to acquire it
+;
+
+ mov eax, [esp+8] ; (eax) -> ptr to OldIrql
+
+;
+; raise to dispatch_level
+;
+
+ stdCall _KeRaiseIrql, <DISPATCH_LEVEL, eax>
+
+ mov eax,[esp+4] ; (eax) -> ptr -> spinlock
+ mov eax,[eax]
+ lock bts [eax].LockValue, 0
+ jc short ttsl20
+
+ mov eax, 1 ; spinlock was acquired, return TRUE
+ stdRET _KeTryToAcquireSpinLock
+
+ttsl10:
+ xor eax, eax ; return FALSE
+ stdRET _KeTryToAcquireSpinLock
+
+ttsl20:
+ mov eax, [esp+8] ; spinlock was busy, restore irql
+ stdCall _KeLowerIrql, <dword ptr [eax]>
+
+ xor eax, eax ; return FALSE
+ stdRET _KeTryToAcquireSpinLock
+
+tts_bogus:
+ mov eax,[esp+4]
+ test dword ptr [eax], LOCK_NOTTRACED
+ jnz short tts_bogus2
+
+ stdCall SpinLockLazyInit, <eax>
+ jmp short _KeTryToAcquireSpinLock
+
+tts_bogus2:
+ stdCall _KeBugCheckEx,<SPIN_LOCK_INIT_FAILURE,eax,0,0,0> ; Not supported for now
+
+stdENDP _KeTryToAcquireSpinLock
+
+ PAGE
+ SUBTTL "Ki Try to acquire Kernel Spin Lock"
+;++
+;
+; BOOLEAN
+; KiTryToAcquireSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function attempts to acquire a kernel spin lock. If the
+; spinlock is busy, it is not acquired and FALSE is returned.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+; TRUE - Spinlock was acquired
+; FALSE - SpinLock was not acquired
+;
+;--
+align dword
+cPublicProc _KiTryToAcquireSpinLock ,1
+;
+; This function is currently only used by the debugger, so we don't
+; keep stats on it
+;
+
+ mov eax,[esp+4] ; (eax) -> ptr -> spinlock
+ mov eax,[eax]
+ test eax, SYSTEM_ADDR
+ jz short atsl_bogus
+
+
+;
+; First check the spinlock without asserting a lock
+;
+
+ test [eax].LockValue, 1
+ jnz short atsl10
+
+;
+ lock bts [eax].LockValue, 0
+ jc short atsl10
+
+atsl05:
+ mov eax, 1 ; spinlock was acquired, return TRUE
+ stdRET _KiTryToAcquireSpinLock
+
+atsl10:
+ xor eax, eax ; return FALSE
+ stdRET _KiTryToAcquireSpinLock
+
+atsl_bogus:
+ mov eax,[esp+4]
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz short atsl_lazyinit
+
+ test dword ptr [eax], 1
+ jnz short atsl10
+
+ lock bts dword ptr [eax], 0
+ jnc short atsl05
+ jmp short atsl10
+
+atsl_lazyinit:
+ stdCall SpinLockLazyInit, <eax>
+ jmp short _KiTryToAcquireSpinLock
+
+stdENDP _KiTryToAcquireSpinLock
+
+
+;++
+;
+; KiInst_AcquireSpinLock
+;
+; Routine Description:
+; The NT_INST version of the macro ACQUIRE_SPINLOCK.
+; The macro thunks to this function so stats can be kept
+;
+; Arguments:
+; (eax) - SpinLock to acquire
+;
+; Return value:
+; CY - SpinLock was not acquired
+; NC - SpinLock was acquired
+;
+;--
+align dword
+cPublicProc KiInst_AcquireSpinLock, 0
+ test dword ptr [eax], SYSTEM_ADDR
+ jz short iasl_bogus
+
+ mov eax, [eax] ; Get SpinLock structure
+ lock bts [eax].LockValue, 0
+ jc short iasl_10 ; was busy, return CY
+
+ inc [eax].NoAcquires
+ mov eax, [eax].SpinLock
+ stdRET KiInst_AcquireSpinLock
+
+iasl_10:
+ inc [eax].NoCollides
+ mov eax, [eax].SpinLock
+ stdRET KiInst_AcquireSpinLock
+
+iasl_bogus:
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz short iasl_lazyinit
+
+ lock bts dword ptr [eax], 0
+ stdRET KiInst_AcquireSpinLock
+
+iasl_lazyinit:
+ stdCall SpinLockLazyInit, <eax>
+ jmp short KiInst_AcquireSpinLock
+
+stdENDP KiInst_AcquireSpinLock
+
+
+;++
+;
+; KiInst_SpinOnSpinLock
+;
+; Routine Description:
+; The NT_INST version of the macro SPIN_ON_SPINLOCK.
+; The macro thunks to this function so stats can be kept
+;
+; Arguments:
+; (eax) - SpinLock to acquire
+;
+; Return value:
+; Returns when spinlock appears to be free
+;
+;--
+align dword
+cPublicProc KiInst_SpinOnSpinLock, 0
+ test dword ptr [eax], SYSTEM_ADDR
+ jz short issl_bogus
+
+ push ecx
+ mov eax, [eax] ; Get SpinLock structure
+ xor ecx, ecx ; initialize spincount
+
+align 4
+issl10: inc ecx ; one more spin
+ test [eax].LockValue, 1 ; is it free?
+ jnz short issl10 ; no, loop
+
+ lock add [eax].TotalSpinLow, ecx ; accumulate spin
+ adc [eax].TotalSpinHigh, 0
+
+ cmp [eax].HighestSpin, ecx
+ jc short issl20
+
+ mov eax, [eax].SpinLock ; restore eax
+ pop ecx
+ stdRet KiInst_SpinOnSpinLock
+
+issl20:
+ mov [eax].HighestSpin, ecx ; set new highest spin mark
+ mov eax, [eax].SpinLock ; restore eax
+ pop ecx
+ stdRet KiInst_SpinOnSpinLock
+
+issl_bogus:
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz short issl_lazyinit
+
+ push ecx
+ xor ecx, ecx
+
+issl30: inc ecx
+ test dword ptr [eax], 1
+ jnz short issl30
+
+ lock add _KiSpinLockArray.TotalSpinLow, ecx
+ lock adc _KiSpinLockArray.TotalSpinHigh, 0
+ pop ecx
+
+ stdRet KiInst_SpinOnSpinLock
+
+issl_lazyinit:
+ stdCall SpinLockLazyInit, <eax>
+ stdRet KiInst_SpinOnSpinLock
+
+
+stdENDP KiInst_SpinOnSpinLock
+
+
+;++
+;
+; KiInst_ReleaseSpinLock
+;
+; Routine Description:
+; The NT_INST version of the macro ACQUIRE_SPINLOCK.
+; The macro thunks to this function so stats can be kept
+;
+; Arguments:
+; (eax) - SpinLock to acquire
+;
+; Return value:
+;
+;--
+align dword
+cPublicProc KiInst_ReleaseSpinLock, 0
+ test dword ptr [eax], SYSTEM_ADDR
+ jz short rssl_bogus
+
+ mov eax, [eax] ; Get SpinLock structure
+ mov [eax].LockValue, 0 ; Free it
+ mov eax, [eax].SpinLock ; Restore eax
+ stdRET KiInst_ReleaseSpinLock
+
+rssl_bogus:
+ test dword ptr [eax], LOCK_NOTTRACED
+ jz short rssl_lazyinit
+
+ btr dword ptr [eax], 0
+
+rssl_lazyinit:
+ stdCall SpinLockLazyInit, <eax>
+ stdRET KiInst_ReleaseSpinLock
+stdENDP KiInst_ReleaseSpinLock
+
+_TEXT$00 ends
+
+endif
+
+ end
+
+
diff --git a/private/ntos/ke/i386/spinlock.asm b/private/ntos/ke/i386/spinlock.asm
new file mode 100644
index 000000000..110a4429f
--- /dev/null
+++ b/private/ntos/ke/i386/spinlock.asm
@@ -0,0 +1,466 @@
+if NT_INST
+else
+ TITLE "Spin Locks"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; spinlock.asm
+;
+; Abstract:
+;
+; This module implements the routines for acquiring and releasing
+; spin locks.
+;
+; Author:
+;
+; Bryan Willman (bryanwi) 13 Dec 89
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+; Ken Reneris (kenr) 22-Jan-1991
+; Removed KeAcquireSpinLock macros, and made functions
+;--
+
+ PAGE
+
+.386p
+
+include ks386.inc
+include callconv.inc ; calling convention macros
+include i386\kimacro.inc
+include mac386.inc
+
+ EXTRNP KfRaiseIrql,1,IMPORT,FASTCALL
+ EXTRNP KfLowerIrql,1,IMPORT,FASTCALL
+ EXTRNP _KeGetCurrentIrql,0,IMPORT
+ EXTRNP _KeBugCheck,1
+
+
+_TEXT$00 SEGMENT PARA PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ PAGE
+ SUBTTL "Acquire Kernel Spin Lock"
+;++
+;
+; VOID
+; KeInitializeSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function initializes a SpinLock
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+cPublicProc _KeInitializeSpinLock ,1
+cPublicFpo 1,0
+ mov eax, dword ptr [esp+4]
+ mov dword ptr [eax], 0
+ stdRET _KeInitializeSpinLock
+stdENDP _KeInitializeSpinLock
+
+
+
+ PAGE
+ SUBTTL "Ke Acquire Spin Lock At DPC Level"
+
+;++
+;
+; VOID
+; KefAcquireSpinLockAtDpcLevel (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function acquires a kernel spin lock.
+;
+; N.B. This function assumes that the current IRQL is set properly.
+; It neither raises nor lowers IRQL.
+;
+; Arguments:
+;
+; (ecx) SpinLock - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+align 16
+cPublicFastCall KefAcquireSpinLockAtDpcLevel, 1
+cPublicFpo 0, 0
+if DBG
+ push ecx
+ stdCall _KeGetCurrentIrql
+ pop ecx
+
+ cmp al, DISPATCH_LEVEL
+ jne short asld50
+endif
+
+ifdef NT_UP
+ fstRET KefAcquireSpinLockAtDpcLevel
+else
+;
+; Attempt to assert the lock
+;
+
+asld10: ACQUIRE_SPINLOCK ecx,<short asld20>
+ fstRET KefAcquireSpinLockAtDpcLevel
+
+;
+; Lock is owned, spin till it looks free, then go get it again.
+;
+
+align 4
+asld20: SPIN_ON_SPINLOCK ecx,<short asld10>
+
+endif
+
+if DBG
+asld50: stdCall _KeBugCheck, <IRQL_NOT_GREATER_OR_EQUAL>
+endif
+
+fstENDP KefAcquireSpinLockAtDpcLevel
+
+
+;++
+;
+; VOID
+; KeAcquireSpinLockAtDpcLevel (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; Thunk for standard call callers
+;
+;--
+
+cPublicProc _KeAcquireSpinLockAtDpcLevel, 1
+cPublicFpo 1,0
+
+ifndef NT_UP
+ mov ecx,[esp+4] ; SpinLock
+
+aslc10: ACQUIRE_SPINLOCK ecx,<short aslc20>
+ stdRET _KeAcquireSpinLockAtDpcLevel
+
+aslc20: SPIN_ON_SPINLOCK ecx,<short aslc10>
+endif
+ stdRET _KeAcquireSpinLockAtDpcLevel
+stdENDP _KeAcquireSpinLockAtDpcLevel
+
+
+ PAGE
+ SUBTTL "Ke Release Spin Lock From Dpc Level"
+;++
+;
+; VOID
+; KefReleaseSpinLockFromDpcLevel (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function releases a kernel spin lock.
+;
+; N.B. This function assumes that the current IRQL is set properly.
+; It neither raises nor lowers IRQL.
+;
+; Arguments:
+;
+; (ecx) SpinLock - Supplies a pointer to an executive spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+align 16
+cPublicFastCall KefReleaseSpinLockFromDpcLevel ,1
+cPublicFpo 0,0
+ifndef NT_UP
+ RELEASE_SPINLOCK ecx
+endif
+ fstRET KefReleaseSpinLockFromDpcLevel
+
+fstENDP KefReleaseSpinLockFromDpcLevel
+
+;++
+;
+; VOID
+; KeReleaseSpinLockFromDpcLevel (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; Thunk for standard call callers
+;
+;--
+
+cPublicProc _KeReleaseSpinLockFromDpcLevel, 1
+cPublicFpo 1,0
+ifndef NT_UP
+ mov ecx, [esp+4] ; (ecx) = SpinLock
+ RELEASE_SPINLOCK ecx
+endif
+ stdRET _KeReleaseSpinLockFromDpcLevel
+stdENDP _KeReleaseSpinLockFromDpcLevel
+
+
+
+ PAGE
+ SUBTTL "Ki Acquire Kernel Spin Lock"
+
+;++
+;
+; VOID
+; FASTCALL
+; KiAcquireSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function acquires a kernel spin lock.
+;
+; N.B. This function assumes that the current IRQL is set properly.
+; It neither raises nor lowers IRQL.
+;
+; Arguments:
+;
+; (ecx) SpinLock - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+align 16
+cPublicFastCall KiAcquireSpinLock ,1
+cPublicFpo 0,0
+ifndef NT_UP
+
+;
+; Attempt to assert the lock
+;
+
+asl10: ACQUIRE_SPINLOCK ecx,<short asl20>
+ fstRET KiAcquireSpinLock
+
+;
+; Lock is owned, spin till it looks free, then go get it again.
+;
+
+align 4
+asl20: SPIN_ON_SPINLOCK ecx,<short asl10>
+
+else
+ fstRET KiAcquireSpinLock
+endif
+
+fstENDP KiAcquireSpinLock
+
+ PAGE
+ SUBTTL "Ki Release Kernel Spin Lock"
+;++
+;
+; VOID
+; FASTCALL
+; KiReleaseSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function releases a kernel spin lock.
+;
+; N.B. This function assumes that the current IRQL is set properly.
+; It neither raises nor lowers IRQL.
+;
+; Arguments:
+;
+; (ecx) SpinLock - Supplies a pointer to an executive spin lock.
+;
+; Return Value:
+;
+; None.
+;
+;--
+align 16
+cPublicFastCall KiReleaseSpinLock ,1
+cPublicFpo 0,0
+ifndef NT_UP
+
+ RELEASE_SPINLOCK ecx
+
+endif
+ fstRET KiReleaseSpinLock
+
+fstENDP KiReleaseSpinLock
+
+ PAGE
+ SUBTTL "Try to acquire Kernel Spin Lock"
+
+;++
+;
+; BOOLEAN
+; KeTryToAcquireSpinLock (
+; IN PKSPIN_LOCK SpinLock,
+; OUT PKIRQL OldIrql
+; )
+;
+; Routine Description:
+;
+; This function attempts to acquire a kernel spin lock. If the
+; spinlock is busy, it is not acquired and FALSE is returned.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+; OldIrql (TOS+8) - Supplies the location to store the old irql.
+;
+; Return Value:
+; TRUE - Spinlock was acquired & irql was raised
+; FALSE - SpinLock was not acquired - irql is unchanged.
+;
+;--
+
+align dword
+cPublicProc _KeTryToAcquireSpinLock ,2
+cPublicFpo 2,0
+
+ifdef NT_UP
+; UP Version of KeTryToAcquireSpinLock
+
+ mov ecx, DISPATCH_LEVEL
+ fstCall KfRaiseIrql
+
+ mov ecx, [esp+8] ; (ecx) -> ptr to OldIrql
+ mov [ecx], al ; save OldIrql
+
+ mov eax, 1 ; Return TRUE
+ stdRET _KeTryToAcquireSpinLock
+
+else
+; MP Version of KeTryToAcquireSpinLock
+
+ mov edx,[esp+4] ; (edx) -> spinlock
+
+;
+; First check the spinlock without asserting a lock
+;
+
+ TEST_SPINLOCK edx,<short ttsl10>
+
+;
+; Spinlock looks free, raise irql & try to acquire it
+;
+
+;
+; raise to dispatch_level
+;
+
+ mov ecx, DISPATCH_LEVEL
+ fstCall KfRaiseIrql
+
+ mov edx, [esp+4] ; (edx) -> spinlock
+ mov ecx, [esp+8] ; (ecx) = Return OldIrql
+
+ ACQUIRE_SPINLOCK edx,<short ttsl20>
+
+ mov [ecx], al ; save OldIrql
+ mov eax, 1 ; spinlock was acquired, return TRUE
+
+ stdRET _KeTryToAcquireSpinLock
+
+ttsl10:
+ xor eax, eax ; return FALSE
+ stdRET _KeTryToAcquireSpinLock
+
+ttsl20:
+ mov cl, al ; (cl) = OldIrql
+ fstCall KfLowerIrql ; spinlock was busy, restore irql
+ xor eax, eax ; return FALSE
+ stdRET _KeTryToAcquireSpinLock
+endif
+
+stdENDP _KeTryToAcquireSpinLock
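
The MP try-acquire path above follows this shape in C (helper names are illustrative; the real code uses the fastcall KfRaiseIrql/KfLowerIrql imports and the ACQUIRE_SPINLOCK macro):

    typedef unsigned char KIRQL;
    typedef int BOOLEAN;

    extern KIRQL KfRaiseIrql(KIRQL NewIrql);
    extern void  KfLowerIrql(KIRQL OldIrql);
    extern int   TryToSetBusyBit(volatile unsigned long *SpinLock);  /* lock bts */

    #define DISPATCH_LEVEL 2
    #define TRUE  1
    #define FALSE 0

    BOOLEAN KeTryToAcquireSpinLock(volatile unsigned long *SpinLock, KIRQL *OldIrql)
    {
        KIRQL previous;

        if (*SpinLock & 1) {
            return FALSE;                  /* busy: fail without touching IRQL */
        }

        previous = KfRaiseIrql(DISPATCH_LEVEL);
        if (TryToSetBusyBit(SpinLock)) {
            KfLowerIrql(previous);         /* lost the race: restore IRQL      */
            return FALSE;
        }

        *OldIrql = previous;               /* acquired: hand back the old IRQL */
        return TRUE;
    }
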
+
+ PAGE
+ SUBTTL "Ki Try to acquire Kernel Spin Lock"
+;++
+;
+; BOOLEAN
+; KiTryToAcquireSpinLock (
+; IN PKSPIN_LOCK SpinLock
+; )
+;
+; Routine Description:
+;
+; This function attempts to acquire a kernel spin lock. If the
+; spinlock is busy, it is not acquired and FALSE is returned.
+;
+; Arguments:
+;
+; SpinLock (TOS+4) - Supplies a pointer to a kernel spin lock.
+;
+; Return Value:
+; TRUE - Spinlock was acquired
+; FALSE - SpinLock was not acquired
+;
+;--
+align dword
+cPublicProc _KiTryToAcquireSpinLock ,1
+cPublicFpo 1,0
+
+ifndef NT_UP
+ mov eax,[esp+4] ; (eax) -> spinlock
+
+;
+; First check the spinlock without asserting a lock
+;
+
+ TEST_SPINLOCK eax,<short atsl20>
+
+;
+; Spinlock looks free try to acquire it
+;
+
+ ACQUIRE_SPINLOCK eax,<short atsl20>
+endif
+ mov eax, 1 ; spinlock was acquired, return TRUE
+ stdRET _KiTryToAcquireSpinLock
+
+ifndef NT_UP
+atsl20:
+ xor eax, eax ; return FALSE
+ stdRET _KiTryToAcquireSpinLock
+endif
+stdENDP _KiTryToAcquireSpinLock
+
+
+_TEXT$00 ends
+
+endif ; NT_INST
+ end
diff --git a/private/ntos/ke/i386/table.stb b/private/ntos/ke/i386/table.stb
new file mode 100644
index 000000000..cc05775b9
--- /dev/null
+++ b/private/ntos/ke/i386/table.stb
@@ -0,0 +1,102 @@
+0 ; This is the number of in register arguments
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; systable.asm
+;
+; Abstract:
+;
+; This module implements the system service dispatch table.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 6-Feb-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+
+;
+; To add a system service simply add the name of the service to the below
+; table. If the system service has arguments, then immediately
+; follow the name of the service with a comma and following that the number
+; of bytes of in memory arguments, e.g. CreateObject,40.
+;
+
+;ifdef i386
+
+.386p
+include callconv.inc
+TABLE_BEGIN1 macro t
+ TITLE t
+endm
+TABLE_BEGIN2 macro t
+_DATA SEGMENT DWORD PUBLIC 'DATA'
+ ASSUME DS:FLAT
+endm
+TABLE_BEGIN3 macro t
+ align 4
+endm
+TABLE_BEGIN4 macro t
+ public _KiServiceTable
+_KiServiceTable label dword
+endm
+TABLE_BEGIN5 macro t
+endm
+TABLE_BEGIN6 macro t
+endm
+TABLE_BEGIN7 macro t
+endm
+TABLE_BEGIN8 macro t
+endm
+
+TABLE_ENTRY macro l,bias,numargs
+ Local Bytes
+
+ Bytes = numargs*4
+
+ EXTRNP _Nt&l,&numargs
+IFDEF STD_CALL
+ ComposeInst <dd offset FLAT:>,_Nt,l,<@>,%(Bytes)
+ELSE
+ dd offset FLAT:_Nt&l
+ENDIF
+endm
+
+TABLE_END macro n
+ public _KiServiceLimit
+_KiServiceLimit dd n+1
+endm
+
+ARGTBL_BEGIN macro
+ public _KiArgumentTable
+_KiArgumentTable label dword
+endm
+
+ARGTBL_ENTRY macro e0,e1,e2,e3,e4,e5,e6,e7
+ db e0,e1,e2,e3,e4,e5,e6,e7
+endm
+
+ARGTBL_END macro
+_DATA ENDS
+ end
+endm
+
+;endif
+
+ TABLE_BEGIN1 <"System Service Dispatch Table">
+ TABLE_BEGIN2 <"System Service Dispatch Table">
+ TABLE_BEGIN3 <"System Service Dispatch Table">
+ TABLE_BEGIN4 <"System Service Dispatch Table">
+ TABLE_BEGIN5 <"System Service Dispatch Table">
+ TABLE_BEGIN6 <"System Service Dispatch Table">
+ TABLE_BEGIN7 <"System Service Dispatch Table">
+ TABLE_BEGIN8 <"System Service Dispatch Table">
+
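
What the TABLE_* and ARGTBL_* macros ultimately emit is a table of service entry points, a limit, and a parallel byte table of in-memory argument sizes. A small C sketch (the two sample services are only illustrative, reusing the CreateObject,40 example from the comment above):

    typedef long (*PSERVICE)(void);

    extern long NtClose(void *Handle);
    extern long NtCreateObject(void *Parameters);     /* hypothetical example service */

    PSERVICE KiServiceTable[] = {
        (PSERVICE)NtClose,             /* service 0, 4 bytes of arguments  */
        (PSERVICE)NtCreateObject,      /* service 1, 40 bytes of arguments */
    };

    unsigned long KiServiceLimit =
        sizeof(KiServiceTable) / sizeof(KiServiceTable[0]);

    unsigned char KiArgumentTable[] = {
        4,                             /* NtClose        */
        40,                            /* NtCreateObject */
    };
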
diff --git a/private/ntos/ke/i386/threadbg.asm b/private/ntos/ke/i386/threadbg.asm
new file mode 100644
index 000000000..08631b86e
--- /dev/null
+++ b/private/ntos/ke/i386/threadbg.asm
@@ -0,0 +1,99 @@
+ title "Thread Startup"
+
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; threadbg.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to startup a thread in kernel
+; mode.
+;
+; Author:
+;
+; Bryan Willman (bryanwi) 22-Feb-1990, derived from DaveC's code.
+;
+; Environment:
+;
+; Kernel mode only, IRQL APC_LEVEL.
+;
+; Revision History:
+;
+;--
+
+.386p
+ .xlist
+include ks386.inc
+include i386\kimacro.inc
+include callconv.inc
+ .list
+
+ EXTRNP KfLowerIrql,1,IMPORT, FASTCALL
+ EXTRNP _KeBugCheck,1
+ extrn _KiServiceExit2:PROC
+
+ page ,132
+ subttl "Thread Startup"
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+;++
+;
+; Routine Description:
+;
+; This routine is called at thread startup. Its function is to call the
+; initial thread procedure. If control returns from the initial thread
+; procedure and a user mode context was established when the thread
+; was initialized, then the user mode context is restored and control
+; is transferred to user mode. Otherwise a bug check will occur.
+;
+;
+; Arguments:
+;
+; (TOS) = SystemRoutine - address of initial system routine.
+; (TOS+4) = StartRoutine - Initial thread routine.
+; (TOS+8) = StartContext - Context parm for initial thread routine.
+; (TOS+12) = UserContextFlag - 0 if no user context, !0 if there is one
+; (TOS+16) = Base of KTrapFrame if and only if there's a user context.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+cPublicProc _KiThreadStartup ,1
+
+ xor ebx,ebx ; clear registers
+ xor esi,esi ;
+ xor edi,edi ;
+ xor ebp,ebp ;
+ mov ecx, APC_LEVEL
+ fstCall KfLowerIrql ; KeLowerIrql(APC_LEVEL)
+
+ pop eax ; (eax)->SystemRoutine
+ call eax ; SystemRoutine(StartRoutine, StartContext)
+IFNDEF STD_CALL
+ add esp,8 ; Clear off args
+ENDIF
+
+ pop ecx ; (ecx) = UserContextFlag
+ or ecx, ecx
+ jz short kits10 ; No user context, go bugcheck
+
+ mov ebp,esp ; (bp) -> TrapFrame holding UserContext
+
+ jmp _KiServiceExit2
+
+kits10: stdCall _KeBugCheck, <NO_USER_MODE_CONTEXT>
+
+stdENDP _KiThreadStartup
+
+_TEXT$00 ends
+ end
+
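
In C-like pseudocode, the startup sequence above is (the argument list mirrors the stack that KiInitializeContextThread builds; the trap-frame exit is shown as a call even though the real code jumps into KiServiceExit2):

    typedef void (*PKSYSTEM_ROUTINE)(void (*StartRoutine)(void *), void *StartContext);

    extern void KfLowerIrql(unsigned char NewIrql);
    extern void KeBugCheck(unsigned long Code);
    extern void KiServiceExit2(void *TrapFrame);        /* does not return */

    #define APC_LEVEL            1
    #define NO_USER_MODE_CONTEXT 0x0000000E

    void KiThreadStartup(PKSYSTEM_ROUTINE SystemRoutine,
                         void (*StartRoutine)(void *),
                         void *StartContext,
                         unsigned long UserContextFlag,
                         void *TrapFrame)
    {
        KfLowerIrql(APC_LEVEL);                         /* drop from DISPATCH_LEVEL    */

        SystemRoutine(StartRoutine, StartContext);      /* returns only for threads
                                                           that have a user context    */

        if (UserContextFlag == 0) {
            KeBugCheck(NO_USER_MODE_CONTEXT);           /* nothing to return to        */
        }

        KiServiceExit2(TrapFrame);                      /* restore user-mode context   */
    }
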
diff --git a/private/ntos/ke/i386/thredini.c b/private/ntos/ke/i386/thredini.c
new file mode 100644
index 000000000..30dfc9f91
--- /dev/null
+++ b/private/ntos/ke/i386/thredini.c
@@ -0,0 +1,634 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ thredini.c
+
+Abstract:
+
+ This module implements the machine dependent function to set the initial
+ context and data alignment handling mode for a process or thread object.
+
+Author:
+
+ David N. Cutler (davec) 31-Mar-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ 3 April 90 bryan willman
+
+ This version ported to 386.
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macros are used to check that an input object is
+// really the proper type.
+//
+
+#define ASSERT_PROCESS(E) { \
+ ASSERT((E)->Header.Type == ProcessObject); \
+}
+
+#define ASSERT_THREAD(E) { \
+ ASSERT((E)->Header.Type == ThreadObject); \
+}
+
+//
+// Our notion of alignment is different, so force use of ours
+//
+#undef ALIGN_UP
+#undef ALIGN_DOWN
+#define ALIGN_DOWN(address,amt) ((ULONG)(address) & ~(( amt ) - 1))
+#define ALIGN_UP(address,amt) (ALIGN_DOWN( (address + (amt) - 1), (amt) ))
+
+//
+// The function prototype for the special APC we use to set the
+// hardware alignment state for a thread
+//
+
+VOID
+KepSetAlignmentSpecialApc(
+ IN PKAPC Apc,
+ IN PKNORMAL_ROUTINE *NormalRoutine,
+ IN PVOID *NormalContext,
+ IN PVOID *SystemArgument1,
+ IN PVOID *SystemArgument2
+ );
+
+
+VOID
+KiInitializeContextThread (
+ IN PKTHREAD Thread,
+ IN PKSYSTEM_ROUTINE SystemRoutine,
+ IN PKSTART_ROUTINE StartRoutine OPTIONAL,
+ IN PVOID StartContext OPTIONAL,
+ IN PCONTEXT ContextFrame OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the machine dependent context of a thread object.
+
+ N.B. This function does not check the accessibility of the context record.
+ It is assumed that the caller of this routine is either prepared to
+ handle access violations or has probed and copied the context record
+ as appropriate.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ SystemRoutine - Supplies a pointer to the system function that is to be
+ called when the thread is first scheduled for execution.
+
+ StartRoutine - Supplies an optional pointer to a function that is to be
+ called after the system has finished initializing the thread. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ StartContext - Supplies an optional pointer to an arbitrary data structure
+ which will be passed to the StartRoutine as a parameter. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ ContextFrame - Supplies an optional pointer to a context frame which contains
+ the initial user mode state of the thread. This parameter is specified
+ if the thread is a user thread and will execute in user mode. If this
+ parameter is not specified, then the Teb parameter is ignored.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PFLOATING_SAVE_AREA NpxFrame;
+ PKSWITCHFRAME SwitchFrame;
+ PKTRAP_FRAME TrFrame;
+ PULONG PSystemRoutine;
+ PULONG PStartRoutine;
+ PULONG PStartContext;
+ PULONG PUserContextFlag;
+ ULONG ContextFlags;
+ CONTEXT Context2;
+ PCONTEXT ContextFrame2 = NULL;
+
+ //
+ // If a context frame is specified, then initialize a trap frame and
+ // and an exception frame with the specified user mode context.
+ //
+
+ if (ARGUMENT_PRESENT(ContextFrame)) {
+
+ RtlMoveMemory(&Context2, ContextFrame, sizeof(CONTEXT));
+ ContextFrame2 = &Context2;
+ ContextFlags = CONTEXT_CONTROL;
+
+ //
+ // The 80387 save area is at the very base of the kernel stack.
+ //
+
+ NpxFrame = (PFLOATING_SAVE_AREA)(((ULONG)(Thread->InitialStack) -
+ sizeof(FLOATING_SAVE_AREA)));
+
+ //
+ // Load up an initial NPX state.
+ //
+
+ ContextFrame2->FloatSave.ControlWord = 0x27f; // like fpinit but 64bit mode
+ ContextFrame2->FloatSave.StatusWord = 0;
+ ContextFrame2->FloatSave.TagWord = 0xffff;
+ ContextFrame2->FloatSave.ErrorOffset = 0;
+ ContextFrame2->FloatSave.ErrorSelector = 0;
+ ContextFrame2->FloatSave.DataOffset = 0;
+ ContextFrame2->FloatSave.DataSelector = 0;
+
+
+ if (KeI386NpxPresent) {
+ ContextFrame2->FloatSave.Cr0NpxState = 0;
+ NpxFrame->Cr0NpxState = 0;
+ ContextFlags |= CONTEXT_FLOATING_POINT;
+
+ //
+ // The thread's NPX state is not in the coprocessor.
+ //
+
+ Thread->NpxState = NPX_STATE_NOT_LOADED;
+
+ } else {
+ NpxFrame->Cr0NpxState = CR0_EM;
+
+ //
+ // The thread's NPX state is not in the coprocessor.
+ // In the emulator case, do not set the CR0_MP bit, as the
+ // emulator may not want exceptions on FWAIT instructions.
+ //
+
+ Thread->NpxState = NPX_STATE_NOT_LOADED & ~CR0_MP;
+ }
+
+ //
+ // Force debug registers off. They won't work anyway from an
+ // initial frame, debuggers must set a hard breakpoint in the target
+ //
+
+ ContextFrame2->Dr0 = 0;
+ ContextFrame2->Dr1 = 0;
+ ContextFrame2->Dr2 = 0;
+ ContextFrame2->Dr3 = 0;
+ ContextFrame2->Dr6 = 0;
+ ContextFrame2->Dr7 = 0;
+ ContextFrame2->ContextFlags &= ~(CONTEXT_DEBUG_REGISTERS);
+#if 0
+ //
+ // If AutoAlignment is FALSE, we want to set the Alignment Check bit
+ // in Eflags, so we will get alignment faults.
+ //
+
+ if (Thread->AutoAlignment == FALSE) {
+ ContextFrame2->EFlags |= EFLAGS_ALIGN_CHECK;
+ }
+#endif
+ //
+ // Carve the trap frame out of the stack immediately below the NPX
+ // save area.
+ //
+
+ TrFrame = (PKTRAP_FRAME)(((ULONG)NpxFrame - KTRAP_FRAME_LENGTH));
+
+ // Space for arguments to KiThreadStartup. Order is important,
+ // Since args are passed on stack through KiThreadStartup to
+ // PStartRoutine with PStartContext as an argument.
+
+ PUserContextFlag = (PULONG)TrFrame - 1;
+ PStartContext = PUserContextFlag - 1;
+ PStartRoutine = PStartContext - 1;
+ PSystemRoutine = PStartRoutine - 1;
+
+ SwitchFrame = (PKSWITCHFRAME)((PUCHAR)PSystemRoutine -
+ sizeof(KSWITCHFRAME));
+
+ //
+ // Copy information from the specified context frame to the trap and
+ // exception frames.
+ //
+
+ KeContextToKframes(TrFrame, NULL, ContextFrame2,
+ ContextFrame2->ContextFlags | ContextFlags,
+ UserMode);
+
+ TrFrame->HardwareSegSs |= RPL_MASK;
+ TrFrame->SegDs |= RPL_MASK;
+ TrFrame->SegEs |= RPL_MASK;
+
+#if DBG
+ TrFrame->DbgArgMark = 0xBADB0D00;
+#endif
+
+ //
+ // Tell KiThreadStartup that a user context is present.
+ //
+
+ *PUserContextFlag = 1;
+
+
+ //
+ // Initialize the kernel mode ExceptionList pointer
+ //
+
+ TrFrame->ExceptionList = EXCEPTION_CHAIN_END;
+
+ //
+ // Initialize the saved previous processor mode.
+ //
+
+ TrFrame->PreviousPreviousMode = UserMode;
+
+ //
+ // Set the previous mode in thread object to user.
+ //
+
+ Thread->PreviousMode = UserMode;
+
+
+ } else {
+
+ //
+ // Dummy floating save area. Kernel threads don't have or use
+ // the floating point hardware - the dummy save area is there to make
+ // the stacks consistent.
+ //
+
+ NpxFrame = (PFLOATING_SAVE_AREA)(((ULONG)(Thread->InitialStack) -
+ sizeof(FLOATING_SAVE_AREA)));
+
+
+ //
+ // Load up an initial NPX state.
+ //
+
+ NpxFrame->ControlWord = 0x27f; // like fpinit but 64bit mode
+ NpxFrame->StatusWord = 0;
+ NpxFrame->TagWord = 0xffff;
+ NpxFrame->ErrorOffset = 0;
+ NpxFrame->ErrorSelector = 0;
+ NpxFrame->DataOffset = 0;
+ NpxFrame->DataSelector = 0;
+
+ NpxFrame->Cr0NpxState = 0;
+
+ //
+ // The thread's NPX state is not in the coprocessor.
+ //
+
+ Thread->NpxState = NPX_STATE_NOT_LOADED;
+
+ //
+ // Space for arguments to KiThreadStartup.
+ // Order of fields in the switchframe is important,
+ // Since args are passed on stack through KiThreadStartup to
+ // PStartRoutine with PStartContext as an argument.
+ //
+
+ PUserContextFlag = (PULONG)((ULONG)NpxFrame) - 1;
+
+ PStartContext = PUserContextFlag - 1;
+ PStartRoutine = PStartContext - 1;
+ PSystemRoutine = PStartRoutine - 1;
+
+ SwitchFrame = (PKSWITCHFRAME)((PUCHAR)PSystemRoutine -
+ sizeof(KSWITCHFRAME));
+
+
+ //
+ // Tell KiThreadStartup that a user context is NOT present.
+ //
+
+ *PUserContextFlag = 0;
+
+
+ //
+ // Set the previous mode in thread object to kernel.
+ //
+
+ Thread->PreviousMode = KernelMode;
+ }
+
+ //
+ // Set up thread start parameters.
+ // (UserContextFlag set above)
+ //
+
+ *PStartContext = (ULONG)StartContext;
+ *PStartRoutine = (ULONG)StartRoutine;
+ *PSystemRoutine = (ULONG)SystemRoutine;
+
+
+ //
+ // Set up switch frame. Assume the thread doesn't use the 80387;
+ // if it ever does (and there is one), these flags will get reset.
+ // Each thread starts with these same flags set, regardless of
+ // whether the hardware exists or not.
+ //
+
+ SwitchFrame->RetAddr = (ULONG)KiThreadStartup;
+
+ SwitchFrame->Eflags = EFLAGS_INTERRUPT_MASK;
+
+#if 0
+ //
+ // If AutoAlignment is FALSE, we want to set the Alignment Check bit
+ // in Eflags, so we will get alignment faults.
+ //
+
+ if (Thread->AutoAlignment == FALSE) {
+ SwitchFrame->Eflags |= EFLAGS_ALIGN_CHECK;
+ }
+#endif
+
+ SwitchFrame->ExceptionList = (ULONG)(EXCEPTION_CHAIN_END);
+
+ //
+ // Set the initial kernel stack pointer.
+ //
+
+ Thread->KernelStack = (PVOID)SwitchFrame;
+ return;
+}
+
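/*
 * For reference, the initial kernel stack built above looks like this,
 * from high addresses to low, for the user-context case (the kernel-thread
 * case simply omits the trap frame):
 *
 *     InitialStack -> +---------------------------+
 *                     | FLOATING_SAVE_AREA (NPX)  |
 *                     +---------------------------+
 *                     | KTRAP_FRAME               |  initial user-mode context
 *                     +---------------------------+
 *                     | UserContextFlag           |  arguments consumed on the
 *                     | StartContext              |  stack by KiThreadStartup
 *                     | StartRoutine              |
 *                     | SystemRoutine             |
 *                     +---------------------------+
 *     KernelStack  -> | KSWITCHFRAME              |  RetAddr = KiThreadStartup
 *                     +---------------------------+
 */
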
+BOOLEAN
+KeSetAutoAlignmentProcess (
+ IN PKPROCESS Process,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ process and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the process. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_PROCESS(Process);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Process->AutoAlignment;
+ Process->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
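+
+//
+// For illustration only (a minimal usage sketch, not part of this module):
+// a hypothetical caller that enables automatic alignment fixups for a
+// process and later restores the previous mode. "TargetProcess" is an
+// assumed, caller-supplied PKPROCESS.
+//
+//     BOOLEAN WasAutomatic;
+//
+//     WasAutomatic = KeSetAutoAlignmentProcess(TargetProcess, TRUE);
+//
+//     //
+//     // ... run work that may perform unaligned accesses ...
+//     //
+//
+//     KeSetAutoAlignmentProcess(TargetProcess, WasAutomatic);
+//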
+
+BOOLEAN
+KeSetAutoAlignmentThread (
+ IN PKTHREAD Thread,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ thread and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the specified thread. A value of TRUE causes
+ all data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Previous;
+ PKAPC Apc;
+ PKEVENT Event;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Thread->AutoAlignment;
+ Thread->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+#if 0
+ Apc = ExAllocatePool(NonPagedPoolMustSucceed, sizeof(KAPC));
+ Event = ExAllocatePool(NonPagedPoolMustSucceed, sizeof(KEVENT));
+
+ KeInitializeEvent(Event, NotificationEvent, FALSE);
+
+ if ( Thread == KeGetCurrentThread() ) {
+
+ Apc->SystemArgument1 = Thread;
+ Apc->SystemArgument2 = Event;
+
+ KeRaiseIrql(APC_LEVEL, &Irql);
+ KepSetAlignmentSpecialApc( Apc, NULL, NULL,
+ &Apc->SystemArgument1,
+ &Apc->SystemArgument2 );
+ KeLowerIrql(Irql);
+ } else {
+ KeInitializeApc( Apc,
+ Thread,
+ CurrentApcEnvironment,
+ KepSetAlignmentSpecialApc,
+ NULL,
+ NULL,
+ KernelMode,
+ NULL );
+
+ if (!KeInsertQueueApc( Apc,
+ Thread,
+ Event,
+ 2 ) ) {
+ //
+ // We couldn't queue the APC, so we will not be able to change
+ // the AutoAlignment. Update the thread object so that it
+ // stays in sync with the hardware state.
+ //
+#if DBG
+ DbgPrint("KeSetAutoAlignmentThread: unable to change thread's context\n");
+#endif
+ Thread->AutoAlignment = Previous;
+ }
+
+ KeWaitForSingleObject( Event,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL );
+ }
+
+ ExFreePool(Apc);
+ ExFreePool(Event);
+#endif
+
+ return(Previous);
+}
+
+#if 0
+
+VOID
+KepSetAlignmentSpecialApc(
+ IN PKAPC Apc,
+ IN PKNORMAL_ROUTINE *NormalRoutine,
+ IN PVOID *NormalContext,
+ IN PVOID *SystemArgument1,
+ IN PVOID *SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function updates the alignment check bit of the current thread's
+ EFLAGS to reflect the AutoAlignment setting of the thread object.
+
+Arguments:
+
+ Apc - Supplies a pointer to the APC control object that caused entry
+ into this routine.
+
+ NormalRoutine - Supplies a pointer to a pointer to the normal routine
+ function that was specified when the APC was initialized.
+
+ NormalContext - Supplies a pointer to a pointer to an arbitrary data
+ structure that was specified when the APC was initialized.
+
+ SystemArgument1 - Supplies a pointer to a PKTHREAD
+
+ SystemArgument2 - Supplies a pointer to a PKEVENT
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ PKTHREAD Thread;
+ PKEVENT Event;
+ PKTRAP_FRAME TrapFrame;
+ CONTEXT ContextFrame;
+
+ Thread = *(PKTHREAD *)SystemArgument1;
+ Event = *(PKEVENT *)SystemArgument2;
+
+ ASSERT( Thread == KeGetCurrentThread() );
+
+ //
+ // Find the trap frame on the stack, so we can get the thread context
+ //
+ TrapFrame = (PKTRAP_FRAME)((PUCHAR)Thread->InitialStack -
+ ALIGN_UP(sizeof(KTRAP_FRAME),KTRAP_FRAME_ALIGN) -
+ sizeof(FLOATING_SAVE_AREA));
+
+ ContextFrame.ContextFlags = CONTEXT_CONTROL;
+
+ KeContextFromKframes( TrapFrame,
+ NULL,
+ &ContextFrame );
+
+ //
+ // If AutoAlignment is TRUE, we want the processor to transparently fixup
+ // all alignment faults, so we clear the Alignment Check bit. If
+ // AutoAlignment is FALSE, we set the bit, so 486 processors will
+ // give us alignment faults.
+ //
+
+ if (Thread->AutoAlignment) {
+ ContextFrame.EFlags &= (~EFLAGS_ALIGN_CHECK);
+ } else {
+ ContextFrame.EFlags |= EFLAGS_ALIGN_CHECK;
+ }
+
+ //
+ // Replace the modified EFlags in the trap frame. When the thread returns
+ // to user mode, it will be running with the new alignment setting.
+ //
+
+ KeContextToKframes( TrapFrame,
+ NULL,
+ &ContextFrame,
+ CONTEXT_CONTROL,
+ KeGetPreviousMode() );
+
+ KeSetEvent(Event,0,FALSE);
+}
+#endif
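+
+//
+// For illustration only: the EFlags adjustment performed by the special APC
+// above, isolated as a small sketch. EFLAGS_ALIGN_CHECK is the AC bit
+// (0x00040000) honored by 486 and later processors; the helper name is
+// hypothetical.
+//
+//     ULONG
+//     KepAdjustAlignmentFlags (
+//         IN ULONG EFlags,
+//         IN BOOLEAN AutoAlignment
+//         )
+//     {
+//         if (AutoAlignment != FALSE) {
+//             return EFlags & ~EFLAGS_ALIGN_CHECK;  // unaligned accesses fixed up transparently
+//         } else {
+//             return EFlags | EFLAGS_ALIGN_CHECK;   // 486+ raises alignment faults
+//         }
+//     }
+//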
diff --git a/private/ntos/ke/i386/timindex.asm b/private/ntos/ke/i386/timindex.asm
new file mode 100644
index 000000000..36aa3ed70
--- /dev/null
+++ b/private/ntos/ke/i386/timindex.asm
@@ -0,0 +1,171 @@
+ TITLE "Compute Timer Table Index"
+;++
+;
+; Copyright (c) 1993 Microsoft Corporation
+;
+; Module Name:
+;
+; timindex.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to compute the timer table
+; index for a timer.
+;
+; Author:
+;
+; David N. Cutler (davec) 19-May-1993
+;
+; Environment:
+;
+; Any mode.
+;
+; Revision History:
+;
+;--
+
+.386p
+ .xlist
+include ks386.inc
+include callconv.inc ; calling convention macros
+ .list
+
+ extrn _KiTimeIncrementReciprocal:dword
+ extrn _KiTimeIncrementShiftCount:BYTE
+
+_TEXT$00 SEGMENT DWORD PUBLIC 'CODE'
+ ASSUME DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+ page
+ subttl "Compute Timer Table Index"
+;++
+;
+; ULONG
+; KiComputeTimerTableIndex (
+; IN LARGE_INTEGER Interval,
+; IN LARGE_INTEGER CurrentTime,
+; IN PKTIMER Timer
+; )
+;
+; Routine Description:
+;
+; This function computes the timer table index for the specified timer
+; object and stores the due time in the timer object.
+;
+; N.B. The interval parameter is guaranteed to be negative since it is
+; expressed as relative time.
+;
+; The formula for due time calculation is:
+;
+; Due Time = Current Time - Interval
+;
+; The formula for the index calculation is:
+;
+; Index = (Due Time / Maximum time) & (Table Size - 1)
+;
+; The time increment division is performed using reciprocal multiplication.
+;
+; Arguments:
+;
+; Interval - Supplies the relative time at which the timer is to
+; expire.
+;
+; CurrentTime - Supplies the current time.
+;
+; Timer - Supplies a pointer to a dispatch object of type timer.
+;
+; Return Value:
+;
+; The timer table index is returned as the function value and the due
+; time is stored in the timer object.
+;
+;--
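+
+;
+; For illustration only: the same computation expressed as a C sketch, with
+; the 128-bit product formed from the four 32x32 partial products exactly as
+; the code below does. Reciprocal and Shift stand in for the values of
+; _KiTimeIncrementReciprocal and _KiTimeIncrementShiftCount; ULONGLONG is an
+; unsigned 64-bit type.
+;
+;   ULONG
+;   ComputeTimerTableIndexSketch (
+;       ULONGLONG DueTime,
+;       ULONGLONG Reciprocal,
+;       UCHAR Shift
+;       )
+;   {
+;       ULONGLONG LowLow   = (DueTime & 0xffffffff) * (Reciprocal & 0xffffffff);
+;       ULONGLONG LowHigh  = (DueTime & 0xffffffff) * (Reciprocal >> 32);
+;       ULONGLONG HighLow  = (DueTime >> 32) * (Reciprocal & 0xffffffff);
+;       ULONGLONG HighHigh = (DueTime >> 32) * (Reciprocal >> 32);
+;
+;       ULONGLONG Middle = (LowLow >> 32) + (LowHigh & 0xffffffff) +
+;                          (HighLow & 0xffffffff);
+;       ULONGLONG High   = HighHigh + (LowHigh >> 32) + (HighLow >> 32) +
+;                          (Middle >> 32);
+;
+;       return (ULONG)(High >> Shift) & (TIMER_TABLE_SIZE - 1);
+;   }
+;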
+
+LocalStack equ 20
+
+Interval equ [esp+LocalStack+4]
+CurrentTime equ [esp+LocalStack+12]
+Timer equ [esp+LocalStack+20]
+
+cPublicProc _KiComputeTimerTableIndex ,5
+ sub esp, LocalStack
+ mov [esp+16], ebx
+ mov ebx,CurrentTime ; get low current time
+ mov ecx,CurrentTime + 4 ; get high current time
+ sub ebx,Interval ; subtract low parts
+ sbb ecx,Interval + 4 ; subtract high parts and borrow
+ mov eax,Timer ; get address of timer object
+ mov [eax].TiDueTime.LiLowPart,ebx ; set low part of due time
+ mov [eax].TiDueTime.LiHighPart,ecx ; set high part of due time
+
+;
+; Compute low 32-bits of dividend times low 32-bits of divisor.
+;
+
+ mov eax,ebx ; copy low 32-bits of dividend
+ mul [_KiTimeIncrementReciprocal] ; multiply by low 32-bits of divisor
+ mov [esp+12], edx ; save high order 32-bits of product
+
+;
+; Compute low 32-bits of dividend times high 32-bits of divisor.
+;
+
+ mov eax,ebx ; copy low 32-bits of dividend
+ mul [_KiTimeIncrementReciprocal+4] ;multiply by high 32-bits of divisor
+ mov [esp+8], eax ; save full 64-bit product
+ mov [esp+4], edx ;
+
+;
+; Compute high 32-bits of dividend times low 32-bits of divisor.
+;
+
+ mov eax,ecx ; copy high 32-bits of dividend
+ mul [_KiTimeIncrementReciprocal] ; multiply by low 32-bits of divisor
+ mov [esp+0], edx ; save high 32-bits of product
+
+;
+; Compute carry out of low 64-bits of 128-bit product.
+;
+
+ xor ebx,ebx ; clear carry accumulator
+ add eax,[esp]+8 ; generate carry
+ adc ebx,0 ; accumulate carry
+ add eax,[esp]+12 ; generate carry
+ adc ebx,0 ; accumulate carry
+
+;
+; Compute high 32-bits of dividend times high 32-bits of divisor.
+;
+
+ mov eax,ecx ; copy high 32-bits of dividend
+ mul [_KiTimeIncrementReciprocal+4] ; multiply by high 32-bits of divisor
+
+;
+; Compute high 64-bits of 128-bit product.
+;
+
+ add eax,ebx ; add carry from low 64-bits
+ adc edx,0 ; propagate carry
+ add eax,[esp]+0 ; add and generate carry
+ adc edx,0 ; propagate carry
+ add eax,[esp]+4 ; add and generate carry
+ adc edx,0 ; propagate carry
+
+;
+; Right shift the result by the specified shift count and mask off extra
+; bits.
+;
+
+ mov cl,[_KiTimeIncrementShiftCount] ; get shift count value
+ shrd eax,edx,cl ; extract appropriate product bits
+
+ mov ebx, [esp+16] ; restore register
+ add esp, LocalStack ; trim stack
+ and eax,(TIMER_TABLE_SIZE-1); reduce to size of table
+
+ stdRET _KiComputeTimerTableIndex
+
+stdENDP _KiComputeTimerTableIndex
+
+_TEXT$00 ends
+ end
diff --git a/private/ntos/ke/i386/trap.asm b/private/ntos/ke/i386/trap.asm
new file mode 100644
index 000000000..b47a5eda1
--- /dev/null
+++ b/private/ntos/ke/i386/trap.asm
@@ -0,0 +1,5486 @@
+ title "Trap Processing"
+;++
+;
+; Copyright (c) 1989 Microsoft Corporation
+;
+; Module Name:
+;
+; trap.asm
+;
+; Abstract:
+;
+; This module implements the code necessary to field and process i386
+; trap conditions.
+;
+; Author:
+;
+; Shie-Lin Tzong (shielint) 4-Feb-1990
+;
+; Environment:
+;
+; Kernel mode only.
+;
+; Revision History:
+;
+;--
+.386p
+ .xlist
+KERNELONLY equ 1
+include ks386.inc
+include callconv.inc ; calling convention macros
+include i386\kimacro.inc
+include mac386.inc
+include i386\mi.inc
+include ..\..\vdm\i386\vdm.inc
+include ..\..\vdm\i386\vdmtb.inc
+ .list
+
+FAST_BOP equ 1
+FAST_V86_TRAP equ 1
+
+
+ page ,132
+ extrn _KeGdiFlushUserBatch:DWORD
+ extrn _KeTickCount:DWORD
+ extrn _ExpTickCountMultiplier:DWORD
+ extrn _KiDoubleFaultTSS:dword
+ extrn _KiNMITSS:dword
+ extrn _KeServiceDescriptorTable:dword
+ extrn _KiHardwareTrigger:dword
+ extrn _KiBugCheckData:dword
+ extrn _KdpOweBreakpoint:dword
+ extrn Ki386BiosCallReturnAddress:near
+ EXTRNP _KiDeliverApc,3
+ EXTRNP KfRaiseIrql,1,IMPORT,FASTCALL
+ EXTRNP KfLowerIrql,1,IMPORT,FASTCALL
+ EXTRNP _KeGetCurrentIrql,0,IMPORT
+ EXTRNP _PsConvertToGuiThread,0
+ EXTRNP _ZwUnmapViewOfSection,2
+
+ EXTRNP _HalHandleNMI,1,IMPORT
+ EXTRNP _HalBeginSystemInterrupt,3,IMPORT
+ EXTRNP _HalEndSystemInterrupt,2,IMPORT
+ EXTRNP _KiDispatchException,5
+if DEVL
+ EXTRNP _PsWatchWorkingSet,3
+ extrn _PsWatchEnabled:byte
+endif
+ EXTRNP _MmAccessFault,3
+ EXTRNP _KeBugCheck,1
+ EXTRNP _KeBugCheckEx,5
+ EXTRNP _KeTestAlertThread,1
+ EXTRNP _KiContinue,3
+ EXTRNP _KiRaiseException,5
+ EXTRNP _Ki386DispatchOpcode,0
+ EXTRNP _Ki386DispatchOpcodeV86,0
+ EXTRNP _VdmDispatchPageFault,3
+ EXTRNP _Ki386VdmReflectException,1
+ EXTRNP _Ki386VdmSegmentNotPresent,0
+ extrn _DbgPrint:proc
+ EXTRNP _KdSetOwedBreakpoints
+ extrn _KiFreezeFlag:dword
+ EXTRNP _Ki386CheckDivideByZeroTrap,1
+ EXTRNP _Ki386CheckDelayedNpxTrap,2
+ extrn SwapContext:near
+ EXTRNP _VdmDispatchIRQ13, 1
+
+ extrn VdmDispatchBop:near
+ extrn _KeI386VdmIoplAllowed:dword
+ extrn _KeI386VirtualIntExtensions:dword
+ EXTRNP _NTFastDOSIO,2
+ EXTRNP _NtSetLdtEntries,6
+ extrn OpcodeIndex:byte
+
+; JAPAN - SUPPORT Intel CPU/Non PC/AT machine
+ extrn _VdmFixedStateLinear:dword
+
+;
+; Equates for exceptions which cause system fatal error
+;
+
+EXCEPTION_DIVIDED_BY_ZERO EQU 0
+EXCEPTION_DEBUG EQU 1
+EXCEPTION_NMI EQU 2
+EXCEPTION_INT3 EQU 3
+EXCEPTION_BOUND_CHECK EQU 5
+EXCEPTION_INVALID_OPCODE EQU 6
+EXCEPTION_NPX_NOT_AVAILABLE EQU 7
+EXCEPTION_DOUBLE_FAULT EQU 8
+EXCEPTION_NPX_OVERRUN EQU 9
+EXCEPTION_INVALID_TSS EQU 0AH
+EXCEPTION_SEGMENT_NOT_PRESENT EQU 0BH
+EXCEPTION_STACK_FAULT EQU 0CH
+EXCEPTION_GP_FAULT EQU 0DH
+EXCEPTION_RESERVED_TRAP EQU 0FH
+EXCEPTION_NPX_ERROR EQU 010H
+EXCEPTION_ALIGNMENT_CHECK EQU 011H
+
+;
+; Exception flags
+;
+
+EXCEPT_UNKNOWN_ACCESS EQU 0H
+EXCEPT_LIMIT_ACCESS EQU 10H
+
+;
+; Equates for some opcodes and instruction prefixes
+;
+
+IOPL_MASK EQU 3000H
+IOPL_SHIFT_COUNT EQU 12
+
+;
+; page fault read/write mask
+;
+
+ERR_0E_STORE EQU 2
+
+;
+; Debug register 6 (dr6) BS (single step) bit mask
+;
+
+DR6_BS_MASK EQU 4000H
+
+;
+; EFLAGS single step bit
+;
+
+EFLAGS_TF_BIT EQU 100h
+EFLAGS_OF_BIT EQU 4000H
+
+;
+; The mask of selector's table indicator (ldt or gdt)
+;
+
+TABLE_INDICATOR_MASK EQU 4
+
+;
+; Opcode for Pop SegReg and iret instructions
+;
+
+POP_DS EQU 1FH
+POP_ES EQU 07h
+POP_FS EQU 0A10FH
+POP_GS EQU 0A90FH
+IRET_OP EQU 0CFH
+CLI_OP EQU 0FAH
+STI_OP EQU 0FBH
+PUSHF_OP EQU 9CH
+POPF_OP EQU 9DH
+INTNN_OP EQU 0CDH
+FRSTOR_ECX EQU 021DD9Bh
+FWAIT_OP EQU 09bh
+
+;
+; Force assume into place
+;
+
+_TEXT$00 SEGMENT PARA PUBLIC 'CODE'
+ ASSUME DS:NOTHING, ES:NOTHING, SS:NOTHING, FS:NOTHING, GS:NOTHING
+_TEXT$00 ENDS
+
+_DATA SEGMENT DWORD PUBLIC 'DATA'
+
+;
+; Definitions for gate descriptors
+;
+
+GATE_TYPE_386INT EQU 0E00H
+GATE_TYPE_386TRAP EQU 0F00H
+GATE_TYPE_TASK EQU 0500H
+D_GATE EQU 0
+D_PRESENT EQU 8000H
+D_DPL_3 EQU 6000H
+D_DPL_0 EQU 0
+
+;
+; Definitions for present 386 trap and interrupt gate attributes
+;
+
+D_TRAP032 EQU D_PRESENT+D_DPL_0+D_GATE+GATE_TYPE_386TRAP
+D_TRAP332 EQU D_PRESENT+D_DPL_3+D_GATE+GATE_TYPE_386TRAP
+D_INT032 EQU D_PRESENT+D_DPL_0+D_GATE+GATE_TYPE_386INT
+D_INT332 EQU D_PRESENT+D_DPL_3+D_GATE+GATE_TYPE_386INT
+D_TASK EQU D_PRESENT+D_DPL_0+D_GATE+GATE_TYPE_TASK
+
+;
+; This is the protected mode interrupt descriptor table.
+;
+
+if DBG
+;
+; NOTE - embedded English messages won't fly for NLS! (OK for debug code only)
+;
+
+BadInterruptMessage db 0ah,7,7,'!!! Unexpected Interrupt %02lx !!!',0ah,00
+
+KiNMIMessage db 0ah,'Non-Maskable-Interrupt (NMI) EIP = %08lx',0ah,00
+
+Ki16BitStackTrapMessage db 0ah,'Exception inside of 16bit stack',0ah,00
+endif
+
+;++
+;
+; DEFINE_SINGLE_EMPTY_VECTOR - helper for DEFINE_EMPTY_VECTORS
+;
+;--
+
+DEFINE_SINGLE_EMPTY_VECTOR macro number
+IDTEntry _KiUnexpectedInterrupt&number, D_INT032
+_TEXT$00 SEGMENT
+ public _KiUnexpectedInterrupt&number
+_KiUnexpectedInterrupt&number proc
+ push dword ptr (&number + PRIMARY_VECTOR_BASE)
+ jmp _KiUnexpectedInterruptTail
+_KiUnexpectedInterrupt&number endp
+_TEXT$00 ENDS
+
+ endm
+
+FPOFRAME macro a, b
+.FPO ( a, b, 0, 0, 0, FPO_TRAPFRAME )
+endm
+
+;++
+;
+; DEFINE_EMPTY_VECTORS emits an IDTEntry macro (and thus an IDT entry)
+; into the data segment. It then emits an unexpected interrupt target
+; with push of a constant into the code segment. Labels in the code
+; segment are defined to bracket the unexpected interrupt targets so
+; that KeConnectInterrupt can correctly test for them.
+;
+; Empty vectors will be defined from 30 to ff, which is the hardware
+; vector set.
+;
+;--
+
+NUMBER_OF_IDT_VECTOR EQU 0ffH
+
+DEFINE_EMPTY_VECTORS macro
+
+;
+; Set up
+;
+
+ empty_vector = 00H
+
+_TEXT$00 SEGMENT
+IFDEF STD_CALL
+ public _KiStartUnexpectedRange@0
+_KiStartUnexpectedRange@0 equ $
+ELSE
+ public _KiStartUnexpectedRange
+_KiStartUnexpectedRange equ $
+ENDIF
+_TEXT$00 ENDS
+
+ rept (NUMBER_OF_IDT_VECTOR - (($ - _IDT)/8)) + 1
+
+ DEFINE_SINGLE_EMPTY_VECTOR %empty_vector
+ empty_vector = empty_vector + 1
+
+ endm ;; rept
+
+_TEXT$00 SEGMENT
+IFDEF STD_CALL
+ public _KiEndUnexpectedRange@0
+_KiEndUnexpectedRange@0 equ $
+ELSE
+ public _KiEndUnexpectedRange
+_KiEndUnexpectedRange equ $
+ENDIF
+
+_TEXT$00 ENDS
+
+ endm ;; DEFINE_EMPTY_VECTORS macro
+
+IDTEntry macro name,access
+ dd offset FLAT:name
+ dw access
+ dw KGDT_R0_CODE
+ endm
+
+INIT SEGMENT DWORD PUBLIC 'CODE'
+
+;
+; The IDT table is put into the INIT code segment so the memory
+; can be reclaimed after bootup
+;
+
+ALIGN 4
+ public _IDT, _IDTLEN, _IDTEnd
+_IDT label byte
+
+IDTEntry _KiTrap00, D_INT032 ; 0: Divide Error
+IDTEntry _KiTrap01, D_INT032 ; 1: DEBUG TRAP
+IDTEntry _KiTrap02, D_INT032 ; 2: NMI/NPX Error
+IDTEntry _KiTrap03, D_INT332 ; 3: Breakpoint
+IDTEntry _KiTrap04, D_INT332 ; 4: INTO
+IDTEntry _KiTrap05, D_INT032 ; 5: BOUND/Print Screen
+IDTEntry _KiTrap06, D_INT032 ; 6: Invalid Opcode
+IDTEntry _KiTrap07, D_INT032 ; 7: NPX Not Available
+IDTEntry _KiTrap08, D_INT032 ; 8: Double Exception
+IDTEntry _KiTrap09, D_INT032 ; 9: NPX Segment Overrun
+IDTEntry _KiTrap0A, D_INT032 ; A: Invalid TSS
+IDTEntry _KiTrap0B, D_INT032 ; B: Segment Not Present
+IDTEntry _KiTrap0C, D_INT032 ; C: Stack Fault
+IDTEntry _KiTrap0D, D_INT032 ; D: General Protection
+IDTEntry _KiTrap0E, D_INT032 ; E: Page Fault
+IDTEntry _KiTrap0F, D_INT032 ; F: Intel Reserved
+
+IDTEntry _KiTrap10, D_INT032 ;10: 486 coprocessor error
+IDTEntry _KiTrap11, D_INT032 ;11: 486 alignment
+IDTEntry _KiTrap0F, D_INT032 ;12: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;13: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;14: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;15: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;16: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;17: Intel Reserved
+
+IDTEntry _KiTrap0F, D_INT032 ;18: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;19: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;1A: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;1B: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;1C: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;1D: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;1E: Intel Reserved
+IDTEntry _KiTrap0F, D_INT032 ;1F: Reserved for APIC
+
+;
+; Note IDTEntry 0x21 is reserved for WOW apps.
+;
+
+ rept 2AH - (($ - _IDT)/8)
+IDTEntry 0, 0 ;invalid IDT entry
+ endm
+IDTEntry _KiGetTickCount, D_INT332 ;2A: KiGetTickCount service
+IDTEntry _KiCallbackReturn, D_INT332 ;2B: KiCallbackReturn
+IDTEntry _KiSetLowWaitHighThread, D_INT332 ;2C: KiSetLowWaitHighThread service
+IDTEntry _KiDebugService, D_INT332 ;2D: debugger calls
+IDTEntry _KiSystemService, D_INT332 ;2E: system service calls
+IDTEntry _KiTrap0F, D_INT032 ;2F: Reserved for APIC
+
+;
+; Generate per-vector unexpected interrupt entries for 30 - ff
+;
+ DEFINE_EMPTY_VECTORS
+
+_IDTLEN equ $ - _IDT
+_IDTEnd equ $
+
+INIT ends
+
+ public _KiUnexpectedEntrySize
+_KiUnexpectedEntrySize dd _KiUnexpectedInterrupt1 - _KiUnexpectedInterrupt0
+
+;
+; defines all the possible instruction prefixes
+;
+
+PrefixTable label byte
+ db 0f2h ; rep prefix
+ db 0f3h ; rep ins/outs prefix
+ db 67h ; addr prefix
+ db 0f0h ; lock prefix
+ db 66h ; operand prefix
+ db 2eh ; segment override prefix:cs
+ db 3eh ; ds
+ db 26h ; es
+ db 64h ; fs
+ db 65h ; gs
+ db 36h ; ss
+
+PREFIX_REPEAT_COUNT EQU 11 ; Prefix table length
+
+;
+; defines all the possible privileged IO instructions
+;
+
+IOInstructionTable label byte
+; db 0fah ; cli
+; db 0fdh ; sti
+ db 0e4h, 0e5h, 0ech, 0edh ; IN
+ db 6ch, 6dh ; INS
+ db 0e6h, 0e7h, 0eeh, 0efh ; OUT
+ db 6eh, 6fh ; OUTS
+
+IO_INSTRUCTION_TABLE_LENGTH EQU 12
+
+if FAST_V86_TRAP
+
+ALIGN 4
+; V86DispatchTable - table of routines used to emulate instructions
+; in v86 mode.
+
+dtBEGIN V86DispatchTable,V86PassThrough
+ dtS VDM_INDEX_PUSHF , V86Pushf
+ dtS VDM_INDEX_POPF , V86Popf
+ dtS VDM_INDEX_INTnn , V86Intnn
+ dtS VDM_INDEX_IRET , V86Iret
+ dtS VDM_INDEX_CLI , V86Cli
+ dtS VDM_INDEX_STI , V86Sti
+dtEND MAX_VDM_INDEX
+
+endif ; FAST_V86_TRAP
+
+;
+; definition for floating status word error mask
+;
+
+FSW_INVALID_OPERATION EQU 1
+FSW_DENORMAL EQU 2
+FSW_ZERO_DIVIDE EQU 4
+FSW_OVERFLOW EQU 8
+FSW_UNDERFLOW EQU 16
+FSW_PRECISION EQU 32
+FSW_STACK_FAULT EQU 64
+FSW_CONDITION_CODE_0 EQU 100H
+FSW_CONDITION_CODE_1 EQU 200H
+FSW_CONDITION_CODE_2 EQU 400H
+FSW_CONDITION_CODE_3 EQU 4000H
+_DATA ENDS
+
+_TEXT$00 SEGMENT
+ ASSUME DS:NOTHING, ES:NOTHING, SS:FLAT, FS:NOTHING, GS:NOTHING
+
+ page , 132
+ subttl "Macro to Handle v86 trap 6"
+;++
+;
+; Macro Description:
+;
+; This macro is a fast way to handle v86 bop instructions.
+; Note, all the memory write operations in this macro are done in such a
+; way that if a page fault occurs the memory will still be in a consistent
+; state.
+;
+; That is, we must process the trapped instruction in the following order:
+;
+; 1. Read and Write user memory
+; 2. Update VDM state flags
+; 3. Update trap frame
+;
+; Arguments:
+;
+; interrupts disabled
+;
+; Return Value:
+;
+;--
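+
+;
+; For illustration only: a C sketch of the BOP recognition performed below,
+; assuming flat 32-bit access to the v86 address space. IsV86Bop and its
+; parameters are hypothetical names; the constants mirror the c4c4 BOP opcode
+; pair and the major BOP number byte that follows it.
+;
+;   BOOLEAN
+;   IsV86Bop (ULONG SegCs, ULONG Eip, PULONG BopNumber)
+;   {
+;       ULONG Flat = (SegCs << 4) + Eip;        // real-mode style address
+;       ULONG Code = *(PULONG)Flat;             // dword at the faulting EIP
+;
+;       if ((Code & 0xffff) != 0xc4c4) {
+;           return FALSE;                       // not a BOP instruction
+;       }
+;
+;       *BopNumber = (Code >> 16) & 0xff;       // major BOP number
+;       return TRUE;
+;   }
+;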
+
+FAST_V86_TRAP_6 MACRO
+
+local DoFastIo, a, b
+
+BOP_FOR_FASTWRITE EQU 4350C4C4H
+BOP_FOR_FASTREAD EQU 4250C4C4H
+TRAP6_IP EQU 32 ; 8 * 4
+TRAP6_CS EQU 36 ; 8 * 4 + 4
+TRAP6_FLAGS EQU 40 ; 8 * 4 + 8
+TRAP6_SP EQU 44 ; 8 * 4 + 12
+TRAP6_SS EQU 48 ; 8 * 4 + 16
+TRAP6_ES EQU 52
+TRAP6_DS EQU 56
+TRAP6_FS EQU 60
+TRAP6_GS EQU 64
+TRAP6_EAX EQU 28
+TRAP6_EDX EQU 20
+
+ pushad ;eax, ecx, edx, ebx, old esp, ebp, esi, edi
+ mov eax, KGDT_R3_DATA OR RPL_MASK
+ mov ds, ax
+ mov es, ax
+
+ifdef NT_UP
+else
+ mov eax, KGDT_R0_PCR
+ mov fs, ax
+endif
+ mov byte ptr PCR[PcVdmAlert], 6
+
+ mov ax, word ptr [esp+TRAP6_CS] ; [eax] = v86 user cs
+ shl eax, 4
+ add eax, [esp+TRAP6_IP] ; [eax] = addr of BOP
+ mov edx, [eax] ; [edx] = xxxxc4c4 bop + maj bop # + mi #
+ cmp edx, BOP_FOR_FASTREAD
+ je DoFastIo
+
+ cmp edx, BOP_FOR_FASTWRITE
+ je DoFastIo
+
+ cmp dx, 0c4c4h ; Is it a bop?
+ jne V86Trap6PassThrough ; It's an error condition
+ifdef NT_UP
+ mov eax, KGDT_R3_TEB OR RPL_MASK ; (fs)-> USER mode TEB! (Not R0 PCR)
+ shr edx, 16
+ mov fs, ax
+ mov eax, fs:[TbVdm]
+else
+ mov eax, PCR[PcTeb] ; (fs)->PCR
+ shr edx, 16
+ mov eax, [eax].TbVdm ; get pointer to VdmTib
+endif
+ and edx, 0ffh
+ mov dword ptr [eax].VtEIEvent, VdmBop
+ mov dword ptr [eax].VtEIBopNumber, edx
+ mov dword ptr [eax].VtEIInstSize, 3
+ lea eax, [eax].VtVdmContext
+
+;
+; Save V86 state to Vdm structure
+;
+ mov edx, [esp+TRAP6_EDX] ; get edx
+ mov [eax].CsEcx, ecx
+ mov [eax].CsEbx, ebx ; Save non-volatile registers
+ mov [eax].CsEsi, esi
+ mov [eax].CsEdi, edi
+ mov ecx, [esp+TRAP6_EAX] ; Get eax
+ mov [eax].CsEbp, ebp
+ mov [eax].CsEdx, edx
+ mov [eax].CsEax, ecx
+
+ mov ebx, [esp]+TRAP6_IP ; (ebx) = user ip
+ mov ecx, [esp]+TRAP6_CS ; (ecx) = user cs
+ mov esi, [esp]+TRAP6_SP ; (esi) = user esp
+ mov edi, [esp]+TRAP6_SS ; (edi) = user ss
+ mov edx, [esp]+TRAP6_FLAGS; (edx) = user eflags
+ mov [eax].CsEip, ebx
+ mov [eax].CsSegCs, ecx
+ mov [eax].CsEsp, esi
+ mov [eax].CsSegSs, edi
+ test _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS
+ jz short @f
+
+ test edx, EFLAGS_VIF
+ jnz short a
+
+ and edx, NOT EFLAGS_INTERRUPT_MASK
+ jmp short a
+
+@@: test _KeI386VdmIoplAllowed, 0ffffffffh
+ jnz short a
+
+ mov ebx, _VdmFixedStateLinear ; load ntvdm address
+ test ds:[ebx], VDM_VIRTUAL_INTERRUPTS ; check interrupt
+ jnz short a
+
+ and edx, NOT EFLAGS_INTERRUPT_MASK
+a:
+ mov [eax].CsEFlags, edx
+ mov ebx, [esp]+TRAP6_DS ; (ebx) = user ds
+ mov ecx, [esp]+TRAP6_ES ; (ecx) = user es
+ mov edx, [esp]+TRAP6_FS ; (edx) = user fs
+ mov esi, [esp]+TRAP6_GS ; (esi) = user gs
+ mov [eax].CsSegDs, ebx
+ mov [eax].CsSegEs, ecx
+ mov [eax].CsSegFs, edx
+ mov [eax].CsSegGs, esi
+
+;
+; Load Monitor context
+;
+
+ add eax, VtMonitorContext - VtVdmContext ; (eax)->monitor context
+ mov ebx, [eax].CsSegSs
+ mov esi, [eax].CsEsp
+ mov edi, [eax].CsEFlags
+ mov edx, [eax].CsSegCs
+ mov ecx, [eax].CsEip
+ mov [esp - 4], ebx ; Build Iret frame (can not single step!)
+ mov [esp - 8], esi
+ mov [esp - 12], edi
+ mov [esp - 16], edx
+ mov [esp - 20], ecx
+ mov ebx, [eax].CsEbx ; We don't need to load volatile registers.
+ mov esi, [eax].CsEsi ; because monitor uses SystemCall to return
+ mov edi, [eax].CsEdi ; back to v86. C compiler knows that
+ mov ebp, [eax].CsEbp ; SystemCall does not preserve volatile
+ ; registers.
+ ; fs, ds are set up already.
+ sub esp, 20
+
+;
+; Adjust Tss esp0 value and set return value to SUCCESS
+;
+ mov ecx, PCR[PcPrcbData+PbCurrentThread]
+ mov ecx, [ecx].thInitialStack
+ mov edx, PCR[PcTss]
+ sub ecx, NPX_FRAME_LENGTH + TsV86Gs - TsHardwareSegSs
+ xor eax, eax ; ret status = SUCCESS
+ mov [edx].TssEsp0, ecx
+ mov byte ptr PCR[PcVdmAlert], al
+ifdef NT_UP
+else
+ mov edx, KGDT_R3_TEB OR RPL_MASK
+ mov fs, dx
+endif
+ iretd
+
+DoFastIo:
+ xor eax, eax
+ mov edx, [esp]+TRAP6_EDX ; Restore edx
+ add esp, 7 * 4 ; leave eax in the TsErrCode
+ xchg [esp], eax ; Restore eax, store a zero errcode
+ sub esp, TsErrcode ; build a trap frame
+ mov [esp].TsEbx, ebx
+ mov [esp].TsEax, eax
+ mov [esp].TsEbp, ebp
+ mov [esp].TsEsi, esi
+ mov [esp].TsEdi, edi
+ mov [esp].TsEcx, ecx
+ mov [esp].TsEdx, edx
+if DBG
+ mov [esp].TsPreviousPreviousMode, -1
+ mov [esp]+TsDbgArgMark, 0BADB0D00h
+endif
+ifdef NT_UP
+ mov ebx, KGDT_R0_PCR
+ mov fs, bx
+endif
+ mov byte ptr PCR[PcVdmAlert], 0
+ mov ebp, esp
+ cld
+ test byte ptr PCR[PcDebugActive], -1
+ jz short @f
+
+ mov ebx,dr0
+ mov esi,dr1
+ mov edi,dr2
+ mov [ebp]+TsDr0,ebx
+ mov [ebp]+TsDr1,esi
+ mov [ebp]+TsDr2,edi
+ mov ebx,dr3
+ mov esi,dr6
+ mov edi,dr7
+ mov [ebp]+TsDr3,ebx
+ mov [ebp]+TsDr6,esi
+ mov [ebp]+TsDr7,edi
+ ;
+ ; Load KernelDr* into processor
+ ;
+ mov edi,dword ptr fs:[PcPrcb]
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr0
+ mov esi,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr1
+ mov dr0,ebx
+ mov dr1,esi
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr2
+ mov esi,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr3
+ mov dr2,ebx
+ mov dr3,esi
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr6
+ mov esi,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr7
+ mov dr6,ebx
+ mov dr7,esi
+@@:
+ ; Raise Irql to APC level before enabling interrupts
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+ sti
+
+ xor edx, edx
+ mov dx, word ptr [ebp].TsSegCs
+ shl edx, 4
+ xor ebx, ebx
+ add edx, [ebp].TsEip
+ mov bl, [edx+3] ; [bl] = minor BOP code
+ push ebx
+ push ebp ; (ebp)->TrapFrame
+ call _NTFastDOSIO@8
+ jmp Kt061i
+
+V86Trap6PassThrough:
+ mov byte ptr PCR[PcVdmAlert], 0
+V86Trap6Recovery:
+ popad
+ jmp Kt6SlowBop ; Fall through
+
+endm
+ page , 132
+ subttl "Macro to Handle v86 trap d"
+;++
+;
+; Macro Description:
+;
+; This macro is a fast way to handle SOME v86 mode sensitive instructions.
+; Note, all the memory write operations in this macro are done in such a
+; way that if a page fault occurs the memory will still be in a consistent
+; state.
+;
+; That is, we must process the trapped instruction in the following order:
+;
+; 1. Write user memory (prefer to do reads here)
+; 2. Update VDM state flags
+; 3. Update trap frame
+;
+; Arguments:
+;
+; interrupts disabled
+;
+; Return Value:
+;
+;--
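+
+;
+; For illustration only: the pushf emulation below, sketched in C. The names
+; HardwareEFlags, VdmState, Ss and Sp are stand-ins for the trap frame EFlags,
+; the fixed VDM state dword, and the client SS:SP; the ordering (user memory
+; first, trap frame last) is the consistency rule described above.
+;
+;   Flags16 = (USHORT)((HardwareEFlags & ~EFLAGS_INTERRUPT_MASK) |
+;                      (VdmState & (VDM_VIRTUAL_INTERRUPTS |
+;                                   VDM_VIRTUAL_AC | VDM_VIRTUAL_NT)) |
+;                      EFLAGS_IOPL_MASK);
+;
+;   Sp -= 2;
+;   *(PUSHORT)((Ss << 4) + Sp) = Flags16;       // write the v86 stack first
+;   TrapFrame->HardwareEsp = Sp;                // update the trap frame last
+;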
+
+FAST_V86_TRAP_D MACRO
+
+local V86Exit, V86Exit_1, IntnnExit
+
+ sub esp, TsErrCode
+ mov [esp].TsEdx, edx
+ mov [esp].TsEcx, ecx
+ mov [esp].TsEbx, ebx
+ mov [esp].TsEax, eax
+
+ public _V86CriticalInstruction
+_V86CriticalInstruction:
+ mov ebx, FIXED_NTVDMSTATE_LINEAR_PC_AT
+ ; above ntvdm address may be changed in KeI386VdmInitialize for PC-98
+
+ifdef NT_UP
+ mov byte ptr SS:[P0PCRADDRESS][PcVdmAlert], 0dh
+else
+ mov eax, KGDT_R0_PCR
+ mov fs, ax
+ mov byte ptr PCR[PcVdmAlert], 0dh
+endif
+ mov eax, [esp].TsSegCs ; (eax) = H/W Cs
+ shl eax,4
+ add eax,[esp].TsEip ; (eax) -> flat faulted addr
+ xor edx, edx
+ mov ecx, ss:[eax] ; (ecx) = faulted instruction
+ mov dl, cl
+ mov dl, ss:OpcodeIndex[edx] ; (edx) = opcode index
+ jmp ss:V86DispatchTable[edx * type V86DispatchTable]
+
+;
+; (ecx) = faulted instructions
+; (eax) = Flat faulted addr
+; (edx) = Opcode Index
+;
+
+ALIGN 4
+V86Pushf:
+ mov eax, ss:[ebx] ; get ntvdm address
+ mov edx, dword ptr [esp].TsEflags ; (edx) = Hardware Eflags
+ and eax,VDM_VIRTUAL_INTERRUPTS OR VDM_VIRTUAL_AC OR VDM_VIRTUAL_NT
+ or eax,EFLAGS_IOPL_MASK
+ and edx,NOT EFLAGS_INTERRUPT_MASK
+ mov [esp].TsSegDs, ecx ; save ecx
+ or eax,edx ; (ax) = client flags
+ xor ecx, ecx
+ mov cx, word ptr [esp].TsHardwareSegSs ; (edx)= hardware SS
+ xor edx, edx
+ shl ecx,4
+ mov dx, word ptr [esp].TsHardwareEsp ; (edx)= Hardware sp
+ sub edx, 2
+ mov ss:[ecx + edx],ax
+ mov ecx, [esp].TsSegDs ; restore ecx
+;
+; Usually, pushf is followed by cli. So, here we check for this case.
+; If yes, we will handle the cli to save a round trip.
+; It is very important that we first update user stack, Fixed VDM state and
+; finally hardware esp.
+;
+
+ cmp cx, (CLI_OP SHL 8) OR PUSHF_OP ; Is there a cli following pushf?
+ jnz short @f
+
+ MPLOCK and dword ptr ss:[ebx],NOT VDM_VIRTUAL_INTERRUPTS
+ inc dword ptr [esp].TsEip ; skip cli
+@@:
+ mov word ptr [esp].TsHardwareEsp, dx ; update client esp
+V86Exit:
+ inc dword ptr [esp].TsEip ; skip pushf
+V86Exit_1:
+ mov ecx, [esp].TsEcx
+ mov ebx, [esp].TsEbx
+ mov eax, [esp].TsEax
+ mov edx, [esp].TsEdx
+ add esp, TsEip
+ifdef NT_UP
+ mov byte ptr SS:[P0PCRADDRESS][PcVdmAlert], 0
+else
+ mov byte ptr PCR[PcVdmAlert], 0
+endif
+ iretd
+
+ALIGN 4
+V86Cli:
+ MPLOCK and dword ptr ss:[ebx],NOT VDM_VIRTUAL_INTERRUPTS
+ jmp short V86Exit
+
+ALIGN 4
+V86Sti:
+ test ss:[ebx], VDM_INTERRUPT_PENDING
+ ; Can we handle it in fast way?
+ jnz V86PassThrough ; if nz, no, we need to dispatch int
+
+ ;; Pentium CPU traps sti if
+ ;; 1). client's TF is ON or 2). VIP is ON
+ ;; we must set EFLAGS_VIF in this case.
+ test dword ptr _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS
+ jz short v86_sti_01
+ or dword ptr [esp].TsEflags, EFLAGS_VIF
+v86_sti_01:
+ MPLOCK or dword ptr ss:[ebx], EFLAGS_INTERRUPT_MASK
+ jmp short V86Exit
+
+ALIGN 4
+V86Popf:
+ test ss:[ebx], VDM_INTERRUPT_PENDING
+ ; Can we handle it in fast way?
+ jnz V86PassThrough
+
+ xor edx, edx
+ mov dx, word ptr [esp].TsHardwareSegSs ; (edx)= hardware SS
+ xor eax, eax
+ shl edx,4
+ mov ax, word ptr [esp].TsHardwareEsp ; (ecx)= Hardware sp
+ mov edx, ss:[edx + eax] ; (edx) = Client flags
+ add ax, 2 ; (ax) = Client sp
+ and edx, 0FFFFH AND (NOT EFLAGS_IOPL_MASK)
+ MPLOCK and ss:[ebx],NOT (EFLAGS_INTERRUPT_MASK OR EFLAGS_ALIGN_CHECK OR EFLAGS_NT_MASK)
+ mov ecx, edx
+ and edx, (EFLAGS_INTERRUPT_MASK OR EFLAGS_ALIGN_CHECK OR EFLAGS_NT_MASK)
+ and ecx, NOT EFLAGS_NT_MASK
+ MPLOCK or ss:[ebx],edx
+ or ecx, (EFLAGS_INTERRUPT_MASK OR EFLAGS_V86_MASK)
+
+ ;; Pentium CPU traps popf if
+ ;; 1)client's TF is ON or 2) VIP is on and the client's IF is ON
+ ;; We have to propagate IF to VIF if virtual interrupt extension is
+ ;; enabled.
+ test dword ptr _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS
+ jz v86_popf_01
+ and ecx, NOT EFLAGS_VIF ;clear it first
+ and edx, EFLAGS_INTERRUPT_MASK ;isolate and move IF to VIF
+ rol edx, 10 ;position
+.errnz (EFLAGS_INTERRUPT_MASK SHL 10) - EFLAGS_VIF
+ or ecx, edx ;propagate it!
+
+v86_popf_01:
+ mov [esp].TsEflags,ecx
+ mov [esp].TsHardwareEsp, eax ; update client esp
+ jmp V86Exit
+
+ALIGN 4
+V86Intnn:
+ shr ecx, 8
+ xor eax, eax
+ and ecx, 0FFH ; ecx is int#
+ mov ecx, ss:[ecx*4] ; (ecx) = Int nn handler
+ xor edx, edx
+ mov [esp].TsSegDs, ecx ; [esp].Ds = intnn handler
+ mov ax, word ptr [esp].TsHardwareSegSs ; (eax)= hardware SS
+ shl eax,4
+ mov dx, word ptr [esp].TsHardwareEsp ; (edx)= Hardware sp
+ sub dx, 6
+ add eax, edx ; (eax) = User stack
+ mov ecx, [esp].TsEflags
+ test ss:_KeI386VdmIoplAllowed,1
+ jnz short @f
+
+ mov edx, ss:[ebx] ; set the contents
+ and ecx, NOT EFLAGS_INTERRUPT_MASK
+ and edx,VDM_VIRTUAL_INTERRUPTS OR VDM_VIRTUAL_AC
+ or ecx, edx
+ or ecx, IOPL_MASK
+ mov word ptr ss:[eax+4], cx ; push flags
+ mov ecx, [esp].TsSegCs
+ mov edx, [esp].TsEip
+ mov word ptr ss:[eax+2], cx ; push cs
+ add dx, 2
+ MPLOCK and ss:[ebx], NOT VDM_VIRTUAL_INTERRUPTS
+ mov word ptr ss:[eax], dx ; push ip (skip int nn)
+ and [esp].TsEflags, NOT (EFLAGS_NT_MASK OR EFLAGS_TF_MASK)
+IntnnExit:
+ mov ecx, [esp].TsSegDs ; (ecx) = V86 intnn handler
+ sub word ptr [esp].TsHardwareEsp, 6
+ mov [esp].TsEip, ecx
+ shr ecx,16
+ mov [esp].TsSegCs, cx ; cs:ip on trap frame is updated
+ jmp V86Exit_1
+
+@@:
+ or ecx, IOPL_MASK
+ mov word ptr ss:[eax+4], cx ; push flags
+ mov ecx, [esp].TsSegCs
+ mov edx, [esp].TsEip
+ mov word ptr ss:[eax+2], cx ; push cs
+ add dx, 2
+ mov word ptr ss:[eax], dx ; push ip (skip int nn)
+ and [esp].TsEflags, NOT (EFLAGS_INTERRUPT_MASK OR EFLAGS_NT_MASK OR EFLAGS_TF_MASK)
+ jmp short IntnnExit
+
+ALIGN 4
+V86Iret:
+ test ss:[ebx], VDM_INTERRUPT_PENDING
+ jnz V86PassThrough
+
+ xor ecx, ecx
+ mov cx,word ptr [esp].TsHardwareSegSS
+ xor edx, edx
+ shl ecx,4
+ mov dx,word ptr [esp].TsHardwareEsp
+ add ecx,edx ; (ecx) -> User stack
+ mov dx,word ptr ss:[ecx+4] ; get flag value
+ mov ecx, ss:[ecx] ; (ecx) = ret cs:ip
+
+ and edx, NOT (EFLAGS_IOPL_MASK OR EFLAGS_NT_MASK)
+ mov eax,edx
+ or edx, (EFLAGS_V86_MASK OR EFLAGS_INTERRUPT_MASK)
+ and eax, EFLAGS_INTERRUPT_MASK
+ MPLOCK and ss:[ebx],NOT VDM_VIRTUAL_INTERRUPTS
+ MPLOCK or ss:[ebx],eax
+
+ ;; Pentium CPU traps iret if
+ ;; 1)client's TF is ON or 2) VIP is on and the client's IF is ON
+ ;; We have to propagate IF to VIF if virtual interrupt extension is
+ ;; enabled
+ test dword ptr _KeI386VirtualIntExtensions, V86_VIRTUAL_INT_EXTENSIONS
+ jz v86_iret_01
+ and edx, NOT EFLAGS_VIF ;eax must contain ONLY
+ ;INTERRUPT_MASK!!!!
+.errnz (EFLAGS_INTERRUPT_MASK SHL 10) - EFLAGS_VIF
+ rol eax, 10
+ or edx, eax
+
+v86_iret_01:
+ mov [esp].TsEFlags,edx ; update flags in trap frame
+ mov eax, ecx
+ shr ecx, 16
+ and eax, 0ffffh
+ add word ptr [esp].TsHardwareEsp, 6 ; update sp on trap frame
+ mov [esp].TsSegCs,ecx ; update cs
+ mov [esp].TsEip, eax ; update ip
+
+ ; at this point cx:ax is the addr of the ip where v86 mode
+ ; will return. Now we will check if this returning instruction
+ ; is a bop. If so, we will directly dispatch the bop from here,
+ ; saving a full round trip. This will be really helpful to
+ ; com apps.
+ifdef NT_UP
+ mov byte ptr SS:[P0PCRADDRESS][PcVdmAlert], 10h
+else
+ mov byte ptr PCR[PcVdmAlert], 010h
+endif
+ shl ecx, 4
+ mov ax, ss:[ecx+eax] ; Could fault
+ cmp ax, 0c4c4h
+ jne V86Exit_1
+
+ mov ecx, [esp].TsEcx
+ mov ebx, [esp].TsEbx
+ mov eax, [esp].TsEax
+ mov edx, [esp].TsEdx
+ add esp, TsEip
+if FAST_BOP eq 0
+ifdef NT_UP
+ mov byte ptr SS:[P0PCRADDRESS][PcVdmAlert], 0
+else
+ mov byte ptr PCR[PcVdmAlert], 0
+endif
+endif
+ jmp _KiTrap06
+
+V86Trap10Recovery:
+ jmp V86Exit_1
+
+;
+; If we come here, it means we hit some kind of trap while processing the
+; V86 trap in a fast way. We need to process the instruction in a normal
+; way. For this case, we simply abort the current processing and restart
+; it via regular v86 trap processing. (This is a very rare case.)
+;
+;; public V86TrapDRecovery
+V86TrapDRecovery:
+ mov ecx, [esp].TsEcx
+ mov ebx, [esp].TsEbx
+ mov eax, [esp].TsEax
+ mov edx, [esp].TsEdx
+ add esp, TsErrCode
+ jmp KtdV86Slow
+
+;
+; If we come here, it means we can not process the trapped instruction.
+; We will build a trap frame and use the regular way to process the instruction.
+; Since this could happen if an interrupt is pending or the trapped instruction
+; is not in the list of instructions we handle, we need to "continue"
+; the processing instead of aborting it.
+;
+
+ALIGN 4
+V86PassThrough:
+ifdef NT_UP
+ mov byte ptr SS:[P0PCRADDRESS][PcVdmAlert], 0
+else
+ mov byte ptr PCR[PcVdmAlert], 0
+endif
+ ;
+ ; eax, ebx, ecx, edx have been saved already
+ ; Note, we don't want to destroy ecx, and edx
+ ;
+ mov [esp].TsEbp, ebp
+ mov [esp].TsEsi, esi
+ mov [esp].TsEdi, edi
+ mov ebx, KGDT_R0_PCR
+ mov esi, KGDT_R3_DATA OR RPL_MASK
+if DBG
+ mov [esp].TsPreviousPreviousMode, -1
+ mov [esp]+TsDbgArgMark, 0BADB0D00h
+endif
+ mov fs, bx
+ mov ds, esi
+ mov es, esi
+ mov ebp, esp
+ cld
+ test byte ptr PCR[PcDebugActive], -1
+ jz short @f
+
+ mov ebx,dr0
+ mov esi,dr1
+ mov edi,dr2
+ mov [ebp]+TsDr0,ebx
+ mov [ebp]+TsDr1,esi
+ mov [ebp]+TsDr2,edi
+ mov ebx,dr3
+ mov esi,dr6
+ mov edi,dr7
+ mov [ebp]+TsDr3,ebx
+ mov [ebp]+TsDr6,esi
+ mov [ebp]+TsDr7,edi
+ ;
+ ; Load KernelDr* into processor
+ ;
+ mov edi,dword ptr fs:[PcPrcb]
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr0
+ mov esi,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr1
+ mov dr0,ebx
+ mov dr1,esi
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr2
+ mov esi,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr3
+ mov dr2,ebx
+ mov dr3,esi
+ mov ebx,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr6
+ mov esi,[edi].PbProcessorState.PsSpecialRegisters.SrKernelDr7
+ mov dr6,ebx
+ mov dr7,esi
+@@:
+ jmp KtdV86Slow2
+endm
+
+ page , 132
+ subttl "Macro to dispatch user APC"
+
+;++
+;
+; Macro Description:
+;
+; This macro is called before returning to user mode. It dispatches
+; any pending user mode APCs.
+;
+; Arguments:
+;
+; TFrame - TrapFrame
+; interrupts disabled
+;
+; Return Value:
+;
+;--
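+
+;
+; For illustration only: the control flow of this macro, sketched in C.
+; KfRaiseIrql, KfLowerIrql and KiDeliverApc are the routines actually called
+; below; UserApcPending is a stand-in for the ThApcState.AsUserApcPending test.
+;
+;   while (UserApcPending(KeGetCurrentThread())) {
+;       KIRQL OldIrql;
+;
+;       OldIrql = KfRaiseIrql(APC_LEVEL);       // block APC interrupts
+;       _enable();                              // sti: allow higher priority ints
+;       KiDeliverApc(UserMode, NULL, TrapFrame);
+;       KfLowerIrql(OldIrql);
+;       _disable();                             // cli: re-test with ints off
+;   }
+;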
+
+DISPATCH_USER_APC macro TFrame, ReturnCurrentEax
+local a, b
+ test dword ptr [TFrame]+TsEflags, EFLAGS_V86_MASK ; is previous mode v86?
+ jnz short b ; if nz, yes, go check for APC
+ test byte ptr [TFrame]+TsSegCs,MODE_MASK ; is previous mode user mode?
+ jz short a ; No, previousmode=Kernel, jump out
+b: mov ebx, PCR[PcPrcbData+PbCurrentThread]; get addr of current thread
+ mov byte ptr [ebx]+ThAlerted, 0 ; clear kernel mode alerted
+ cmp byte ptr [ebx]+ThApcState.AsUserApcPending, 0
+ je short a ; if eq, no user APC pending
+
+ mov ebx, TFrame
+ifnb <ReturnCurrentEax>
+ mov [ebx].TsEax, eax ; Store return code in trap frame
+ mov dword ptr [ebx]+TsSegFs, KGDT_R3_TEB OR RPL_MASK
+ mov dword ptr [ebx]+TsSegDs, KGDT_R3_DATA OR RPL_MASK
+ mov dword ptr [ebx]+TsSegEs, KGDT_R3_DATA OR RPL_MASK
+ mov dword ptr [ebx]+TsSegGs, 0
+endif
+
+;
+; Save previous IRQL and set new priority level
+;
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+
+ sti ; Allow higher priority ints
+
+;
+; call the APC delivery routine.
+;
+; ebx - Trap frame
+; 0 - Null exception frame
+; 1 - Previous mode
+;
+; call APC deliver routine
+;
+
+ stdCall _KiDeliverApc, <1, 0, ebx>
+
+ pop ecx ; (ecx) = OldIrql
+ fstCall KfLowerIrql
+
+ifnb <ReturnCurrentEax>
+ mov eax, [ebx].TsEax ; Restore eax, just in case
+endif
+
+ cli
+ jmp short b
+
+ ALIGN 4
+a:
+endm
+
+
+if DBG
+ page ,132
+ subttl "Processing Exception occurred in a 16 bit stack"
+;++
+;
+; Routine Description:
+;
+; This routine is called after an exception being detected during
+; a 16 bit stack. The system will switch 16 stack to 32 bit
+; stack and bugcheck.
+;
+; Arguments:
+;
+; None.
+;
+; Return value:
+;
+; system stopped.
+;
+;--
+
+align dword
+ public _Ki16BitStackException
+_Ki16BitStackException proc
+
+.FPO (2, 0, 0, 0, 0, FPO_TRAPFRAME)
+
+ push ss
+ push esp
+ mov eax, esp
+ add eax, fs:PcstackLimit
+ mov esp, eax
+ mov eax, KGDT_R0_DATA
+ mov ss, ax
+
+ lea ebp, [esp+8]
+ cld
+ SET_DEBUG_DATA
+
+if DBG
+ push offset FLAT:Ki16BitStackTrapMessage
+ call _dbgPrint
+ add esp, 4
+endif
+ stdCall _KeBugCheck, <0F000FFFFh> ; Never return
+ ret
+
+_Ki16BitStackException endp
+
+endif
+
+
+ page ,132
+ subttl "System Service Call"
+;++
+;
+; Routine Description:
+;
+; This routine gains control when trap occurs via vector 2EH.
+; INT 2EH is reserved for system service calls.
+;
+; The system service is executed by locating its routine address in the
+; system service dispatch table and calling the specified function.
+; On return, the necessary state is restored.
+;
+; Arguments:
+;
+; eax - System service number.
+; edx - Pointer to arguments
+;
+; Return Value:
+;
+; eax - System service status code.
+;
+;--
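+
+;
+; For illustration only: the dispatch below, sketched in C. The descriptor
+; fields (Base, Limit, Number) follow the SdBase/SdLimit/SdNumber offsets used
+; by this code; CopyArguments stands in for the "rep movsd" argument copy.
+;
+;   Index = ServiceNumber & SERVICE_NUMBER_MASK;
+;   Table = &Thread->ServiceTable[(ServiceNumber >> SERVICE_TABLE_SHIFT) &
+;                                 SERVICE_TABLE_MASK];
+;
+;   if (Index >= Table->Limit) {                // try GUI conversion, else fail
+;       return STATUS_INVALID_SYSTEM_SERVICE;
+;   }
+;
+;   CopyArguments(KernelStack, UserArguments, Table->Number[Index]);
+;   Status = ((NTSTATUS (NTAPI *)(VOID))Table->Base[Index])();
+;                                               // arguments already on the stack
+;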
+
+if 0
+;
+; Error and exception blocks for KiSystemService
+;
+
+Kss_ExceptionHandler:
+
+;
+; WARNING: Here we directly unlink the exception handler from the
+; exception registration chain. NO unwind is performed.
+;
+
+ mov eax, [esp+4] ; (eax)-> ExceptionRecord
+ mov eax, [eax].ErExceptionCode ; (eax) = Exception code
+ mov esp, [esp+8] ; (esp)-> ExceptionList
+
+ pop eax
+ mov PCR[PcExceptionList],eax
+
+ add esp, 4
+ pop ebp
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz kss60 ; v86 mode => usermode
+
+ test dword ptr [ebp].TsSegCs, MODE_MASK ; if prevmode=kernel
+ jnz kss60 ; nz, prevmode=user, go return
+
+; raise bugcheck if prevmode=kernel
+ stdCall _KeBugCheck, <KMODE_EXCEPTION_NOT_HANDLED>
+endif
+
+;
+; The specified system service number is not within range. Attempt to
+; convert the thread to a GUI thread if the specified system service is
+; not a base service and the thread has not already been converted to a
+; GUI thread.
+;
+
+Kss_ErrorHandler:
+ cmp ecx, SERVICE_TABLE_TEST ; test if GUI service
+ jne short Kss_LimitError ; if ne, not GUI service
+ push edx ; save argument registers
+ push ebx ;
+ stdcall _PsConvertToGuiThread ; attempt to convert to GUI thread
+ or eax, eax ; check if service was successful
+ pop eax ; restore argument registers
+ pop edx ;
+ mov ebp, esp ; reset trap frame address
+ mov [esi]+ThTrapFrame, ebp ; save address of trap frame
+ jz _KiSystemServiceRepeat ; if eq, successful conversion
+
+Kss_LimitError: ;
+ mov eax, STATUS_INVALID_SYSTEM_SERVICE ; set return status
+ jmp kss70 ;
+
+ ENTER_DR_ASSIST kss_a, kss_t,NoAbiosAssist,NoV86Assist
+
+;
+; General System service entrypoint
+;
+
+align 16
+ PUBLIC _KiSystemService
+_KiSystemService proc
+
+ ENTER_SYSCALL kss_a, kss_t ; set up trap frame and save state
+
+;
+; (eax) = Service number
+; (edx) = Callers stack pointer
+; (esi) = Current thread address
+;
+; All other registers have been saved and are free.
+;
+; Check if the service number within valid range
+;
+
+_KiSystemServiceRepeat:
+ mov edi, eax ; copy system service number
+ shr edi, SERVICE_TABLE_SHIFT ; isolate service table number
+ and edi, SERVICE_TABLE_MASK ;
+ mov ecx, edi ; save service table number
+ add edi, [esi]+ThServiceTable ; compute service descriptor address
+ mov ebx, eax ; save system service number
+ and eax, SERVICE_NUMBER_MASK ; isolate service table offset
+
+;
+; If the specified system service number is not within range, then attempt
+; to convert the thread to a GUI thread and retry the service dispatch.
+;
+
+ cmp eax, [edi]+SdLimit ; check if valid service
+ jae Kss_ErrorHandler ; if ae, try to convert to GUI thread
+
+;
+; If the service is a GUI service and the GDI user batch queue is not empty,
+; then call the appropriate service to flush the user batch.
+;
+
+ cmp ecx, SERVICE_TABLE_TEST ; test if GUI service
+ jne short Kss40 ; if ne, not GUI service
+ mov ecx, PCR[PcTeb] ; get current thread TEB address
+ xor ebx, ebx ; get number of batched GDI calls
+ or ebx, [ecx]+TbGdiBatchCount ;
+ jz short Kss40 ; if z, no batched calls
+ push edx ; save address of user arguments
+ push eax ; save service number
+ call [_KeGdiFlushUserBatch] ; flush GDI user batch
+ pop eax ; restore service number
+ pop edx ; restore address of user arguments
+
+;
+; The arguments are passed on the stack. Therefore they always need to get
+; copied since additional space has been allocated on the stack for the
+; machine state frame. Note that we don't check for zero arguments. The copy
+; is always done regardless of the number of arguments, because the zero
+; argument case is very rare.
+;
+
+Kss40: inc dword ptr PCR[PcPrcbData+PbSystemCalls] ; system calls
+
+if DBG
+ mov ecx, [edi]+SdCount ; get count table address
+ jecxz Kss45 ; if zero, table not specified
+ inc dword ptr [ecx+eax*4] ; increment service count
+Kss45: push dword ptr [esi]+ThApcStateIndex ; (ebp-4)
+ push dword ptr [esi]+ThKernelApcDisable ; (ebp-8)
+
+ ;
+ ; work around errata 19 which can in some cases cause an
+ ; extra dword to be moved in the rep movsd below. In the DBG
+ ; build, this will usually cause a bugcheck 1 where ebp-8 is no longer
+ ; the kernel apc disable count
+ ;
+
+ sub esp,4
+
+FPOFRAME 2, 0
+endif
+
+ mov esi, edx ; (esi)->User arguments
+ mov ebx, [edi]+SdNumber ; get argument table address
+ xor ecx, ecx
+ mov cl, byte ptr [ebx+eax] ; (ecx) = argument size
+ mov edi, [edi]+SdBase ; get service table address
+ mov ebx, [edi+eax*4] ; (ebx)-> service routine
+ sub esp, ecx ; allocate space for arguments
+ shr ecx, 2 ; (ecx) = number of argument DWORDs
+ mov edi, esp ; (es:edi)->location to receive 1st arg
+
+KiSystemServiceCopyArguments:
+ rep movsd ; copy the arguments to top of stack.
+ ; Since we usually copy more than 3
+ ; arguments, rep movsd is faster than
+ ; mov instructions.
+if DBG
+;
+; Check for user mode call into system at elevated IRQL.
+;
+
+ test byte ptr [ebp]+TsSegCs,MODE_MASK
+ jz short kss50a ; kernel mode, skip test
+ stdCall _KeGetCurrentIrql
+ or al, al ; bogus irql, go bugcheck
+ jnz kss100
+kss50a:
+endif
+
+;
+; Make actual call to system service
+;
+kssdoit:
+ call ebx ; call system service
+
+kss60:
+if DBG
+ mov ebx,PCR[PcPrcbData+PbCurrentThread] ; (ebx)-> Current Thread
+
+;
+; Check for return to user mode at elevated IRQL.
+;
+ test byte ptr [ebp]+TsSegCs,MODE_MASK
+ jz short kss50b
+ mov esi, eax
+ stdCall _KeGetCurrentIrql
+ or al, al
+ jnz kss100 ; bogus irql, go bugcheck
+ mov eax, esi
+kss50b:
+;
+; Check that APC state has not changed
+;
+ mov edx, [ebp-4]
+ cmp dl, [ebx]+ThApcStateIndex
+ jne kss120
+
+ mov edx, [ebp-8]
+ cmp dl, [ebx]+ThKernelApcDisable
+ jne kss120
+
+endif
+
+;
+; Upon return, (eax)= status code
+;
+
+ mov esp, ebp ; deallocate stack space for arguments
+
+;
+; Restore old trap frame address from the current trap frame.
+;
+
+kss70: mov ecx, PCR[PcPrcbData+PbCurrentThread] ; get current thread address
+ mov edx, [ebp].TsEdx ; restore previous trap frame address
+ mov [ecx].ThTrapFrame, edx ;
+
+;
+; System service's private version of KiExceptionExit
+; (Also used by KiDebugService)
+;
+; Check for pending APC interrupts, if found, dispatch to them
+; (saving eax in frame first).
+;
+ public _KiServiceExit
+_KiServiceExit:
+
+ cli ; disable interrupts
+ DISPATCH_USER_APC ebp, ReturnCurrentEax
+
+;
+; Exit from SystemService
+;
+ EXIT_ALL NoRestoreSegs, NoRestoreVolatile
+
+;++
+;
+; _KiServiceExit2 - same as _KiServiceExit BUT the full trap_frame
+; context is restored
+;
+;--
+ public _KiServiceExit2
+_KiServiceExit2:
+
+ cli ; disable interrupts
+ DISPATCH_USER_APC ebp
+
+;
+; Exit from SystemService
+;
+ EXIT_ALL ; RestoreAll
+
+
+
+
+if DBG
+kss100: push PCR[PcIrql] ; put bogus value on stack for dbg
+ mov byte ptr PCR[PcIrql],0 ; avoid recursive trap
+ cli
+
+ stdCall _KeBugCheck,<IRQL_GT_ZERO_AT_SYSTEM_SERVICE>
+
+kss120: stdCall _KeBugCheck,<APC_INDEX_MISMATCH>
+
+endif
+ ret
+
+_KiSystemService endp
+
+;
+; Fast path NtGetTickCount
+;
+
+align 16
+ ENTER_DR_ASSIST kitx_a, kitx_t,NoAbiosAssist
+ PUBLIC _KiGetTickCount
+_KiGetTickCount proc
+
+ cmp [esp+4], KGDT_R3_CODE OR RPL_MASK
+ jnz short @f
+
+Kgtc00:
+ mov eax,dword ptr cs:[_KeTickCount]
+ mul dword ptr cs:[_ExpTickCountMultiplier]
+ shrd eax,edx,24 ; compute resultant tick count
+
+ iretd
+@@:
+ ;
+ ; if v86 mode, we don't handle it
+ ;
+
+ test dword ptr [esp+8], EFLAGS_V86_MASK
+ jnz ktgc20
+
+ ;
+ ; if kernel mode, must be get tick count
+ ;
+
+ test [esp+4], MODE_MASK
+ jz short Kgtc00
+
+ ;
+ ; else check if the caller is USER16
+ ; if eax = ebp = 0xf0f0f0f0 it is get-tick-count
+ ; if eax = ebp = 0xf0f0f0f1 it is set-ldt-entry
+ ;
+
+ cmp eax, ebp ; if eax != ebp, not USER16
+ jne ktgc20
+
+ and eax, 0fffffff0h
+ cmp eax, 0f0f0f0f0h
+ jne ktgc20
+
+ cmp ebp, 0f0f0f0f0h ; Is it user16 gettickcount?
+ je short Kgtc00 ; if z, yes
+
+
+ cmp ebp, 0f0f0f0f1h ; If this is setldt entry
+ jne ktgc20 ; if nz, we don't know what
+ ; it is.
+
+ ;
+ ; The idea here is that user16 can call 32 bit api to
+ ; update LDT entry without going through the penalty
+ ; of DPMI. For Daytona beta.
+ ;
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kitx_a, kitx_t
+ sti
+
+ xor eax, eax
+ mov ebx, [ebp+TsEbx]
+ mov ecx, [ebp+TsEcx]
+ mov edx, [ebp+TsEdx]
+ stdCall _NtSetLdtEntries <ebx, ecx, edx, eax, eax, eax>
+ mov [ebp+TsEax], eax
+ and dword ptr [ebp+TsEflags], 0FFFFFFFEH ; clear carry flag
+ cmp eax, 0 ; success?
+ je short ktgc10
+
+ or dword ptr [ebp+TsEflags], 1 ; set carry flag
+ktgc10:
+ jmp _KiExceptionExit
+
+ktgc20:
+ ;
+ ; We need to *trap* this int 2a. For exception, the eip should
+ ; point to the int 2a instruction not the instruction after it.
+ ;
+
+ sub word ptr [esp], 2
+ push 0
+ jmp _KiTrap0D
+
+_KiGetTickCount endp
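+
+;
+; For illustration only: the conversion performed by the fast path above,
+; written in C. _ExpTickCountMultiplier is treated here as an 8.24 fixed point
+; scale factor (milliseconds per tick scaled by 2**24), so the 64-bit product
+; shifted right by 24 yields milliseconds.
+;
+;   Milliseconds = (ULONG)(((ULONGLONG)KeTickCount *
+;                           ExpTickCountMultiplier) >> 24);
+;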
+
+ page ,132
+ subttl "Return from User Mode Callback"
+;++
+;
+; NTSTATUS
+; NtCallbackReturn (
+; IN PVOID OutputBuffer OPTIONAL,
+; IN ULONG OutputLength,
+; IN NTSTATUS Status
+; )
+;
+; Routine Description:
+;
+; This function returns from a user mode callout to the kernel mode
+; caller of the user mode callback function.
+;
+; N.B. This service uses a nonstandard calling sequence.
+;
+; Arguments:
+;
+; OutputBuffer (ecx) - Supplies an optional pointer to an output buffer.
+;
+; OutputLength (edx) - Supplies the length of the output buffer.
+;
+; Status (esp + 4) - Supplies the status value returned to the caller of
+; the callback function.
+;
+; Return Value:
+;
+; If the callback return cannot be executed, then an error status is
+; returned. Otherwise, the specified callback status is returned to
+; the caller of the callback function.
+;
+; N.B. This function returns to the function that called out to user
+; mode if a callout is currently active.
+;
+;--
+
+align 16
+ PUBLIC _KiCallbackReturn
+_KiCallbackReturn proc
+
+ push fs ; save segment register
+ push ecx ; save buffer address and return status
+ push eax ;
+ mov ecx,KGDT_R0_PCR ; set PCR segment number
+ mov fs,cx ;
+ mov eax,PCR[PcPrcbData + PbCurrentThread] ; get current thread address
+ mov ecx,[eax].ThCallbackStack ; get callback stack address
+ or ecx,ecx ; check if callback active
+ jz short _KiCbExit ; if z, no callback active
+ mov edi,[esp] + 4 ; set output buffer address
+ mov esi,edx ; set output buffer length
+ mov ebp,[esp] + 0 ; set return status
+
+;
+; N.B. The following code is entered with:
+;
+; eax - The address of the current thread.
+; ecx - The callback stack address.
+; edi - The output buffer address.
+; esi - The output buffer length.
+; ebp - The callback service status.
+;
+; Restore the trap frame and callback stack addresses,
+; store the output buffer address and length, and set the service status.
+;
+
+ cld ; clear the direction flag
+ mov ebx,[ecx].CuOutBf ; get address to store output buffer
+ mov [ebx],edi ; store output buffer address
+ mov ebx,[ecx].CuOutLn ; get address to store output length
+ mov [ebx],esi ; store output buffer length
+ mov esi,PCR[PcInitialStack] ; get source NPX save area address
+ mov esp,ecx ; trim stack back to callback frame
+ pop ecx ; get previous initial stack address
+ mov [eax].ThInitialStack,ecx ; restore initial stack address
+ sub ecx,NPX_FRAME_LENGTH ; compute destination NPX save area
+ mov edx,[esi].FpControlWord ; copy NPX state to previous frame
+ mov [ecx].FpControlWord,edx ;
+ mov edx,[esi].FpStatusWord ;
+ mov [ecx].FpStatusWord,edx ;
+ mov edx,[esi].FpTagWord ;
+ mov [ecx].FpTagWord,edx ;
+ mov edx,[esi].FpCr0NpxState ;
+ mov [ecx].FpCr0NpxState,edx ;
+ mov edx,PCR[PcTss] ; get address of task switch segment
+ mov PCR[PcInitialStack],ecx ; restore stack check base address
+ sub ecx,TsV86Gs - TsHardwareSegSs ; bias for missing V86 fields
+ mov [edx].TssEsp0,ecx ; restore kernel entry stack address
+ sti ; enable interrupts
+ pop [eax].ThTrapFrame ; restore current trap frame address
+ pop [eax].ThCallbackStack ; restore callback stack address
+ mov eax,ebp ; set callback service status
+
+;
+; Restore nonvolatile registers, clean call parameters from stack, and
+; return to callback caller.
+;
+
+ pop edi ; restore nonvolatile registers
+ pop esi ;
+ pop ebx ;
+ pop ebp ;
+ pop edx ; save return address
+ add esp,8 ; remove parameters from stack
+ jmp edx ; return to callback caller
+
+;
+; Restore segment register, set system service status, and return.
+;
+
+_KiCbExit: ;
+ add esp, 2 * 4 ; remove saved registers from stack
+ pop fs ; restore segment register
+ mov eax,STATUS_NO_CALLBACK_ACTIVE ; set service status
+ iretd ;
+
+_KiCallbackReturn endp
+
+;
+; Fast path Nt/Zw SetLowWaitHighThread
+;
+
+ ENTER_DR_ASSIST kslwh_a, kslwh_t,NoAbiosAssist,NoV86Assist
+align 16
+ PUBLIC _KiSetLowWaitHighThread
+_KiSetLowWaitHighThread proc
+
+ ENTER_SYSCALL kslwh_a, kslwh_t ; Set up trap frame
+
+ mov eax,STATUS_NO_EVENT_PAIR ; set service status
+ mov edx,[ebp].TsEdx ; restore old trap frame address
+ mov [esi].ThTrapFrame,edx ;
+ cli ; disable interrupts
+
+ DISPATCH_USER_APC ebp, ReturnCurrentEax
+
+ EXIT_ALL NoRestoreSegs, NoRestoreVolatile
+
+_KiSetLowWaitHighThread endp
+
+ page ,132
+ subttl "Common Trap Exit"
+;++
+;
+; KiExceptionExit
+;
+; Routine Description:
+;
+; This code is transferred to at the end of the processing for
+; an exception. Its function is to restore machine state, and
+; continue thread execution. If control is returning to user mode
+; and there is a user APC pending, then control is transferred to
+; the user APC delivery routine.
+;
+; N.B. It is assumed that this code executes at IRQL zero or APC_LEVEL.
+; Therefore page faults and access violations can be taken.
+;
+; NOTE: This code is jumped to, not called.
+;
+; Arguments:
+;
+; (ebp) -> base of trap frame.
+;
+; Return Value:
+;
+; None.
+;
+;--
+align 4
+ public _KiExceptionExit
+_KiExceptionExit proc
+.FPO (0, 0, 0, 0, 0, FPO_TRAPFRAME)
+
+ cli ; disable interrupts
+ DISPATCH_USER_APC ebp
+
+;
+; Exit from Exception
+;
+
+ EXIT_ALL ,,NoPreviousMode
+
+_KiExceptionExit endp
+
+
+;++
+;
+; Kei386EoiHelper
+;
+; Routine Description:
+;
+; This code is transferred to at the end of an interrupt. (via the
+; exit_interrupt macro). It checks for user APC dispatching and
+; performs the exit_all for the interrupt.
+;
+; NOTE: This code is jumped to, not called.
+;
+; Arguments:
+;
+; (esp) -> base of trap frame.
+; interrupts are disabled
+;
+; Return Value:
+;
+; None.
+;
+;--
+align 4
+cPublicProc Kei386EoiHelper, 0
+.FPO (0, 0, 0, 0, 0, FPO_TRAPFRAME)
+ ASSERT_FS
+ DISPATCH_USER_APC esp
+ EXIT_ALL ,,NoPreviousMode
+stdENDP Kei386EoiHelper
+
+
+;++
+;
+; KiUnexpectedInterruptTail
+;
+; Routine Description:
+; This function is jumped to by an IDT entry that has no interrupt
+; handler.
+;
+; Arguments:
+;
+; (esp) - Dword, vector
+; (esp+4) - Processor generated IRet frame
+;
+;--
+
+ ENTER_DR_ASSIST kui_a, kui_t
+
+ public _KiUnexpectedInterruptTail
+_KiUnexpectedInterruptTail proc
+ ENTER_INTERRUPT kui_a, kui_t, PassDwordParm
+
+ inc dword ptr PCR[PcPrcbData+PbInterruptCount]
+
+ mov ebx, [esp] ; get vector & leave it on the stack
+ sub esp, 4 ; make space for OldIrql
+
+; esp - ptr to OldIrql
+; ebx - Vector
+; HIGH_LEVEL - Irql
+ stdCall _HalBeginSystemInterrupt, <HIGH_LEVEL,ebx,esp>
+ or eax, eax
+ jnz kui10
+
+;
+; spurious interrupt
+;
+ add esp, 8
+ EXIT_ALL ,,NoPreviousMode
+
+kui10:
+if DBG
+ push dword ptr [esp+4] ; Vector #
+ push offset FLAT:BadInterruptMessage
+ call _DbgPrint ; display unexpected interrupt message
+ add esp, 8
+endif
+;
+; end this interrupt
+;
+ INTERRUPT_EXIT
+
+_KiUnexpectedInterruptTail endp
+
+
+
+ page , 132
+ subttl "trap processing"
+
+;++
+;
+; Routine Description:
+;
+; _KiTrapxx - protected mode trap entry points
+;
+; These entry points are for internally generated exceptions,
+; such as a general protection fault. They do not handle
+; external hardware interrupts, or user software interrupts.
+;
+; Arguments:
+;
+; On entry the stack looks like:
+;
+; [ss]
+; [esp]
+; eflags
+; cs
+; eip
+; ss:sp-> [error]
+;
+; The cpu saves the previous SS:ESP, eflags, and CS:EIP on
+; the new stack if there was a privilege transition. If no
+; privilege level transition occurred, then there is no
+; saved SS:ESP.
+;
+; Some exceptions save an error code, others do not.
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+
+ page , 132
+ subttl "Macro to dispatch exception"
+
+;++
+;
+; Macro Description:
+;
+; This macro allocates exception record on stack, sets up exception
+; record using specified parameters and finally sets up arguments
+; and calls _KiDispatchException.
+;
+; Arguments:
+;
+; ExcepCode - Exception code to put into exception record
+; ExceptFlags - Exception flags to put into exception record
+; ExceptRecord - Associated exception record
+; ExceptAddress - Addr of the instruction at which the hardware exception occurred
+; NumParms - Number of additional parameters
+; ParameterList - the additional parameter list
+;
+; Return Value:
+;
+; None.
+;
+;--
+
+DISPATCH_EXCEPTION macro ExceptCode, ExceptFlags, ExceptRecord, ExceptAddress,\
+ NumParms, ParameterList
+ local de10, de20
+
+.FPO ( ExceptionRecordSize/4+NumParms, 0, 0, 0, 0, FPO_TRAPFRAME )
+
+; Set up exception record for raising exception
+
+?i = 0
+ sub esp, ExceptionRecordSize + NumParms * 4
+ ; allocate exception record
+ mov dword ptr [esp]+ErExceptionCode, ExceptCode
+ ; set up exception code
+ mov dword ptr [esp]+ErExceptionFlags, ExceptFlags
+ ; set exception flags
+ mov dword ptr [esp]+ErExceptionRecord, ExceptRecord
+ ; set associated exception record
+ mov dword ptr [esp]+ErExceptionAddress, ExceptAddress
+ mov dword ptr [esp]+ErNumberParameters, NumParms
+ ; set number of parameters
+ IRP z, <ParameterList>
+ mov dword ptr [esp]+(ErExceptionInformation+?i*4), z
+?i = ?i + 1
+ ENDM
+
+; set up arguments and call _KiDispatchException
+
+ mov ecx, esp ; (ecx)->exception record
+ mov eax,[ebp]+TsSegCs
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jz de10
+
+ mov eax,0FFFFh
+de10: and eax,MODE_MASK
+
+; 1 - first chance TRUE
+; eax - PreviousMode
+; ebp - trap frame addr
+; 0 - Null exception frame
+; ecx - exception record addr
+
+; dispatch the exception as appropriate
+ stdCall _KiDispatchException, <ecx, 0, ebp, eax, 1>
+
+ mov esp, ebp ; (esp) -> trap frame
+
+ ENDM
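+
+; For illustration only: a hypothetical handler raising an access violation
+; with two additional parameters could invoke the macro as
+;
+; DISPATCH_EXCEPTION STATUS_ACCESS_VIOLATION, 0, 0, ebx, 2, <edx, esi>
+;
+; where ebx holds the faulting instruction address and edx/esi hold the
+; additional exception information values.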
+
+ page , 132
+ subttl "dispatch exception"
+
+;++
+;
+; CommonDispatchException
+;
+; Routine Description:
+;
+; This routine allocates an exception record on the stack, sets up the
+; exception record using the specified parameters, and finally sets up
+; the arguments and calls _KiDispatchException.
+;
+; NOTE:
+;
+; The purpose of this routine is to save code space. Use this routine
+; only if:
+; 1. ExceptionRecord is NULL
+; 2. ExceptionFlags is 0
+; 3. Number of parameters is less than or equal to 3.
+;
+; Otherwise, you should use DISPATCH_EXCEPTION macro to set up your special
+; exception record.
+;
+; Arguments:
+;
+; (eax) = ExceptCode - Exception code to put into the exception record
+; (ebx) = ExceptAddress - Address of the instruction at which the hardware exception occurred
+; (ecx) = NumParms - Number of additional parameters
+; (edx) = Parameter1
+; (esi) = Parameter2
+; (edi) = Parameter3
+;
+; Return Value:
+;
+; None.
+;
+;--
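+; The following shorthand entry points load ecx with the additional
+; parameter count (the ...Arg0d/...Args0d variants also zero edx) and then
+; call CommonDispatchException with eax, ebx, and the parameter registers
+; already set up by the caller.
+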
+CommonDispatchException0Args:
+ xor ecx, ecx ; zero arguments
+ call CommonDispatchException
+
+CommonDispatchException1Arg0d:
+ xor edx, edx ; zero edx
+CommonDispatchException1Arg:
+ mov ecx, 1 ; one argument
+ call CommonDispatchException ; there is no return
+
+CommonDispatchException2Args0d:
+ xor edx, edx ; zero edx
+CommonDispatchException2Args:
+ mov ecx, 2 ; two arguments
+ call CommonDispatchException ; there is no return
+
+ public CommonDispatchException
+align dword
+CommonDispatchException proc
+cPublicFpo 0, ExceptionRecordLength / 4
+;
+; Set up exception record for raising exception
+;
+
+ sub esp, ExceptionRecordLength
+ ; allocate exception record
+ mov dword ptr [esp]+ErExceptionCode, eax
+ ; set up exception code
+ xor eax, eax
+ mov dword ptr [esp]+ErExceptionFlags, eax
+ ; set exception flags
+ mov dword ptr [esp]+ErExceptionRecord, eax
+ ; set associated exception record
+ mov dword ptr [esp]+ErExceptionAddress, ebx
+ mov dword ptr [esp]+ErNumberParameters, ecx
+ ; set number of parameters
+ cmp ecx, 0
+ je short de00
+
+ lea ebx, [esp + ErExceptionInformation]
+ mov [ebx], edx
+ mov [ebx+4], esi
+ mov [ebx+8], edi
+de00:
+;
+; set up arguments and call _KiDispatchException
+;
+
+ mov ecx, esp ; (ecx)->exception record
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jz short de10
+
+ mov eax,0FFFFh
+ jmp short de20
+
+de10: mov eax,[ebp]+TsSegCs
+de20: and eax,MODE_MASK
+
+; 1 - first chance TRUE
+; eax - PreviousMode
+; ebp - trap frame addr
+; 0 - Null exception frame
+; ecx - exception record addr
+
+ stdCall _KiDispatchException,<ecx, 0, ebp, eax, 1>
+
+ mov esp, ebp ; (esp) -> trap frame
+ jmp _KiExceptionExit
+
+CommonDispatchException endp
+
+ page , 132
+ subttl "Macro to verify base trap frame"
+
+;++
+;
+; Macro Description:
+;
+; This macro verifies the base trap frame is intact.
+;
+; It is possible while returning to UserMode that we take an exception.
+; Any exception which may block, such as not-present, needs to verify
+; that the base trap frame is not partially dismantled.
+;
+; Arguments:
+; The macro MUST be used directly after the ENTER_TRAP macro.
+; It assumes all sorts of stuff about ESP!
+;
+; Return Value:
+;
+; If the base frame was incomplete it is totally restored and the
+; return EIP of the current frame is (virtually) backed up to the
+; beginning of the exit_all - the effect is that the base frame
+; will be completely exited again. (i.e., the exit_all of the base
+; frame is atomic; if it's interrupted we restore it and do it over.)
+;
+; None.
+;
+;--
+
+VERIFY_BASE_TRAP_FRAME macro
+ local vbfdone
+
+ mov eax, esp
+ sub eax, PCR[PcInitialStack] ; Bias out this stack
+ add eax, KTRAP_FRAME_LENGTH ; adjust for base frame
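+ ; (eax) = esp - (PcInitialStack - KTRAP_FRAME_LENGTH), i.e. the offset
+ ; of the current esp from the bottom of the base trap frame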
+ je short vbfdone ; if eq, then this is the base frame
+
+ cmp eax, -TsEflags ; second frame is only this big
+ jc short vbfdone ; is stack deeper than 2 frames?
+ ; yes, then done
+ ;
+ ; Stack usage is not exactly one frame, and it's not large enough
+ ; to be two complete frames; therefore, we may have a partial base
+ ; frame. (unless it's a kernel thread)
+ ;
+ ; See if this is a kernel thread - Kernel threads don't have a base
+ ; frame (and therefore don't need correcting).
+ ;
+
+ mov eax, PCR[PcTeb]
+ or eax, eax ; Any Teb?
+ jle short vbfdone ; Br if zero or kernel thread address
+
+ call KiRestoreBaseFrame
+
+ align 4
+vbfdone:
+ ENDM
+
+;++ KiRestoreBaseFrame
+;
+; Routine Description:
+;
+; Only to be used from VERIFY_BASE_TRAP_FRAME macro.
+; Makes lots of assumptions about esp & trap frames
+;
+; Arguments:
+;
+; Stack:
+; +-------------------------+
+; | |
+; | |
+; | Npx save area |
+; | |
+; | |
+; +-------------------------+
+; | (possible mvdm regs) |
+; +-------------------------+ <- fs:PcInitialStack
+; | |
+; | Partial base trap frame |
+; | |
+; | ------------+
+; +------------/ | <- Esp @ time of current frame. Location
+; | | where base trap frame is incomplete
+; | Completed 'current' |
+; | trap frame |
+; | |
+; | |
+; | |
+; | |
+; +-------------------------+ <- EBP
+; | return address (dword) |
+; +-------------------------+ <- current ESP
+; | |
+; | |
+;
+; Return:
+;
+; Stack:
+; +-------------------------+
+; | |
+; | |
+; | Npx save area |
+; | |
+; | |
+; +-------------------------+
+; | (possible mvdm regs) |
+; +-------------------------+ <- fs:PcInitialStack
+; | |
+; | Base trap frame |
+; | |
+; | |
+; | |
+; | |
+; | |
+; +-------------------------+ <- return esp & ebp
+; | |
+; | Current trap frame |
+; | | EIP set to beginning of
+; | | exit_all code
+; | |
+; | |
+; | |
+; +-------------------------+ <- EBP, ESP
+; | |
+; | |
+;
+;--
+
+KiRestoreBaseFrame proc
+ pop ebx ; Get return address
+IF DBG
+ mov eax, [esp].TsEip ; EIP of trap
+ ;
+ ; This code is to handle a very specific problem of a not-present
+ ; fault during an exit_all. If it's not this problem then stop.
+ ;
+ cmp word ptr [eax], POP_GS
+ je short @f
+ cmp byte ptr [eax], POP_ES
+ je short @f
+ cmp byte ptr [eax], POP_DS
+ je short @f
+ cmp word ptr [eax], POP_FS
+ je short @f
+ cmp byte ptr [eax], IRET_OP
+ je short @f
+ int 3
+@@:
+ENDIF
+ ;
+ ; Move current trap frame out of the way to make space for
+ ; a full base trap frame
+ ;
+ mov edi, PCR[PcInitialStack]
+ sub edi, KTRAP_FRAME_LENGTH + TsEFlags + 4 ; (edi) = bottom of target
+ mov esi, esp ; (esi) = bottom of source
+ mov esp, edi ; make space before copying the data
+ mov ebp, edi ; update location of our trap frame
+ push ebx ; put return address back on stack
+
+ mov ecx, (TsEFlags+4)/4 ; # of dword to move
+ rep movsd ; Move current trap frame
+
+ ;
+ ; Part of the base frame was destroyed when the current frame was
+ ; originally pushed. Now that the current frame has been moved out of
+ ; the way, restore the base frame. We know that any missing data from
+ ; the base frame was reloaded into its corresponding registers, which
+ ; were then pushed into the current frame. So we can restore the missing
+ ; data from the current frame.
+ ;
+ mov ecx, esi ; Location of esp at time of fault
+ mov edi, PCR[PcInitialStack]
+ sub edi, KTRAP_FRAME_LENGTH ; (edi) = base trap frame
+ mov ebx, edi
+
+ sub ecx, edi ; (ecx) = # of bytes which were
+ ; removed from base frame before
+ ; trap occurred
+IF DBG
+ test ecx, 3
+ jz short @f ; assume dword alignments only
+ int 3
+@@:
+ENDIF
+ mov esi, ebp ; (esi) = current frame
+ shr ecx, 2 ; copy in dwords
+ rep movsd
+ ;
+ ; The base frame is restored. Instead of backing EIP up to the
+ ; start of the interrupted EXIT_ALL, we simply move the EIP to a
+ ; well known EXIT_ALL. However, this causes a couple of problems
+ ; since this exit_all restores every register whereas the original
+ ; one may not. So:
+ ;
+ ; - When exiting from a system call, eax is normally returned by
+ ; simply not restoring it. We 'know' that the current trap frame's
+ ; EAX is always the correct one to return. (We know this because
+ ; exit_all always restores eax (if it's going to) before any other
+ ; instruction which may cause a fault).
+ ;
+ ; - Not all of the enter macros push the PreviousPreviousMode. Since this is
+ ; the base trap frame we know that this must be UserMode.
+ ;
+ mov eax, [ebp].TsEax ; make sure correct
+ mov [ebx].TsEax, eax ; eax is in base frame
+ mov byte ptr [ebx].TsPreviousPreviousMode, 1 ; UserMode
+
+ mov [ebp].TsEbp, ebx
+ mov [ebp].TsEip, offset _KiServiceExit2 ; ExitAll which
+
+ ; restores everything
+ ;
+ ; Since we backed up Eip we need to reset some of the kernel selector
+ ; values in case they were already restored by the attempted base frame pop
+ ;
+ mov dword ptr [ebp].TsSegDs, KGDT_R3_DATA OR RPL_MASK
+ mov dword ptr [ebp].TsSegEs, KGDT_R3_DATA OR RPL_MASK
+ mov dword ptr [ebp].TsSegFs, KGDT_R0_PCR
+
+ ;
+ ; The backed up EIP is before interrupts were disabled. Re-enable
+ ; interrupts for the current trap frame
+ ;
+ or [ebp].TsEFlags, EFLAGS_INTERRUPT_MASK
+
+ ret
+
+KiRestoreBaseFrame endp
+
+ page ,132
+ subttl "Divide error processing"
+;++
+;
+; Routine Description:
+;
+; Handle divide error fault.
+;
+; The divide error fault occurs if a DIV or IDIV instruction is
+; executed with a divisor of 0, or if the quotient is too big to
+; fit in the result operand.
+;
+; An INTEGER DIVIDED BY ZERO exception will be raised for the fault.
+; If the fault occurs in kernel mode, the system will be terminated.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the faulting instruction.
+; No error code is provided with the divide error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+ ENTER_DR_ASSIST kit0_a, kit0_t,NoAbiosAssist
+align dword
+ public _KiTrap00
+_KiTrap00 proc
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kit0_a, kit0_t
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz Kt0040 ; trap occurred in V86 mode
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previous mode = USER
+ jz short Kt0000
+
+ cmp word ptr [ebp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK
+ jne Kt0020
+;
+; Set up exception record for raising Integer_Divided_by_zero exception
+; and call _KiDispatchException
+;
+
+Kt0000:
+
+if DBG
+ test [ebp]+TsEFlags, EFLAGS_INTERRUPT_MASK ; faulted with
+ jnz short @f ; interrupts disabled?
+
+ xor eax, eax
+ mov esi, [ebp]+TsEip ; [esi] = faulting instruction
+ stdCall _KeBugCheckEx,<IRQL_NOT_LESS_OR_EQUAL,eax,-1,eax,esi>
+@@:
+endif
+
+ sti
+
+
+;
+; Flat mode
+;
+; The Intel processor raises a divide-by-zero exception for a DIV
+; instruction which overflows. To be compatible with other processors we
+; want to return overflows as such and not as divide-by-zeros. The operand
+; of the DIV instruction is tested to see whether it is zero or not.
+;
+ stdCall _Ki386CheckDivideByZeroTrap,<ebp>
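+ ; (eax) = exception code to dispatch, as returned by the check above
+ ; (divide by zero vs. overflow)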
+ mov ebx, [ebp]+TsEip ; (ebx)-> faulting instruction
+ jmp CommonDispatchException0Args ; Won't return
+
+Kt0010:
+;
+; 16:16 mode
+;
+ sti
+ mov ebx, [ebp]+TsEip ; (ebx)-> faulting instruction
+ mov eax, STATUS_INTEGER_DIVIDE_BY_ZERO
+ jmp CommonDispatchException0Args ; never return
+
+Kt0020:
+; Check to see if this process is a vdm
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz Kt0010
+
+Kt0040:
+ stdCall _Ki386VdmReflectException_A, <0>
+
+ or al,al
+ jz short Kt0010 ; couldn't reflect, gen exception
+ jmp _KiExceptionExit
+
+_KiTrap00 endp
+
+
+ page ,132
+ subttl "Debug Exception"
+;++
+;
+; Routine Description:
+;
+; Handle debug exception.
+;
+; The processor triggers this exception for any of the following
+; conditions:
+;
+; 1. Instruction breakpoint fault.
+; 2. Data address breakpoint trap.
+; 3. General detect fault.
+; 4. Single-step trap.
+; 5. Task-switch breakpoint trap.
+;
+;
+; Arguments:
+;
+; At entry, the values of saved CS and EIP depend on whether the
+; exception is a fault or a trap.
+; No error code is provided with this exception.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+ ENTER_DR_ASSIST kit1_a, kit1_t, NoAbiosAssist
+align dword
+ public _KiTrap01
+_KiTrap01 proc
+
+; Set up machine state frame for displaying
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kit1_a, kit1_t
+
+;
+; If caller is user mode, we want interrupts back on.
+; . all relevant state has already been saved
+; . user mode code always runs with ints on
+;
+; If caller is kernel mode, we want them off!
+; . some state still in registers, must prevent races
+; . kernel mode code can run with ints off
+;
+;
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz kit01_30 ; fault occurred in V86 mode => Usermode
+
+ test word ptr [ebp]+TsSegCs,MODE_MASK
+ jz kit01_10
+
+ cmp word ptr [ebp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK
+ jne kit01_30
+kit01_05:
+ sti
+kit01_10:
+
+;
+; Set up exception record for raising single step exception
+; and call _KiDispatchException
+;
+
+kit01_20:
+ and dword ptr [ebp]+TsEflags, not EFLAGS_TF_BIT
+ mov ebx, [ebp]+TsEip ; (ebx)-> faulting instruction
+ mov eax, STATUS_SINGLE_STEP
+ jmp CommonDispatchException0Args ; Never return
+
+kit01_30:
+
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz kit01_05
+
+ stdCall _Ki386VdmReflectException_A, <01h>
+ test ax,0FFFFh
+ jz Kit01_20
+
+ jmp _KiExceptionExit
+
+_KiTrap01 endp
+
+ page ,132
+ subttl "Nonmaskable Interrupt"
+;++
+;
+; Routine Description:
+;
+; Handle Nonmaskable interrupt.
+;
+; An NMI is typically used to signal serious system conditions
+; such as bus time-out, memory parity error, and so on.
+;
+; Upon detection of the NMI, the system will be terminated, i.e., a
+; bugcheck will be raised, no matter what the previous mode is.
+;
+; Arguments:
+;
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+; ENTER_DR_ASSIST kit2_a, kit2_t, NoAbiosAssist
+align dword
+ public _KiTrap02
+_KiTrap02 proc
+.FPO (1, 0, 0, 0, 0, 2)
+ cli
+;
+; Update the TSS pointer in the PCR to point to the NMI TSS
+; (which is what we're running on, or else we wouldn't be here)
+;
+ push dword ptr PCR[PcTss]
+
+ mov eax, PCR[PcGdt]
+ mov ch, [eax+KGDT_NMI_TSS+KgdtBaseHi]
+ mov cl, [eax+KGDT_NMI_TSS+KgdtBaseMid]
+ shl ecx, 16
+ mov cx, [eax+KGDT_NMI_TSS+KgdtBaseLow]
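+ ; (ecx) = linear base address of the NMI TSS, assembled from the
+ ; descriptor's base fields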
+ mov PCR[PcTss], ecx
+
+;
+; Clear Nested Task bit in EFLAGS
+;
+ pushfd
+ and [esp], not 04000h
+ popfd
+
+;
+; Clear the busy bit in the TSS selector
+;
+ mov ecx, PCR[PcGdt]
+ lea eax, [ecx] + KGDT_NMI_TSS
+ mov byte ptr [eax+5], 089h ; 32bit, dpl=0, present, TSS32, not busy
+
+;
+; Let the HAL have a crack at it before we crash
+;
+ stdCall _HalHandleNMI,<0>
+
+ mov eax, offset FLAT:_ZwUnmapViewOfSection@8
+ sub eax, esp
+ cmp eax, 0a00h
+ jnc short @f
+
+ ; not on a real stack, crash
+ stdCall _KeBugCheckEx,<UNEXPECTED_KERNEL_MODE_TRAP,2,0,0,0>
+@@:
+
+;
+; We're back, therefore the Hal has dealt with the NMI. (Crashing
+; is done in the Hal for this special case.)
+;
+
+ pop dword ptr PCR[PcTss] ; restore PcTss
+
+ mov ecx, PCR[PcGdt]
+ lea eax, [ecx] + KGDT_TSS
+ mov byte ptr [eax+5], 08bh ; 32bit, dpl=0, present, TSS32, *busy*
+
+ pushfd ; Set Nested Task bit in EFLAGS
+ or [esp], 04000h ; so iretd will do a task switch
+ popfd
+
+ iretd ; Return from NMI
+ jmp short _KiTrap02 ; in case we NMI again
+
+_KiTrap02 endp
+
+ page ,132
+ subttl "DebugService Breakpoint"
+;++
+;
+; Routine Description:
+;
+; Handle INT 2d DebugService
+;
+; The trap is caused by an INT 2d. This is used instead of a
+; BREAKPOINT exception so that parameters can be passed for the
+; requested debug service. A BREAKPOINT instruction is assumed
+; to be right after the INT 2d - this allows this code to share code
+; with the breakpoint handler.
+;
+; Arguments:
+; eax - ServiceClass - which call is to be performed
+; ecx - Arg1 - generic first argument
+; edx - Arg2 - generic second argument
+;
+;--
+
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kids_a, kids_t, NoAbiosAssist
+align dword
+ public _KiDebugService
+_KiDebugService proc
+ push 0 ; push dummy error code
+ ENTER_TRAP kids_a, kids_t
+; sti ; *NEVER sti here*
+
+ inc dword ptr [ebp]+TsEip
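+ ; step the saved EIP over the breakpoint instruction assumed to
+ ; follow the int 2d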
+ mov eax, [ebp]+TsEax ; ServiceClass
+ mov ecx, [ebp]+TsEcx ; Arg1 (already loaded)
+ mov edx, [ebp]+TsEdx ; Arg2 (already loaded)
+ jmp KiTrap03DebugService
+
+_KiDebugService endp
+
+ page ,132
+ subttl "Single Byte INT3 Breakpoint"
+;++
+;
+; Routine Description:
+;
+; Handle INT 3 breakpoint.
+;
+; The trap is caused by a single byte INT 3 instruction. A
+; BREAKPOINT exception with additional parameter indicating
+; READ access is raised for this trap if previous mode is user.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the instruction immediately
+; following the INT 3 instruction.
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kit3_a, kit3_t, NoAbiosAssist
+align dword
+ public _KiTrap03
+_KiTrap03 proc
+ push 0 ; push dummy error code
+ ENTER_TRAP kit3_a, kit3_t
+
+ lock inc ds:_KiHardwareTrigger ; trip hardware analyzer
+
+ mov eax, BREAKPOINT_BREAK
+
+KiTrap03DebugService:
+;
+; If caller is user mode, we want interrupts back on.
+; . all relevant state has already been saved
+; . user mode code always runs with ints on
+;
+; If caller is kernel mode, we want them off!
+; . some state still in registers, must prevent races
+; . kernel mode code can run with ints off
+;
+;
+; Arguments:
+; eax - ServiceClass - which call is to be performed
+; ecx - Arg1 - generic first argument
+; edx - Arg2 - generic second argument
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz kit03_30 ; fault occurred in V86 mode => Usermode
+
+ test word ptr [ebp]+TsSegCs,MODE_MASK
+ jz kit03_10
+
+ cmp word ptr [ebp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK
+ jne kit03_30
+
+kit03_05:
+ sti
+kit03_10:
+
+
+;
+; Set up exception record and arguments for raising breakpoint exception
+;
+
+ mov esi, ecx ; ExceptionInfo 2
+ mov edi, edx ; ExceptionInfo 3
+ mov edx, eax ; ExceptionInfo 1
+
+ mov ebx, [ebp]+TsEip
+ dec ebx ; (ebx)-> int3 instruction
+ mov ecx, 3
+ mov eax, STATUS_BREAKPOINT
+ call CommonDispatchException ; Never return
+
+kit03_30:
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz kit03_05
+
+
+ stdCall _Ki386VdmReflectException_A, <03h>
+ test ax,0FFFFh
+ jz Kit03_10
+
+ jmp _KiExceptionExit
+
+_KiTrap03 endp
+
+ page ,132
+ subttl "Integer Overflow"
+;++
+;
+; Routine Description:
+;
+; Handle INTO overflow.
+;
+; The trap occurs when the processor encounters an INTO instruction
+; and the OF flag is set.
+;
+; An INTEGER_OVERFLOW exception will be raised for this fault.
+;
+; N.B. The i386 will not generate the fault if only the OF flag is set.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the instruction immediately
+; following the INTO instruction.
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kit4_a, kit4_t, NoAbiosAssist
+align dword
+ public _KiTrap04
+_KiTrap04 proc
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kit4_a, kit4_t
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz short Kt0430 ; in a vdm, reflect to vdm
+
+ test byte ptr [ebp]+TsSegCs,MODE_MASK
+ jz short Kt0410 ; in kernel mode, gen exception
+
+ cmp word ptr [ebp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK
+ jne short Kt0420 ; maybe in a vdm
+
+; Set up exception record and arguments for raising exception
+
+Kt0410: sti
+ mov ebx, [ebp]+TsEip ; (ebx)-> instr. after INTO
+ dec ebx ; (ebx)-> INTO
+ mov eax, STATUS_INTEGER_OVERFLOW
+ jmp CommonDispatchException0Args ; Never return
+
+Kt0430:
+ stdCall _Ki386VdmReflectException_A, <04h>
+ test al,0fh
+ jz Kt0410 ; couldn't reflect, gen exception
+ jmp _KiExceptionExit
+
+Kt0420:
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz Kt0410
+ jmp Kt0430
+
+_KiTrap04 endp
+
+ page ,132
+ subttl "Bound Check fault"
+;++
+;
+; Routine Description:
+;
+; Handle bound check fault.
+;
+; The bound check fault occurs if a BOUND instruction finds that
+; the tested value is outside the specified range.
+;
+; For bound check fault, an ARRAY BOUND EXCEEDED exception will be
+; raised.
+; A kernel mode exception causes the system to be terminated.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the faulting BOUND
+; instruction.
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+ ENTER_DR_ASSIST kit5_a, kit5_t, NoAbiosAssist
+align dword
+ public _KiTrap05
+_KiTrap05 proc
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kit5_a, kit5_t
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz Kt0530 ; fault in V86 mode
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previous mode = USER
+ jnz short Kt0500 ; if nz, previous mode = user
+ mov eax, EXCEPTION_BOUND_CHECK ; (eax) = exception type
+ jmp _KiSystemFatalException ; go terminate the system
+
+
+kt0500: cmp word ptr [ebp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK
+ jne short Kt0520 ; maybe in a vdm
+;
+; set exception record and arguments and call _KiDispatchException
+;
+Kt0510: sti
+ mov ebx, [ebp]+TsEip ; (ebx)->BOUND instruction
+ mov eax, STATUS_ARRAY_BOUNDS_EXCEEDED
+ jmp CommonDispatchException0Args ; Won't return
+
+Kt0520:
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz Kt0510
+
+Kt0530:
+
+ stdCall _Ki386VdmReflectException_A, <05h>
+ test al,0fh
+ jz Kt0510 ; couldn't reflect, gen exception
+ jmp _KiExceptionExit
+
+_KiTrap05 endp
+
+ page ,132
+ subttl "Invalid OP code"
+;++
+;
+; Routine Description:
+;
+; Handle invalid op code fault.
+;
+; The invalid opcode fault occurs if CS:EIP points to a bit pattern which
+; is not recognized as an instruction by the 386. This may happen if:
+;
+; 1. the opcode is not a valid 80386 instruction
+; 2. a register operand is specified for an instruction which requires
+; a memory operand
+; 3. the LOCK prefix is used on an instruction that cannot be locked
+;
+; If the fault occurs in USER mode:
+; an Illegal_Instruction exception will be raised.
+; If the fault occurs in KERNEL mode:
+; the system will be terminated.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the first byte of the invalid
+; instruction.
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:FLAT, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kit6_a, kit6_t, NoAbiosAssist,, kit6_v
+align dword
+ public _KiTrap06
+_KiTrap06 proc
+
+
+; sudeepb 08-Dec-1992 KiTrap06 is performance critical for VDMs,
+; while it hardly ever gets executed in native mode. So this whole
+; code is tuned for VDMs.
+
+ test dword ptr [esp]+8h,EFLAGS_V86_MASK
+ jz Kt060i
+
+if FAST_BOP
+
+ FAST_V86_TRAP_6
+endif
+
+Kt6SlowBop:
+ push 0 ; push dummy error code
+ ENTER_TRAPV86 kit6_a, kit6_v
+
+ ; Raise Irql to APC level before enabling interrupts
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+ sti
+
+ call VdmDispatchBop
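+ ; (al) nonzero if the BOP was handled by the VDM dispatcher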
+ test al,0fh
+ jnz short Kt061i
+
+ stdCall _Ki386VdmReflectException,<6>
+ test al,0fh
+ jnz Kt061i
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp Kt0635
+Kt061i:
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ cli
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jz Kt062i
+
+ EXIT_TRAPV86
+ ;
+ ; EXIT_TRAPv86 does not exit if a user mode apc has switched
+ ; the context from V86 mode to flat mode (VDM monitor context)
+ ;
+
+Kt062i:
+ jmp _KiExceptionExit
+
+Kt060i:
+ push 0 ; Push dummy error code
+ ENTER_TRAP kit6_a, kit6_t
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previous mode = USER
+ jz short Kt0635 ; if z, kernel mode - go dispatch exception
+
+;
+; UserMode. Did the fault happen in a vdm running in protected mode?
+;
+
+ cmp word ptr [ebp]+TsSegCs, KGDT_R3_CODE OR RPL_MASK
+ jz short kt0605
+
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jnz Kt0650
+
+;
+; Invalid Opcode exception could be either INVALID_LOCK_SEQUENCE or
+; ILLEGAL_INSTRUCTION.
+;
+
+kt0605: sti
+ mov eax, [ebp]+TsSegCs
+ push ds
+ mov ds, ax
+ mov esi, [ebp]+TsEip ; (ds:esi) -> address of faulting instruction
+ mov ecx, MAX_INSTRUCTION_PREFIX_LENGTH
+Kt0610:
+ lods byte ptr [esi] ; (al)= instruction byte
+ cmp al, MI_LOCK_PREFIX ; Is it a lock prefix?
+ je short Kt0640 ; Yes, raise Invalid_lock exception
+ loop short Kt0610 ; keep on looping
+
+;
+; Set up exception record for raising Illegal instruction exception
+;
+
+Kt0630: pop ds
+Kt0635: mov ebx, [ebp]+TsEip ; (ebx)-> invalid instruction
+ mov eax, STATUS_ILLEGAL_INSTRUCTION
+ jmp CommonDispatchException0Args ; Won't return
+
+;
+; Set up exception record for raising Invalid lock sequence exception
+;
+
+Kt0640: pop ds
+ mov ebx, [ebp]+TsEip ; (ebx)-> invalid instruction
+ mov eax, STATUS_INVALID_LOCK_SEQUENCE
+ jmp CommonDispatchException0Args ; Won't return
+
+Kt0650:
+
+ ; Raise Irql to APC level before enabling interrupts
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; SaveOldIrql
+ sti
+ call VdmDispatchBop
+ test al,0fh
+ jnz short Kt0660
+
+ stdCall _Ki386VdmReflectException,<6>
+ test al,0fh
+ jnz Kt0660
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp short Kt0635
+
+Kt0660:
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp _KiExceptionExit
+
+_KiTrap06 endp
+
+ page ,132
+ subttl "Coprocessor Not Available"
+;++
+;
+; Routine Description:
+;
+; Handle the Coprocessor Not Available exception.
+;
+; If we are REALLY emulating the 80387, the trap 07 vector is edited
+; to point directly at the emulator's entry point. So this code is
+; only hit when an 80387 DOES exist.
+;
+; The current thread's coprocessor state is loaded into the
+; coprocessor. If the coprocessor has a different thread's state
+; in it (UP only) it is first saved away. The thread is then continued.
+; Note: the thread's state may contain the TS bit - in this case the
+; code loops back to the top of the Trap07 handler (which is where
+; we would end up if we let the thread return to user code anyway).
+;
+; If the thread's NPX context is in the coprocessor and we hit a Trap07,
+; there is an NPX error which needs to be processed. If the trap was
+; from user mode the error is dispatched. If the trap was from kernel mode
+; the error is remembered, but we clear CR0 so the kernel code can
+; continue. We can do this because the kernel mode code will restore
+; CR0 (and set TS) to signal a delayed error for this thread.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the first byte of the faulting
+; instruction.
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kit7_a, kit7_t, NoAbiosAssist
+align dword
+ public _KiTrap07
+_KiTrap07 proc
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kit7_a, kit7_t
+Kt0700:
+ mov eax, PCR[PcPrcbData+PbCurrentThread]
+ mov ecx, PCR[PcInitialStack] ; (ecx) -> top of kernel stack
+ cli ; don't context switch
+ test dword ptr [ecx].FpCr0NpxState,CR0_EM
+ jnz Kt07140
+
+Kt0701: cmp byte ptr [eax].ThNpxState, NPX_STATE_LOADED
+ je Kt0715
+
+;
+; The trap occurred and this thread's NPX state is not loaded. Load it now
+; and resume the application. If someone else's state is in the coprocessor
+; (uniprocessor implementation only) then save it first.
+;
+
+ mov ebx, cr0
+ and ebx, NOT (CR0_MP+CR0_TS+CR0_EM)
+ mov cr0, ebx ; allow frstor (& fnsave) to work
+
+ifdef NT_UP
+Kt0702:
+ mov edx, PCR[PcPrcbData+PbNpxThread] ; Owner of NPX state
+ or edx, edx ; NULL?
+ jz Kt0704 ; Yes - skip save
+
+;
+; Due to a hardware erratum we need to be sure that the coprocessor
+; does not generate an error condition once interrupts are disabled and
+; we are trying to perform an fnsave, which could wait for the error
+; condition to be handled.
+;
+; The fix for this erratum is that we "know" the coprocessor is
+; being used by a different thread than the one which may have caused
+; the error condition. The round trip time to swap to a new thread
+; is longer than ANY floating point instruction. We therefore know
+; that any possible coprocessor error has already occurred and been
+; handled.
+;
+ mov esi,[edx].ThInitialStack
+ sub esi, NPX_FRAME_LENGTH ; Space for NPX_FRAME
+
+ fnsave [esi] ; Save threads coprocessor state
+
+ mov byte ptr [edx].ThNpxState, NPX_STATE_NOT_LOADED
+Kt0704:
+endif
+
+;
+; Load current threads coprocessor state into the coprocessor
+;
+; (eax) - CurrentThread
+; (ecx) - CurrentThreads NPX save area
+; (ebx) - CR0
+; (ebp) - trap frame
+; Interrupts disabled
+;
+
+ mov byte ptr [eax].ThNpxState, NPX_STATE_LOADED
+ mov PCR[PcPrcbData+PbNpxThread], eax ; owner of coprocessors state
+
+;
+; frstor might generate an NPX exception if there's an error image being
+; loaded. The handler will simply set the TS bit for this context and iret.
+;
+
+Kt0704a:
+ frstor [ecx] ; reload NPX context
+ sti ; Allow interrupts & context switches
+ nop ; sti needs one cycle
+
+ cmp dword ptr [ecx].FpCr0NpxState, 0
+ jz _KiExceptionExit ; nothing to set, skip CR0 reload
+
+;
+; Note: we have to get the CR0 value again to ensure that we have the
+; correct state for TS. We may have context switched since
+; the last move from CR0, and our NPX state may have been moved off
+; of the NPX.
+;
+ cli
+if DBG
+ test dword ptr [ecx].FpCr0NpxState, NOT (CR0_MP+CR0_EM+CR0_TS)
+ jnz short Kt07dbg1
+endif
+ mov ebx,CR0
+ or ebx, [ecx].FpCr0NpxState
+ mov cr0, ebx ; restore threads CR0 NPX state
+ sti
+ test ebx, CR0_TS ; Setting TS? (delayed error)
+ jz _KiExceptionExit ; No - continue
+
+ jmp Kt0700 ; Dispatch delayed exception
+if DBG
+Kt07dbg1: int 3
+Kt07dbg2: int 3
+Kt07dbg3: int 3
+ sti
+ jmp short $-2
+endif
+
+Kt0705:
+;
+; A Trap07 or Trap10 has occurred from a ring 0 ESCAPE instruction. This
+; may occur when trying to load the coprocessors state. These
+; code paths rely on Cr0NpxState to signal a delayed error (not CR0) - we
+; set CR0_TS in Cr0NpxState to get a delayed error, and make sure CR0 CR0_TS
+; is not set so the R0 ESC instruction(s) can complete.
+;
+; (ecx) - CurrentThreads NPX save area
+; (ebp) - trap frame
+; Interrupts disabled
+;
+
+if DBG
+ mov eax, cr0 ; Did we fault because some bit in CR0
+ test eax, (CR0_TS+CR0_MP+CR0_EM)
+ jnz short Kt07dbg3
+endif
+
+ or dword ptr [ecx].FpCr0NpxState, CR0_TS ; signal a delayed error
+ mov ecx, [ebp]+TsEip
+
+ cmp ecx, Kt0704a ; Is this fault on reload a thread's context?
+ jne short Kt0716 ; No, dispatch exception
+
+ add dword ptr [ebp]+TsEip, 3 ; Skip frstor ecx instruction
+ jmp _KiExceptionExit
+
+Kt0710:
+ mov eax, PCR[PcPrcbData+PbCurrentThread]
+ mov ecx, PCR[PcInitialStack] ; (ecx) -> top of kernel stack
+
+Kt0715:
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz Kt07130 ; v86 mode
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previousMode=USER?
+ jz Kt0705 ; if z, previousmode=SYSTEM
+
+ cmp word ptr [ebp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK
+ jne Kt07110
+
+;
+; We are about to dispatch a floating point exception to user mode.
+; We need to check to see if the user's NPX instruction is suppose to
+; We need to check to see if the user's NPX instruction is supposed to
+;
+; (ecx) - CurrentThreads NPX save area
+;
+
+Kt0716: stdCall _Ki386CheckDelayedNpxTrap,<ebp,ecx>
+ or al, al
+ jnz _KiExceptionExit ; Already handled
+
+ mov eax, PCR[PcPrcbData+PbCurrentThread]
+ mov ecx, PCR[PcInitialStack] ; (ecx) -> top of kernel stack
+
+Kt0720:
+;
+; Some type of coprocessor exception has occurred for the current thread.
+;
+; (eax) - CurrentThread
+; (ecx) - CurrentThreads NPX save area
+; (ebp) - TrapFrame
+; Interrupts disabled
+;
+ mov ebx, cr0
+ and ebx, NOT (CR0_MP+CR0_EM+CR0_TS)
+ mov cr0, ebx ; Clear MP+TS+EM to do fnsave & fwait
+
+;
+; Save the faulting state so we can inspect the cause of the floating
+; point fault
+;
+ fnsave [ecx]
+ fwait ; in case fnsave hasn't finished yet
+
+if DBG
+ test dword ptr [ecx].FpCr0NpxState, NOT (CR0_MP+CR0_EM+CR0_TS)
+ jnz Kt07dbg2
+endif
+ or ebx, CR0_TS
+ or ebx,[ecx]+FpCr0NpxState ; restore this threads CR0 NPX state
+ mov cr0, ebx ; set TS so next ESC access causes trap
+
+;
+; Clear the TS bit in Cr0NpxState in case it was set to trigger this trap.
+;
+ and dword ptr [ecx].FpCr0NpxState, NOT CR0_TS
+
+;
+; The state is no longer in the coprocessor. Clear ThNpxState and
+; re-enable interrupts to allow context switching.
+;
+ mov byte ptr [eax].ThNpxState, NPX_STATE_NOT_LOADED
+ mov dword ptr PCR[PcPrcbData+PbNpxThread], 0 ; No state in coprocessor
+ sti
+
+;
+; According to the floating point error priority, we test for the cause of
+; the NPX error and raise the appropriate exception.
+;
+
+ mov ebx, [ecx] + FpErrorOffset
+ movzx eax, word ptr [ecx] + FpControlWord
+ and eax, FSW_INVALID_OPERATION + FSW_DENORMAL + FSW_ZERO_DIVIDE + FSW_OVERFLOW + FSW_UNDERFLOW + FSW_PRECISION
+ not eax ; ax = mask of enabled exceptions
+ and eax, [ecx] + FpStatusWord
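+ ; (eax) = pending exception bits that are not masked in the control word
+ ; (ebx) = address of the faulting floating point instruction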
+ test eax, FSW_INVALID_OPERATION ; Is it an invalid op exception?
+ jz Kt0740 ; if z, no, go Kt0740
+ test eax, FSW_STACK_FAULT ; Is it caused by stack fault?
+ jnz short Kt0730 ; if nz, yes, go Kt0730
+
+;
+; Raise Floating reserved operand exception
+;
+
+ mov eax, STATUS_FLOAT_INVALID_OPERATION
+ jmp CommonDispatchException1Arg0d ; Won't return
+
+Kt0730:
+;
+; Raise Access Violation exception for stack overflow/underflow
+;
+
+ mov esi, [ecx] + FpDataOffset ; (esi) = operand addr
+ mov eax, STATUS_FLOAT_STACK_CHECK
+ jmp CommonDispatchException2Args0d ; Won't return
+
+
+Kt0740:
+
+; Check for floating zero divide exception
+
+ test eax, FSW_ZERO_DIVIDE ; Is it a zero divide error?
+ jz short Kt0750 ; if z, no, go Kt0750
+
+; Raise Floating divided by zero exception
+
+ mov eax, STATUS_FLOAT_DIVIDE_BY_ZERO
+ jmp CommonDispatchException1Arg0d ; Won't return
+
+Kt0750:
+
+; Check for denormal error
+
+ test eax, FSW_DENORMAL ; Is it a denormal error?
+ jz short Kt0760 ; if z, no, go Kt0760
+
+; Raise floating reserved operand exception
+
+ mov eax, STATUS_FLOAT_INVALID_OPERATION
+ jmp CommonDispatchException1Arg0d ; Won't return
+
+Kt0760:
+
+; Check for floating overflow error
+
+ test eax, FSW_OVERFLOW ; Is it an overflow error?
+ jz short Kt0770 ; if z, no, go Kt0770
+
+; Raise floating overflow exception
+
+ mov eax, STATUS_FLOAT_OVERFLOW
+ jmp CommonDispatchException1Arg0d ; Won't return
+
+Kt0770:
+
+; Check for floating underflow error
+
+ test eax, FSW_UNDERFLOW ; Is it an underflow error?
+ jz short Kt0780 ; if z, no, go Kt0780
+
+; Raise floating underflow exception
+
+ mov eax, STATUS_FLOAT_UNDERFLOW
+ jmp CommonDispatchException1Arg0d ; Won't return
+
+Kt0780:
+
+; Check for precision (IEEE inexact) error
+
+ test eax, FSW_PRECISION ; Is it a precision error
+ jz short Kt07100 ; if z, no, go Kt07100
+
+ mov eax, STATUS_FLOAT_INEXACT_RESULT
+ jmp CommonDispatchException1Arg0d ; Won't return
+
+Kt07100:
+
+; If status word does not indicate error, then something is wrong...
+
+ sti
+; stop the system
+ stdCall _KeBugCheck, <TRAP_CAUSE_UNKNOWN>
+
+Kt07110:
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz Kt0720 ; no, dispatch exception
+
+Kt07130:
+; Turn off TS
+ mov ebx,CR0
+ and ebx,NOT CR0_TS
+ mov CR0,ebx
+ and dword ptr [ecx]+FpCr0NpxState,NOT CR0_TS
+
+
+; Reflect the exception to the vdm, the VdmHandler enables interrupts
+
+ ; Raise Irql to APC level before enabling interrupts
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+ sti
+
+ stdCall _VdmDispatchIRQ13, <ebp> ; ebp - Trapframe
+ test al,0fh
+ jnz Kt07135
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp Kt0720 ; could not reflect, gen exception
+
+Kt07135:
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp _KiExceptionExit
+
+Kt07140:
+
+;
+; Ensure that this is not an NPX instruction in the kernel. (If
+; an app, such as C7, sets the EM bit after executing NPX instructions,
+; the fsave in SwapContext will catch an NPX exception.)
+;
+ cmp [ebp].TsSegCS, word ptr KGDT_R0_CODE
+ je Kt0701
+
+;
+; Check to see if it really is a VDM
+;
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz Kt07100
+
+; A vdm is emulating NPX instructions on a machine with an NPX.
+
+ stdCall _Ki386VdmReflectException_A, <07h>
+ test al,0fh
+ jnz _KiExceptionExit
+
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov eax, STATUS_ACCESS_VIOLATION
+ mov esi, -1
+ jmp CommonDispatchException2Args0d ; Won't return
+
+_KiTrap07 endp
+
+
+ page ,132
+ subttl "Double Fault"
+;++
+;
+; Routine Description:
+;
+; Handle double exception fault.
+;
+; Normally, when the processor detects an exception while trying to
+; invoke the handler for a prior exception, the two exception can be
+; invoke the handler for a prior exception, the two exceptions can be
+; serially, it signals the double-fault exception instead.
+;
+; If a double fault is detected, no matter whether the previous mode is USER
+; or kernel, a bugcheck will be raised and the system will be terminated.
+;
+; Arguments:
+;
+; An error code, which is always zero, is pushed on the stack.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+align dword
+ public _KiTrap08
+_KiTrap08 proc
+.FPO (0, 0, 0, 0, 0, 2)
+
+ cli
+;
+; Update the TSS pointer in the PCR to point to the double-fault TSS
+; (which is what we're running on, or else we wouldn't be here)
+;
+ mov eax, PCR[PcGdt]
+ mov ch, [eax+KGDT_DF_TSS+KgdtBaseHi]
+ mov cl, [eax+KGDT_DF_TSS+KgdtBaseMid]
+ shl ecx, 16
+ mov cx, [eax+KGDT_DF_TSS+KgdtBaseLow]
+ mov PCR[PcTss], ecx
+
+;
+; Clear the busy bit in the TSS selector
+;
+ mov ecx, PCR[PcGdt]
+ lea eax, [ecx] + KGDT_DF_TSS
+ mov byte ptr [eax+5], 089h ; 32bit, dpl=0, present, TSS32, not busy
+
+;
+; Clear Nested Task bit in EFLAGS
+;
+ pushfd
+ and [esp], not 04000h
+ popfd
+
+;
+; The original machine context is in original task's TSS
+;
+@@: stdCall _KeBugCheckEx,<UNEXPECTED_KERNEL_MODE_TRAP,8,0,0,0>
+ jmp short @b ; do not remove - for debugger
+
+_KiTrap08 endp
+
+ page ,132
+ subttl "Coprocessor Segment Overrun"
+;++
+;
+; Routine Description:
+;
+; Handle Coprocessor Segment Overrun exception.
+;
+; This exception only occurs on the 80286 (it's a trap 0d on the 80386),
+; so choke if we get here.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the aborted instruction.
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kit9_a, kit9_t, NoAbiosAssist
+align dword
+ public _KiTrap09
+_KiTrap09 proc
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kit9_a, kit9_t
+ sti
+
+ mov eax, EXCEPTION_NPX_OVERRUN ; (eax) = exception type
+ jmp _KiSystemFatalException ; go terminate the system
+
+_KiTrap09 endp
+
+ page ,132
+ subttl "Invalid TSS exception"
+;++
+;
+; Routine Description:
+;
+; Handle Invalid TSS fault.
+;
+; This exception occurs if a segment exception other than the
+; not-present exception is detected when loading a selector
+; from the TSS.
+;
+; If the exception is caused by the kernel, a device driver, or the
+; user incorrectly setting the NT bit in the flags while the back-link
+; selector in the TSS is invalid and an IRET instruction is executed,
+; this routine will clear the NT bit in the trap frame and restart the
+; iret instruction. For other causes of the fault, the user process
+; will be terminated if the previous mode is user and the system
+; will stop if the exception occurs in kernel mode. No exception
+; is raised.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the faulting instruction or
+; the first instruction of the task if the fault occurs as part of
+; a task switch.
+; Error code containing the segment causing the exception is provided.
+;
+; NT386 does not use TSS for context switching. So, the invalid tss
+; fault should NEVER occur. If it does, something is wrong with
+; the kernel. We simply shut down the system.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kita_a, kita_t, NoAbiosAssist
+align dword
+ public _KiTrap0A
+_KiTrap0A proc
+
+ ENTER_TRAP kita_a, kita_t
+
+ ; We cannot enable interrupts here. If we came here because DOS/WOW
+ ; executed an iret with the NT bit set, it is possible that the VDM will
+ ; swap the trap frame with its monitor context. If this happens before
+ ; we check the NT bit we will bugcheck.
+
+; sti
+
+;
+; If the trap occurred in USER mode and is caused by an iret instruction
+; with the OF bit set, we simply clear the OF bit and restart the iret.
+; Any other cause of Invalid TSS causes the system to be shut down.
+;
+
+ test dword ptr [ebp]+TsEFlags, EFLAGS_V86_MASK
+ jnz short Kt0a10
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previous mode = USER
+ jz short Kt0a20
+
+Kt0a10:
+ test dword ptr [ebp]+TsEFlags, EFLAGS_OF_BIT
+ sti
+ jz short Kt0a20
+
+ and dword ptr [ebp]+TsEFlags, NOT EFLAGS_OF_BIT
+ jmp _KiExceptionExit ; restart the instruction
+
+Kt0a20:
+ mov eax, EXCEPTION_INVALID_TSS ; (eax) = trap type
+ jmp _KiSystemFatalException ; go terminate the system
+
+_KiTrap0A endp
+
+ page ,132
+ subttl "Segment Not Present"
+;++
+;
+; Routine Description:
+;
+; Handle Segment Not Present fault.
+;
+; This exception occurs when the processor finds the P bit clear
+; when accessing an otherwise valid descriptor that is not being
+; loaded into the SS register.
+;
+; The only place the fault can occur (in kernel mode) is the trap/exception
+; exit code. Otherwise, this exception causes the system to be terminated.
+; Since NT386 uses flat mode, a segment not present fault in kernel mode
+; indicates a system malfunction.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the faulting instruction or
+; the first instruction of the task if the fault occurs as part of
+; a task switch.
+; Error code containing the segment causing the exception is provided.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kitb_a, kitb_t, NoAbiosAssist
+
+align dword
+ public _KiTrap0B
+_KiTrap0B proc
+
+; Set up machine state frame for displaying
+
+ ENTER_TRAP kitb_a, kitb_t
+
+;
+; Did the trap occur in a VDM?
+;
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previous mode = USER
+ jz Kt0b30
+
+ cmp word ptr [ebp]+TsSegCs,KGDT_R3_CODE OR RPL_MASK
+ je Kt0b20
+
+Kt0b10:
+
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz short Kt0b20
+
+ ; Raise Irql to APC level before enabling interrupts
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+ sti
+
+ stdCall _Ki386VdmSegmentNotPresent
+ test eax, 0ffffh
+ jz short Kt0b15
+
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp _KiExceptionExit
+
+Kt0b15:
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+
+Kt0b20: sti
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov esi, [ebp]+TsErrCode
+ and esi, 0FFFFh
+ or esi, RPL_MASK
+ mov eax, STATUS_ACCESS_VIOLATION
+ jmp CommonDispatchException2Args0d ; Won't return
+
+Kt0b30:
+;
+; Check if the exception is caused by a pop SegmentRegister instruction.
+; We need to deal with the case where the user puts a not-present selector
+; in fs, ds, cs or es through the kernel debugger (the kernel will trap
+; while popping segment registers in the trap exit code).
+; Note: we assume that if the faulting instruction is a pop segreg, it MUST
+; be in the trap exit code, so there MUST be a valid trap frame for the
+; trap exit.
+;
+
+ mov eax, [ebp]+TsEip ; (eax)->faulted Instruction
+ mov eax, [eax] ; (eax)= opcode of faulted instruction
+ mov edx, [ebp]+TsEbp ; (edx)->previous trap exit trapframe
+
+ add edx, TsSegDs ; [edx] = prev trapframe + TsSegDs
+ cmp al, POP_DS ; Is it pop ds instruction?
+ jz Kt0b90 ; if z, yes, go Kt0b90
+
+ add edx, TsSegEs - TsSegDs ; [edx] = prev trapframe + TsSegEs
+ cmp al, POP_ES ; Is it pop es instruction?
+ jz Kt0b90 ; if z, yes, go Kt0b90
+
+ add edx, TsSegFs - TsSegEs ; [edx] = prev trapframe + TsSegFs
+ cmp ax, POP_FS ; Is it pop fs (2-byte) instruction?
+ jz Kt0b90 ; If z, yes, go Kt0b90
+
+ add edx, TsSegGs - TsSegFs ; [edx] = prev trapframe + TsSegGs
+ cmp ax, POP_GS ; Is it pop gs (2-byte) instruction?
+ jz Kt0b90 ; If z, yes, go Kt0b90
+
+;
+; The exception is not caused by a pop instruction. We still need to check
+; if it is caused by an iret (to user mode), because the user may have a
+; not-present cs and we will trap at the iret in the trap exit code.
+;
+
+ sti
+ cmp al, IRET_OP ; Is it an iret instruction?
+ jne Kt0b199 ; if ne, not iret, go bugcheck
+
+ lea edx, [ebp]+TsHardwareEsp ; (edx)->trapped iret frame
+ test dword ptr [edx]+4, RPL_MASK ; Check CS of iret addr
+ ; Does the iret have ring transition?
+ jz Kt0b199 ; if z, it's a real fault
+
+;
+; We trapped at the iret while returning back to user mode. We will
+; dispatch the exception back to the user program.
+;
+
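+ ; Slide the trap frame up by 12 bytes, over the kernel EIP/CS/EFLAGS
+ ; pushed by this fault, so that the user iret frame beneath becomes the
+ ; EIP/CS/EFLAGS/ESP/SS portion of a user mode trap frame.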
+ mov ecx, (KTRAP_FRAME_LENGTH - 12) / 4
+ lea edx, [ebp]+TsErrCode
+Kt0b40:
+ mov eax, [edx]
+ mov [edx+12], eax
+ sub edx, 4
+ loop Kt0b40
+
+ add esp, 12 ; adjust esp and ebp
+ add ebp, 12
+ jmp Kt0b10
+
+; mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+; xor edx, edx
+; mov esi, [ebp]+TsErrCode
+; or esi, RPL_MASK
+; and esi, 0FFFFh
+; mov ecx, 2
+; mov eax, STATUS_ACCESS_VIOLATION
+; call CommonDispatchException ; WOn't return
+
+;
+; The faulting instruction is a pop of a segment register
+;
+
+Kt0b90:
+ mov dword ptr [edx], 0 ; set the segment reg to 0 such that
+ ; we will trap in user mode.
+ EXIT_ALL NoRestoreSegs,,NoPreviousMode ; RETURN
+
+Kt0b199:
+ mov eax, EXCEPTION_SEGMENT_NOT_PRESENT ; (eax) = exception type
+ jmp _KiSystemFatalException ; terminate the system
+
+_KiTrap0B endp
+
+ page ,132
+ subttl "Stack segment fault"
+;++
+;
+; Routine Description:
+;
+; Handle Stack Segment fault.
+;
+; This exception occurs when the processor detects certain problems
+; with the segment addressed by the SS segment register:
+;
+; 1. A limit violation in the segment addressed by the SS (error
+; code = 0)
+; 2. A limit violation in the inner stack during an interlevel
+; call or interrupt (error code = selector for the inner stack)
+; 3. If the descriptor to be loaded into SS has its present bit 0
+; (error code = selector for the not-present segment)
+;
+; The exception should never occur in kernel mode except when we
+; perform the iret back to user mode.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the faulting instruction or
+; the first instruction of the task if the fault occurs as part of
+; a task switch.
+; Error code (whose value depends on detected condition) is provided.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kitc_a, kitc_t, NoAbiosAssist
+align dword
+ public _KiTrap0C
+_KiTrap0C proc
+
+ ENTER_TRAP kitc_a, kitc_t
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz Kt0c30
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previous mode = USER
+ jz Kt0c10
+
+ cmp word ptr [ebp]+TsSegCs, KGDT_R3_CODE OR RPL_MASK
+ jne Kt0c20 ; maybe in a vdm
+
+Kt0c00: sti
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov edx, EXCEPT_LIMIT_ACCESS; assume it is limit violation
+ mov esi, [ebp]+TsHardwareEsp; (esi) = User Stack pointer
+ cmp word ptr [ebp]+TsErrCode, 0 ; Is errorcode = 0?
+ jz short kt0c05 ; if z, yes, go dispatch exception
+
+ mov esi, [ebp]+TsErrCode ; Otherwise, set SS segment value
+ ; to be the address causing the fault
+ mov edx, EXCEPT_UNKNOWN_ACCESS
+ or esi, RPL_MASK
+ and esi, 0FFFFh
+kt0c05: mov eax, STATUS_ACCESS_VIOLATION
+ jmp CommonDispatchException2Args ; Won't return
+
+kt0c10:
+ sti
+;
+; Check if the exception is caused by a kernel mode iret to user code.
+; We need to deal with the case where the user puts a bogus value in ss
+; through a SetContext call (the kernel will trap while doing the iret to
+; user code in the trap exit code).
+; Note: we assume that if the faulting instruction is an iret, it MUST be
+; in the trap/exception exit code, so there MUST be a valid trap frame for
+; the trap exit.
+;
+
+ mov eax, [ebp]+TsEip ; (eax)->faulted Instruction
+ mov eax, [eax] ; (eax)= opcode of faulted instruction
+
+;
+; Check if the exception is caused by an iret (to user mode), because the
+; user may have a NOT PRESENT ss and we will trap at the iret in the trap
+; exit code. (If the user put a bogus/invalid SS in the trap frame, we
+; will catch it in the trap 0D handler.)
+;
+
+ cmp al, IRET_OP ; Is it an iret instruction?
+ jne Kt0c15 ; if ne, not iret, go bugcheck
+
+ lea edx, [ebp]+TsHardwareEsp ; (edx)->trapped iret frame
+ test dword ptr [edx]+4, RPL_MASK ; Check CS of iret addr
+ ; Does the iret have ring transition?
+ jz Kt0c15 ; if z, no SS involved, it's a real fault
+
+;
+; We trapped at the iret while returning back to user mode. We will
+; dispatch the exception back to the user program.
+;
+
+ mov ecx, (KTRAP_FRAME_LENGTH - 12) / 4
+ lea edx, [ebp]+TsErrCode
+@@:
+ mov eax, [edx]
+ mov [edx+12], eax
+ sub edx, 4
+ loop @b
+
+ add esp, 12 ; adjust esp and ebp
+ add ebp, 12
+
+ ;
+ ; Now, we have user mode trap frame set up
+ ;
+
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov edx, EXCEPT_LIMIT_ACCESS; assume it is limit violation
+ mov esi, [ebp]+TsHardwareEsp; (esi) = User Stack pointer
+ cmp word ptr [ebp]+TsErrCode, 0 ; Is errorcode = 0?
+ jz short @f ; if z, yes, go dispatch exception
+
+ mov esi, [ebp]+TsErrCode ; Otherwise, set SS segment value
+ ; to be the address causing the fault
+ and esi, 0FFFFh
+ mov edx, EXCEPT_UNKNOWN_ACCESS
+ or esi, RPL_MASK
+@@:
+ mov eax, STATUS_ACCESS_VIOLATION
+ jmp CommonDispatchException2Args ; Won't return
+
+Kt0c15:
+ mov eax, EXCEPTION_STACK_FAULT ; (eax) = trap type
+ jmp _KiSystemFatalException
+
+Kt0c20:
+; Check to see if this process is a vdm
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz Kt0c00
+
+Kt0c30:
+ stdCall _Ki386VdmReflectException_A,<0ch>
+ test al,0fh
+ jz Kt0c00
+ jmp _KiExceptionExit
+
+_KiTrap0C endp
+
+
+ page ,132
+ subttl "General Protection Fault"
+;++
+;
+; Routine Description:
+;
+; Handle General protection fault.
+;
+; First, check to see if the fault occurred in kernel mode with
+; incorrect selector values. If so, this is a lazy segment load.
+; Correct the selector values and restart the instruction. Otherwise,
+; parse out various kinds of faults and report as exceptions.
+;
+; All protection violations that do not cause another exception
+; cause a general exception. If the exception indicates a violation
+; of the protection model by an application program executing a
+; privileged instruction or I/O reference, a PRIVILEGED INSTRUCTION
+; exception will be raised. All other causes of general protection
+; fault cause an ACCESS VIOLATION exception to be raised.
+;
+; If previous mode = Kernel;
+; the system will be terminated (assuming not lazy segment load)
+; Else previous mode = USER
+; the process will be terminated if the exception was not caused
+; by a privileged instruction.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the faulting instruction or
+; the first instruction of the task if the fault occurs as part of
+; a task switch.
+; Error code (whose value depends on detected condition) is provided.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:FLAT, SS:NOTHING, ES:FLAT
+
+
+;
+; Error and exception blocks for KiTrap0d
+;
+
+Ktd_ExceptionHandler:
+
+;
+; WARNING: Here we directly unlink the exception handler from the
+; exception registration chain. NO unwind is performed.
+;
+
+ mov esp, [esp+8] ; (esp)-> ExceptionList
+ pop PCR[PcExceptionList]
+ add esp, 4 ; pop out except handler
+ pop ebp ; (ebp)-> trap frame
+
+ test dword ptr [ebp].TsSegCs, MODE_MASK ; if prevmode=kernel
+ jnz Kt0d103 ; nz, prevmode=user, go return
+
+; raise bugcheck if prevmode=kernel
+ stdCall _KeBugCheck, <KMODE_EXCEPTION_NOT_HANDLED>
+
+ ENTER_DR_ASSIST kitd_a, kitd_t, NoAbiosAssist,, kitd_v
+align dword
+ public _KiTrap0D
+_KiTrap0D proc
+
+
+
+;
+; Did the trap occur in a VDM in V86 mode? Trap0d is not critical from a
+; performance point of view for native NT, but it's super critical for
+; VDMs. So here we are doing everything to make V86 mode as efficient as
+; possible.
+;
+ test dword ptr [esp]+0ch,EFLAGS_V86_MASK
+ jz Ktdi
+
+
+if FAST_V86_TRAP
+
+ FAST_V86_TRAP_D
+endif
+
+
+KtdV86Slow:
+ ENTER_TRAPV86 kitd_a, kitd_v
+
+KtdV86Slow2:
+
+ ; Raise Irql to APC level, before enabling interrupts
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+ sti
+
+ stdCall _Ki386DispatchOpcodeV86
+KtdV86Exit:
+ test al,0FFh
+ jnz short Ktdi2
+
+ stdCall _Ki386VdmReflectException,<0dh>
+ test al,0fh
+ jnz short Ktdi2
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp Kt0d105
+Ktdi2:
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ cli
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jz Ktdi3
+
+ EXIT_TRAPV86
+ ;
+ ; EXIT_TRAPv86 does not exit if a user mode apc has switched
+ ; the context from V86 mode to flat mode (VDM monitor context)
+ ;
+
+Ktdi3:
+ jmp _KiExceptionExit
+
+Ktdi:
+ ENTER_TRAP kitd_a, kitd_t
+
+;
+; DO NOT TURN INTERRUPTS ON! If we're doing a lazy segment load,
+; we could be in an ISR or other code that needs ints off!
+;
+
+;
+; Is this just a lazy segment load? First make sure the exception occurred
+; in kernel mode.
+;
+
+ test dword ptr [ebp]+TsSegCs,MODE_MASK
+ jnz Kt0d02 ; not kernel mode, go process normally
+
+;
+; Before handling kernel mode trap0d, we need to do some checks to make
+; sure the kernel mode code is the one to blame.
+;
+
+if FAST_BOP or FAST_V86_TRAP
+
+ cmp byte ptr PCR[PcVdmAlert], 0 ; See Kt0eVdmAlert
+ jne Kt0eVdmAlert
+endif
+
+;
+; Check if the exception is caused by the handler trying to examine the
+; offending instruction. If so, we raise an exception to the user mode
+; program. This occurs when the user cs is bogus. Note that if cs is valid
+; and eip is bogus, the exception will be caught by the page fault handler
+; and our Ktd_ExceptionHandler will be invoked. In both cases, the
+; exception is dispatched back to user mode.
+;
+
+ mov eax, [ebp]+TsEip
+ cmp eax, offset FLAT:Kt0d03
+ jbe short Kt0d000
+ cmp eax, offset FLAT:Kt0d60
+ jae short Kt0d000
+
+ sti
+ mov ebp, [ebp]+TsEbp ; remove the current trap frame
+ mov esp, ebp ; set ebp, esp to previous trap frame
+ jmp Kt0d105 ; and dispatch exception to user mode.
+
+;
+; Check if the exception is caused by a pop SegmentRegister instruction.
+; We need to deal with the case where the user puts a bogus value in fs, ds,
+; or es through the kernel debugger (the kernel will trap while popping
+; segment registers in the trap exit code).
+; Note: we assume that if the faulting instruction is a pop segreg, it MUST
+; be in the trap exit code, so there MUST be a valid trap frame for the
+; trap exit.
+;
+
+Kt0d000:
+ mov eax, [ebp]+TsEip ; (eax)->faulted Instruction
+ mov eax, [eax] ; (eax)= opcode of faulted instruction
+ mov edx, [ebp]+TsEbp ; (edx)->previous trap exit trapframe
+
+ add edx, TsSegDs ; [edx] = prev trapframe + TsSegDs
+ cmp al, POP_DS ; Is it pop ds instruction?
+ jz Kt0d005 ; if z, yes, go Kt0d005
+
+ add edx, TsSegEs - TsSegDs ; [edx] = prev trapframe + TsSegEs
+ cmp al, POP_ES ; Is it pop es instruction?
+ jz Kt0d005 ; if z, yes, go Kt0d005
+
+ add edx, TsSegFs - TsSegEs ; [edx] = prev trapframe + TsSegFs
+ cmp ax, POP_FS ; Is it pop fs (2-byte) instruction?
+ jz Kt0d005 ; If z, yes, go Kt0d005
+
+ add edx, TsSegGs - TsSegFs ; [edx] = prev trapframe + TsSegGs
+ cmp ax, POP_GS ; Is it pop gs (2-byte) instruction?
+ jz Kt0d005 ; If z, yes, go Kt0d005
+
+;
+; The exception was not caused by a pop instruction. We still need to check
+; whether it was caused by an iret (to user mode), because the user may have
+; a bogus ss and we will trap at the iret in the trap exit code.
+;
+
+ cmp al, IRET_OP ; Is it an iret instruction?
+ jne Kt0d002 ; if ne, not iret, go check lazy load
+
+ lea edx, [ebp]+TsHardwareEsp ; (edx)->trapped iret frame
+ mov ax, [ebp]+TsErrCode ; (ax) = Error Code
+ and ax, NOT RPL_MASK ; No need to do this ...
+ mov cx, word ptr [edx]+4 ; [cx] = cs selector
+ and cx, NOT RPL_MASK
+ cmp cx, ax ; is it faulted in CS?
+ jne short Kt0d0008 ; No
+
+;
+; Check if this is the code which we use to return to Ki386CallBios
+; (see biosa.asm):
+; cs should be KGDT_R0_CODE OR RPL_MASK
+; eip should be Ki386BiosCallReturnAddress
+; esi should be the esp of function Ki386SetUpAndExitToV86Code
+; (edx) -> trapped iret frame
+;
+
+ mov eax, OFFSET FLAT:Ki386BiosCallReturnAddress
+ cmp eax, [edx] ; [edx]= trapped eip
+ ; Is eip what we're expecting?
+ jne short Kt0d0005 ; No, continue
+
+ mov eax, [edx]+4 ; (eax) = trapped cs
+ cmp ax, KGDT_R0_CODE OR RPL_MASK ; Is cs what we're expecting?
+ jne short Kt0d0005 ; No
+
+; add edx, 5 * 4 + NPX_FRAME_LENGTH + (TsV86Gs - TsHardwareSegSs)
+; cmp [ebp]+TsEsi, edx ; esi should be the esp of
+ ; Ki386SetupAndExitToV86Code
+ mov edx, [ebp].TsEsi
+; jne short Kt0d0005
+ mov esp, edx
+ jmp Ki386BiosCallReturnAddress ; with interrupts off
+
+Kt0d0005:
+;
+; Since the CS is bogus, we cannot tell if we are going back
+; to user mode...
+;
+
+ mov ebx,PCR[PcPrcbData+PbCurrentThread] ; if previous mode is
+ test byte ptr [ebx]+ThPreviousMode, 0ffh ; kernel, we bugcheck
+ jz Kt0d02
+
+ or word ptr [edx]+4, RPL_MASK
+Kt0d0008:
+ test dword ptr [edx]+4, RPL_MASK ; Check CS of iret addr
+ ; Does the iret have ring transition?
+ jz Kt0d02 ; if z, no SS involved, it's a real fault
+
+ sti
+
+;
+; We trapped at the iret while returning to user mode. We will dispatch
+; the exception back to the user program.
+;
+
+ mov ecx, (KTRAP_FRAME_LENGTH - 12) / 4
+ lea edx, [ebp]+TsErrCode
+Kt0d001:
+ mov eax, [edx]
+ mov [edx+12], eax
+ sub edx, 4
+ loop Kt0d001
+
+ add esp, 12 ; adjust esp and ebp
+ add ebp, 12
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov esi, [ebp]+TsErrCode
+ and esi, 0FFFFh
+ mov eax, STATUS_ACCESS_VIOLATION
+ jmp CommonDispatchException2Args0d ; Won't return
+
+;
+; Kernel mode, first opcode byte is 0f, check for rdmsr or wrmsr instruction
+;
+
+Kt0d001a:
+ shr eax, 8
+ cmp al, 30h
+ je short Kt0d001b
+ cmp al, 32h
+ jne short Kt0d002a
+
+Kt0d001b:
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov eax, STATUS_ACCESS_VIOLATION
+ jmp CommonDispatchException0Args ; Won't return
+
+;
+; The exception was not caused by a pop instruction. Check to see
+; if the instruction is a rdmsr or wrmsr.
+;
+Kt0d002:
+ cmp al, 0fh
+ je short Kt0d001a
+
+;
+; We now check whether DS and ES contain the correct values. If not, this is
+; a lazy segment load and we simply set them to a valid selector.
+;
+
+Kt0d002a:
+ cmp word ptr [ebp]+TsSegDs, KGDT_R3_DATA OR RPL_MASK
+ je short Kt0d003
+
+ mov dword ptr [ebp]+TsSegDs, KGDT_R3_DATA OR RPL_MASK
+ jmp short Kt0d01
+
+Kt0d003:
+ cmp word ptr [ebp]+TsSegEs, KGDT_R3_DATA OR RPL_MASK
+ je Kt0d02 ; Real fault, go process it
+
+ mov dword ptr [ebp]+TsSegEs, KGDT_R3_DATA OR RPL_MASK
+ jmp short Kt0d01
+
+;
+; The faulting instruction is a pop seg
+;
+
+Kt0d005:
+ xor eax, eax
+ mov dword ptr [edx], eax ; set the segment reg to 0 such that
+ ; we will trap in user mode.
+Kt0d01:
+ EXIT_ALL NoRestoreSegs,,NoPreviousMode ; RETURN
+
+;
+; Caller is not kernel mode, or DS and ES are OK. Therefore this
+; is a real fault rather than a lazy segment load. Process as such.
+; Since this is not a lazy segment load, it is now safe to turn interrupts on.
+;
+Kt0d02: mov eax, EXCEPTION_GP_FAULT ; (eax) = trap type
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is prevmode=User?
+ jz _KiSystemFatalException ; If z, prevmode=kernel, stop...
+
+
+; preload pointer to process
+ mov ebx,PCR[PcPrcbData+PbCurrentThread]
+ mov ebx,[ebx]+ThApcState+AsProcess
+
+; flat or protect mode ?
+ cmp word ptr [ebp]+TsSegCs, KGDT_R3_CODE OR RPL_MASK
+ jz kt0d0201
+
+;
+; if vdm running in protected mode, handle instruction
+;
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jnz Kt0d110
+
+ sti
+ cmp word ptr [ebp]+TsErrCode, 0 ; if errcode<>0, raise access
+ ; violation exception
+ jnz Kt0d105 ; if nz, raise access violation
+ jmp short Kt0d03
+
+
+;
+; if vdm running in flat mode, handle pop fs,gs by setting to Zero
+;
+kt0d0202:
+ add dword ptr [ebp].TsEip, 2
+ jmp Kt0d005
+
+kt0d0201:
+ test byte ptr [ebx]+PrVdmFlag,0fh ; is this a vdm process?
+ jz short Kt0d03
+
+ mov eax, [ebp]+TsEip ; (eax)->faulted Instruction
+ mov eax, [eax] ; (eax)= opcode of faulted instruction
+ mov edx, ebp ; (edx)-> trap frame
+
+ add edx, TsSegFs ; [edx] = prev trapframe + TsSegFs
+ cmp ax, POP_FS ; Is it pop fs (2-byte) instruction?
+ jz short kt0d0202
+
+ add edx, TsSegGs - TsSegFs ; [edx] = prev trapframe + TsSegGs
+ cmp ax, POP_GS ; Is it pop gs (2-byte) instruction?
+ jz short kt0d0202
+
+;
+; We need to determine whether the trap0d was caused by a privileged
+; instruction. First, we need to skip all the instruction prefix bytes.
+;
+
+Kt0d03: sti
+ push ds
+
+;
+; First we need to set up an exception handler to handle the case where
+; we fault while reading the user mode instruction.
+;
+
+ push ebp ; pass trapframe to handler
+ push offset FLAT:ktd_ExceptionHandler
+ ; set up exception registration record
+ push PCR[PcExceptionList]
+ mov PCR[PcExceptionList], esp
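+
+; At this point the three pushes above have built, on the stack (sketch):
+; [esp+0] = Next (previous PcExceptionList)
+; [esp+4] = Handler (Ktd_ExceptionHandler)
+; [esp+8] = saved ebp -> trap frame
+; Ktd_ExceptionHandler recovers this record through its EstablisherFrame
+; argument and unlinks it without performing an unwind.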
+
+ mov esi, [ebp]+TsEip ; (esi) -> flat address of faulting instruction
+ mov ax, [ebp]+TsSegCs
+ mov ds, ax
+ mov ecx, MAX_INSTRUCTION_LENGTH
+Kt0d05: push ecx ; save ecx for loop count
+ lods byte ptr [esi] ; (al)= instruction byte
+ mov ecx, PREFIX_REPEAT_COUNT
+ mov edi, offset FLAT:PrefixTable ; (ES:EDI)->prefix table
+ repnz scasb ; search for matching (al)
+ pop ecx ; restore loop count
+ jnz short Kt0d10 ; (al) not a prefix byte, go kt0d10
+ loop short Kt0d05 ; go check for prefix byte again
+ pop PCR[PcExceptionList]
+ add esp, 8 ; clear stack
+ jmp Kt0630 ; exceed max instruction length,
+ ; raise ILLEGALINSTRUCTION exception
+
+;
+; (al) = first opcode which is NOT prefix byte
+; (ds:esi)= points to the first opcode which is not prefix byte + 1
+; We need to check if it is one of the privileged instructions
+;
+
+Kt0d10: cmp al, MI_HLT ; Is it a HLT instruction?
+ je Kt0d80 ; if e, yes, go kt0d80
+
+ cmp al, MI_TWO_BYTE ; Is it a two-byte instruction?
+ jne short Kt0d50 ; if ne, no, go check for IO inst.
+
+ lods byte ptr [esi] ; (al)= next instruction byte
+ cmp al, MI_LTR_LLDT ; Is it a LTR or LLDT ?
+ jne short Kt0d20 ; if ne, no, go kt0d20
+
+ lods byte ptr [esi] ; (al)= ModRM byte of instruction
+ and al, MI_MODRM_MASK ; (al)= bit 3-5 of ModRM byte
+ cmp al, MI_LLDT_MASK ; Is it a LLDT instruction?
+ je Kt0d80 ; if e, yes, go Kt0d80
+
+ cmp al, MI_LTR_MASK ; Is it a LTR instruction?
+ je Kt0d80 ; if e, yes, go Kt0d80
+
+ jmp Kt0d100 ; if ne, go raise access violation
+
+Kt0d20: cmp al, MI_LGDT_LIDT_LMSW ; Is it one of these instructions?
+ jne short Kt0d30 ; if ne, no, go check special mov inst.
+
+ lods byte ptr [esi] ; (al)= ModRM byte of instruction
+ and al, MI_MODRM_MASK ; (al)= bit 3-5 of ModRM byte
+ cmp al, MI_LGDT_MASK ; Is it a LGDT instruction?
+ je short Kt0d80 ; if e, yes, go Kt0d80
+
+ cmp al, MI_LIDT_MASK ; Is it a LIDT instruction?
+ je short Kt0d80 ; if e, yes, go Kt0d80
+
+ cmp al, MI_LMSW_MASK ; Is it a LMSW instruction?
+ je short Kt0d80 ; if e, yes, go Kt0d80
+
+ jmp Kt0d100 ; else, raise access violation except
+
+Kt0d30: and al, MI_SPECIAL_MOV_MASK ; Is it a special mov instruction?
+ jnz kt0d80 ; if nz, yes, go raise priv instr
+ ; (Even though the regular mov may
+ ; have the special_mov_mask bit set,
+ ; they are NOT 2 byte opcode instr.)
+ jmp Kt0d100 ; else, no, raise access violation
+
+;
+; Now we need to check whether the trap 0d was caused by an IO privileged instruction.
+; (al) = first opcode byte which is NOT a prefix byte
+; Also note, if we come here, the instruction has a 1 byte opcode (we still
+; need to check the REP case.)
+;
+
+Kt0d50: mov ebx, [ebp]+TsEflags ; (ebx) = client's eflags
+ and ebx, IOPL_MASK ;
+ shr ebx, IOPL_SHIFT_COUNT ; (ebx) = client's IOPL
+ mov ecx, [ebp]+TsSegCs
+ and ecx, RPL_MASK ; RPL_MASK NOT MODE_MASK!!!
+ ; (ecx) = CPL, 1/2 of computation of
+ ; whether IOPL applies.
+
+ cmp ebx,ecx ; compare IOPL with CPL of caller
+ jge short Kt0d100 ; if ge, not IO privileged,
+ ; go raise access violation
+
+Kt0d60: cmp al, CLI_OP ; Is it a CLI instruction
+ je short Kt0d80 ; if e, yes. Report it.
+
+ cmp al, STI_OP ; Is it a STI?
+ je short Kt0d80 ; if e, yes, report it.
+
+ mov ecx, IO_INSTRUCTION_TABLE_LENGTH
+ mov edi, offset FLAT:IOInstructionTable
+ repnz scasb ; Is it a IO instruction?
+ jnz short Kt0d100 ; if nz, not io instrct.
+
+;
+; We know the instruction is an IO instruction executed without IOPL. But this
+; doesn't mean this is a privileged instruction exception. We need to make sure
+; the IO port access is not granted in the TSS IO permission bit map.
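+;
+; A minimal C sketch of the bitmap test performed below (illustrative only;
+; KiIoPortPermitted is a hypothetical name, not a kernel routine):
+;
+;   BOOLEAN KiIoPortPermitted(PUCHAR TssBase, ULONG TssLimit,
+;                             ULONG IoMapBase, ULONG Port) {
+;       ULONG Offset = IoMapBase + (Port >> 3);            // byte within IO bitmap
+;       if (Offset > TssLimit) return FALSE;               // beyond TSS limit: denied
+;       return (TssBase[Offset] & (1 << (Port & 7))) == 0; // clear bit => permitted
+;   }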
+;
+
+
+ mov edi, fs:PcSelfPcr ; (edi)->Pcr
+ mov esi, [edi]+PcGdt ; (esi)->Gdt addr
+ add esi, KGDT_TSS
+ movzx ebx, word ptr [esi] ; (ebx) = Tss limit
+
+ mov edx, [ebp].TsEdx ; [edx] = port addr
+ mov ecx, edx
+ and ecx, 07 ; [ecx] = Bit position
+ shr edx, 3 ; [edx] = offset to the IoMap
+
+ mov edi, [edi]+PcTss ; (edi)->TSS
+ movzx eax, word ptr [edi + TssIoMapBase] ; [eax] = Iomap offset
+ add edx, eax
+ cmp edx, ebx ; is the offset addr beyond tss limit?
+ ja short Kt0d80 ; yes, no I/O priv.
+
+ add edi, edx ; (edi)-> byte corresponding to the port addr
+ mov edx, 1
+ shl edx, cl
+ test dword ptr [edi], edx ; Is the bit of the port disabled?
+ jz short Kt0d100 ; if z, no, then it is access violation
+
+Kt0d80:
+ pop PCR[PcExceptionList]
+ add esp, 8 ; clear stack
+ pop ds
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov eax, STATUS_PRIVILEGED_INSTRUCTION
+ jmp CommonDispatchException0Args ; Won't return
+
+;
+; NOTE All GP faults (except the ones we can
+; easily detect now) will cause an access violation exception
+; AND all access violations will be raised with the additional
+; parameters set to "read" and "virtual address which caused
+; the violation = unknown (-1)"
+;
+
+Kt0d100:
+ pop PCR[PcExceptionList]
+ add esp, 8 ; clear stack
+Kt0d103:
+ pop ds
+Kt0d105:
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov esi, -1
+ mov eax, STATUS_ACCESS_VIOLATION
+ jmp CommonDispatchException2Args0d ; Won't return
+
+Kt0d110:
+ ; Raise Irql to APC level, before enabling interrupts
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+ push eax ; Save OldIrql
+ sti
+
+ stdCall _Ki386DispatchOpcode
+ test eax,0FFFFh
+ jnz short Kt0d120
+
+ stdCall _Ki386VdmReflectException,<0dh>
+ test al,0fh
+ jnz short Kt0d120
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp short Kt0d105
+
+Kt0d120:
+ pop ecx ; (TOS) = OldIrql
+ fstCall KfLowerIrql
+ jmp _KiExceptionExit
+
+_KiTrap0D endp
+
+ page ,132
+ subttl "Page fault processing"
+;++
+;
+; Routine Description:
+;
+; Handle page fault.
+;
+; The page fault occurs if paging is enabled and any one of the following
+; conditions is true:
+;
+; 1. the page is not present
+; 2. the faulting procedure does not have sufficient privilege to
+; access the indicated page.
+;
+; For case 1, the referenced page will be loaded into memory and
+; execution continues.
+; For case 2, the registered exception handler will be invoked with the
+; appropriate error code (in most cases STATUS_ACCESS_VIOLATION).
+;
+; N.B. It is assumed that no page fault is allowed during task
+; switches.
+;
+; N.B. INTERRUPTS MUST REMAIN OFF UNTIL AFTER CR2 IS CAPTURED.
+;
+; Arguments:
+;
+; Error code left on stack.
+; CR2 contains faulting address.
+; Interrupts are turned off at entry by use of an interrupt gate.
+;
+; Return value:
+;
+; None
+;
+;--
+
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+ ENTER_DR_ASSIST kite_a, kite_t, NoAbiosAssist
+align dword
+ public _KiTrap0E
+_KiTrap0E proc
+
+ ENTER_TRAP kite_a, kite_t
+
+if FAST_V86_TRAP or FAST_BOP
+
+ cmp byte ptr PCR[PcVdmAlert], 0
+ jne Kt0eVdmAlert
+endif
+
+ VERIFY_BASE_TRAP_FRAME
+
+ mov edi,cr2
+ sti
+
+
+ test [ebp]+TsEFlags, EFLAGS_INTERRUPT_MASK ; faulted with
+ jz Kt0e12b ; interrupts disabled?
+Kt0e01:
+
+;
+; call _MmAccessFault to page in the not-present page. If the cause
+; of the fault is case 2, _MmAccessFault will return the appropriate error code.
+;
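+; In C terms, the three stores below marshal the stdcall arguments for
+; _MmAccessFault@12 (sketch only; same argument order as the stdCall form
+; used further down in this handler):
+;
+;   Status = MmAccessFault(StoreInstruction,  // 1 = write fault, 0 = read fault
+;                          VirtualAddress,    // faulting address from CR2 (edi)
+;                          PreviousMode);     // MODE_MASK bit of the saved CS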
+
+ sub esp, 12
+ mov eax,[ebp]+TsSegCs
+ and eax,MODE_MASK ; (eax) = arg3: PreviousMode
+ mov [esp+8], eax
+ mov [esp+4], edi
+ mov eax, [ebp]+TsErrCode ; (eax)= error code
+ and eax, ERR_0E_STORE ; (eax)= 0 if fault caused by read
+ ; = 2 if fault caused by write
+ shr eax, 1 ; (eax) = 0 if read fault, 1 if write fault
+ mov [esp+0], eax ; arg1: load/store indicator
+
+ call _MmAccessFault@12
+
+if DEVL
+ cmp _PsWatchEnabled,0
+ jz short xxktskip
+ mov ebx, [ebp]+TsEip
+ stdCall _PsWatchWorkingSet,<eax, ebx, edi>
+xxktskip:
+endif
+
+ or eax, eax ; successful?
+ jge Kt0e10 ; yes, go exit
+
+ mov ecx,PCR[PcGdt]
+
+ ; Form Ldt Base
+ movzx ebx,byte ptr [ecx + KGDT_LDT].KgdtBaseHi
+ shl ebx,8
+ or bl,byte ptr [ecx + KGDT_LDT].KgdtBaseMid
+ shl ebx,16
+ or bx,word ptr [ecx + KGDT_LDT].KgdtBaseLow
+ or ebx,ebx ; check for zero
+ jz short Kt0e05 ; no ldt
+
+ cmp edi,ebx
+ jb short Kt0e05 ; address not in LDT
+
+ ; Form Ldt limit
+ movzx edx,byte ptr [ecx + KGDT_LDT].KgdtLimitHi
+ and edx,000000FFh
+ shl edx,16
+ or dx,word ptr [ecx + KGDT_LDT].KgdtLimitLow
+ add ebx,edx
+ cmp edi,ebx
+ jae short Kt0e05 ; too high to be an ldt address
+
+ sldt cx ; Verify this process has an LDT
+ test ecx, 0ffffh ; Check CX
+ jz short Kt0e05 ; If z, no LDT
+
+ mov eax, [ebp]+TsErrCode ; (eax)= error code
+ and eax, ERR_0E_STORE ; (eax)= 0 if fault caused by read
+ ; = 2 if fault caused by write
+ shr eax, 1 ; (eax) = 0 if read fault, 1 if write fault
+ ; call page fault handler
+ stdCall _MmAccessFault, <eax, edi, 0>
+
+ or eax, eax ; successful?
+ jge Kt0e10 ; if z, yes, go exit
+
+ mov ebx,[ebp]+TsSegCs ; restore previous mode
+ and ebx,MODE_MASK
+ mov [esp + 8],ebx
+
+Kt0e05:
+;
+; Did the fault occur in KiSystemService while copying arguments from
+; user stack to kernel stack?
+;
+
+ mov ecx, offset FLAT:KiSystemServiceCopyArguments
+ cmp [ebp].TsEip, ecx
+ jne short @f
+
+ mov ecx, [ebp].TsEbp ; (ecx)->TrapFrame of SysService
+ test [ecx].TsSegCs, MODE_MASK
+ jz short @f ; caller of SysService is k mode, we
+ ; will let it bugcheck.
+ mov [ebp].TsEip, offset FLAT:kss60
+ mov eax, STATUS_ACCESS_VIOLATION
+ mov [ebp].TsEax, eax
+ jmp _KiExceptionExit
+@@:
+
+ mov ecx, [ebp]+TsErrCode ; (ecx) = error code
+ and ecx, ERR_0E_STORE ; (ecx) = 0 if fault caused by read
+ ; 2 if fault caused by write
+ shr ecx,1 ; (ecx) = load/store indicator
+;
+; Did the fault occur in a VDM?
+;
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz Kt0e7
+
+;
+; Did the fault occur in a VDM while running in protected mode?
+;
+
+ mov esi,PCR[PcPrcbData+PbCurrentThread]
+ mov esi,[esi]+ThApcState+AsProcess
+ test byte ptr [esi]+PrVdmFlag,0fh ; is this a vdm process?
+ jz short Kt0e9 ; z -> not vdm
+
+ test dword ptr [ebp]+TsSegCs, MODE_MASK
+ jz short kt0e8
+
+ cmp word ptr [ebp]+TsSegCs, KGDT_R3_CODE OR RPL_MASK
+ jz kt0e9 ; z -> not vdm
+
+Kt0e7: mov esi, eax
+ stdCall _VdmDispatchPageFault, <ebp,ecx,edi>
+ test al,0fh ; returns TRUE, if success
+ jnz Kt0e11 ; Exit,No need to call the debugger
+
+ mov eax, esi
+ jmp short Kt0e9
+
+Kt0e8:
+;
+; Did the fault occur in our kernel VDM support code?
+; At this point, we know:
+; . the current process is a VDM process
+; . this is an unresolvable pagefault
+; . the fault occurred in kernel mode.
+;
+
+;
+; First make sure this is not pagefault at irql > 1
+; which should be bugchecked.
+;
+
+ cmp eax, STATUS_IN_PAGE_ERROR or 10000000h
+ je Kt0e12
+
+ cmp word ptr [ebp]+TsSegCs, KGDT_R0_CODE ; Did fault occur in kernel?
+ jnz short Kt0e9 ; if nz, no, not ours
+
+ cmp PCR[PcExceptionList], EXCEPTION_CHAIN_END
+ jnz short Kt0e9 ; there is at least one handler to
+ ; handle this exception
+
+ mov ebp, PCR[PcInitialStack]
+ xor ecx, ecx ; set to fault-at-read
+ sub ebp, KTRAP_FRAME_LENGTH
+ mov esp, ebp ; clear stack (ebp)=(esp)->User trap frame
+ mov esi, [ebp]+TsEip ; (esi)-> faulting instruction
+ jmp Kt0e9b ; go dispatch the exception to user mode
+
+Kt0e9:
+; Set up exception record and arguments and call _KiDispatchException
+ mov esi, [ebp]+TsEip ; (esi)-> faulting instruction
+
+ cmp eax, STATUS_ACCESS_VIOLATION ; dispatch access violation or
+ je short Kt0e9b ; or in_page_error?
+
+ cmp eax, STATUS_GUARD_PAGE_VIOLATION
+ je short Kt0e9b
+
+ cmp eax, STATUS_STACK_OVERFLOW
+ je short Kt0e9b
+
+
+;
+; Test to see if davec's reserved status code bit is set. If so, bugcheck 0xA.
+;
+
+ cmp eax, STATUS_IN_PAGE_ERROR or 10000000h
+ je Kt0e12 ; bugchecka
+
+;
+; (ecx) = ExceptionInfo 1
+; (edi) = ExceptionInfo 2
+; (eax) = ExceptionInfo 3
+; (esi) -> Exception Addr
+;
+
+ mov edx, ecx
+ mov ebx, esi
+ mov esi, edi
+ mov ecx, 3
+ mov edi, eax
+ mov eax, STATUS_IN_PAGE_ERROR
+ call CommonDispatchException ; Won't return
+
+Kt0e9b:
+ mov ebx, esi
+ mov edx, ecx
+ mov esi, edi
+ jmp CommonDispatchException2Args ; Won't return
+
+.FPO ( 0, 0, 0, 0, 0, FPO_TRAPFRAME )
+
+Kt0e10:
+
+if DEVL
+ mov esp,ebp ; (esp) -> trap frame
+ test _KdpOweBreakpoint, 1 ; do we have any owed breakpoints?
+ jz _KiExceptionExit ; No, all done
+
+ stdCall _KdSetOwedBreakpoints ; notify the debugger
+endif
+
+Kt0e11: mov esp,ebp ; (esp) -> trap frame
+ jmp _KiExceptionExit ; join common code
+
+
+Kt0e12:
+ stdCall _KeGetCurrentIrql ; (eax) = OldIrql
+Kt0e12a:
+ lock inc ds:_KiHardwareTrigger ; trip hardware analyzer
+
+;
+; bugcheck a, addr, irql, load/store, pc
+;
+ mov ecx, [ebp]+TsErrCode ; (ecx)= error code
+ and ecx, ERR_0E_STORE ; (ecx)= 0 if fault caused by read
+ shr ecx, 1 ; (ecx) = 0 if read fault, 1 if write fault
+
+ mov esi, [ebp]+TsEip ; [esi] = faulting instruction
+
+ stdCall _KeBugCheckEx,<IRQL_NOT_LESS_OR_EQUAL,edi,eax,ecx,esi>
+
+Kt0e12b:
+ ; In V86 mode with iopl allowed it is OK to handle
+ ; a page fault with interrupts disabled
+
+ test dword ptr [ebp]+TsEFlags, EFLAGS_V86_MASK
+ jz short Kt0e12c
+
+ cmp _KeI386VdmIoplAllowed, 0
+ jnz Kt0e01
+
+Kt0e12c:
+ cmp _KiFreezeFlag,0 ; during boot we can take
+ jnz Kt0e01 ; 'transition faults' on the
+ ; debugger before it's been locked
+
+ cmp _KiBugCheckData, 0 ; If crashed, handle trap in
+ jnz Kt0e01 ; normal manner
+
+
+ mov eax, 0ffh ; OldIrql = -1
+ jmp short Kt0e12a
+
+if FAST_BOP or FAST_V86_TRAP
+
+Kt0eVdmAlert:
+ ; If a page fault occurred while we are in VDM alert mode (processing a
+ ; v86 trap without building a trap frame), we will restore all the
+ ; registers and return to its recovery routine, which is stored in
+ ; the TsSegGs of the original trap frame.
+ ;
+ mov eax, PCR[PcVdmAlert]
+ mov byte ptr PCR[PcVdmAlert], 0
+
+IF FAST_V86_TRAP
+ cmp al, 0Dh
+ jne short @f
+
+ mov eax, offset FLAT:V86TrapDRecovery
+ jmp short KtvaExit
+
+@@:
+ cmp al, 10h
+ jne short @f
+
+ mov eax, offset FLAT:V86Trap10Recovery
+ jmp short KtvaExit
+
+@@:
+ENDIF ; FAST_V86_TRAP
+
+IF FAST_BOP
+ cmp al, 6
+ jne short @f
+
+ mov eax, offset FLAT:V86Trap6Recovery
+ jmp short KtvaExit
+
+@@:
+ENDIF ; FAST_BOP
+
+ mov eax, [ebp].TsEip
+KtvaExit:
+ mov [ebp].TsEip, eax
+ mov esp,ebp ; (esp) -> trap frame
+ jmp _KiExceptionExit ; join common code
+
+if DBG
+@@: int 3
+endif
+ENDIF ; FAST_BOP OR FAST_V86_TRAP
+_KiTrap0E endp
+
+ page ,132
+ subttl "Trap0F -- Intel Reserved"
+;++
+;
+; Routine Description:
+;
+; The trap 0F should never occur. If, however, the exception occurs in
+; USER mode, the current process will be terminated. If the exception
+; occurs in KERNEL mode, a bugcheck will be raised. NO registered
+; handler, if any, will be invoked to handle the exception.
+;
+; Arguments:
+;
+; None
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kitf_a, kitf_t, NoAbiosAssist
+align dword
+ public _KiTrap0F
+_KiTrap0F proc
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kitf_a, kitf_t
+ sti
+
+ mov eax, EXCEPTION_RESERVED_TRAP ; (eax) = trap type
+ jmp _KiSystemFatalException ; go terminate the system
+
+_KiTrap0F endp
+
+
+ page ,132
+ subttl "Coprocessor Error"
+
+;++
+;
+; Routine Description:
+;
+; Handle Coprocessor Error.
+;
+; This exception is used on the 486 or above only. The i386 uses
+; IRQ 13 instead.
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the aborted instruction.
+; No error code is provided with the error.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kit10_a, kit10_t, NoAbiosAssist
+
+align dword
+ public _KiTrap10
+_KiTrap10 proc
+
+ push 0 ; push dummy error code
+ ENTER_TRAP kit10_a, kit10_t
+
+ mov eax, PCR[PcPrcbData+PbNpxThread] ; Correct context for fault?
+ cmp eax, PCR[PcPrcbData+PbCurrentThread]
+ je Kt0710 ; Yes - go try to dispatch it
+
+;
+; We are in the wrong NPX context and cannot dispatch the exception right now.
+; Set up the target thread for a delayed exception.
+;
+; Note: we don't think this is a possible case, but just to be safe...
+;
+ mov eax, [eax].ThInitialStack
+ sub eax, NPX_FRAME_LENGTH ; Space for NPX_FRAME
+ or dword ptr [eax].FpCr0NpxState, CR0_TS ; Set for delayed error
+
+ jmp _KiExceptionExit
+
+_KiTrap10 endp
+
+ page ,132
+ subttl "Alignment fault"
+;++
+;
+; Routine Description:
+;
+; Handle alignment faults.
+;
+; This exception occurs when an unaligned data access is made by a thread
+; with alignment checking turned on.
+;
+; This exception will only occur on 486 machines. The 386 will not do
+; any alignment checking. Only threads which have the appropriate bit
+; set in EFLAGS will generate alignment faults.
+;
+; The exception will never occur in kernel mode. (hardware limitation)
+;
+; Arguments:
+;
+; At entry, the saved CS:EIP point to the faulting instruction.
+; Error code is provided.
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:NOTHING, SS:NOTHING, ES:NOTHING
+
+ ENTER_DR_ASSIST kit11_a, kit11_t, NoAbiosAssist
+align dword
+ public _KiTrap11
+_KiTrap11 proc
+
+ ENTER_TRAP kit11_a, kit11_t
+ sti
+
+ test dword ptr [ebp]+TsEFlags,EFLAGS_V86_MASK
+ jnz Kt11_01 ; v86 mode => usermode
+
+ test byte ptr [ebp]+TsSegCs, MODE_MASK ; Is previous mode = USER
+ jz Kt11_10
+
+;
+; Check to make sure that the AutoAlignment state of this thread is FALSE.
+; If not, this fault occurred because the thread messed with its own EFLAGS.
+; In order to "fixup" this fault, we just clear the ALIGN_CHECK bit in the
+; EFLAGS and restart the instruction. Exceptions will only be generated if
+; AutoAlignment is FALSE.
+;
+Kt11_01:
+ mov ebx,PCR[PcPrcbData+PbCurrentThread] ; (ebx)-> Current Thread
+ test byte ptr [ebx].ThAutoAlignment, -1
+ jz kt11_00
+;
+; This fault was generated even though the thread had AutoAlignment set to
+; TRUE. So we fix it up by setting the correct state in his EFLAGS and
+; restarting the instruction.
+;
+ and dword ptr [ebp]+TsEflags, NOT EFLAGS_ALIGN_CHECK
+ jmp _KiExceptionExit
+
+kt11_00:
+ mov ebx, [ebp]+TsEip ; (ebx)->faulting instruction
+ mov edx, EXCEPT_LIMIT_ACCESS; assume it is limit violation
+ mov esi, [ebp]+TsHardwareEsp; (esi) = User Stack pointer
+ cmp word ptr [ebp]+TsErrCode, 0 ; Is errorcode = 0?
+ jz short kt11_05 ; if z, yes, go dispatch exception
+
+ mov edx, EXCEPT_UNKNOWN_ACCESS
+kt11_05:
+ mov eax, STATUS_DATATYPE_MISALIGNMENT
+ jmp CommonDispatchException2Args ; Won't return
+
+kt11_10:
+;
+; We should never be here, since the 486 will not generate alignment faults
+; in kernel mode.
+;
+ mov eax, EXCEPTION_ALIGNMENT_CHECK ; (eax) = trap type
+ jmp _KiSystemFatalException
+
+_KiTrap11 endp
+
+ page ,132
+ subttl "Coprocessor Error Handler"
+;++
+;
+; Routine Description:
+;
+; When the 80387 detects an error, it raises its error line. This
+; was supposed to be routed directly to the 386 to cause a trap 16
+; (which would actually occur when the 386 encountered the next FP
+; instruction).
+;
+; However, the ISA design routes the error line to IRQ13 on the
+; slave 8259. So an interrupt will be generated whenever the 387
+; discovers an error. Unfortunately, we could be executing system
+; code at the time, in which case we can't dispatch the exception.
+;
+; So we force emulation of the intended behaviour. This interrupt
+; handler merely sets TS and Cr0NpxState TS and dismisses the interrupt.
+; Then, on the next user FP instruction, a trap 07 will be generated, and
+; the exception can be dispatched then.
+;
+; Note that we don't have to clear the FP exception here,
+; since that will be done in the trap 07 handler. The 386 will
+; generate the trap 07 before the 387 gets a chance to raise another
+; error interrupt. We'll want to save the 387 state in the trap 07
+; handler WITH the error information.
+;
+; Note the caller must clear the 387 error latch. (this is done in
+; the hal).
+;
+; Arguments:
+;
+; None
+;
+; Return value:
+;
+; None
+;
+;--
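+
+; The net effect of this handler, in C-like terms (minimal sketch;
+; ReadCr0/WriteCr0 and NpxThreadSaveArea are illustrative names standing in
+; for the register sequence below):
+;
+;   NpxThreadSaveArea->Cr0NpxState |= CR0_TS;  // flag the delayed error
+;   WriteCr0(ReadCr0() | CR0_TS);              // next FP instruction takes trap 07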
+ ASSUME DS:FLAT, SS:NOTHING, ES:NOTHING
+
+align dword
+cPublicProc _KiCoprocessorError ,0
+
+;
+; Set TS in Cr0NpxState - the next time this thread runs an ESC
+; instruction the error will be dispatched. We also need to set TS
+; in CR0 in case the owner of the NPX is currently running.
+;
+; Bit must be set in FpCr0NpxState before CR0.
+;
+ mov eax, PCR[PcPrcbData+PbNpxThread]
+ mov eax, [eax].ThInitialStack
+ sub eax, NPX_FRAME_LENGTH ; Space for NPX_FRAME
+ or dword ptr [eax].FpCr0NpxState, CR0_TS
+
+ mov eax, cr0
+ or eax, CR0_TS
+ mov cr0, eax
+ stdRET _KiCoprocessorError
+
+stdENDP _KiCoprocessorError
+
+
+;++
+;
+; VOID
+; KiFlushNPXState (
+; VOID
+; )
+;
+; Routine Description:
+;
+; When a thread's NPX context is requested (most likely by a debugger)
+; this function is called to flush the thread's NPX context out of the
+; coprocessor if required.
+;
+; Note: the kernel debugger does not invoke this function (which
+; is good, since it may have interrupts off).
+;
+; Arguments:
+;
+; None
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:FLAT, SS:NOTHING, ES:NOTHING
+align dword
+cPublicProc _KiFlushNPXState ,0
+
+ mov eax, PCR[PcPrcbData+PbCurrentThread]
+ cmp byte ptr [eax].ThNpxState, NPX_STATE_LOADED
+ jne short fnpx90
+
+ pushfd
+ mov ecx, PCR[PcInitialStack] ; (ecx) -> top of kernel stack
+
+; this was only needed for 386/387 support
+; fwait ; Get any errors now
+
+ cli ; don't context switch
+ cmp byte ptr [eax].ThNpxState, NPX_STATE_LOADED
+ jne short fnpx70 ; still in coprocessor?
+
+ fnsave [ecx] ; NPX state to save area
+ fwait ; Make sure data is in save area
+ mov byte ptr [eax].ThNpxState, NPX_STATE_NOT_LOADED
+ mov dword ptr PCR[PcPrcbData+PbNpxThread], 0 ; clear owner
+ mov eax, cr0
+ or eax, CR0_TS
+if DBG
+ test dword ptr [ecx].FpCr0NpxState, NOT (CR0_MP+CR0_EM+CR0_TS)
+ jz @f
+ int 3
+@@:
+endif
+ or eax, [ecx].FpCr0NpxState
+ mov cr0, eax
+
+fnpx70: popfd ; enable interrupts
+fnpx90: stdRET _KiFlushNPXState
+
+stdENDP _KiFlushNPXState
+
+
+;++
+;
+; VOID
+; KiSetHardwareTrigger (
+; VOID
+; )
+;
+; Routine Description:
+;
+; This function sets KiHardwareTrigger such that an analyzer can sniff
+; for this access. It needs to occur with a lock cycle such that
+; the processor won't speculatively read this value. Interlocked
+; functions can't be used since in a UP build they do not use a
+; lock prefix.
+;
+; Arguments:
+;
+; None
+;
+; Return value:
+;
+; None
+;
+;--
+ ASSUME DS:FLAT, SS:NOTHING, ES:NOTHING
+cPublicProc _KiSetHardwareTrigger,0
+ lock inc ds:_KiHardwareTrigger ; trip hardware analyzer
+ stdRet _KiSetHardwareTrigger
+stdENDP _KiSetHardwareTrigger
+
+
+ page ,132
+ subttl "Processing System Fatal Exceptions"
+;++
+;
+; Routine Description:
+;
+; This routine processes the system fatal exceptions.
+; The machine state and trap type will be displayed and
+; the system will be stopped.
+;
+; Arguments:
+;
+; (eax) = Trap type
+; (ebp) -> machine state frame
+;
+; Return value:
+;
+; system stopped.
+;
+;--
+ assume ds:nothing, es:nothing, ss:nothing, fs:nothing, gs:nothing
+
+align dword
+ public _KiSystemFatalException
+_KiSystemFatalException proc
+.FPO (0, 0, 0, 0, 0, FPO_TRAPFRAME)
+
+ stdCall _KeBugCheckEx,<UNEXPECTED_KERNEL_MODE_TRAP, eax, 0, 0, 0>
+ ret
+
+_KiSystemFatalException endp
+
+ page
+ subttl "Continue Execution System Service"
+;++
+;
+; NTSTATUS
+; NtContinue (
+; IN PCONTEXT ContextRecord,
+; IN BOOLEAN TestAlert
+; )
+;
+; Routine Description:
+;
+; This routine is called as a system service to continue execution after
+; an exception has occurred. Its function is to transfer information from
+; the specified context record into the trap frame that was built when the
+; system service was executed, and then exit the system as if an exception
+; had occurred.
+;
+; WARNING - Do not call this routine directly, always call it as
+; ZwContinue!!! This is required because it needs the
+; trapframe built by KiSystemService.
+;
+; Arguments:
+;
+; KTrapFrame (ebp+0: after setup) -> base of KTrapFrame
+;
+; ContextRecord (ebp+8: after setup) = Supplies a pointer to a context rec.
+;
+; TestAlert (ebp+12: after setup) = Supplies a boolean value that specifies
+; whether alert should be tested for the previous processor mode.
+;
+; Return Value:
+;
+; Normally there is no return from this routine. However, if the specified
+; context record is misaligned or is not accessible, then the appropriate
+; status code is returned.
+;
+;--
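+
+; Typical usage, as the warning above requires (sketch only; the CONTEXT
+; normally comes from the exception dispatcher rather than being built by
+; hand):
+;
+;   Status = ZwContinue(ContextRecord, FALSE); // does not return on success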
+
+NcTrapFrame equ [ebp + 0]
+NcContextRecord equ [ebp + 8]
+NcTestAlert equ [ebp + 12]
+
+align dword
+cPublicProc _NtContinue ,2
+
+ push ebp
+
+;
+; Restore old trap frame address since this service exits directly rather
+; than returning.
+;
+
+ mov ebx, PCR[PcPrcbData+PbCurrentThread] ; get current thread address
+ mov edx, [ebp].TsEdx ; restore old trap frame address
+ mov [ebx].ThTrapFrame, edx ;
+
+;
+; Call KiContinue to load ContextRecord into TrapFrame. On x86 TrapFrame
+; is an atomic entity, so we don't need to allocate any other space here.
+;
+; KiContinue(NcContextRecord, 0, NcTrapFrame)
+;
+
+ mov ebp,esp
+ mov eax, NcTrapFrame
+ mov ecx, NcContextRecord
+ stdCall _KiContinue, <ecx, 0, eax>
+ or eax,eax ; return value 0?
+ jnz short Nc20 ; KiContinue failed, go report error
+
+;
+; Check to determine if alert should be tested for the previous processor mode.
+;
+
+ cmp byte ptr NcTestAlert,0 ; Check test alert flag
+ je short Nc10 ; if z, don't test alert, go Nc10
+ mov al,byte ptr [ebx]+ThPreviousMode ; No need to xor eax, eax.
+ stdCall _KeTestAlertThread, <eax> ; test alert for current thread
+
+Nc10: pop ebp ; (ebp) -> TrapFrame
+ mov esp,ebp ; (esp) = (ebp) -> trapframe
+ jmp _KiServiceExit2 ; common exit
+
+Nc20: pop ebp ; (ebp) -> TrapFrame
+ mov esp,ebp ; (esp) = (ebp) -> trapframe
+ jmp _KiServiceExit ; common exit
+
+stdENDP _NtContinue
+
+ page
+ subttl "Raise Exception System Service"
+;++
+;
+; NTSTATUS
+; NtRaiseException (
+; IN PEXCEPTION_RECORD ExceptionRecord,
+; IN PCONTEXT ContextRecord,
+; IN BOOLEAN FirstChance
+; )
+;
+; Routine Description:
+;
+; This routine is called as a system service to raise an exception. Its
+; function is to transfer information from the specified context record
+; into the trap frame that was built when the system service was executed.
+; The exception may be raised as a first or second chance exception.
+;
+; WARNING - Do not call this routine directly, always call it as
+; ZwRaiseException!!! This is required because it needs the
+; trapframe built by KiSystemService.
+;
+; NOTE - KiSystemService will terminate the ExceptionList, which is
+; not what we want for this case, so we will fish it out of
+; the trap frame and restore it.
+;
+; Arguments:
+;
+; TrapFrame (ebp+0: before setup) -> System trap frame for this call
+;
+; ExceptionRecord (ebp+8: after setup) -> An exception record.
+;
+; ContextRecord (ebp+12: after setup) -> Points to a context record.
+;
+; FirstChance (ebp+16: after setup) -> Supplies a boolean value that
+; specifies whether the exception is to be raised as a first (TRUE)
+; or second chance (FALSE) exception.
+;
+; Return Value:
+;
+; None.
+;--
+align dword
+cPublicProc _NtRaiseException ,3
+
+ push ebp
+
+;
+; Restore old trap frame address since this service exits directly rather
+; than returning.
+;
+
+ mov ebx, PCR[PcPrcbData+PbCurrentThread] ; get current thread address
+ mov edx, [ebp].TsEdx ; restore old trap frame address
+ mov [ebx].ThTrapFrame, edx ;
+
+;
+; Put back the ExceptionList so the exception can be properly
+; dispatched.
+;
+
+ mov ebp,esp ; [ebp+0] -> TrapFrame
+ mov ebx, [ebp+0] ; (ebx)->TrapFrame
+ mov edx, [ebp+16] ; (edx) = First chance indicator
+ mov eax, [ebx]+TsExceptionList ; Old exception list
+ mov ecx, [ebp+12] ; (ecx)->ContextRecord
+ mov PCR[PcExceptionList],eax
+ mov eax, [ebp+8] ; (eax)->ExceptionRecord
+
+;
+; KiRaiseException(ExceptionRecord, ContextRecord, ExceptionFrame,
+; TrapFrame, FirstChance)
+;
+
+ stdCall _KiRaiseException,<eax, ecx, 0, ebx, edx>
+
+ pop ebp
+ mov esp,ebp ; (esp) = (ebp) -> trap frame
+
+;
+; If the exception was handled, then the trap frame has been edited to
+; reflect new state, and we'll simply exit the system service to get
+; the effect of a continue.
+;
+; If the exception was not handled, we'll return to our caller, who
+; will raise a new exception.
+;
+ jmp _KiServiceExit2
+
+stdENDP _NtRaiseException
+
+
+
+ page
+ subttl "Reflect Exception to a Vdm"
+;++
+;
+; Routine Description:
+; Local stub which reflects an exception to a VDM using
+; Ki386VdmReflectException.
+;
+; Arguments:
+;
+; ebp -> Trap frame
+; ss:esp + 4 = trap number
+;
+; Returns
+; ret value from Ki386VdmReflectException
+; interrupts are disabled upon return
+;
+cPublicProc _Ki386VdmReflectException_A, 1
+
+ sub esp, 4*2
+
+ mov ecx, APC_LEVEL
+ fstCall KfRaiseIrql
+
+ sti
+ mov [esp+4], eax ; Save OldIrql
+ mov eax, [esp+12] ; Pick up trap number
+ mov [esp+0], eax
+
+ call _Ki386VdmReflectException@4 ; pops one dword
+
+ mov ecx, [esp+0] ; (ecx) = OldIrql
+ mov [esp+0], eax ; Save return code
+
+ fstCall KfLowerIrql
+
+ pop eax ; pops second dword
+ ret 4
+
+stdENDP _Ki386VdmReflectException_A
+
+
+_TEXT$00 ends
+ end
diff --git a/private/ntos/ke/i386/trapc.c b/private/ntos/ke/i386/trapc.c
new file mode 100644
index 000000000..0cd804e4e
--- /dev/null
+++ b/private/ntos/ke/i386/trapc.c
@@ -0,0 +1,545 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ trapc.c
+
+Abstract:
+
+ This module contains some trap handling code written in C.
+ It is used only by the kernel.
+
+Author:
+
+ Ken Reneris 6-9-93
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+Ki386CheckDivideByZeroTrap (
+ IN PKTRAP_FRAME UserFrame
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE, Ki386CheckDivideByZeroTrap)
+#endif
+
+
+#define REG(field) ((ULONG)(&((KTRAP_FRAME *)0)->field))
+#define GETREG(frame,reg) ((PULONG) (((ULONG) frame)+reg))[0]
+
+typedef struct {
+ UCHAR RmDisplaceOnly; // RM of displacement only, no base reg
+ UCHAR RmSib; // RM of SIB
+ UCHAR RmDisplace; // bit mask of RMs which have a displacement
+ UCHAR Disp; // sizeof displacement (in bytes)
+} KMOD, *PKMOD;
+
+static UCHAR RM32[] = {
+ /* 000 */ REG(Eax),
+ /* 001 */ REG(Ecx),
+ /* 010 */ REG(Edx),
+ /* 011 */ REG(Ebx),
+ /* 100 */ REG(HardwareEsp),
+ /* 101 */ REG(Ebp), // SIB
+ /* 110 */ REG(Esi),
+ /* 111 */ REG(Edi)
+};
+
+static UCHAR RM8[] = {
+ /* 000 */ REG(Eax), // al
+ /* 001 */ REG(Ecx), // cl
+ /* 010 */ REG(Edx), // dl
+ /* 011 */ REG(Ebx), // bl
+ /* 100 */ REG(Eax) + 1, // ah
+ /* 101 */ REG(Ecx) + 1, // ch
+ /* 110 */ REG(Edx) + 1, // dh
+ /* 111 */ REG(Ebx) + 1 // bh
+};
+
+static KMOD MOD32[] = {
+ /* 00 */ 5, 4, 0x20, 4,
+ /* 01 */ 0xff, 4, 0xff, 1,
+ /* 10 */ 0xff, 4, 0xff, 4,
+ /* 11 */ 0xff, 0xff, 0x00, 0
+} ;
+
+static struct {
+ UCHAR Opcode1, Opcode2; // instruction opcode
+ UCHAR ModRm, type; // if 2nd part of opcode is encoded in ModRm
+} NoWaitNpxInstructions[] = {
+ /* FNINIT */ 0xDB, 0xE3, 0, 1,
+ /* FNCLEX */ 0xDB, 0xE2, 0, 1,
+ /* FNSTENV */ 0xD9, 0x06, 1, 1,
+ /* FNSAVE */ 0xDD, 0x06, 1, 1,
+ /* FNSTCW */ 0xD9, 0x07, 1, 2,
+ /* FNSTSW */ 0xDD, 0x07, 1, 3,
+ /* FNSTSW AX*/ 0xDF, 0xE0, 0, 4,
+ 0x00, 0x00, 0, 1
+};
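+
+// For example, FNSTENV is encoded as D9 /6: Opcode1 = 0xD9 and, because
+// ModRm is set, Opcode2 = 0x06 is compared against bits 5-3 of the ModRM
+// byte rather than against the literal second instruction byte.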
+
+
+NTSTATUS
+Ki386CheckDivideByZeroTrap (
+ IN PKTRAP_FRAME UserFrame
+ )
+/*++
+
+Routine Description:
+
+ This function gains control when the x86 processor generates a
+ divide by zero trap. The x86 design generates such a trap on
+ divide by zero and on division overflows. In order to determine
+ which expection code to dispatch, the divisor of the "div" or "idiv"
+ which exception code to dispatch, the divisor of the "div" or "idiv"
+
+Arguments:
+
+ UserFrame - Trap frame of the divide by zero trap
+
+Return Value:
+
+ The exception code to dispatch (STATUS_INTEGER_DIVIDE_BY_ZERO or STATUS_INTEGER_OVERFLOW)
+
+--*/
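+/*
+ * Worked example (illustrative): for "div dword ptr [ecx+10h]" the bytes are
+ * F7 B1 10 00 00 00. Opcode 0xF7 selects a dword div/idiv, and ModRM 0xB1
+ * decodes as mod=10 (register + disp32), rm=001 (Ecx), so the code below
+ * forms accum = Ecx + 0x10 and reads the dword divisor at that address. A
+ * zero divisor yields STATUS_INTEGER_DIVIDE_BY_ZERO; otherwise the fault is
+ * reported as STATUS_INTEGER_OVERFLOW.
+ */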
+{
+ ULONG operandsize, operandmask, i, accum;
+ PUCHAR istream, pRM;
+ UCHAR ibyte, rm;
+ PKMOD Mod;
+ BOOLEAN fPrefix;
+ NTSTATUS status;
+
+ status = STATUS_INTEGER_DIVIDE_BY_ZERO;
+ try {
+
+ //
+ // read instruction prefixes
+ //
+
+ fPrefix = TRUE;
+ pRM = RM32;
+ operandsize = 4;
+ operandmask = 0xffffffff;
+ istream = (PUCHAR) UserFrame->Eip;
+ while (fPrefix) {
+ ibyte = ProbeAndReadUchar(istream);
+ istream++;
+ switch (ibyte) {
+ case 0x2e: // cs override
+ case 0x36: // ss override
+ case 0x3e: // ds override
+ case 0x26: // es override
+ case 0x64: // fs override
+ case 0x65: // gs override
+ case 0xF3: // rep
+ case 0xF2: // rep
+ case 0xF0: // lock
+ break;
+
+ case 0x66:
+ // 16 bit operand override
+ operandsize = 2;
+ operandmask = 0xffff;
+ break;
+
+ case 0x67:
+ // 16 bit address size override
+ // this is some non-flat code
+ goto try_exit;
+
+ default:
+ fPrefix = FALSE;
+ break;
+ }
+ }
+
+ //
+ // Check instruction opcode
+ //
+
+ if (ibyte != 0xf7 && ibyte != 0xf6) {
+ // this is not a DIV or IDIV opcode
+ goto try_exit;
+ }
+
+ if (ibyte == 0xf6) {
+ // this is a byte div or idiv
+ operandsize = 1;
+ operandmask = 0xff;
+ }
+
+ //
+ // Get Mod R/M
+ //
+
+ ibyte = ProbeAndReadUchar (istream);
+ istream++;
+ Mod = MOD32 + (ibyte >> 6);
+ rm = ibyte & 7;
+
+ //
+ // put register values into accum
+ //
+
+ if (operandsize == 1 && (ibyte & 0xc0) == 0xc0) {
+ pRM = RM8;
+ }
+
+ accum = 0;
+ if (rm != Mod->RmDisplaceOnly) {
+ if (rm == Mod->RmSib) {
+ // get SIB
+ ibyte = ProbeAndReadUchar(istream);
+ istream++;
+ i = (ibyte >> 3) & 7;
+ if (i != 4) {
+ accum = GETREG(UserFrame, RM32[i]);
+ accum = accum << (ibyte >> 6); // apply scaler
+ }
+ i = ibyte & 7;
+ accum = accum + GETREG(UserFrame, RM32[i]);
+ } else {
+ // get register's value
+ accum = GETREG(UserFrame, pRM[rm]);
+ }
+ }
+
+ //
+ // apply displacement to accum
+ //
+
+ if (Mod->RmDisplace & (1 << rm)) {
+ if (Mod->Disp == 4) {
+ i = ProbeAndReadUlong ((PULONG) istream);
+ } else {
+ ibyte = ProbeAndReadChar (istream);
+ i = (signed long) ((signed char) ibyte); // sign extend
+ }
+ accum += i;
+ }
+
+ //
+ // if this is an effective address, go get the data value
+ //
+
+ if (Mod->Disp) {
+ switch (operandsize) {
+ case 1: accum = ProbeAndReadUchar((PUCHAR) accum); break;
+ case 2: accum = ProbeAndReadUshort((PUSHORT) accum); break;
+ case 4: accum = ProbeAndReadUlong((PULONG) accum); break;
+ }
+ }
+
+ //
+ // accum now contains the instruction operand, see if the
+ // operand was really a zero
+ //
+
+ if (accum & operandmask) {
+ // operand was non-zero, must be an overflow
+ status = STATUS_INTEGER_OVERFLOW;
+ }
+
+try_exit: ;
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ // do nothing...
+ }
+
+ return status;
+}
+
+UCHAR
+KiNextIStreamByte (
+ IN PKTRAP_FRAME UserFrame,
+ IN PUCHAR *istream
+ )
+/*++
+
+Routine Description:
+
+ Reads the next byte from the istream pointed to by the UserFrame, and
+ advances the EIP.
+
+
+ Note: this function works for 32 bit code only
+
+--*/
+{
+ UCHAR ibyte;
+
+ if (UserFrame->SegCs == KGDT_R0_CODE) {
+ ibyte = **istream;
+ } else {
+ ibyte = ProbeAndReadUchar (*istream);
+ }
+
+ *istream += 1;
+ return ibyte;
+}
+
+
+
+
+BOOLEAN
+Ki386CheckDelayedNpxTrap (
+ IN PKTRAP_FRAME UserFrame,
+ IN PFLOATING_SAVE_AREA NpxFrame
+ )
+/*++
+
+Routine Description:
+
+ This function gains control from the Trap07 handler. It examines
+ the user mode instruction to see if it's a NoWait NPX instruction.
+ Such instructions do not generate floating point exceptions - this
+ check needs to be done due to the way 80386/80387 systems are
+ implemented. Such machines will generate a floating point exception
+ interrupt when the kernel performs an FRSTOR to reload the thread's
+ NPX context. If the thread's next instruction is a NoWait style
+ instruction, then we clear the exception or emulate the instruction.
+
+ AND... due to a different 80386/80387 "feature" the kernel needs
+ to use FWAIT at times, which can cause 80487's to generate delayed
+ exceptions that can lead to the same problem described above.
+
+Arguments:
+
+ UserFrame - Trap frame of the exception
+ NpxFrame - Thread's NpxFrame (WARNING: does not have NpxState)
+
+ Interrupts are disabled
+
+Return Value:
+
+ FALSE - Dispatch NPX exception to user mode
+ TRUE - Exception handled, continue
+
+--*/
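+/*
+ * Example (illustrative): FNSTSW AX encodes as DF E0 and matches the
+ * NoWaitNpxInstructions entry of type 4, so instead of dispatching the
+ * delayed fault the code below reads the coprocessor status word and
+ * emulates the instruction by storing that word in the low 16 bits of the
+ * user's EAX and advancing EIP past the instruction.
+ */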
+{
+ EXCEPTION_RECORD ExceptionRecord;
+ UCHAR ibyte1, ibyte2, inmodrm, status;
+ USHORT StatusWord, ControlWord, UsersWord;
+ PUCHAR istream;
+ BOOLEAN fPrefix;
+ UCHAR rm;
+ PKMOD Mod;
+ ULONG accum, i;
+
+ status = 0;
+ try {
+
+ //
+ // read instruction prefixes
+ //
+
+ fPrefix = TRUE;
+ istream = (PUCHAR) UserFrame->Eip;
+ while (fPrefix) {
+ ibyte1 = KiNextIStreamByte (UserFrame, &istream);
+ switch (ibyte1) {
+ case 0x2e: // cs override
+ case 0x36: // ss override
+ case 0x3e: // ds override
+ case 0x26: // es override
+ case 0x64: // fs override
+ case 0x65: // gs override
+ break;
+
+ default:
+ fPrefix = FALSE;
+ break;
+ }
+ }
+
+ //
+ // Check for coprocessor NoWait NPX instruction
+ //
+
+ ibyte2 = KiNextIStreamByte (UserFrame, &istream);
+ inmodrm = (ibyte2 >> 3) & 0x7;
+
+ for (i=0; NoWaitNpxInstructions[i].Opcode1; i++) {
+ if (NoWaitNpxInstructions[i].Opcode1 == ibyte1) {
+
+ //
+ // first opcode byte matched - check second part of opcode
+ //
+
+ if (NoWaitNpxInstructions[i].ModRm) {
+ if (NoWaitNpxInstructions[i].Opcode2 == inmodrm) {
+ // This is a no-wait NPX instruction
+ status = NoWaitNpxInstructions[i].type;
+ break;
+ }
+
+ } else {
+ if (NoWaitNpxInstructions[i].Opcode2 == ibyte2) {
+ // This is a no-wait NPX instruction
+ status = NoWaitNpxInstructions[i].type;
+ break;
+ }
+ }
+ }
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ // do nothing...
+ }
+
+ if (status == 0) {
+ //
+ // Dispatch coprocessor exception to user mode
+ //
+
+ return FALSE;
+ }
+
+ if (status == 1) {
+ //
+ // Ignore the pending exception; the user mode instruction does not trap
+ // on pending exceptions and it will clear/mask the pending exceptions
+ //
+
+ _asm {
+ mov eax, cr0
+ and eax, NOT (CR0_MP+CR0_EM+CR0_TS)
+ mov cr0, eax
+ }
+
+ NpxFrame->Cr0NpxState &= ~CR0_TS;
+ return TRUE;
+ }
+
+ //
+ // This is either FNSTSW or FNSTCW. Both of these instructions get
+ // a value from the coprocessor without affecting the pending exception
+ // state. To do this we emulate the instructions.
+ //
+
+ //
+ // Read the coprocessors Status & Control word state, then re-enable
+ // interrupts. (it's safe to context switch after that point)
+ //
+
+ _asm {
+ mov eax, cr0
+ mov ecx, eax
+ and eax, NOT (CR0_MP+CR0_EM+CR0_TS)
+ mov cr0, eax
+
+ fnstsw StatusWord
+ fnstcw ControlWord
+
+ mov cr0, ecx
+ sti
+ }
+
+ if (status == 4) {
+ //
+ // Emulate FNSTSW AX
+ //
+
+ UserFrame->Eip = (ULONG)istream;
+ UserFrame->Eax = (UserFrame->Eax & 0xFFFF0000) | StatusWord;
+ return TRUE;
+ }
+
+ if (status == 2) {
+ UsersWord = ControlWord;
+ } else {
+ UsersWord = StatusWord;
+ }
+
+ try {
+
+ //
+ // (PERFNOTE: the operand decode code should really share code with
+ // Ki386CheckDivideByZeroTrap, but this is a late change, therefore the
+ // code was copied to keep the impact of the change localized)
+ //
+
+ //
+ // decode Mod/RM byte
+ //
+
+ Mod = MOD32 + (ibyte2 >> 6);
+ rm = ibyte2 & 7;
+
+ //
+ // Decode the instruction's word pointer into accum
+ //
+
+ accum = 0;
+ if (rm != Mod->RmDisplaceOnly) {
+ if (rm == Mod->RmSib) {
+ // get SIB
+ ibyte1 = KiNextIStreamByte (UserFrame, &istream);
+ i = (ibyte1 >> 3) & 7;
+ if (i != 4) {
+ accum = GETREG(UserFrame, RM32[i]);
+ accum = accum << (ibyte1 >> 6); // apply scaler
+ }
+ i = ibyte1 & 7;
+ accum = accum + GETREG(UserFrame, RM32[i]);
+ } else {
+ // get register's value
+ accum = GETREG(UserFrame, RM32[rm]);
+ }
+ }
+
+ //
+ // apply displacement to accum
+ //
+
+ if (Mod->RmDisplace & (1 << rm)) {
+ if (Mod->Disp == 4) {
+ i = KiNextIStreamByte (UserFrame, &istream);
+ i |= KiNextIStreamByte (UserFrame, &istream) << 8;
+ i |= KiNextIStreamByte (UserFrame, &istream) << 16;
+ i |= KiNextIStreamByte (UserFrame, &istream) << 24;
+ } else {
+ ibyte1 = KiNextIStreamByte (UserFrame, &istream);
+ i = (signed long) ((signed char) ibyte1); // sign extend
+ }
+ accum += i;
+ }
+
+ //
+ // Set the word pointer
+ //
+
+ if (UserFrame->SegCs == KGDT_R0_CODE) {
+ *((PUSHORT) accum) = UsersWord;
+ } else {
+ ProbeAndWriteUshort ((PUSHORT) accum, UsersWord);
+ }
+ UserFrame->Eip = (ULONG)istream;
+
+ } except (KiCopyInformation(&ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+ //
+ // Faulted addressing user's memory.
+ // Set the address of the exception to the current program address
+ // and raise the exception by calling the exception dispatcher.
+ //
+
+ ExceptionRecord.ExceptionAddress = (PVOID)(UserFrame->Eip);
+ KiDispatchException(
+ &ExceptionRecord,
+ NULL, // ExceptionFrame
+ UserFrame,
+ UserMode,
+ TRUE
+ );
+ }
+
+ return TRUE;
+}
diff --git a/private/ntos/ke/i386/vdm.c b/private/ntos/ke/i386/vdm.c
new file mode 100644
index 000000000..92498d399
--- /dev/null
+++ b/private/ntos/ke/i386/vdm.c
@@ -0,0 +1,1641 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ VDM.C
+
+Abstract:
+
+ This module contains support routines for the x86 monitor for
+ running Dos applications in V86 mode.
+
+Author:
+
+ Dave Hastings (daveh) 20 Mar 1991
+
+Environment:
+
+ The code in this module is all x86 specific.
+
+Notes:
+
+ In its current implementation, this code is less robust than it needs
+ to be. This will be fixed. Specifically, parameter verification needs
+ to be done. (daveh 7/15/91)
+
+ Support for 32 bit segments (2/2/92)
+Revision History:
+
+ 20-Mar-1991 daveh
+ created
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#include "vdmntos.h"
+
+#define VDM_IO_TEST 0
+
+#if VDM_IO_TEST
+VOID
+TestIoHandlerStuff(
+ VOID
+ );
+#endif
+
+BOOLEAN
+Ki386GetSelectorParameters(
+ IN USHORT Selector,
+ OUT PULONG Flags,
+ OUT PULONG Base,
+ OUT PULONG Limit
+ );
+
+
+BOOLEAN
+Ki386VdmDispatchIo(
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Read,
+ IN UCHAR InstructionSize,
+ IN PKTRAP_FRAME TrapFrame
+ );
+
+BOOLEAN
+Ki386VdmDispatchStringIo(
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Rep,
+ IN BOOLEAN Read,
+ IN ULONG Count,
+ IN ULONG Address,
+ IN UCHAR InstructionSize,
+ IN PKTRAP_FRAME TrapFrame
+ );
+
+
+BOOLEAN
+VdmDispatchIoToHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Read,
+ IN OUT PULONG Data
+ );
+
+BOOLEAN
+VdmDispatchUnalignedIoToHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Read,
+ IN OUT PULONG Data
+ );
+
+BOOLEAN
+VdmDispatchStringIoToHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN ULONG Count,
+ IN BOOLEAN Read,
+ IN ULONG Data
+ );
+
+BOOLEAN
+VdmCallStringIoHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN PVOID StringIoRoutine,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN ULONG Count,
+ IN BOOLEAN Read,
+ IN ULONG Data
+ );
+
+BOOLEAN
+VdmConvertToLinearAddress(
+ IN ULONG SegmentedAddress,
+ IN PVOID *LinearAddress
+ );
+
+VOID
+KeI386VdmInitialize(
+ VOID
+ );
+
+ULONG
+Ki386VdmEnablePentiumExtentions(
+ ULONG
+ );
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE, Ki386GetSelectorParameters)
+#pragma alloc_text(PAGE, Ki386VdmDispatchIo)
+#pragma alloc_text(PAGE, Ki386VdmDispatchStringIo)
+#pragma alloc_text(PAGE, VdmDispatchIoToHandler)
+#pragma alloc_text(PAGE, VdmDispatchUnalignedIoToHandler)
+#pragma alloc_text(PAGE, VdmDispatchStringIoToHandler)
+#pragma alloc_text(PAGE, VdmCallStringIoHandler)
+#pragma alloc_text(PAGE, VdmConvertToLinearAddress)
+#pragma alloc_text(INIT, KeI386VdmInitialize)
+#endif
+
+KMUTEX VdmStringIoMutex;
+ULONG VdmFixedStateLinear;
+
+ULONG KeI386EFlagsAndMaskV86 = EFLAGS_USER_SANITIZE;
+ULONG KeI386EFlagsOrMaskV86 = EFLAGS_INTERRUPT_MASK;
+BOOLEAN KeI386VdmIoplAllowed = FALSE;
+ULONG KeI386VirtualIntExtensions = 0;
+
+
+BOOLEAN
+Ki386GetSelectorParameters(
+ IN USHORT Selector,
+ OUT PULONG Flags,
+ OUT PULONG Base,
+ OUT PULONG Limit
+ )
+
+/*++
+
+Routine Description:
+
+ This routine gets information about a selector in the ldt, and
+ returns it to the caller.
+
+Arguments:
+
+ IN USHORT Selector -- selector number for selector to return info for
+ OUT PULONG Flags -- flags indicating the type of the selector.
+ OUT PULONG Base -- base linear address of the selector
+ OUT PULONG Limit -- limit of the selector.
+
+Return Value:
+
+ return-value - True if the selector is in the LDT, and present.
+ False otherwise.
+Note:
+
+ This routine should probably be somewhere else. There are a number
+ of issues to clear up with respect to selectors and the kernel, and
+ after they have been cleared up, this code will be moved to its
+ correct place
+
+--*/
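+/*
+ * Typical call pattern (sketch only):
+ *
+ *     ULONG Flags, Base, Limit;
+ *     if (Ki386GetSelectorParameters(Selector, &Flags, &Base, &Limit)) {
+ *         // Base and Limit describe the flat range mapped by Selector,
+ *         // and Flags carries the SEL_TYPE_* attributes decoded below.
+ *     }
+ */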
+
+{
+
+ PLDT_ENTRY Ldt,OldLdt;
+ ULONG LdtLimit,OldLdtLimit,RetryCount = 0;
+ PKPROCESS Process;
+ BOOLEAN ReturnValue;
+
+ *Flags = 0;
+
+ if ((Selector & (SELECTOR_TABLE_INDEX | DPL_USER))
+ != (SELECTOR_TABLE_INDEX | DPL_USER)) {
+ return FALSE;
+ }
+
+
+ Process = KeGetCurrentThread()->ApcState.Process;
+ Ldt = (PLDT_ENTRY)((Process->LdtDescriptor.BaseLow) |
+ (Process->LdtDescriptor.HighWord.Bytes.BaseMid << 16) |
+ (Process->LdtDescriptor.HighWord.Bytes.BaseHi << 24));
+
+ LdtLimit = ((Process->LdtDescriptor.LimitLow) |
+ (Process->LdtDescriptor.HighWord.Bits.LimitHi << 16));
+
+ Selector &= ~(SELECTOR_TABLE_INDEX | DPL_USER);
+
+ //
+ // Under normal circumstances, we will only execute the following loop
+ // once. If there is a bug in the user mode wow code however, the LDT
+ // may change while we execute the following code. We don't want to take
+ // the Ldt mutex, because that is expensive.
+ //
+
+ do {
+
+ RetryCount++;
+
+ if (((ULONG)Selector >= LdtLimit) || (!Ldt)) {
+ return FALSE;
+ }
+
+ try {
+
+ if (!Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.Pres) {
+ *Flags = SEL_TYPE_NP;
+ ReturnValue = FALSE;
+ } else {
+
+ *Base = (Ldt[Selector/sizeof(LDT_ENTRY)].BaseLow |
+ (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bytes.BaseMid << 16) |
+ (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bytes.BaseHi << 24));
+
+ *Limit = (Ldt[Selector/sizeof(LDT_ENTRY)].LimitLow |
+ (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.LimitHi << 16));
+
+ *Flags = 0;
+
+ if ((Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.Type & 0x18) == 0x18) {
+ *Flags |= SEL_TYPE_EXECUTE;
+
+ if (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.Type & 0x02) {
+ *Flags |= SEL_TYPE_READ;
+ }
+ } else {
+ *Flags |= SEL_TYPE_READ;
+ if (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.Type & 0x02) {
+ *Flags |= SEL_TYPE_WRITE;
+ }
+ if (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.Type & 0x04) {
+ *Flags |= SEL_TYPE_ED;
+ }
+ }
+
+ if (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.Default_Big) {
+ *Flags |= SEL_TYPE_BIG;
+ }
+
+ if (Ldt[Selector/sizeof(LDT_ENTRY)].HighWord.Bits.Granularity) {
+ *Flags |= SEL_TYPE_2GIG;
+ }
+ }
+ ReturnValue = TRUE;
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ // Don't do anything here. We took the fault because the
+ // Ldt moved. We will get an answer the next time around
+ }
+
+ //
+ // If we can't get an answer in 10 tries, we never will
+ //
+ if ((RetryCount > 10)) {
+ ReturnValue = FALSE;
+ }
+
+ if (ReturnValue == FALSE) {
+ break;
+ }
+
+ OldLdt = Ldt;
+ OldLdtLimit = LdtLimit;
+
+ Ldt = (PLDT_ENTRY)((Process->LdtDescriptor.BaseLow) |
+ (Process->LdtDescriptor.HighWord.Bytes.BaseMid << 16) |
+ (Process->LdtDescriptor.HighWord.Bytes.BaseHi << 24));
+
+ LdtLimit = ((Process->LdtDescriptor.LimitLow) |
+ (Process->LdtDescriptor.HighWord.Bits.LimitHi << 16));
+
+ } while ((Ldt != OldLdt) || (LdtLimit != OldLdtLimit));
+
+ return ReturnValue;
+}
+
+BOOLEAN
+Ki386VdmDispatchIo(
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Read,
+ IN UCHAR InstructionSize,
+ IN PKTRAP_FRAME TrapFrame
+ )
+/*++
+
+Routine Description:
+
+ This routine sets up the Event info for an IO event, and causes the
+ event to be reflected to the Monitor.
+
+ It is assumed that interrupts are enabled upon entry, and Irql is
+ at APC level.
+
+Arguments:
+
+ PortNumber -- Supplies the port number the IO was done to
+ Size -- Supplies the size of the IO operation.
+ Read -- Indicates whether the IO operation was a read or a write.
+ InstructionSize -- Supplies the size of the IO instruction in bytes.
+
+Return Value:
+
+ True if the io instruction will be reflected to User mode.
+
+--*/
+{
+ PVDM_TIB VdmTib;
+ EXCEPTION_RECORD ExceptionRecord;
+ VDM_IO_HANDLER VdmIoHandler;
+ ULONG Result;
+ BOOLEAN Success = FALSE;
+ ULONG Context;
+
+ Success = Ps386GetVdmIoHandler(
+ PsGetCurrentProcess(),
+ PortNumber & ~0x3,
+ &VdmIoHandler,
+ &Context
+ );
+
+ if (Success) {
+ Result = TrapFrame->Eax;
+ // if port is not aligned, perform unaligned IO
+ // else do the io the easy way
+ if (PortNumber % Size) {
+ Success = VdmDispatchUnalignedIoToHandler(
+ &VdmIoHandler,
+ Context,
+ PortNumber,
+ Size,
+ Read,
+ &Result
+ );
+ } else {
+ Success = VdmDispatchIoToHandler(
+ &VdmIoHandler,
+ Context,
+ PortNumber,
+ Size,
+ Read,
+ &Result
+ );
+ }
+ }
+
+ if (Success) {
+ if (Read) {
+ switch (Size) {
+ case 4:
+ TrapFrame->Eax = Result;
+ break;
+ case 2:
+ *(PUSHORT)(&TrapFrame->Eax) = (USHORT)Result;
+ break;
+ case 1:
+ *(PUCHAR)(&TrapFrame->Eax) = (UCHAR)Result;
+ break;
+ }
+ }
+ TrapFrame->Eip += (ULONG) InstructionSize;
+ return TRUE;
+ } else {
+ try {
+ VdmTib = NtCurrentTeb()->Vdm;
+ VdmTib->EventInfo.InstructionSize = (ULONG) InstructionSize;
+ VdmTib->EventInfo.Event = VdmIO;
+ VdmTib->EventInfo.IoInfo.PortNumber = (USHORT)PortNumber;
+ VdmTib->EventInfo.IoInfo.Size = (USHORT)Size;
+ VdmTib->EventInfo.IoInfo.Read = Read;
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ ExceptionRecord.ExceptionCode = STATUS_ACCESS_VIOLATION;
+ ExceptionRecord.ExceptionFlags = 0;
+ ExceptionRecord.NumberParameters = 0;
+ ExRaiseException(&ExceptionRecord);
+ return FALSE;
+ }
+ }
+
+ VdmEndExecution(TrapFrame, VdmTib);
+
+ return TRUE;
+
+}
+
+
+BOOLEAN
+Ki386VdmDispatchStringIo(
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Rep,
+ IN BOOLEAN Read,
+ IN ULONG Count,
+ IN ULONG Address,
+ IN UCHAR InstructionSize,
+ IN PKTRAP_FRAME TrapFrame
+ )
+/*++
+
+Routine Description:
+
+ This routine sets up the Event info for a string IO event, and causes the
+ event to be reflected to the Monitor.
+
+ It is assumed that interrupts are enabled upon entry, and Irql is
+ at APC level.
+
+Arguments:
+
+ PortNumber -- Supplies the port number the IO was done to
+ Size -- Supplies the size of the IO operation.
+ Read -- Indicates whether the IO operation was a read or a write.
+ Count -- indicates the number of IO operations of Size size
+ Address -- Indicates address for string io
+ InstructionSize -- Supplies the size of the IO instruction in bytes.
+
+
+Return Value:
+
+ True if the io instruction will be reflected to User mode.
+
+
+
+--*/
+{
+ PVDM_TIB VdmTib;
+ EXCEPTION_RECORD ExceptionRecord;
+ BOOLEAN Success = FALSE;
+ VDM_IO_HANDLER VdmIoHandler;
+ ULONG Context;
+
+ Success = Ps386GetVdmIoHandler(
+ PsGetCurrentProcess(),
+ PortNumber & ~0x3,
+ &VdmIoHandler,
+ &Context
+ );
+
+
+ if (Success) {
+ Success = VdmDispatchStringIoToHandler(
+ &VdmIoHandler,
+ Context,
+ PortNumber,
+ Size,
+ Count,
+ Read,
+ Address
+ );
+ }
+
+ if (Success) {
+ PUSHORT pIndexRegister;
+ USHORT Index;
+
+ // WARNING no 32 bit address support
+
+ pIndexRegister = Read ? (PUSHORT)&TrapFrame->Edi
+ : (PUSHORT)&TrapFrame->Esi;
+
+ if (TrapFrame->EFlags & EFLAGS_DF_MASK) {
+ Index = *pIndexRegister - (USHORT)(Count * Size);
+ }
+ else {
+ Index = *pIndexRegister + (USHORT)(Count * Size);
+ }
+
+ *pIndexRegister = Index;
+
+ if (Rep) {
+            *(PUSHORT)(&TrapFrame->Ecx) = 0;
+ }
+
+ TrapFrame->Eip += (ULONG) InstructionSize;
+ return TRUE;
+ }
+
+ try {
+ VdmTib = NtCurrentTeb()->Vdm;
+ VdmTib->EventInfo.InstructionSize = (ULONG) InstructionSize;
+ VdmTib->EventInfo.Event = VdmStringIO;
+ VdmTib->EventInfo.StringIoInfo.PortNumber = (USHORT)PortNumber;
+ VdmTib->EventInfo.StringIoInfo.Size = (USHORT)Size;
+ VdmTib->EventInfo.StringIoInfo.Rep = Rep;
+ VdmTib->EventInfo.StringIoInfo.Read = Read;
+ VdmTib->EventInfo.StringIoInfo.Count = Count;
+ VdmTib->EventInfo.StringIoInfo.Address = Address;
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ ExceptionRecord.ExceptionCode = STATUS_ACCESS_VIOLATION;
+ ExceptionRecord.ExceptionFlags = 0;
+ ExceptionRecord.NumberParameters = 0;
+ ExRaiseException(&ExceptionRecord);
+ return FALSE;
+ }
+
+
+ VdmEndExecution(TrapFrame, VdmTib);
+
+ return TRUE;
+}
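+
+//
+// Example (illustrative values) of the index register update above: a
+// REP INSW with Count == 4 and Size == 2 advances the low word of Edi by 8
+// when the direction flag is clear, or moves it back by 8 when the
+// direction flag is set, and the REP case also clears the low word of Ecx.
+//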
+
+
+BOOLEAN
+VdmDispatchIoToHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Read,
+ IN OUT PULONG Data
+ )
+/*++
+
+Routine Description:
+
+    This routine calls the handler for the IO.  If there is no handler of
+    the proper size, it recursively dispatches two IOs of the next smaller
+    size.  If the size is a byte and there is no handler, FALSE is
+    returned.
+
+Arguments:
+
+ VdmIoHandler -- Supplies a pointer to the handler table
+ Context -- Supplies 32 bits of data set when the port was trapped
+ PortNumber -- Supplies the port number the IO was done to
+ Size -- Supplies the size of the IO operation.
+ Read -- Indicates whether the IO operation was a read or a write.
+    Data -- Supplies a pointer to the location to get or put the data
+
+Return Value:
+
+ True if one or more handlers were called to take care of the IO.
+ False if no handler was called to take care of the IO.
+
+--*/
+{
+ NTSTATUS Status;
+ BOOLEAN Success1, Success2;
+ USHORT FnIndex;
+ UCHAR AccessType;
+
+    // Ensure that the IO is aligned
+ ASSERT((!(PortNumber % Size)));
+
+ if (Read) {
+ FnIndex = 0;
+ AccessType = EMULATOR_READ_ACCESS;
+ } else {
+ FnIndex = 1;
+ AccessType = EMULATOR_WRITE_ACCESS;
+ }
+
+ switch (Size) {
+ case 1:
+ if (VdmIoHandler->IoFunctions[FnIndex].UcharIo[PortNumber % 4]) {
+ Status = (*(VdmIoHandler->IoFunctions[FnIndex].UcharIo[PortNumber % 4]))(
+ Context,
+ PortNumber,
+ AccessType,
+ (PUCHAR)Data
+ );
+ if (NT_SUCCESS(Status)) {
+ return TRUE;
+ }
+ }
+ // No handler for this port
+ return FALSE;
+
+ case 2:
+ if (VdmIoHandler->IoFunctions[FnIndex].UshortIo[PortNumber % 2]) {
+ Status = (*(VdmIoHandler->IoFunctions[FnIndex].UshortIo[PortNumber % 2]))(
+ Context,
+ PortNumber,
+ AccessType,
+ (PUSHORT)Data
+ );
+ if (NT_SUCCESS(Status)) {
+ return TRUE;
+ }
+ } else {
+ // Dispatch to the two uchar handlers for this ushort port
+ Success1 = VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber,
+ Size /2,
+ Read,
+ Data
+ );
+
+ Success2 = VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber + 1,
+ Size / 2,
+ Read,
+ (PULONG)((PUCHAR)Data + 1)
+ );
+
+ return (Success1 || Success2);
+
+ }
+ return FALSE;
+
+ case 4:
+ if (VdmIoHandler->IoFunctions[FnIndex].UlongIo) {
+ Status = (*(VdmIoHandler->IoFunctions[FnIndex].UlongIo))(
+ Context,
+ PortNumber,
+ AccessType,
+ Data
+ );
+ if (NT_SUCCESS(Status)) {
+ return TRUE;
+ }
+ } else {
+ // Dispatch to the two ushort handlers for this port
+ Success1 = VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber,
+ Size /2,
+ Read,
+ Data);
+ Success2 = VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber + 2,
+ Size / 2,
+ Read,
+ (PULONG)((PUSHORT)Data + 1)
+ );
+
+ return (Success1 || Success2);
+ }
+ return FALSE;
+    }
+
+    return FALSE;
+}
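+
+//
+// Example (illustrative port number) of the recursive decomposition above:
+// a dword read from port 0x400 for which only byte handlers are registered
+// is split into word dispatches at 0x400 and 0x402, each of which is split
+// into byte dispatches at 0x400/0x401 and 0x402/0x403, with the results
+// assembled through the successively offset Data pointers.
+//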
+
+BOOLEAN
+VdmDispatchUnalignedIoToHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN BOOLEAN Read,
+ IN OUT PULONG Data
+ )
+/*++
+
+Routine Description:
+
+    This routine converts the unaligned IO into the necessary number of
+    smaller, aligned IOs.
+
+Arguments:
+
+ VdmIoHandler -- Supplies a pointer to the handler table
+ Context -- Supplies 32 bits of data set when the port was trapped
+ PortNumber -- Supplies the port number the IO was done to
+ Size -- Supplies the size of the IO operation.
+ Read -- Indicates whether the IO operation was a read or a write.
+    Data -- Supplies a pointer to the location to get or put the data
+
+Return Value:
+
+ True if one or more handlers were called to take care of the IO.
+ False if no handler was called to take care of the IO.
+
+--*/
+{
+ ULONG Offset;
+ BOOLEAN Success;
+
+ ASSERT((Size > 1));
+ ASSERT((PortNumber % Size));
+
+ Offset = 0;
+
+ //
+ // The possible unaligned io situations are as follows.
+ //
+ // 1. Uchar aligned Ulong io
+ // We have to dispatch a uchar io, a ushort io, and a uchar io
+ //
+ // 2. Ushort aligned Ulong Io
+ // We have to dispatch a ushort io, and a ushort io
+ //
+ // 3. Uchar aligned Ushort Io
+ // We have to dispatch a uchar io and a uchar io
+ //
+
+ // if the port is uchar aligned
+ if ((PortNumber % Size) & 1) {
+ Success = VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber,
+ 1,
+ Read,
+ Data
+ );
+ Offset += 1;
+ // else it is ushort aligned (and therefore must be a ulong port)
+ } else {
+ Success = VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber,
+ 2,
+ Read,
+ Data
+ );
+ Offset += 2;
+ }
+
+ // if it is a ulong port, we know we have a ushort IO to dispatch
+ if (Size == 4) {
+ Success |= VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber + Offset,
+ 2,
+ Read,
+ (PULONG)((PUCHAR)Data + Offset)
+ );
+ Offset += 2;
+ }
+
+ // If we haven't dispatched the entire port, dispatch the final uchar
+ if (Offset != 4) {
+ Success |= VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber + Offset,
+ 1,
+ Read,
+ (PULONG)((PUCHAR)Data + Offset)
+ );
+ }
+
+ return Success;
+}
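+
+//
+// Example (illustrative port number) of the decomposition above: a dword
+// access to port 0x3C1 is uchar aligned, so it is dispatched as a byte
+// access to 0x3C1, a word access to 0x3C2, and a byte access to 0x3C4,
+// which together cover the original four bytes.
+//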
+
+BOOLEAN
+VdmDispatchStringIoToHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN ULONG Count,
+ IN BOOLEAN Read,
+ IN ULONG Data
+ )
+/*++
+
+Routine Description:
+
+    This routine calls the string IO handler for the IO.  If there is no
+    handler of the proper size, or the IO is not aligned, the IO is
+    simulated using the normal (non-string) IO handlers.
+
+Arguments:
+
+ VdmIoHandler -- Supplies a pointer to the handler table
+ Context -- Supplies 32 bits of data set when the port was trapped
+ PortNumber -- Supplies the port number the IO was done to
+ Size -- Supplies the size of the IO operation.
+ Count -- Supplies the number of IO operations.
+ Read -- Indicates whether the IO operation was a read or a write.
+ Data -- Supplies a segmented address at which to put the result.
+
+Return Value:
+
+ True if one or more handlers were called to take care of the IO.
+ False if no handler was called to take care of the IO.
+
+--*/
+{
+ BOOLEAN Success = FALSE;
+ USHORT FnIndex;
+ NTSTATUS Status;
+
+ if (Read) {
+ FnIndex = 0;
+ } else {
+ FnIndex = 1;
+ }
+
+ Status = KeWaitForSingleObject(
+ &VdmStringIoMutex,
+ Executive,
+ KernelMode,
+ FALSE,
+ NULL
+ );
+
+ if (!NT_SUCCESS(Status)) {
+ return FALSE;
+ }
+
+ switch (Size) {
+ case 1:
+ Success = VdmCallStringIoHandler(
+ VdmIoHandler,
+ (PVOID)VdmIoHandler->IoFunctions[FnIndex].UcharStringIo[PortNumber % 4],
+ Context,
+ PortNumber,
+ Size,
+ Count,
+ Read,
+ Data
+            );
+        break;
+
+ case 2:
+ Success = VdmCallStringIoHandler(
+ VdmIoHandler,
+ (PVOID)VdmIoHandler->IoFunctions[FnIndex].UshortStringIo[PortNumber % 2],
+ Context,
+ PortNumber,
+ Size,
+ Count,
+ Read,
+ Data
+            );
+        break;
+
+ case 4:
+ Success = VdmCallStringIoHandler(
+ VdmIoHandler,
+ (PVOID)VdmIoHandler->IoFunctions[FnIndex].UlongStringIo,
+ Context,
+ PortNumber,
+ Size,
+ Count,
+ Read,
+ Data
+ );
+ }
+ KeReleaseMutex(&VdmStringIoMutex, FALSE);
+ return Success;
+}
+
+#define STRINGIO_BUFFER_SIZE 1024
+UCHAR VdmStringIoBuffer[STRINGIO_BUFFER_SIZE];
+
+BOOLEAN
+VdmCallStringIoHandler(
+ IN PVDM_IO_HANDLER VdmIoHandler,
+ IN PVOID StringIoRoutine,
+ IN ULONG Context,
+ IN ULONG PortNumber,
+ IN ULONG Size,
+ IN ULONG Count,
+ IN BOOLEAN Read,
+ IN ULONG Data
+ )
+/*++
+
+Routine Description:
+
+    This routine actually performs the call to the string IO routine.  It
+    takes care of buffering the user data in kernel space so that the device
+    driver does not have to.  If there is no string IO function, or the IO
+    is misaligned, it is simulated as a series of normal IO operations.
+
+Arguments:
+
+    VdmIoHandler -- Supplies a pointer to the handler table
+    StringIoRoutine -- Supplies a pointer to the string IO routine
+    Context -- Supplies 32 bits of data set when the port was trapped
+    PortNumber -- Supplies the number of the port to perform IO to
+    Size -- Supplies the size of the IO operations
+    Count -- Supplies the number of IO operations in the string.
+    Read -- Indicates a read operation
+    Data -- Supplies the segmented address of the user buffer to perform
+        the IO on.
+
+Return Value:
+
+    TRUE if a handler was called.
+    FALSE if not.
+
+--*/
+{
+ ULONG TotalBytes,BytesDone,BytesToDo,LoopCount,NumberIo;
+ PUCHAR CurrentDataPtr;
+ UCHAR AccessType;
+ EXCEPTION_RECORD ExceptionRecord;
+ NTSTATUS Status;
+ BOOLEAN Success;
+
+ Success = VdmConvertToLinearAddress(
+ Data,
+ &CurrentDataPtr
+ );
+
+ if (!Success) {
+ ExceptionRecord.ExceptionCode = STATUS_ACCESS_VIOLATION;
+ ExceptionRecord.ExceptionFlags = 0;
+ ExceptionRecord.NumberParameters = 0;
+ ExRaiseException(&ExceptionRecord);
+ // Cause kernel exit, rather than Io reflection
+ return TRUE;
+ }
+
+
+ TotalBytes = Count * Size;
+ BytesDone = 0;
+
+ if (PortNumber % Size) {
+ StringIoRoutine = NULL;
+ }
+
+ if (Read) {
+ AccessType = EMULATOR_READ_ACCESS;
+ } else {
+ AccessType = EMULATOR_WRITE_ACCESS;
+ }
+
+
+ // Set up try out here to avoid overhead in loop
+ try {
+ while (BytesDone < TotalBytes) {
+ if ((BytesDone + STRINGIO_BUFFER_SIZE) > TotalBytes) {
+ BytesToDo = TotalBytes - BytesDone;
+ } else {
+ BytesToDo = STRINGIO_BUFFER_SIZE;
+ }
+
+ ASSERT((!(BytesToDo % Size)));
+
+ if (!Read) {
+ RtlMoveMemory(VdmStringIoBuffer, CurrentDataPtr, BytesToDo);
+ }
+
+ NumberIo = BytesToDo / Size;
+
+ if (StringIoRoutine) {
+ // in order to avoid having 3 separate calls, one for each size
+ // we simply cast the parameters appropriately for the
+ // byte routine.
+
+ Status = (*((PDRIVER_IO_PORT_UCHAR_STRING)StringIoRoutine))(
+ Context,
+ PortNumber,
+ AccessType,
+ VdmStringIoBuffer,
+ NumberIo
+ );
+
+ if (NT_SUCCESS(Status)) {
+ Success |= TRUE;
+ }
+ } else {
+ if (PortNumber % Size) {
+ for (LoopCount = 0; LoopCount < NumberIo; LoopCount++ ) {
+ Success |= VdmDispatchUnalignedIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber,
+ Size,
+ Read,
+ (PULONG)(VdmStringIoBuffer + LoopCount * Size)
+ );
+ }
+ } else {
+ for (LoopCount = 0; LoopCount < NumberIo; LoopCount++ ) {
+ Success |= VdmDispatchIoToHandler(
+ VdmIoHandler,
+ Context,
+ PortNumber,
+ Size,
+ Read,
+ (PULONG)(VdmStringIoBuffer + LoopCount * Size)
+ );
+ }
+
+ }
+ }
+
+ if (Read) {
+ RtlMoveMemory(CurrentDataPtr, VdmStringIoBuffer, BytesToDo);
+ }
+
+ BytesDone += BytesToDo;
+ CurrentDataPtr += BytesToDo;
+ }
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ ExceptionRecord.ExceptionCode = GetExceptionCode();
+ ExceptionRecord.ExceptionFlags = 0;
+ ExceptionRecord.NumberParameters = 0;
+ ExRaiseException(&ExceptionRecord);
+ // Cause kernel exit, rather than Io reflection
+ Success = TRUE;
+ }
+ return Success;
+
+}
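+
+//
+// Example (illustrative values) of the buffering in VdmCallStringIoHandler:
+// a string IO of 1000 word-sized operations moves 2000 bytes and takes two
+// passes through the loop, the first moving 1024 bytes (512 operations) and
+// the second moving the remaining 976 bytes (488 operations).
+//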
+
+BOOLEAN
+VdmConvertToLinearAddress(
+ IN ULONG SegmentedAddress,
+ OUT PVOID *LinearAddress
+ )
+/*++
+
+Routine Description:
+
+    This routine converts the specified segmented address into a linear
+    address, based on the current user-mode processor mode (V86 or
+    protected mode).
+
+Arguments:
+
+ SegmentedAddress -- Supplies the segmented address to convert.
+    LinearAddress -- Supplies a pointer to the destination for the
+        corresponding linear address
+
+Return Value:
+
+ True if the address was converted.
+ False otherwise
+
+Note:
+
+ A linear address of 0 is a valid return
+--*/
+{
+ PKTHREAD Thread;
+ PKTRAP_FRAME TrapFrame;
+ BOOLEAN Success;
+ ULONG Base, Limit, Flags;
+
+ Thread = KeGetCurrentThread();
+ TrapFrame = VdmGetTrapFrame(Thread);
+
+ if (TrapFrame->EFlags & EFLAGS_V86_MASK) {
+ *LinearAddress = (PVOID)(((SegmentedAddress & 0xFFFF0000) >> 12) +
+ (SegmentedAddress & 0xFFFF));
+ Success = TRUE;
+ } else {
+ Success = Ki386GetSelectorParameters(
+ (USHORT)((SegmentedAddress & 0xFFFF0000) >> 12),
+ &Flags,
+ &Base,
+ &Limit
+ );
+ if (Success) {
+ *LinearAddress = (PVOID)(Base + (SegmentedAddress & 0xFFFF));
+ }
+ }
+ return Success;
+}
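+
+//
+// Example (illustrative address) of the V86 mode conversion above: the
+// segmented address 0xB800:0x0010 (SegmentedAddress == 0xB8000010) converts
+// to the linear address (0xB800 << 4) + 0x0010 == 0xB8010.
+//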
+
+VOID
+KeI386VdmInitialize(
+ VOID
+ )
+/*++
+
+Routine Description:
+
+    This routine initializes VDM support in the kernel.
+
+Arguments:
+
+ None
+
+Return Value:
+
+ None
+--*/
+{
+ NTSTATUS Status;
+ OBJECT_ATTRIBUTES ObjectAttributes;
+ HANDLE RegistryHandle = NULL;
+ UNICODE_STRING WorkString;
+ UCHAR KeyInformation[sizeof(KEY_VALUE_BASIC_INFORMATION) + 30];
+ ULONG ResultLength;
+
+ extern UCHAR V86CriticalInstruction;
+
+ KeInitializeMutex( &VdmStringIoMutex, MUTEX_LEVEL_VDM_IO );
+
+ //
+ // Set up and open KeyPath to wow key
+ //
+
+ RtlInitUnicodeString(
+ &WorkString,
+ L"\\REGISTRY\\MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Wow"
+ );
+
+ InitializeObjectAttributes(
+ &ObjectAttributes,
+ &WorkString,
+ OBJ_CASE_INSENSITIVE,
+ (HANDLE)NULL,
+ NULL
+ );
+
+ Status = ZwOpenKey(
+ &RegistryHandle,
+ KEY_READ,
+ &ObjectAttributes
+ );
+
+ //
+ // If there is no Wow key, don't allow Vdms to run
+ //
+ if (!NT_SUCCESS(Status)) {
+ return;
+ }
+
+ //
+ // Set up for using virtual interrupt extensions if they are available
+ //
+
+ //
+ // Get the Pentium Feature disable value.
+ // If this value is present, don't enable vme stuff.
+ //
+ RtlInitUnicodeString(
+ &WorkString,
+ L"DisableVme"
+ );
+
+ Status = ZwQueryValueKey(
+ RegistryHandle,
+ &WorkString,
+ KeyValueBasicInformation,
+ &KeyInformation,
+ sizeof(KEY_VALUE_BASIC_INFORMATION) + 30,
+ &ResultLength
+ );
+
+ if (!NT_SUCCESS(Status)) {
+
+ //
+ // If we have the extensions, set the appropriate bits
+ // in cr4
+ //
+ if (KeFeatureBits & KF_V86_VIS) {
+ KiIpiGenericCall(
+ Ki386VdmEnablePentiumExtentions,
+ TRUE
+ );
+ KeI386VirtualIntExtensions = V86_VIRTUAL_INT_EXTENSIONS;
+ }
+ }
+
+ //
+ // If we have V86 mode int extensions, we don't want to run with
+ // IOPL in v86 mode
+ //
+    if (!(KeI386VirtualIntExtensions & V86_VIRTUAL_INT_EXTENSIONS)) {
+ //
+ // Read registry to determine if Vdms will run with IOPL in v86 mode
+ //
+
+ //
+ // Get the VdmIOPL value.
+ //
+ RtlInitUnicodeString(
+ &WorkString,
+ L"VdmIOPL"
+ );
+
+ Status = ZwQueryValueKey(
+ RegistryHandle,
+ &WorkString,
+ KeyValueBasicInformation,
+ &KeyInformation,
+ sizeof(KEY_VALUE_BASIC_INFORMATION) + 30,
+ &ResultLength
+ );
+
+ //
+ // If the value exists, let Vdms run with IOPL in V86 mode
+ //
+ if (NT_SUCCESS(Status)) {
+ //
+ // KeEflagsAndMaskV86 and KeEflagsOrMaskV86 are used
+ // in SANITIZE_FLAGS, and the Vdm code to make sure the
+ // values in EFlags for v86 mode trap frames are acceptable
+ //
+ KeI386EFlagsAndMaskV86 = EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK;
+ KeI386EFlagsOrMaskV86 = EFLAGS_IOPL_MASK;
+
+ //
+ // KeVdmIoplAllowed is used by the Vdm code to determine if
+ // the virtual interrupt flag is in EFlags, or 40:xx
+ //
+ KeI386VdmIoplAllowed = TRUE;
+
+ }
+ }
+
+ ZwClose(RegistryHandle);
+
+ //
+ // Initialize the address of the Vdm communications area based on
+ // machine type because of non-AT Japanese PCs. Note that we only
+ // have to change the op-code for PC-98 machines as the default is
+ // the PC/AT value.
+ //
+
+ if (KeI386MachineType & MACHINE_TYPE_PC_9800_COMPATIBLE) {
+
+ //
+        // Set the NTVDM state linear address for the PC-9800 Series
+ //
+
+ VdmFixedStateLinear = FIXED_NTVDMSTATE_LINEAR_PC_98;
+
+ *(PULONG)(&V86CriticalInstruction + 1) = VdmFixedStateLinear;
+
+ } else {
+
+ //
+        // We are running on a normal PC/AT or a Fujitsu FMR compatible machine.
+ //
+
+ VdmFixedStateLinear = FIXED_NTVDMSTATE_LINEAR_PC_AT;
+ }
+}
+
+
+BOOLEAN
+Ke386VdmInsertQueueApc (
+ IN PKAPC Apc,
+ IN PKTHREAD Thread,
+ IN KPROCESSOR_MODE ApcMode,
+ IN PKKERNEL_ROUTINE KernelRoutine,
+ IN PKRUNDOWN_ROUTINE RundownRoutine OPTIONAL,
+ IN PKNORMAL_ROUTINE NormalRoutine OPTIONAL,
+ IN PVOID NormalContext OPTIONAL,
+ IN PVOID SystemArgument1 OPTIONAL,
+ IN PVOID SystemArgument2 OPTIONAL,
+ IN KPRIORITY Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes, and queues a vdm type of APC to the
+ specified thread.
+
+
+    A Vdm type of APC:
+    - uses the OriginalApcEnvironment
+    - will only be queued to one thread at a time
+    - if UserMode, fires on the next system exit.  A UserMode apc should
+      not be queued if the current vdm context is not application mode.
+
+Arguments:
+
+ Apc - Supplies a pointer to a control object of type APC.
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+    ApcMode - Supplies the processor mode (user or kernel) of the Apc
+
+ KernelRoutine - Supplies a pointer to a function that is to be
+ executed at IRQL APC_LEVEL in kernel mode.
+
+ RundownRoutine - Supplies an optional pointer to a function that is to be
+ called if the APC is in a thread's APC queue when the thread terminates.
+
+ NormalRoutine - Supplies an optional pointer to a function that is
+ to be executed at IRQL 0 in the specified processor mode. If this
+ parameter is not specified, then the ProcessorMode and NormalContext
+ parameters are ignored.
+
+ NormalContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the NormalRoutine parameter.
+
+ SystemArgument1, SystemArgument2 - Supply a set of two arguments that
+ contain untyped data provided by the executive.
+
+ Increment - Supplies the priority increment that is to be applied if
+ queuing the APC causes a thread wait to be satisfied.
+
+
+Return Value:
+
+ If APC queuing is disabled, then a value of FALSE is returned.
+ Otherwise a value of TRUE is returned.
+
+
+--*/
+
+{
+
+ PKAPC_STATE ApcState;
+ PKTHREAD ApcThread;
+ KIRQL OldIrql;
+ BOOLEAN Inserted;
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+    // If the apc object is not initialized, then initialize it and acquire
+ // the target thread APC queue lock.
+ //
+
+ if (Apc->Type != ApcObject) {
+ Apc->Type = ApcObject;
+ Apc->Size = sizeof(KAPC);
+ Apc->ApcStateIndex = OriginalApcEnvironment;
+ } else {
+
+ //
+ // Acquire the APC thread APC queue lock.
+ //
+ // If the APC is inserted in the corresponding APC queue, and the
+ // APC thread is not the same thread as the target thread, then
+ // the APC is removed from its current queue, the APC pending state
+ // is updated, the APC thread APC queue lock is released, and the
+ // target thread APC queue lock is acquired. Otherwise, the APC
+ // thread and the target thread are same thread and the APC is already
+ // queued to the correct thread.
+ //
+ // If the APC is not inserted in an APC queue, then release the
+ // APC thread APC queue lock and acquire the target thread APC queue
+ // lock.
+ //
+
+ ApcThread = Apc->Thread;
+ if (ApcThread) {
+ KiAcquireSpinLock(&ApcThread->ApcQueueLock);
+ if (Apc->Inserted) {
+ if (ApcThread == Apc->Thread && Apc->Thread != Thread) {
+ Apc->Inserted = FALSE;
+ RemoveEntryList(&Apc->ApcListEntry);
+ ApcState = Apc->Thread->ApcStatePointer[Apc->ApcStateIndex];
+ if (IsListEmpty(&ApcState->ApcListHead[Apc->ApcMode]) != FALSE) {
+ if (Apc->ApcMode == KernelMode) {
+ ApcState->KernelApcPending = FALSE;
+
+ } else {
+ ApcState->UserApcPending = FALSE;
+ }
+ }
+
+ } else {
+ KiReleaseSpinLock(&ApcThread->ApcQueueLock);
+ KiUnlockDispatcherDatabase(OldIrql);
+ return TRUE;
+ }
+ }
+
+ KiReleaseSpinLock(&ApcThread->ApcQueueLock);
+ }
+ }
+
+
+ KiAcquireSpinLock(&Thread->ApcQueueLock);
+
+ Apc->ApcMode = ApcMode;
+ Apc->Thread = Thread;
+ Apc->KernelRoutine = KernelRoutine;
+ Apc->RundownRoutine = RundownRoutine;
+ Apc->NormalRoutine = NormalRoutine;
+ Apc->SystemArgument1 = SystemArgument1;
+ Apc->SystemArgument2 = SystemArgument2;
+ Apc->NormalContext = NormalContext;
+
+ //
+ // Unlock the target thread APC queue.
+ //
+
+ KiReleaseSpinLock(&Thread->ApcQueueLock);
+
+ //
+    // If APC queuing is enabled, then attempt to queue the APC object.
+ //
+
+ if (Thread->ApcQueueable && KiInsertQueueApc(Apc, Increment)) {
+ Inserted = TRUE;
+
+ //
+ // If UserMode:
+        //      For a Vdm, a UserMode Apc is only queued by a kernel mode
+        //      apc that runs on the current thread on behalf of the target
+        //      thread.  Force UserApcPending in the user mode apc state so
+        //      that the apc fires when the target thread exits the kernel.
+ //
+
+ if (ApcMode == UserMode) {
+ KiBoostPriorityThread(Thread, Increment);
+ Thread->ApcState.UserApcPending = TRUE;
+ }
+
+ } else {
+ Inserted = FALSE;
+ }
+
+ //
+ // Unlock the dispatcher database, lower IRQL to its previous value, and
+ // return whether the APC object was inserted.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Inserted;
+}
+
+
+VOID
+Ke386VdmClearApcObject(
+ IN PKAPC Apc
+ )
+/*++
+
+Routine Description:
+
+    Clears a VDM APC object.  This is synchronized with
+    Ke386VdmInsertQueueApc and is expected to be called by one of the vdm
+    kernel apc routines or by the rundown routine.
+
+
+Arguments:
+
+ Apc - Supplies a pointer to a control object of type APC.
+
+
+Return Value:
+
+ void
+
+--*/
+{
+
+ KIRQL OldIrql;
+
+ //
+ // Take Dispatcher database lock, to sync with Ke386VDMInsertQueueApc
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ Apc->Thread = NULL;
+ KiUnlockDispatcherDatabase(OldIrql);
+
+}
+
+
+
+
+
+
+
+//
+// END of ACTIVE CODE
+//
+
+
+
+
+
+
+
+#if VDM_IO_TEST
+NTSTATUS
+TestIoByteRoutine(
+ IN ULONG Port,
+ IN UCHAR AccessMode,
+ IN OUT PUCHAR Data
+ )
+{
+ if (AccessMode & EMULATOR_READ_ACCESS) {
+ *Data = Port - 400;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+TestIoWordReadRoutine(
+ IN ULONG Port,
+ IN UCHAR AccessMode,
+ IN OUT PUSHORT Data
+ )
+{
+ if (AccessMode & EMULATOR_READ_ACCESS) {
+ *Data = Port - 200;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+TestIoWordWriteRoutine(
+ IN ULONG Port,
+ IN UCHAR AccessMode,
+ IN OUT PUSHORT Data
+ )
+{
+ DbgPrint("Word Write routine port # %lx, %x\n",Port,*Data);
+
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+TestIoDwordRoutine(
+ IN ULONG Port,
+ IN USHORT AccessMode,
+ IN OUT PULONG Data
+ )
+{
+ if (AccessMode & EMULATOR_READ_ACCESS) {
+ *Data = Port;
+ }
+
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+TestIoStringRoutine(
+ IN ULONG Port,
+ IN USHORT AccessMode,
+ IN OUT PSHORT Data,
+ IN ULONG Count
+ )
+{
+ ULONG i;
+
+ if (AccessMode & EMULATOR_READ_ACCESS) {
+ for (i = 0;i < Count ;i++ ) {
+ Data[i] = i;
+ }
+ } else {
+ DbgPrint("String Port Called for write port #%lx,",Port);
+ for (i = 0;i < Count ;i++ ) {
+ DbgPrint("%x\n",Data[i]);
+ }
+ }
+
+ return STATUS_SUCCESS;
+}
+
+PROCESS_IO_PORT_HANDLER_INFORMATION IoPortHandler;
+EMULATOR_ACCESS_ENTRY Entry[4];
+BOOLEAN Connect = TRUE, Disconnect = FALSE;
+
+VOID
+TestIoHandlerStuff(
+ VOID
+ )
+{
+ NTSTATUS Status;
+
+ IoPortHandler.Install = TRUE;
+ IoPortHandler.NumEntries = 5L;
+ IoPortHandler.EmulatorAccessEntries = Entry;
+
+ Entry[0].BasePort = 0x400;
+ Entry[0].NumConsecutivePorts = 0x30;
+ Entry[0].AccessType = Uchar;
+ Entry[0].AccessMode = EMULATOR_READ_ACCESS | EMULATOR_WRITE_ACCESS;
+ Entry[0].StringSupport = FALSE;
+ Entry[0].Routine = TestIoByteRoutine;
+
+ Entry[1].BasePort = 0x400;
+ Entry[1].NumConsecutivePorts = 0x18;
+ Entry[1].AccessType = Ushort;
+ Entry[1].AccessMode = EMULATOR_READ_ACCESS | EMULATOR_WRITE_ACCESS;
+ Entry[1].StringSupport = FALSE;
+ Entry[1].Routine = TestIoWordReadRoutine;
+
+ Entry[2].BasePort = 0x400;
+ Entry[2].NumConsecutivePorts = 0xc;
+ Entry[2].AccessType = Ulong;
+ Entry[2].AccessMode = EMULATOR_READ_ACCESS | EMULATOR_WRITE_ACCESS;
+ Entry[2].StringSupport = FALSE;
+ Entry[2].Routine = TestIoDwordRoutine;
+
+ Entry[3].BasePort = 0x400;
+ Entry[3].NumConsecutivePorts = 0x18;
+ Entry[3].AccessType = Ushort;
+ Entry[3].AccessMode = EMULATOR_READ_ACCESS | EMULATOR_WRITE_ACCESS;
+ Entry[3].StringSupport = TRUE;
+ Entry[3].Routine = TestIoStringRoutine;
+
+ if (Connect) {
+ Status = ZwSetInformationProcess(
+ NtCurrentProcess(),
+ ProcessIoPortHandlers,
+ &IoPortHandler,
+ sizeof(PROCESS_IO_PORT_HANDLER_INFORMATION)
+ ) ;
+ if (!NT_SUCCESS(Status)) {
+ DbgBreakPoint();
+ }
+ Connect = FALSE;
+ }
+
+ IoPortHandler.Install = FALSE;
+ if (Disconnect) {
+ Status = ZwSetInformationProcess(
+ NtCurrentProcess(),
+ ProcessIoPortHandlers,
+ &IoPortHandler,
+ sizeof(PROCESS_IO_PORT_HANDLER_INFORMATION)
+ );
+ if (!NT_SUCCESS(Status)) {
+ DbgBreakPoint();
+ }
+ Disconnect = FALSE;
+ }
+}
+#endif
+
diff --git a/private/ntos/ke/i386/vdmint21.c b/private/ntos/ke/i386/vdmint21.c
new file mode 100644
index 000000000..dbc32c356
--- /dev/null
+++ b/private/ntos/ke/i386/vdmint21.c
@@ -0,0 +1,228 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ vdmint21.c
+
+Abstract:
+
+ This module implements interfaces that support manipulation of i386
+ int 21 entry of IDT. These entry points only exist on i386 machines.
+
+Author:
+
+ Shie-Lin Tzong (shielint) 26-Dec-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#include "vdmntos.h"
+
+#define IDT_ACCESS_DPL_USER 0x6000
+#define IDT_ACCESS_TYPE_386_TRAP 0xF00
+#define IDT_ACCESS_TYPE_286_TRAP 0x700
+#define IDT_ACCESS_PRESENT 0x8000
+#define LDT_MASK 4
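+
+//
+// For reference, composed from the values above: a present, user-accessible
+// 386 trap gate has an access word of IDT_ACCESS_PRESENT |
+// IDT_ACCESS_DPL_USER | IDT_ACCESS_TYPE_386_TRAP == 0xEF00, and the 286
+// variant is 0xE700.
+//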
+
+//
+// External Reference
+//
+
+BOOLEAN
+Ki386GetSelectorParameters(
+ IN USHORT Selector,
+ OUT PULONG Flags,
+ OUT PULONG Base,
+ OUT PULONG Limit
+ );
+
+//
+// Define forward referenced function prototypes.
+//
+
+VOID
+Ki386LoadTargetInt21Entry (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+#define KiLoadInt21Entry() \
+ KeGetPcr()->IDT[0x21] = PsGetCurrentProcess()->Pcb.Int21Descriptor
+
+NTSTATUS
+Ke386SetVdmInterruptHandler (
+ PKPROCESS Process,
+ ULONG Interrupt,
+ USHORT Selector,
+ ULONG Offset,
+ BOOLEAN Gate32
+ )
+
+/*++
+
+Routine Description:
+
+ The specified (software) interrupt entry of IDT will be updated to
+ point to the specified handler. For all threads which belong to the
+ specified process, their execution processors will be notified to
+ make the same change.
+
+ This function only exists on i386 and i386 compatible processors.
+
+ No checking is done on the validity of the interrupt handler.
+
+Arguments:
+
+ Process - Pointer to KPROCESS object describing the process for
+ which the int 21 entry is to be set.
+
+ Interrupt - The software interrupt vector which will be updated.
+
+    Selector, Offset - Specify the address of the new handler.
+
+ Gate32 - True if the gate should be 32 bit, false otherwise
+
+Return Value:
+
+ NTSTATUS.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN LocalProcessor;
+ KAFFINITY TargetProcessors;
+ PKPRCB Prcb;
+ KIDTENTRY IdtDescriptor;
+ ULONG Flags, Base, Limit;
+
+ //
+ // Check the validity of the request
+ // 1. Currently, we support int21 redirection only
+ // 2. The specified interrupt handler must be in user space.
+ //
+
+ if (Interrupt != 0x21 || Offset >= (ULONG)MM_HIGHEST_USER_ADDRESS ||
+ !Ki386GetSelectorParameters(Selector, &Flags, &Base, &Limit) ){
+ return(STATUS_INVALID_PARAMETER);
+ }
+
+ //
+ // Initialize the contents of the IDT entry
+ //
+
+ IdtDescriptor.Offset = (USHORT)Offset;
+ IdtDescriptor.Selector = Selector | RPL_MASK | LDT_MASK;
+ IdtDescriptor.ExtendedOffset = (USHORT)(Offset >> 16);
+ IdtDescriptor.Access = IDT_ACCESS_DPL_USER | IDT_ACCESS_PRESENT;
+ if (Gate32) {
+ IdtDescriptor.Access |= IDT_ACCESS_TYPE_386_TRAP;
+
+ } else {
+ IdtDescriptor.Access |= IDT_ACCESS_TYPE_286_TRAP;
+ }
+
+ //
+ // Acquire the context swap lock so a context switch will not occur.
+ //
+
+ KiLockContextSwap(&OldIrql);
+
+ //
+    // Set the int 21 descriptor in the process object
+ //
+
+ Process->Int21Descriptor = IdtDescriptor;
+
+ //
+    // Tell all processors active for this process to reload their int 21
+    // IDT entry
+ //
+
+#if !defined(NT_UP)
+
+ Prcb = KeGetCurrentPrcb();
+ TargetProcessors = Process->ActiveProcessors & ~Prcb->SetMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ Ki386LoadTargetInt21Entry,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ KiLoadInt21Entry();
+
+#if !defined(NT_UP)
+
+ //
+ // Wait until all of the target processors have finished reloading
+    // their int 21 IDT entry.
+ //
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Restore IRQL and unlock the context swap lock.
+ //
+
+ KiUnlockContextSwap(OldIrql);
+ return STATUS_SUCCESS;
+}
+
+#if !defined(NT_UP)
+
+
+VOID
+Ki386LoadTargetInt21Entry (
+ IN PKIPI_CONTEXT PacketContext,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+/*++
+
+Routine Description:
+
+    Reload the local int 21 IDT entry from the current process object and
+    signal that the IPI packet has been processed.
+
+Arguments:
+
+    PacketContext - Supplies a pointer to the IPI packet context.
+
+    Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ none.
+
+--*/
+
+{
+
+ //
+ // Set the int 21 entry of IDT from currently active process object
+ //
+
+ KiLoadInt21Entry();
+ KiIpiSignalPacketDone(PacketContext);
+ return;
+}
+
+#endif
diff --git a/private/ntos/ke/i386/vdmp.h b/private/ntos/ke/i386/vdmp.h
new file mode 100644
index 000000000..e9211422b
--- /dev/null
+++ b/private/ntos/ke/i386/vdmp.h
@@ -0,0 +1,73 @@
+#define VDM_APP_MODE 0x00000001L
+#define VDM_INTERRUPT_PENDING 0x00000002L
+#define VDM_STATE_CHANGE 0x00000004L
+#define VDM_VIRTUAL_INTERRUPTS 0x00000200L
+#define VDM_PE_MASK 0x80000000L
+
+typedef enum _VdmEventClass {
+ VdmIO,
+ VdmStringIO,
+ VdmMemAccess,
+ VdmIntAck,
+ VdmBop,
+ VdmError,
+ VdmIrq13
+} VDMEVENTCLASS, *PVDMEVENTCLASS;
+
+typedef struct _VdmIoInfo {
+ USHORT PortNumber;
+ USHORT Size;
+ BOOLEAN Read;
+} VDMIOINFO, *PVDMIOINFO;
+
+typedef struct _VdmStringIoInfo {
+ USHORT PortNumber;
+ USHORT Size;
+ BOOLEAN Read;
+ ULONG Count;
+ ULONG Address;
+} VDMSTRINGIOINFO, *PVDMSTRINGIOINFO;
+
+typedef ULONG VDMBOPINFO;
+typedef NTSTATUS VDMERRORINFO;
+
+typedef struct _VdmEventInfo {
+ ULONG Size;
+ VDMEVENTCLASS Event;
+ ULONG InstructionSize;
+ union {
+ VDMIOINFO IoInfo;
+ VDMSTRINGIOINFO StringIoInfo;
+ VDMBOPINFO BopNumber;
+ VDMERRORINFO ErrorStatus;
+ };
+} VDMEVENTINFO, *PVDMEVENTINFO;
+
+typedef struct _Vdm_InterruptHandler {
+ USHORT CsSelector;
+ ULONG Eip;
+ USHORT SsSelector;
+ ULONG Esp;
+} VDM_INTERRUPTHANDLER, *PVDM_INTERRUPTHANDLER;
+
+typedef struct _Vdm_Tib {
+ ULONG Size;
+ ULONG Flags;
+ VDM_INTERRUPTHANDLER VdmInterruptHandlers[255];
+ CONTEXT MonitorContext;
+ CONTEXT VdmContext;
+ VDMEVENTINFO EventInfo;
+} VDM_TIB, *PVDM_TIB;
+
+NTSTATUS
+NtStartVdmExecution(
+ );
+
+// Flags that don't belong here
+
+#define SEL_TYPE_READ 0x00000001
+#define SEL_TYPE_WRITE 0x00000002
+#define SEL_TYPE_EXECUTE 0x00000004
+#define SEL_TYPE_BIG 0x00000008
+#define SEL_TYPE_ED 0x00000010
+#define SEL_TYPE_2GIG 0x00000020
diff --git a/private/ntos/ke/kernldat.c b/private/ntos/ke/kernldat.c
new file mode 100644
index 000000000..4925398d6
--- /dev/null
+++ b/private/ntos/ke/kernldat.c
@@ -0,0 +1,623 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ kernldat.c
+
+Abstract:
+
+ This module contains the declaration and allocation of kernel data
+ structures.
+
+Author:
+
+ David N. Cutler (davec) 12-Mar-1989
+
+Revision History:
+
+--*/
+#include "ki.h"
+
+//
+// The following data is read/write data that is grouped together for
+// performance. The layout of this data is important and must not be
+// changed.
+//
+// KiDispatcherReadyListHead - This is an array of type list entry. The
+// elements of the array are indexed by priority. Each element is a list
+// head for a set of threads that are in a ready state for the respective
+// priority. This array is used by the find next thread code to speed up
+// search for a ready thread when a thread becomes unrunnable. See also
+// KiReadySummary.
+//
+
+LIST_ENTRY KiDispatcherReadyListHead[MAXIMUM_PRIORITY];
+
+//
+// KiIdleSummary - This is the set of processors that are idle. It is used by
+// the ready thread code to speed up the search for a thread to preempt
+// when a thread becomes runnable.
+//
+
+KAFFINITY KiIdleSummary = 0;
+
+//
+// KiReadySummary - This is the set of dispatcher ready queues that are not
+// empty. A member is set in this set for each priority that has one or
+// more entries in its respective dispatcher ready queues.
+//
+
+ULONG KiReadySummary = 0;
+
+//
+// KiTimerTableListHead - This is an array of list heads that anchor the
+// individual timer lists.
+//
+
+LIST_ENTRY KiTimerTableListHead[TIMER_TABLE_SIZE];
+
+//
+// KiSwapContextNotifyRoutine - This is the address of a callout routine
+// which is called at each context switch if the address is not NULL.
+//
+
+PSWAP_CONTEXT_NOTIFY_ROUTINE KiSwapContextNotifyRoutine;
+
+//
+// KiThreadSelectNotifyRoutine - This is the address of a callout routine
+// which is called when a thread is being selected for execution if
+// the address is not NULL.
+//
+
+PTHREAD_SELECT_NOTIFY_ROUTINE KiThreadSelectNotifyRoutine;
+
+//
+// KiTimeUpdateNotifyRoutine - This is the address of a callout routine
+// which is called when the runtime for a thread is updated if the
+// address is not NULL.
+//
+
+PTIME_UPDATE_NOTIFY_ROUTINE KiTimeUpdateNotifyRoutine;
+
+//
+// Public kernel data declaration and allocation.
+//
+// KeActiveProcessors - This is the set of processors that are active in the
+// system.
+//
+
+KAFFINITY KeActiveProcessors = 0;
+
+//
+// KeBootTime - This is the absolute time when the system was booted.
+//
+
+LARGE_INTEGER KeBootTime;
+
+//
+// KeBugCheckCallbackListHead - This is the list head for registered
+// bug check callback routines.
+//
+
+LIST_ENTRY KeBugCheckCallbackListHead;
+
+//
+// KeBugCheckCallbackLock - This is the spin lock that guards the bug
+// check callback list.
+//
+
+KSPIN_LOCK KeBugCheckCallbackLock;
+
+//
+// KeDcacheFlushCount - This is the number of data cache flushes that have
+// been performed since the system was booted.
+//
+
+ULONG KeDcacheFlushCount = 0;
+
+//
+// KeIcacheFlushCount - This is the number of instruction cache flushes that
+// have been performed since the system was booted.
+//
+
+ULONG KeIcacheFlushCount = 0;
+
+//
+// KeGdiFlushUserBatch - This is the address of the GDI user batch flush
+// routine which is initialized when the win32k subsystem is loaded.
+//
+
+PGDI_BATCHFLUSH_ROUTINE KeGdiFlushUserBatch;
+
+//
+// KeLoaderBlock - This is a pointer to the loader parameter block which is
+// constructed by the OS Loader.
+//
+
+PLOADER_PARAMETER_BLOCK KeLoaderBlock = NULL;
+
+//
+// KeMinimumIncrement - This is the minimum time between clock interrupts
+// in 100ns units that is supported by the host HAL.
+//
+
+ULONG KeMinimumIncrement;
+
+//
+// KeNumberProcessors - This is the number of processors in the configuration.
+//      It is used by the ready thread and spin lock code to determine if a
+// faster algorithm can be used for the case of a single processor system.
+// The value of this variable is set when processors are initialized.
+//
+
+CCHAR KeNumberProcessors = 0;
+
+//
+// KeNumberProcessIds - This is a MIPS specific value and defines the number
+// of process id tags in the host TB.
+//
+
+#if defined(_MIPS_)
+
+ULONG KeNumberProcessIds;
+
+#endif
+
+//
+// KeNumberTbEntries - This is a MIPS specific value and defines the number
+// of TB entries in the host TB.
+//
+
+#if defined(_MIPS_)
+
+ULONG KeNumberTbEntries;
+
+#endif
+
+//
+// KeRegisteredProcessors - This is the maximum number of processors
+//      which should be utilized by the system.
+//
+
+#if !defined(NT_UP)
+
+#if DBG
+
+ULONG KeRegisteredProcessors = 4;
+ULONG KeLicensedProcessors;
+
+#else
+
+ULONG KeRegisteredProcessors = 2;
+ULONG KeLicensedProcessors;
+
+#endif
+
+#endif
+
+//
+// KeProcessorArchitecture - Architecture of all processors present in system.
+// See PROCESSOR_ARCHITECTURE_ defines in ntexapi.h
+//
+
+USHORT KeProcessorArchitecture = PROCESSOR_ARCHITECTURE_UNKNOWN;
+
+//
+// KeProcessorLevel - Architecture-specific processor level of all processors
+// present in system.
+
+USHORT KeProcessorLevel = 0;
+
+//
+// KeProcessorRevision - Architecture-specific processor revision number that is
+// the least common denominator of all processors present in system.
+//
+
+USHORT KeProcessorRevision = 0;
+
+//
+// KeFeatureBits - Architectural specific processor features present
+// on all processors.
+//
+
+ULONG KeFeatureBits = 0;
+
+//
+// KeServiceDescriptorTable - This is a table of descriptors for system
+// service providers. Each entry in the table describes the base
+// address of the dispatch table and the number of services provided.
+//
+
+KSERVICE_TABLE_DESCRIPTOR KeServiceDescriptorTable[NUMBER_SERVICE_TABLES];
+KSERVICE_TABLE_DESCRIPTOR KeServiceDescriptorTableShadow[NUMBER_SERVICE_TABLES];
+
+//
+// KeThreadSwitchCounters - These counters record the number of times a
+// thread can be scheduled on the current processor, any processor,
+// or the last processor it ran on.
+//
+
+KTHREAD_SWITCH_COUNTERS KeThreadSwitchCounters;
+
+//
+// KeTimeIncrement - This is the nominal number of 100ns units that are to
+//      be added to the system time at each interval timer interrupt. This
+//      value is set by the HAL and is used to compute the due time for
+// timer table entries.
+//
+
+ULONG KeTimeIncrement;
+
+//
+// KeTimeSynchronization - This variable controls whether time synchronization
+// is performed using the realtime clock (TRUE) or whether it is under the
+// control of a service (FALSE).
+//
+
+BOOLEAN KeTimeSynchronization = TRUE;
+
+//
+// KeUserApcDispatcher - This is the address of the user mode APC dispatch
+// code. This address is looked up in NTDLL.DLL during initialization
+// of the system.
+//
+
+ULONG KeUserApcDispatcher;
+
+//
+// KeUserCallbackDispatcher - This is the address of the user mode callback
+// dispatch code. This address is looked up in NTDLL.DLL during
+// initialization of the system.
+//
+
+ULONG KeUserCallbackDispatcher;
+
+//
+// KeUserExceptionDispatcher - This is the address of the user mode exception
+// dispatch code. This address is looked up in NTDLL.DLL during system
+// initialization.
+//
+
+ULONG KeUserExceptionDispatcher;
+
+//
+// KeRaiseUserExceptionDispatcher - This is the address of the raise user
+// mode exception dispatch code. This address is looked up in NTDLL.DLL
+// during system initialization.
+//
+
+ULONG KeRaiseUserExceptionDispatcher;
+
+//
+// Private kernel data declaration and allocation.
+//
+// KiBugCodeMessages - Address of where the BugCode messages can be found.
+//
+
+#if DEVL
+
+PMESSAGE_RESOURCE_DATA KiBugCodeMessages = NULL;
+
+#endif
+
+//
+// KiDmaIoCoherency - This determines whether the host platform supports
+// coherent DMA I/O.
+//
+
+ULONG KiDmaIoCoherency;
+
+//
+// KiMaximumSearchCount - This is the maximum number of timer entries that
+//      have had to be examined to insert a timer in the timer tree.
+//
+
+ULONG KiMaximumSearchCount = 0;
+
+//
+// KiDebugRoutine - This is the address of the kernel debugger. Initially
+// this is filled with the address of a routine that just returns. If
+// the system debugger is present in the system, then it sets this
+//      location to the address of the system debugger's routine.
+//
+
+PKDEBUG_ROUTINE KiDebugRoutine;
+
+//
+// KiDebugSwitchRoutine - This is the address of the kernel debugger's
+// processor switch routine. This is used on an MP system to
+// switch host processors while debugging.
+//
+
+PKDEBUG_SWITCH_ROUTINE KiDebugSwitchRoutine;
+
+//
+// KiDispatcherLock - This is the spin lock that guards the dispatcher
+// database.
+//
+
+extern KSPIN_LOCK KiDispatcherLock;
+
+CCHAR KiFindFirstSetRight[256] = {
+ 0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
+
+CCHAR KiFindFirstSetLeft[256] = {
+ 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
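+
+//
+// Example (illustrative value): for the byte value 0x28 (bits 3 and 5 set),
+// KiFindFirstSetRight[0x28] == 3 (the lowest set bit) and
+// KiFindFirstSetLeft[0x28] == 5 (the highest set bit).
+//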
+
+//
+// KiFreezeExecutionLock - This is the spin lock that guards the freezing
+// of execution.
+//
+
+extern KSPIN_LOCK KiFreezeExecutionLock;
+
+//
+// KiFreezeLockBackup - For debug builds only. Allows kernel debugger to
+//      be entered even if the FreezeExecutionLock is jammed.
+//
+
+extern KSPIN_LOCK KiFreezeLockBackup;
+
+//
+// KiFreezeFlag - For debug builds only. Flags to track and signal non-
+// normal freezelock conditions.
+//
+
+ULONG KiFreezeFlag;
+
+//
+// KiSuspendState - Flag to track the suspend/resume state of processors
+//
+
+volatile ULONG KiSuspendState;
+
+//
+// KiFindLeftNibbleBitTable - This is a table that is used to find the leftmost bit in
+// a 4-bit nibble.
+//
+
+UCHAR KiFindLeftNibbleBitTable[] = {0, 0, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3};
+
+//
+// KiProcessorBlock - This is an array of pointers to processor control blocks.
+// The elements of the array are indexed by processor number. Each element
+// is a pointer to the processor control block for one of the processors
+// in the configuration. This array is used by various sections of code
+// that need to effect the execution of another processor.
+//
+
+PKPRCB KiProcessorBlock[MAXIMUM_PROCESSORS];
+
+//
+// KiSwapEvent - This is the event that is used to wake up the balance set
+// thread to inswap processes, outswap processes, and to inswap kernel
+// stacks.
+//
+
+KEVENT KiSwapEvent;
+
+//
+// KiProcessInSwapListHead - This is the list of processes that are waiting
+// to be inswapped.
+//
+
+LIST_ENTRY KiProcessInSwapListHead;
+
+//
+// KiProcessOutSwapListHead - This is the list of processes that are waiting
+// to be outswapped.
+//
+
+LIST_ENTRY KiProcessOutSwapListHead;
+
+//
+// KiStackInSwapListHead - This is the list of threads that are waiting
+// to get their stack inswapped before they can run. Threads are
+//      inserted in this list by the ready thread code and removed by the balance
+// set thread.
+//
+
+LIST_ENTRY KiStackInSwapListHead;
+
+//
+// KiProfileSourceListHead - The list of profile sources that are currently
+// active.
+//
+
+LIST_ENTRY KiProfileSourceListHead;
+
+//
+// KiProfileAlignmentFixup - Indicates whether alignment fixup profiling
+// is active.
+//
+
+BOOLEAN KiProfileAlignmentFixup;
+
+//
+// KiProfileAlignmentFixupInterval - Indicates the current alignment fixup
+// profiling interval.
+//
+
+ULONG KiProfileAlignmentFixupInterval;
+
+//
+// KiProfileAlignmentFixupCount - Indicates the current alignment fixup
+// count.
+//
+
+ULONG KiProfileAlignmentFixupCount;
+
+//
+// KiProfileInterval - The profile interval in 100ns units.
+//
+
+ULONG KiProfileInterval = DEFAULT_PROFILE_INTERVAL;
+
+//
+// KiProfileListHead - This is the list head for the profile list.
+//
+
+LIST_ENTRY KiProfileListHead;
+
+//
+// KiProfileLock - This is the spin lock that guards the profile list.
+//
+
+extern KSPIN_LOCK KiProfileLock;
+
+//
+// KiTimerExpireDpc - This is the Deferred Procedure Call (DPC) object that
+// is used to process the timer queue when a timer has expired.
+//
+
+KDPC KiTimerExpireDpc;
+
+//
+// KiTimeIncrementReciprocal - This is the reciprocal fraction of the time
+// increment value that is specified by the HAL when the system is
+// booted.
+//
+
+LARGE_INTEGER KiTimeIncrementReciprocal;
+
+//
+// KiTimeIncrementShiftCount - This is the shift count that corresponds to
+// the time increment reciprocal value.
+//
+
+CCHAR KiTimeIncrementShiftCount;
+
+//
+// KiWaitInListHead - This is a list of threads that are waiting with a
+// resident kernel stack.
+//
+
+LIST_ENTRY KiWaitInListHead;
+
+//
+// KiWaitOutListHead - This is a list of threads that are either waiting
+//      with a kernel stack that is nonresident or are not eligible to
+// have their stack swapped.
+//
+
+LIST_ENTRY KiWaitOutListHead;
+
+//
+// Private kernel data declaration and allocation.
+//
+//
+// KiIpiCounts - Instrumentation counters for IPI requests.
+//      Each processor has its own set. Instrumentation builds only.
+//
+
+#if NT_INST
+
+KIPI_COUNTS KiIpiCounts[MAXIMUM_PROCESSORS];
+
+#endif // NT_INST
+
+//
+// KiMasterPid - This is the master PID that is used to assign PID's to
+// processes.
+//
+
+#if defined(_PPC_)
+
+ULONG KiMasterPid;
+
+//
+// KiMasterSequence - This is the master sequence number that is used to
+// assign PID's to processes.
+//
+
+ULONG KiMasterSequence = 1;
+
+//
+// KiProcessIdWrapLock - This is the spin lock that is used to provide
+// mutual exclusion between the TB flush routines and the process
+// swap code when PID roll over occurs.
+//
+
+KSPIN_LOCK KiProcessIdWrapLock = 0;
+
+#endif
+
+//
+// KxUnexpectedInterrupt - This is the interrupt object that is used to
+//      populate the interrupt vector table for vectors that are not
+//      connected to any interrupt object.
+//
+
+#if defined(_MIPS_) || defined(_ALPHA_) || defined(_PPC_)
+
+KINTERRUPT KxUnexpectedInterrupt;
+
+#endif
+
+//
+// Performance data declaration and allocation.
+//
+// KiFlushSingleCallData - This is the call performance data for the kernel
+// flush single TB function.
+//
+
+#if defined(_COLLECT_FLUSH_SINGLE_CALLDATA_)
+
+CALL_PERFORMANCE_DATA KiFlushSingleCallData;
+
+#endif
+
+//
+// KiSetEventCallData - This is the call performance data for the kernel
+// set event function.
+//
+
+#if defined(_COLLECT_SET_EVENT_CALLDATA_)
+
+CALL_PERFORMANCE_DATA KiSetEventCallData;
+
+#endif
+
+//
+// KiWaitSingleCallData - This is the call performance data for the kernel
+// wait for single object function.
+//
+
+#if defined(_COLLECT_WAIT_SINGLE_CALLDATA_)
+
+CALL_PERFORMANCE_DATA KiWaitSingleCallData;
+
+#endif
diff --git a/private/ntos/ke/ki.h b/private/ntos/ke/ki.h
new file mode 100644
index 000000000..562d0061c
--- /dev/null
+++ b/private/ntos/ke/ki.h
@@ -0,0 +1,1128 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ ki.h
+
+Abstract:
+
+ This module contains the private (internal) header file for the
+ kernel.
+
+Author:
+
+ David N. Cutler (davec) 28-Feb-1989
+
+Revision History:
+
+--*/
+
+#ifndef _KI_
+#define _KI_
+#include "ntos.h"
+#include "stdio.h"
+#include "stdlib.h"
+#include "zwapi.h"
+
+//
+// Private (internal) constant definitions.
+//
+// Priority increment value definitions
+//
+
+#define ALERT_INCREMENT 2 // Alerted unwait priority increment
+#define BALANCE_INCREMENT 10 // Balance set priority increment
+#define RESUME_INCREMENT 0 // Resume thread priority increment
+#define TIMER_EXPIRE_INCREMENT 0 // Timer expiration priority increment
+
+//
+// Define NIL pointer value.
+//
+
+#define NIL (PVOID)NULL // Null pointer to void
+
+//
+// Define macros which are used in the kernel only
+//
+// Clear member in set
+//
+
+#define ClearMember(Member, Set) \
+ Set = Set & (~(1 << (Member)))
+
+//
+// Set member in set
+//
+
+#define SetMember(Member, Set) \
+ Set = Set | (1 << (Member))
+
+#define FindFirstSetLeftMember(Set, Member) { \
+ ULONG _Bit; \
+ ULONG _Mask; \
+ ULONG _Offset = 16; \
+ if ((_Mask = Set >> 16) == 0) { \
+ _Offset = 0; \
+ _Mask = Set; \
+ } \
+ if (_Mask >> 8) { \
+ _Offset += 8; \
+ } \
+ if ((_Bit = Set >> _Offset) & 0xf0) { \
+ _Bit >>= 4; \
+ _Offset += 4; \
+ } \
+ *(Member) = KiFindLeftNibbleBitTable[_Bit] + _Offset; \
+}
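+
+//
+// Example (illustrative value): for Set == 0x00A0 (bits 5 and 7 set), the
+// macro above selects the high nibble of the low byte, looks up
+// KiFindLeftNibbleBitTable[0xA] == 3, and produces *(Member) == 7, the
+// left-most set bit.
+//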
+
+//
+// Lock and unlock APC queue lock.
+//
+
+#if defined(NT_UP)
+#define KiLockApcQueue(Thread, OldIrql) \
+ *(OldIrql) = KeRaiseIrqlToSynchLevel()
+#else
+#define KiLockApcQueue(Thread, OldIrql) \
+ *(OldIrql) = KeAcquireSpinLockRaiseToSynch(&(Thread)->ApcQueueLock)
+#endif
+
+#if defined(NT_UP)
+#define KiUnlockApcQueue(Thread, OldIrql) KeLowerIrql((OldIrql))
+#else
+#define KiUnlockApcQueue(Thread, OldIrql) KeReleaseSpinLock(&(Thread)->ApcQueueLock, (OldIrql))
+#endif
+
+//
+// Lock and unlock context swap lock.
+//
+
+#if defined(NT_UP)
+#define KiLockContextSwap(OldIrql) \
+ *(OldIrql) = KeRaiseIrqlToSynchLevel()
+#else
+#define KiLockContextSwap(OldIrql) \
+ *(OldIrql) = KeAcquireSpinLockRaiseToSynch(&KiContextSwapLock)
+#endif
+
+#if defined(NT_UP)
+#define KiUnlockContextSwap(OldIrql) KeLowerIrql((OldIrql))
+#else
+#define KiUnlockContextSwap(OldIrql) KeReleaseSpinLock(&KiContextSwapLock, (OldIrql))
+#endif
+
+//
+// Lock and unlock dispatcher database lock.
+//
+
+#if defined(NT_UP)
+#define KiLockDispatcherDatabase(OldIrql) \
+ *(OldIrql) = KeRaiseIrqlToSynchLevel()
+#else
+#define KiLockDispatcherDatabase(OldIrql) \
+ *(OldIrql) = KeAcquireSpinLockRaiseToSynch(&KiDispatcherLock)
+#endif
+
+VOID
+FASTCALL
+KiUnlockDispatcherDatabase (
+ IN KIRQL OldIrql
+ );
+
+
+// VOID
+// KiBoostPriorityThread (
+// IN PKTHREAD Thread,
+// IN KPRIORITY Increment
+// )
+//
+//*++
+//
+// Routine Description:
+//
+// This function boosts the priority of the specified thread using
+// the same algorithm used when a thread gets a boost from a wait
+// operation.
+//
+// Arguments:
+//
+// Thread - Supplies a pointer to a dispatcher object of type thread.
+//
+// Increment - Supplies the priority increment that is to be applied to
+// the thread's priority.
+//
+// Return Value:
+//
+// None.
+//
+//--*
+
+#define KiBoostPriorityThread(Thread, Increment) { \
+ KPRIORITY NewPriority; \
+ PKPROCESS Process; \
+ \
+ if ((Thread)->Priority < LOW_REALTIME_PRIORITY) { \
+ if ((Thread)->PriorityDecrement == 0) { \
+ NewPriority = (Thread)->BasePriority + (Increment); \
+ if (NewPriority > (Thread)->Priority) { \
+ if (NewPriority >= LOW_REALTIME_PRIORITY) { \
+ NewPriority = LOW_REALTIME_PRIORITY - 1; \
+ } \
+ \
+ Process = (Thread)->ApcState.Process; \
+ (Thread)->Quantum = Process->ThreadQuantum; \
+ KiSetPriorityThread((Thread), NewPriority); \
+ } \
+ } \
+ } \
+}
+
+// VOID
+// KiInsertWaitList (
+// IN KPROCESSOR_MODE WaitMode,
+// IN PKTHREAD Thread
+// )
+//
+//*++
+//
+// Routine Description:
+//
+// This function inserts the specified thread in the appropriate
+// wait list.
+//
+// Arguments:
+//
+// WaitMode - Supplies the processor mode of the wait operation.
+//
+// Thread - Supplies a pointer to a dispatcher object of type
+// thread.
+//
+// Return Value:
+//
+// None.
+//
+//--*
+
+#define KiInsertWaitList(_WaitMode, _Thread) { \
+ PLIST_ENTRY _ListHead; \
+ _ListHead = &KiWaitInListHead; \
+ if (((_WaitMode) == KernelMode) || \
+ ((_Thread)->EnableStackSwap == FALSE) || \
+ ((_Thread)->Priority >= (LOW_REALTIME_PRIORITY + 9))) { \
+ _ListHead = &KiWaitOutListHead; \
+ } \
+ InsertTailList(_ListHead, &(_Thread)->WaitListEntry); \
+}
+
+//
+// Private (internal) structure definitions.
+//
+// APC Parameter structure.
+//
+
+typedef struct _KAPC_RECORD {
+ PKNORMAL_ROUTINE NormalRoutine;
+ PVOID NormalContext;
+ PVOID SystemArgument1;
+ PVOID SystemArgument2;
+} KAPC_RECORD, *PKAPC_RECORD;
+
+//
+// Executive initialization.
+//
+
+VOID
+ExpInitializeExecutive (
+ IN ULONG Number,
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ );
+
+//
+// Kernel executive object function definitions.
+//
+
+BOOLEAN
+KiChannelInitialization (
+ VOID
+ );
+
+VOID
+KiRundownChannel (
+ VOID
+ );
+
+//
+// Interprocessor interrupt function definitions.
+//
+// Define immediate interprocessor commands.
+//
+
+#define IPI_APC 1 // APC interrupt request
+#define IPI_DPC 2 // DPC interrupt request
+#define IPI_FREEZE 4 // freeze execution request
+#define IPI_PACKET_READY 8 // packet ready request
+
+//
+// Define interprocessor interrupt types.
+//
+
+typedef ULONG KIPI_REQUEST;
+
+typedef
+ULONG
+(*PKIPI_BROADCAST_WORKER)(
+ IN ULONG Argument
+ );
+
+#if NT_INST
+
+#define IPI_INSTRUMENT_COUNT(a,b) KiIpiCounts[a].b++;
+
+#else
+
+#define IPI_INSTRUMENT_COUNT(a,b)
+
+#endif
+
+//
+// Define interprocessor interrupt function prototypes.
+//
+
+ULONG
+KiIpiGenericCall (
+ IN PKIPI_BROADCAST_WORKER BroadcastFunction,
+ IN ULONG Context
+ );
+
+#if defined(_MIPS_) || defined(_ALPHA_) || defined(_PPC_)
+
+ULONG
+KiIpiProcessRequests (
+ VOID
+ );
+
+#endif
+
+VOID
+FASTCALL
+KiIpiSend (
+ IN KAFFINITY TargetProcessors,
+ IN KIPI_REQUEST Request
+ );
+
+VOID
+KiIpiSendPacket (
+ IN KAFFINITY TargetProcessors,
+ IN PKIPI_WORKER WorkerFunction,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+// begin_nthal
+
+BOOLEAN
+KiIpiServiceRoutine (
+ IN struct _KTRAP_FRAME *TrapFrame,
+ IN struct _KEXCEPTION_FRAME *ExceptionFrame
+ );
+
+// end_nthal
+
+VOID
+FASTCALL
+KiIpiSignalPacketDone (
+ IN PKIPI_CONTEXT SignalDone
+ );
+
+VOID
+KiIpiStallOnPacketTargets (
+ VOID
+ );
+
+//
+// Private (internal) function definitions.
+//
+
+VOID
+FASTCALL
+KiActivateWaiterQueue (
+ IN PRKQUEUE Queue
+ );
+
+VOID
+KiApcInterrupt (
+ VOID
+ );
+
+NTSTATUS
+KiCallUserMode (
+ IN PVOID *OutputBuffer,
+ IN PULONG OutputLength
+ );
+
+VOID
+KiChainedDispatch (
+ VOID
+ );
+
+#if DBG
+
+VOID
+KiCheckTimerTable (
+ IN ULARGE_INTEGER SystemTime
+ );
+
+#endif
+
+LARGE_INTEGER
+KiComputeReciprocal (
+ IN LONG Divisor,
+ OUT PCCHAR Shift
+ );
+
+ULONG KiComputeTimerTableIndex (
+ IN LARGE_INTEGER Interval,
+ IN LARGE_INTEGER CurrentCount,
+ IN PRKTIMER Timer
+ );
+
+PLARGE_INTEGER
+FASTCALL
+KiComputeWaitInterval (
+ IN PRKTIMER Timer,
+ IN PLARGE_INTEGER OriginalTime,
+ IN OUT PLARGE_INTEGER NewTime
+ );
+
+NTSTATUS
+KiContinue (
+ IN PCONTEXT ContextRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ );
+
+NTSTATUS
+KiContinueClientWait (
+ IN PVOID ClientEvent,
+ IN ULONG WaitReason,
+ IN ULONG WaitMode
+ );
+
+VOID
+KiDeliverApc (
+ IN KPROCESSOR_MODE PreviousMode,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ );
+
+BOOLEAN
+KiDisableInterrupts (
+ VOID
+ );
+
+VOID
+KiRestoreInterrupts (
+ IN BOOLEAN Enable
+ );
+
+VOID
+KiDispatchException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN BOOLEAN FirstChance
+ );
+
+KCONTINUE_STATUS
+KiSetDebugProcessor (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN KPROCESSOR_MODE PreviousMode
+ );
+
+ULONG
+KiCopyInformation (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord1,
+ IN PEXCEPTION_RECORD ExceptionRecord2
+ );
+
+VOID
+KiDispatchInterrupt (
+ VOID
+ );
+
+PKTHREAD
+FASTCALL
+KiFindReadyThread (
+ IN ULONG Processor,
+ KPRIORITY LowPriority
+ );
+
+VOID
+KiFloatingDispatch (
+ VOID
+ );
+
+VOID
+FASTCALL
+KiFlushSingleTb (
+ IN BOOLEAN Invalid,
+ IN PVOID Virtual
+ );
+
+VOID
+KiFlushSingleTbByPid (
+ IN BOOLEAN Invalid,
+ IN PVOID Virtual,
+ IN ULONG Pid
+ );
+
+VOID
+KiFlushMultipleTb (
+ IN BOOLEAN Invalid,
+ IN PVOID *Virtual,
+ IN ULONG Count
+ );
+
+VOID
+KiFlushMultipleTbByPid (
+ IN BOOLEAN Invalid,
+ IN PVOID *Virtual,
+ IN ULONG Count,
+ IN ULONG Pid
+ );
+
+#if defined(_MIPS_)
+
+VOID
+KiReadEntryTb (
+ IN ULONG Index,
+ IN PTB_ENTRY TbEntry
+ );
+
+ULONG
+KiProbeEntryTb (
+ IN PVOID VirtualAddress
+ );
+
+#endif
+
+PULONG
+KiGetUserModeStackAddress (
+ VOID
+ );
+
+VOID
+KiInitializeContextThread (
+ IN PKTHREAD Thread,
+ IN PKSYSTEM_ROUTINE SystemRoutine,
+ IN PKSTART_ROUTINE StartRoutine OPTIONAL,
+ IN PVOID StartContext OPTIONAL,
+ IN PCONTEXT ContextFrame OPTIONAL
+ );
+
+VOID
+KiInitializeKernel (
+ IN PKPROCESS Process,
+ IN PKTHREAD Thread,
+ IN PVOID IdleStack,
+ IN PKPRCB Prcb,
+ IN CCHAR Number,
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ );
+
+VOID
+KiInitSystem (
+ VOID
+ );
+
+BOOLEAN
+KiInitMachineDependent (
+ VOID
+ );
+
+VOID
+KiInitializeUserApc (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKNORMAL_ROUTINE NormalRoutine,
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ );
+
+LONG
+FASTCALL
+KiInsertQueue (
+ IN PRKQUEUE Queue,
+ IN PLIST_ENTRY Entry,
+ IN BOOLEAN Head
+ );
+
+BOOLEAN
+FASTCALL
+KiInsertQueueApc (
+ IN PKAPC Apc,
+ IN KPRIORITY Increment
+ );
+
+LOGICAL
+FASTCALL
+KiInsertTreeTimer (
+ IN PRKTIMER Timer,
+ IN LARGE_INTEGER Interval
+ );
+
+VOID
+KiInterruptDispatch (
+ VOID
+ );
+
+VOID
+KiInterruptDispatchRaise (
+ IN PKINTERRUPT Interrupt
+ );
+
+VOID
+KiInterruptDispatchSame (
+ IN PKINTERRUPT Interrupt
+ );
+
+#if defined(i386)
+
+VOID
+KiInitializePcr (
+ IN ULONG Processor,
+ IN PKPCR Pcr,
+ IN PKIDTENTRY Idt,
+ IN PKGDTENTRY Gdt,
+ IN PKTSS Tss,
+ IN PKTHREAD Thread
+ );
+
+VOID
+KiFlushNPXState (
+ VOID
+ );
+
+VOID
+Ke386ConfigureCyrixProcessor (
+ VOID
+ );
+
+ULONG
+KiCopyInformation (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord1,
+ IN PEXCEPTION_RECORD ExceptionRecord2
+ );
+
+VOID
+KiSetHardwareTrigger (
+ VOID
+ );
+
+#ifdef DBGMP
+VOID
+KiPollDebugger (
+ VOID
+ );
+#endif
+
+VOID
+FASTCALL
+KiIpiSignalPacketDoneAndStall (
+ IN PKIPI_CONTEXT Signaldone,
+ IN ULONG volatile *ReverseStall
+ );
+
+#endif
+
+
+KIRQL
+KiLockDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue
+ );
+
+VOID
+KiPassiveRelease (
+ VOID
+ );
+
+PRKTHREAD
+KiQuantumEnd (
+ VOID
+ );
+
+NTSTATUS
+KiRaiseException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PCONTEXT ContextRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN BOOLEAN FirstChance
+ );
+
+VOID
+FASTCALL
+KiReadyThread (
+ IN PRKTHREAD Thread
+ );
+
+LOGICAL
+FASTCALL
+KiReinsertTreeTimer (
+ IN PRKTIMER Timer,
+ IN ULARGE_INTEGER DueTime
+ );
+
+#if DBG
+
+#define KiRemoveTreeTimer(Timer) \
+ (Timer)->Header.Inserted = FALSE; \
+ RemoveEntryList(&(Timer)->TimerListEntry); \
+ (Timer)->TimerListEntry.Flink = NULL; \
+ (Timer)->TimerListEntry.Blink = NULL
+
+#else
+
+#define KiRemoveTreeTimer(Timer) \
+ (Timer)->Header.Inserted = FALSE; \
+ RemoveEntryList(&(Timer)->TimerListEntry)
+
+#endif
+
+#if defined(NT_UP)
+
+#define KiRequestApcInterrupt(Processor) KiRequestSoftwareInterrupt(APC_LEVEL)
+
+#else
+
+#define KiRequestApcInterrupt(Processor) \
+ if (KeGetCurrentPrcb()->Number == (CCHAR)Processor) { \
+ KiRequestSoftwareInterrupt(APC_LEVEL); \
+ } else { \
+ KiIpiSend((KAFFINITY)(1 << Processor), IPI_APC); \
+ }
+
+#endif
+
+#if defined(NT_UP)
+
+#define KiRequestDispatchInterrupt(Processor)
+
+#else
+
+#define KiRequestDispatchInterrupt(Processor) \
+ if (KeGetCurrentPrcb()->Number != (CCHAR)Processor) { \
+ KiIpiSend((KAFFINITY)(1 << Processor), IPI_DPC); \
+ }
+
+#endif
+
+PRKTHREAD
+FASTCALL
+KiSelectNextThread (
+ IN PRKTHREAD Thread
+ );
+
+VOID
+FASTCALL
+KiSetPriorityThread (
+ IN PRKTHREAD Thread,
+ IN KPRIORITY Priority
+ );
+
+VOID
+KiSetSystemTime (
+ IN PLARGE_INTEGER NewTime,
+ OUT PLARGE_INTEGER OldTime
+ );
+
+VOID
+KiSuspendNop (
+ IN struct _KAPC *Apc,
+ IN OUT PKNORMAL_ROUTINE *NormalRoutine,
+ IN OUT PVOID *NormalContext,
+ IN OUT PVOID *SystemArgument1,
+ IN OUT PVOID *SystemArgument2
+ );
+
+VOID
+KiSuspendThread (
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ );
+
+NTSTATUS
+FASTCALL
+KiSwapContext (
+ IN PRKTHREAD Thread,
+ IN BOOLEAN Ready
+ );
+
+BOOLEAN
+KiSwapProcess (
+ IN PKPROCESS NewProcess,
+ IN PKPROCESS OldProcess
+ );
+
+NTSTATUS
+FASTCALL
+KiSwapThread (
+ VOID
+ );
+
+NTSTATUS
+KiSwitchToThread (
+ IN PKTHREAD TargetThread,
+ IN ULONG WaitReason,
+ IN ULONG WaitMode,
+ IN PVOID WaitObject
+ );
+
+VOID
+KiThreadStartup (
+ IN PVOID StartContext
+ );
+
+VOID
+KiTimerExpiration (
+ IN PKDPC Dpc,
+ IN PVOID DeferredContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ );
+
+VOID
+FASTCALL
+KiTimerListExpire (
+ IN PLIST_ENTRY ExpiredListHead,
+ IN KIRQL OldIrql
+ );
+
+VOID
+KiUnexpectedInterrupt (
+ VOID
+ );
+
+VOID
+KiUnlockDeviceQueue (
+ IN PKDEVICE_QUEUE DeviceQueue,
+ IN KIRQL OldIrql
+ );
+
+VOID
+FASTCALL
+KiUnwaitThread (
+ IN PRKTHREAD Thread,
+ IN NTSTATUS WaitStatus,
+ IN KPRIORITY Increment
+ );
+
+VOID
+KiUserApcDispatcher (
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2,
+ IN PKNORMAL_ROUTINE NormalRoutine
+ );
+
+VOID
+KiUserExceptionDispatcher (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PCONTEXT ContextFrame
+ );
+
+VOID
+FASTCALL
+KiWaitSatisfyAll (
+ IN PRKWAIT_BLOCK WaitBlock
+ );
+
+//
+// VOID
+// FASTCALL
+// KiWaitSatisfyAny (
+// IN PKMUTANT Object,
+// IN PKTHREAD Thread
+// )
+//
+//
+// Routine Description:
+//
+// This function satisfies a wait for any type of object and performs
+// any side effects that are necessary.
+//
+// Arguments:
+//
+// Object - Supplies a pointer to a dispatcher object.
+//
+// Thread - Supplies a pointer to a dispatcher object of type thread.
+//
+// Return Value:
+//
+// None.
+//
+
+#define KiWaitSatisfyAny(_Object_, _Thread_) { \
+ if (((_Object_)->Header.Type & DISPATCHER_OBJECT_TYPE_MASK) == EventSynchronizationObject) { \
+ (_Object_)->Header.SignalState = 0; \
+ \
+ } else if ((_Object_)->Header.Type == SemaphoreObject) { \
+ (_Object_)->Header.SignalState -= 1; \
+ \
+ } else if ((_Object_)->Header.Type == MutantObject) { \
+ (_Object_)->Header.SignalState -= 1; \
+ if ((_Object_)->Header.SignalState == 0) { \
+ (_Thread_)->KernelApcDisable -= (_Object_)->ApcDisable; \
+ (_Object_)->OwnerThread = (_Thread_); \
+ if ((_Object_)->Abandoned == TRUE) { \
+ (_Object_)->Abandoned = FALSE; \
+ (_Thread_)->WaitStatus = STATUS_ABANDONED; \
+ } \
+ \
+ InsertHeadList((_Thread_)->MutantListHead.Blink, \
+ &(_Object_)->MutantListEntry); \
+ } \
+ } \
+}
+
+//
+// VOID
+// FASTCALL
+// KiWaitSatisfyMutant (
+// IN PKMUTANT Object,
+// IN PKTHREAD Thread
+// )
+//
+//
+// Routine Description:
+//
+// This function satisfies a wait for a mutant object.
+//
+// Arguments:
+//
+// Object - Supplies a pointer to a dispatcher object.
+//
+// Thread - Supplies a pointer to a dispatcher object of type thread.
+//
+// Return Value:
+//
+// None.
+//
+
+#define KiWaitSatisfyMutant(_Object_, _Thread_) { \
+ (_Object_)->Header.SignalState -= 1; \
+ if ((_Object_)->Header.SignalState == 0) { \
+ (_Thread_)->KernelApcDisable -= (_Object_)->ApcDisable; \
+ (_Object_)->OwnerThread = (_Thread_); \
+ if ((_Object_)->Abandoned == TRUE) { \
+ (_Object_)->Abandoned = FALSE; \
+ (_Thread_)->WaitStatus = STATUS_ABANDONED; \
+ } \
+ \
+ InsertHeadList((_Thread_)->MutantListHead.Blink, \
+ &(_Object_)->MutantListEntry); \
+ } \
+}
+
+//
+// VOID
+// FASTCALL
+// KiWaitSatisfyOther (
+// IN PKMUTANT Object
+// )
+//
+//
+// Routine Description:
+//
+// This function satisfies a wait for any type of object except a mutant
+// and performs any side effects that are necessary.
+//
+// Arguments:
+//
+// Object - Supplies a pointer to a dispatcher object.
+//
+// Return Value:
+//
+// None.
+//
+
+#define KiWaitSatisfyOther(_Object_) { \
+ if (((_Object_)->Header.Type & DISPATCHER_OBJECT_TYPE_MASK) == EventSynchronizationObject) { \
+ (_Object_)->Header.SignalState = 0; \
+ \
+ } else if ((_Object_)->Header.Type == SemaphoreObject) { \
+ (_Object_)->Header.SignalState -= 1; \
+ \
+ } \
+}
+
+VOID
+FASTCALL
+KiWaitTest (
+ IN PVOID Object,
+ IN KPRIORITY Increment
+ );
+
+VOID
+KiFreezeTargetExecution (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ );
+
+VOID
+KiSaveProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ );
+
+VOID
+KiSaveProcessorControlState (
+ IN PKPROCESSOR_STATE ProcessorState
+ );
+
+VOID
+KiRestoreProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ );
+
+VOID
+KiRestoreProcessorControlState (
+ IN PKPROCESSOR_STATE ProcessorState
+ );
+
+#if defined(_MIPS_) || defined(_ALPHA_)
+
+VOID
+KiSynchronizeProcessIds (
+ VOID
+ );
+
+#endif
+
+BOOLEAN
+KiTryToAcquireSpinLock (
+ IN PKSPIN_LOCK SpinLock
+ );
+
+#if defined(_ALPHA_)
+
+//
+// Prototypes for memory barrier instructions
+//
+
+VOID
+KiImb(
+ VOID
+ );
+
+VOID
+KiMb(
+ VOID
+ );
+
+#endif
+
+#endif // _KI_
+
+//
+// External references to private kernel data structures
+//
+
+#if DEVL
+extern PMESSAGE_RESOURCE_DATA KiBugCodeMessages;
+#endif
+
+extern ULONG KiDmaIoCoherency;
+extern ULONG KiMaximumDpcQueueDepth;
+extern ULONG KiMinimumDpcRate;
+extern ULONG KiAdjustDpcThreshold;
+extern KSPIN_LOCK KiContextSwapLock;
+extern PKDEBUG_ROUTINE KiDebugRoutine;
+extern PKDEBUG_SWITCH_ROUTINE KiDebugSwitchRoutine;
+extern KSPIN_LOCK KiDispatcherLock;
+extern LIST_ENTRY KiDispatcherReadyListHead[MAXIMUM_PRIORITY];
+extern CCHAR KiFindFirstSetLeft[256];
+extern CCHAR KiFindFirstSetRight[256];
+extern CALL_PERFORMANCE_DATA KiFlushSingleCallData;
+extern ULONG KiHardwareTrigger;
+extern KAFFINITY KiIdleSummary;
+extern UCHAR KiFindLeftNibbleBitTable[];
+extern KEVENT KiSwapEvent;
+extern LIST_ENTRY KiProcessInSwapListHead;
+extern LIST_ENTRY KiProcessOutSwapListHead;
+extern LIST_ENTRY KiStackInSwapListHead;
+extern LIST_ENTRY KiProfileSourceListHead;
+extern BOOLEAN KiProfileAlignmentFixup;
+extern ULONG KiProfileAlignmentFixupInterval;
+extern ULONG KiProfileAlignmentFixupCount;
+extern ULONG KiProfileInterval;
+extern LIST_ENTRY KiProfileListHead;
+extern KSPIN_LOCK KiProfileLock;
+extern ULONG KiReadySummary;
+extern UCHAR KiArgumentTable[];
+extern ULONG KiServiceLimit;
+extern ULONG KiServiceTable[];
+extern CALL_PERFORMANCE_DATA KiSetEventCallData;
+extern ULONG KiTickOffset;
+extern LARGE_INTEGER KiTimeIncrementReciprocal;
+extern CCHAR KiTimeIncrementShiftCount;
+extern LIST_ENTRY KiTimerTableListHead[TIMER_TABLE_SIZE];
+extern KAFFINITY KiTimeProcessor;
+extern KDPC KiTimerExpireDpc;
+extern KSPIN_LOCK KiFreezeExecutionLock;
+extern BOOLEAN KiSlavesStartExecution;
+extern PSWAP_CONTEXT_NOTIFY_ROUTINE KiSwapContextNotifyRoutine;
+extern PTHREAD_SELECT_NOTIFY_ROUTINE KiThreadSelectNotifyRoutine;
+extern PTIME_UPDATE_NOTIFY_ROUTINE KiTimeUpdateNotifyRoutine;
+extern LIST_ENTRY KiWaitInListHead;
+extern LIST_ENTRY KiWaitOutListHead;
+extern CALL_PERFORMANCE_DATA KiWaitSingleCallData;
+
+#if defined(_PPC_)
+
+extern ULONG KiMasterPid;
+extern ULONG KiMasterSequence;
+extern KSPIN_LOCK KiProcessIdWrapLock;
+
+#endif
+
+#if defined(i386)
+
+extern KIRQL KiProfileIrql;
+
+#endif
+
+#if defined(_MIPS_) || defined(_ALPHA_)
+
+extern ULONG KiSynchIrql;
+
+#endif
+
+#if defined(_MIPS_) || defined(_ALPHA_) || defined(_PPC_)
+
+extern KINTERRUPT KxUnexpectedInterrupt;
+
+#endif
+
+#if NT_INST
+
+extern KIPI_COUNTS KiIpiCounts[MAXIMUM_PROCESSORS];
+
+#endif
+
+extern KSPIN_LOCK KiFreezeLockBackup;
+extern ULONG KiFreezeFlag;
+extern volatile ULONG KiSuspendState;
+
+#if DBG
+
+extern ULONG KiMaximumSearchCount;
+
+#endif
diff --git a/private/ntos/ke/kiinit.c b/private/ntos/ke/kiinit.c
new file mode 100644
index 000000000..43c4f1eba
--- /dev/null
+++ b/private/ntos/ke/kiinit.c
@@ -0,0 +1,301 @@
+/*++
+
+Copyright (c) 1993 Microsoft Corporation
+
+Module Name:
+
+ kiinit.c
+
+Abstract:
+
+ This module implements architecture independent kernel initialization.
+
+Author:
+
+ David N. Cutler 11-May-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Put all code for kernel initialization in the INIT section. It will be
+// deallocated by memory management when phase 1 initialization is completed.
+//
+
+#if defined(ALLOC_PRAGMA)
+
+#pragma alloc_text(INIT, KeInitSystem)
+#pragma alloc_text(INIT, KiInitSystem)
+#pragma alloc_text(INIT, KiComputeReciprocal)
+
+#endif
+
+BOOLEAN
+KeInitSystem (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes executive structures implemented by the
+ kernel.
+
+ N.B. This function is only called during phase 1 initialization.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ A value of TRUE is returned if initialization is successful. Otherwise,
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Initialized;
+
+ //
+ // Initialize the executive objects.
+ //
+
+#if 0
+
+ if ((Initialized = KiChannelInitialization()) == FALSE) {
+ KdPrint(("Kernel: Channel initialization failed\n"));
+ }
+
+#endif
+
+#if defined(i386)
+
+ //
+ // Perform platform dependent initialization
+ //
+
+ Initialized = KiInitMachineDependent();
+
+#endif
+
+
+ return Initialized;
+}
+
+VOID
+KiInitSystem (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes architecture independent kernel structures.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+
+ //
+ // Initialize dispatcher ready queue listheads.
+ //
+
+ for (Index = 0; Index < MAXIMUM_PRIORITY; Index += 1) {
+ InitializeListHead(&KiDispatcherReadyListHead[Index]);
+ }
+
+ //
+ // Initialize bug check callback listhead and spinlock.
+ //
+
+ InitializeListHead(&KeBugCheckCallbackListHead);
+ KeInitializeSpinLock(&KeBugCheckCallbackLock);
+
+ //
+ // Initialize the timer expiration DPC object.
+ //
+
+ KeInitializeDpc(&KiTimerExpireDpc,
+ (PKDEFERRED_ROUTINE)KiTimerExpiration, NIL);
+
+ //
+ // Initialize the profile listhead and profile locks
+ //
+
+ KeInitializeSpinLock(&KiProfileLock);
+ InitializeListHead(&KiProfileListHead);
+
+ //
+ // Initialize the active profile source listhead.
+ //
+
+ InitializeListHead(&KiProfileSourceListHead);
+
+ //
+ // Initialize the timer table listheads.
+ //
+
+ for (Index = 0; Index < TIMER_TABLE_SIZE; Index += 1) {
+ InitializeListHead(&KiTimerTableListHead[Index]);
+ }
+
+ //
+ // Initialize the swap event, the process inswap listhead, the
+ // process outswap listhead, the kernel stack inswap listhead,
+ // the wait in listhead, and the wait out listhead.
+ //
+
+ KeInitializeEvent(&KiSwapEvent,
+ SynchronizationEvent,
+ FALSE);
+
+ InitializeListHead(&KiProcessInSwapListHead);
+ InitializeListHead(&KiProcessOutSwapListHead);
+ InitializeListHead(&KiStackInSwapListHead);
+ InitializeListHead(&KiWaitInListHead);
+ InitializeListHead(&KiWaitOutListHead);
+
+ //
+ // Initialize the system service descriptor table.
+ //
+
+ KeServiceDescriptorTable[0].Base = &KiServiceTable[0];
+ KeServiceDescriptorTable[0].Count = NULL;
+ KeServiceDescriptorTable[0].Limit = KiServiceLimit;
+ KeServiceDescriptorTable[0].Number = &KiArgumentTable[0];
+ for (Index = 1; Index < NUMBER_SERVICE_TABLES; Index += 1) {
+ KeServiceDescriptorTable[Index].Limit = 0;
+ }
+
+ //
+ // Copy the system service descriptor table to the shadow table
+ // which is used to record the Win32 system services.
+ //
+
+ RtlCopyMemory(KeServiceDescriptorTableShadow,
+ KeServiceDescriptorTable,
+ sizeof(KeServiceDescriptorTable));
+
+ //
+ // Initialize call performance data structures.
+ //
+
+#if defined(_COLLECT_FLUSH_SINGLE_CALLDATA_)
+
+ ExInitializeCallData(&KiFlushSingleCallData);
+
+#endif
+
+#if defined(_COLLECT_SET_EVENT_CALLDATA_)
+
+ ExInitializeCallData(&KiSetEventCallData);
+
+#endif
+
+#if defined(_COLLECT_WAIT_SINGLE_CALLDATA_)
+
+ ExInitializeCallData(&KiWaitSingleCallData);
+
+#endif
+
+ return;
+}
+
+LARGE_INTEGER
+KiComputeReciprocal (
+ IN LONG Divisor,
+ OUT PCCHAR Shift
+ )
+
+/*++
+
+Routine Description:
+
+ This function computes the large integer reciprocal of the specified
+ value.
+
+Arguments:
+
+ Divisor - Supplies the value for which the large integer reciprocal is
+ computed.
+
+ Shift - Supplies a pointer to a variable that receives the computed
+ shift count.
+
+Return Value:
+
+ The large integer reciprocal is returned as the function value.
+
+--*/
+
+{
+
+ LARGE_INTEGER Fraction;
+ LONG NumberBits;
+ LONG Remainder;
+
+ //
+ // Compute the large integer reciprocal of the specified value.
+ //
+
+ NumberBits = 0;
+ Remainder = 1;
+ Fraction.LowPart = 0;
+ Fraction.HighPart = 0;
+ while (Fraction.HighPart >= 0) {
+ NumberBits += 1;
+ Fraction.HighPart = (Fraction.HighPart << 1) | (Fraction.LowPart >> 31);
+ Fraction.LowPart <<= 1;
+ Remainder <<= 1;
+ if (Remainder >= Divisor) {
+ Remainder -= Divisor;
+ Fraction.LowPart |= 1;
+ }
+ }
+
+ if (Remainder != 0) {
+ if ((Fraction.LowPart == 0xffffffff) && (Fraction.HighPart == 0xffffffff)) {
+ Fraction.LowPart = 0;
+ Fraction.HighPart = 0x80000000;
+ NumberBits -= 1;
+
+ } else {
+ if (Fraction.LowPart == 0xffffffff) {
+ Fraction.LowPart = 0;
+ Fraction.HighPart += 1;
+
+ } else {
+ Fraction.LowPart += 1;
+ }
+ }
+ }
+
+ //
+ // Compute the shift count value and return the reciprocal fraction.
+ //
+
+ *Shift = NumberBits - 64;
+ return Fraction;
+}
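+
+//
+// Illustrative sketch (not part of the original module): how a
+// reciprocal/shift pair produced by KiComputeReciprocal can replace a
+// division by a multiplication and a shift. The routine name below is
+// hypothetical and the sketch assumes a 32-bit dividend, 64-bit integer
+// support (ULONGLONG), and a shift count in the range 0..31; the
+// kernel's actual callers operate on 64-bit dividends.
+//
+
+ULONG
+SampleDivideByReciprocal (
+    IN ULONG Dividend,
+    IN LARGE_INTEGER Reciprocal,
+    IN CCHAR Shift
+    )
+
+{
+
+    ULONGLONG ProductHigh;
+    ULONGLONG ProductLow;
+
+    //
+    // The reciprocal approximates 2**(64 + Shift) / Divisor, so the
+    // quotient is (Dividend * Reciprocal) >> (64 + Shift). Form the
+    // 96-bit product with two 32x32 multiplies and keep only the bits
+    // that survive the right shift.
+    //
+
+    ProductLow = (ULONGLONG)Dividend * Reciprocal.LowPart;
+    ProductHigh = (ULONGLONG)Dividend * (ULONG)Reciprocal.HighPart +
+        (ProductLow >> 32);
+
+    return (ULONG)(ProductHigh >> (32 + Shift));
+}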
diff --git a/private/ntos/ke/mips/alignem.c b/private/ntos/ke/mips/alignem.c
new file mode 100644
index 000000000..19af1d6e2
--- /dev/null
+++ b/private/ntos/ke/mips/alignem.c
@@ -0,0 +1,375 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ alignem.c
+
+Abstract:
+
+ This module implements the code necessary to emulate unaligned data
+ references.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+BOOLEAN
+KiEmulateReference (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate an unaligned data reference to an
+ address in the user part of the address space.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ A value of TRUE is returned if the data reference is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG BranchAddress;
+ PUCHAR DataAddress;
+
+ union {
+ ULONGLONG Longlong;
+ ULONG Long;
+ USHORT Short;
+ } DataReference;
+
+ PUCHAR DataValue;
+ PVOID ExceptionAddress;
+ MIPS_INSTRUCTION FaultInstruction;
+ ULONG Rt;
+ KIRQL OldIrql;
+
+ //
+ // If alignment profiling is active, then call the proper profile
+ // routine.
+ //
+
+ if (KiProfileAlignmentFixup) {
+ KiProfileAlignmentFixupCount += 1;
+ if (KiProfileAlignmentFixupCount >= KiProfileAlignmentFixupInterval) {
+ KeRaiseIrql(PROFILE_LEVEL, &OldIrql);
+ KiProfileAlignmentFixupCount = 0;
+ KeProfileInterruptWithSource(TrapFrame, ProfileAlignmentFixup);
+ KeLowerIrql(OldIrql);
+ }
+ }
+
+ //
+ // Save the original exception address in case another exception
+ // occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation of the
+ // unaligned reference causes the emulation to be aborted. The new
+ // exception code and information are copied to the original exception
+ // record and a value of FALSE is returned.
+ //
+
+ try {
+
+ //
+ // If the exception PC is equal to the fault instruction address
+ // plus four, then the misalignment exception occurred in the delay
+ // slot of a branch instruction and the continuation address must
+ // be computed by emulating the branch instruction. Note that it
+ // is possible for an exception to occur when the branch instruction
+ // is read from user memory.
+ //
+
+ if ((TrapFrame->Fir + 4) == (ULONG)ExceptionRecord->ExceptionAddress) {
+ BranchAddress = KiEmulateBranch(ExceptionFrame, TrapFrame);
+
+ } else {
+ BranchAddress = TrapFrame->Fir + 4;
+ }
+
+ //
+ // Compute the effective address of the reference and check to make
+ // sure it is within the user part of the address space. Alignment
+ // exceptions take precedence over memory management exceptions and
+ // the address could be a system address.
+ //
+
+ FaultInstruction.Long = *((PULONG)ExceptionRecord->ExceptionAddress);
+ DataAddress = (PUCHAR)KiGetRegisterValue64(FaultInstruction.i_format.Rs,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress = (PUCHAR)((LONG)DataAddress +
+ (LONG)FaultInstruction.i_format.Simmediate);
+
+ //
+ // The emulated data reference must be in user space and must be less
+ // than 16 bytes from the end of user space.
+ //
+
+ if ((ULONG)DataAddress < 0x7ffffff0) {
+
+ //
+ // Dispatch on the opcode value.
+ //
+
+ DataValue = (PUCHAR)&DataReference;
+ Rt = FaultInstruction.i_format.Rt;
+ switch (FaultInstruction.i_format.Opcode) {
+
+ //
+ // Load halfword integer.
+ //
+
+ case LH_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ KiSetRegisterValue64(Rt,
+ (SHORT)DataReference.Short,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load halfword unsigned integer.
+ //
+
+ case LHU_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ KiSetRegisterValue64(Rt,
+ DataReference.Short,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load word floating.
+ //
+
+ case LWC1_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetRegisterValue(Rt + 32,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load word integer.
+ //
+
+ case LW_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetRegisterValue64(Rt,
+ (LONG)DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load double integer.
+ //
+
+ case LD_OP:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ DataValue[4] = DataAddress[4];
+ DataValue[5] = DataAddress[5];
+ DataValue[6] = DataAddress[6];
+ DataValue[7] = DataAddress[7];
+ KiSetRegisterValue64(Rt,
+ DataReference.Longlong,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Load double floating.
+ //
+
+ case LDC1_OP:
+ Rt = (Rt & 0x1e) + 32;
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetRegisterValue(Rt,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataValue[0] = DataAddress[4];
+ DataValue[1] = DataAddress[5];
+ DataValue[2] = DataAddress[6];
+ DataValue[3] = DataAddress[7];
+ KiSetRegisterValue(Rt + 1,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+
+ break;
+
+ //
+ // Store halfword integer.
+ //
+
+ case SH_OP:
+ DataReference.Longlong = KiGetRegisterValue64(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ break;
+
+ //
+ // Store word floating.
+ //
+
+ case SWC1_OP:
+ DataReference.Long = KiGetRegisterValue(Rt + 32,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ break;
+
+ //
+ // Store word integer.
+ //
+
+ case SW_OP:
+ DataReference.Longlong = KiGetRegisterValue64(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ break;
+
+ //
+ // Store double integer.
+ //
+
+ case SD_OP:
+ DataReference.Longlong = KiGetRegisterValue64(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ DataAddress[4] = DataValue[4];
+ DataAddress[5] = DataValue[5];
+ DataAddress[6] = DataValue[6];
+ DataAddress[7] = DataValue[7];
+ break;
+
+ //
+ // Store double floating.
+ //
+
+ case SDC1_OP:
+ Rt = (Rt & 0x1e) + 32;
+ DataReference.Long = KiGetRegisterValue(Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ DataReference.Long = KiGetRegisterValue(Rt + 1,
+ ExceptionFrame,
+ TrapFrame);
+
+ DataAddress[4] = DataValue[0];
+ DataAddress[5] = DataValue[1];
+ DataAddress[6] = DataValue[2];
+ DataAddress[7] = DataValue[3];
+ break;
+
+ //
+ // All other instructions are not emulated.
+ //
+
+ default:
+ return FALSE;
+ }
+
+ TrapFrame->Fir = BranchAddress;
+ return TRUE;
+ }
+
+ //
+ // If an exception occurs, then copy the new exception information to the
+ // original exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+ }
+
+ //
+ // Return a value of FALSE.
+ //
+
+ return FALSE;
+}
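+
+//
+// Illustrative sketch (not part of the original module): a hypothetical
+// user mode fragment showing the kind of misaligned reference that
+// raises an alignment exception on MIPS and is fixed up transparently
+// by KiEmulateReference. The cast assumes the compiler emits a plain lw
+// for the dereference; a pointer declared UNALIGNED would be read with
+// an unaligned load sequence and would not fault.
+//
+// UCHAR Buffer[8];
+// ULONG Value;
+//
+// Buffer[1] = 0x78;
+// Buffer[2] = 0x56;
+// Buffer[3] = 0x34;
+// Buffer[4] = 0x12;
+// Value = *(PULONG)&Buffer[1];        // lw from an odd address
+//
+// After the fixup, Value contains 0x12345678 (little endian).
+//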
diff --git a/private/ntos/ke/mips/alignx.s b/private/ntos/ke/mips/alignx.s
new file mode 100644
index 000000000..8821d764f
--- /dev/null
+++ b/private/ntos/ke/mips/alignx.s
@@ -0,0 +1,312 @@
+// TITLE("Unaligned Branch Tests")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// alignx.s
+//
+// Abstract:
+//
+// This module implements the unaligned branch tests.
+//
+// Author:
+//
+// David N. Cutler (davec) 27-Feb-1991
+//
+// Environment:
+//
+// User mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Unaligned BEQ/BNE/BC1F/BC1T Branch Tests")
+//++
+//
+// Routine Description:
+//
+// The following routines implement beq/bne/bc1f/bc1t tests with an unaligned
+// load word instruction in the delay slot.
+//
+// Arguments:
+//
+// a0 - Supplies first operand for branch test.
+// a1 - Supplies second operate for branch test.
+// a2 - Supplies a pointer to an unaligned word.
+// a3 - Supplies a pointer to an aligned word that receives the result
+// of the unaligned load.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the branch was taken. Otherwise,
+// FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(Beq)
+
+ .set noreorder
+ li v0,1 // set branched true
+ beq a0,a1,10f // if eq, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Beq
+
+ LEAF_ENTRY(Bne)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bne a0,a1,10f // if eq, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Bne
+
+ LEAF_ENTRY(Bc1f)
+
+ .set noreorder
+ mtc1 a0,f0 // set comparand 1
+ mtc1 a1,f2 // set comparand 2
+ li v0,1 // set branched true
+ c.eq.s f0,f2 // compare for equality
+ bc1f 10f // if f, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Bc1f
+
+ LEAF_ENTRY(Bc1t)
+
+ .set noreorder
+ mtc1 a0,f0 // set comparand 1
+ mtc1 a1,f2 // set comparand 2
+ li v0,1 // set branched true
+ c.eq.s f0,f2 // compare for equality
+ bc1t 10f // if t, branch
+ lw v1,0(a2) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a3) // store unaligned value
+ .set reorder
+
+ .end Bc1t
+
+ SBTTL("Unaligned BLEZ/BLTZ/BGEZ/BGTZ/BGEZAL/BLTZAL Branch Tests")
+//++
+//
+// Routine Description:
+//
+// The following routines implement blez/bltz/bgez/bgtz/bgezal/bltzal
+// tests with an unaligned load word instruction in the delay slot.
+//
+// Arguments:
+//
+// a0 - Supplies the operand for branch test.
+// a1 - Supplies a pointer to an unaligned word.
+// a2 - Supplies a pointer to an aligned word that receives the result
+// of the unaligned load.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the branch was taken. Otherwise,
+// FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(Blez)
+
+ .set noreorder
+ li v0,1 // set branched true
+ blez a0,10f // if lez, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Blez
+
+ LEAF_ENTRY(Bltz)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bltz a0,10f // if ltz, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Bltz
+
+ LEAF_ENTRY(Bgez)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bgez a0,10f // if gez, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Bgez
+
+ LEAF_ENTRY(Bgtz)
+
+ .set noreorder
+ li v0,1 // set branched true
+ bgtz a0,10f // if gtz, branch
+ lw v1,0(a1) // load unaligned data
+ move v0,zero // set branched false
+10: j ra // return
+ sw v1,0(a2) // store unaligned value
+ .set reorder
+
+ .end Bgtz
+
+ LEAF_ENTRY(Bgezal)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ bgezal a0,10f // if gez, branch and link
+ lw v1,0(a1) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a2) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Bgezal
+
+ LEAF_ENTRY(Bltzal)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ bltzal a0,10f // if ltz, branch and link
+ lw v1,0(a1) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a2) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Bltzal
+
+ SBTTL("Unaligned JAL/J Tests")
+//++
+//
+// Routine Description:
+//
+// The following routines implement jal/j tests with an unaligned
+// load word instruction in the delay slot.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to an unaligned word.
+// a1 - Supplies a pointer to an aligned word that receives the result
+// of the unaligned load.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the branch was taken. Otherwise,
+// FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(Jal)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ jal 10f // jump and link
+ lw v1,0(a0) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Jal
+
+ LEAF_ENTRY(Jalr)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ la t0,10f // get destination address
+ jal t0 // jump
+ lw v1,0(a0) // load unaligned data
+ lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j ra // jump back
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Jalr
+
+ LEAF_ENTRY(J)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ j 10f // jump
+ lw v1,0(a0) // load unaligned data
+20: lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j 20b // jump back
+ li v0,1 // set branched true
+ .set reorder
+
+ .end J
+
+ LEAF_ENTRY(Jr)
+
+ .set noreorder
+ sw ra,4 * 4(sp) // save return address
+ move v0,zero // set branched false
+ la t0,10f // get destination address
+ j t0 // jump
+ lw v1,0(a0) // load unaligned data
+20: lw ra,4 * 4(sp) // restore return address
+ sw v1,0(a1) // store unaligned value
+ j ra // return
+ nop //
+
+10: j 20b // return
+ li v0,1 // set branched true
+ .set reorder
+
+ .end Jr
diff --git a/private/ntos/ke/mips/allproc.c b/private/ntos/ke/mips/allproc.c
new file mode 100644
index 000000000..19a319690
--- /dev/null
+++ b/private/ntos/ke/mips/allproc.c
@@ -0,0 +1,392 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ allproc.c
+
+Abstract:
+
+ This module allocates and initializes kernel resources required
+ to start a new processor, and passes a complete processor state
+ structure to the HAL to obtain a new processor.
+
+Author:
+
+ David N. Cutler 29-Apr-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+
+#pragma alloc_text(INIT, KeStartAllProcessors)
+
+#endif
+
+//
+// Define macro to round up to 64-byte boundary and define block sizes.
+//
+
+#define ROUND_UP(x) ((sizeof(x) + 63) & (~63))
+#define BLOCK1_SIZE ((3 * KERNEL_STACK_SIZE) + PAGE_SIZE)
+#define BLOCK2_SIZE (ROUND_UP(KPRCB) + ROUND_UP(ETHREAD) + 64)
+
+//
+// Define barrier wait static data.
+//
+
+#if !defined(NT_UP)
+
+ULONG KiBarrierWait = 0;
+
+#endif
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ );
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiInitializeSystem (
+ IN PLOADER_PARAMETER_BLOCK Loaderblock
+ );
+
+VOID
+KeStartAllProcessors(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called during phase 1 initialization on the master boot
+ processor to start all of the other registered processors.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG MemoryBlock1;
+ ULONG MemoryBlock2;
+ ULONG Number;
+ ULONG PcrAddress;
+ ULONG PcrPage;
+ PKPRCB Prcb;
+ KPROCESSOR_STATE ProcessorState;
+ PRESTART_BLOCK RestartBlock;
+ BOOLEAN Started;
+
+#if !defined(NT_UP)
+
+ //
+ // If the registered number of processors is greater than the maximum
+ // number of processors supported, then only allow the maximum number
+ // of supported processors.
+ //
+
+ if (KeRegisteredProcessors > MAXIMUM_PROCESSORS) {
+ KeRegisteredProcessors = MAXIMUM_PROCESSORS;
+ }
+
+ //
+ // Set barrier that will prevent any other processor from entering the
+ // idle loop until all processors have been started.
+ //
+
+ KiBarrierWait = 1;
+
+ //
+ // Initialize the processor state that will be used to start each of
+ // the processors. Each processor starts in the system initialization code
+ // with address of the loader parameter block as an argument.
+ //
+
+ Number = 1;
+ RtlZeroMemory(&ProcessorState, sizeof(KPROCESSOR_STATE));
+ ProcessorState.ContextFrame.IntA0 = (ULONG)KeLoaderBlock;
+ ProcessorState.ContextFrame.Fir = (ULONG)KiInitializeSystem;
+ while (Number < KeRegisteredProcessors) {
+
+ //
+ // Allocate a DPC stack, an idle thread kernel stack, a panic
+ // stack, a PCR page, a processor block, and an executive thread
+ // object. If the allocation fails or the allocation cannot be
+ // made from unmapped nonpaged pool, then stop starting processors.
+ //
+
+ MemoryBlock1 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK1_SIZE);
+ if (((PVOID)MemoryBlock1 == NULL) ||
+ ((MemoryBlock1 & 0xc0000000) != KSEG0_BASE)) {
+ if ((PVOID)MemoryBlock1 != NULL) {
+ ExFreePool((PVOID)MemoryBlock1);
+ }
+
+ break;
+ }
+
+ MemoryBlock2 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK2_SIZE);
+ if (((PVOID)MemoryBlock2 == NULL) ||
+ ((MemoryBlock2 & 0xc0000000) != KSEG0_BASE)) {
+ ExFreePool((PVOID)MemoryBlock1);
+ if ((PVOID)MemoryBlock2 != NULL) {
+ ExFreePool((PVOID)MemoryBlock2);
+ }
+
+ break;
+ }
+
+ //
+ // Zero both blocks of allocated memory.
+ //
+
+ RtlZeroMemory((PVOID)MemoryBlock1, BLOCK1_SIZE);
+ RtlZeroMemory((PVOID)MemoryBlock2, BLOCK2_SIZE);
+
+ //
+ // Set address of interrupt stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Mips.InterruptStack = MemoryBlock1 + (1 * KERNEL_STACK_SIZE);
+
+ //
+ // Set address of idle thread kernel stack in loader parameter block.
+ //
+
+ KeLoaderBlock->KernelStack = MemoryBlock1 + (2 * KERNEL_STACK_SIZE);
+
+ //
+ // Set address of panic stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Mips.PanicStack = MemoryBlock1 + (3 * KERNEL_STACK_SIZE);
+
+ //
+ // Change the color of the PCR page to match the new mapping and
+ // set the page frame of the PCR page in the loader parameter block.
+ //
+
+ PcrAddress = MemoryBlock1 + (3 * KERNEL_STACK_SIZE);
+ PcrPage = (PcrAddress ^ KSEG0_BASE) >> PAGE_SHIFT;
+ HalChangeColorPage((PVOID)KIPCR, (PVOID)PcrAddress, PcrPage);
+ KeLoaderBlock->u.Mips.PcrPage = PcrPage;
+
+ //
+ // Set the address of the processor block and executive thread in the
+ // loader parameter block.
+ //
+
+ KeLoaderBlock->Prcb = (MemoryBlock2 + 63) & ~63;
+ KeLoaderBlock->Thread = KeLoaderBlock->Prcb + ROUND_UP(KPRCB);
+
+ //
+ // Attempt to start the next processor. If attempt is successful,
+ // then wait for the processor to get initialized. Otherwise,
+ // deallocate the processor resources and terminate the loop.
+ //
+
+ Started = HalStartNextProcessor(KeLoaderBlock, &ProcessorState);
+ if (Started == FALSE) {
+ HalChangeColorPage((PVOID)PcrAddress, (PVOID)KIPCR, PcrPage);
+ ExFreePool((PVOID)MemoryBlock1);
+ ExFreePool((PVOID)MemoryBlock2);
+ break;
+
+ } else {
+
+ //
+ // Wait until boot is finished on the target processor before
+ // starting the next processor. Booting is considered to be
+ // finished when a processor completes its initialization and
+ // drops into the idle loop.
+ //
+
+ Prcb = (PKPRCB)(KeLoaderBlock->Prcb);
+ RestartBlock = Prcb->RestartBlock;
+ while (RestartBlock->BootStatus.BootFinished == 0) {
+ }
+ }
+
+ Number += 1;
+ }
+
+ //
+ // Allow all processors that were started to enter the idle loop and
+ // begin execution.
+ //
+
+ KiBarrierWait = 0;
+
+#endif
+
+ //
+ // Reset and synchronize the performance counters of all processors.
+ //
+
+ KiCalibratePerformanceCounter();
+ return;
+}
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function resets and synchronizes the performance counter on all
+ processors in the configuration.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Count = 1;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to synchronization level to avoid a possible context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Initialize the reset performance counter packet, compute the target
+ // set of processors, and send the packet to the target processors, if
+ // any, for execution.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ Count = (LONG)KeNumberProcessors;
+ KiIpiSendPacket(TargetProcessors,
+ KiCalibratePerformanceCounterTarget,
+ (PVOID)&Count,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Reset the performance counter on current processor.
+ //
+
+ HalCalibratePerformanceCounter((volatile PLONG)&Count);
+
+ //
+ // Wait until all target processors have reset and synchronized their
+ // performance counters.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to previous level.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for resetting the performance counter.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Count - Supplies a pointer to the number of processors in the host
+ configuration.
+
+ Parameter2 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Reset and synchronize the performance counter on the current processor
+ // and clear the reset performance counter address to signal the source to
+ // continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalCalibratePerformanceCounter((volatile PLONG)Count);
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
diff --git a/private/ntos/ke/mips/apcuser.c b/private/ntos/ke/mips/apcuser.c
new file mode 100644
index 000000000..200fa9a04
--- /dev/null
+++ b/private/ntos/ke/mips/apcuser.c
@@ -0,0 +1,140 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ apcuser.c
+
+Abstract:
+
+ This module implements the machine dependent code necessary to initialize
+ a user mode APC.
+
+Author:
+
+ David N. Cutler (davec) 23-Apr-1990
+
+Environment:
+
+ Kernel mode only, IRQL APC_LEVEL.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiInitializeUserApc (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKNORMAL_ROUTINE NormalRoutine,
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize the context for a user mode APC.
+
+Arguments:
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ NormalRoutine - Supplies a pointer to the user mode APC routine.
+
+ NormalContext - Supplies a pointer to the user context for the APC
+ routine.
+
+ SystemArgument1 - Supplies the first system supplied value.
+
+ SystemArgument2 - Supplies the second system supplied value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextRecord;
+ EXCEPTION_RECORD ExceptionRecord;
+ LONG Length;
+ ULONG UserStack;
+
+ //
+ // Move the user mode state from the trap and exception frames to the
+ // context frame.
+ //
+
+ ContextRecord.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextRecord);
+
+ //
+ // Transfer the context information to the user stack, initialize the
+ // APC routine parameters, and modify the trap frame so execution will
+ // continue in user mode at the user mode APC dispatch routine.
+ //
+
+ try {
+
+ //
+ // Compute length of context record and new aligned user stack pointer.
+ //
+
+ Length = sizeof(CONTEXT);
+ UserStack = (ULONG)(ContextRecord.XIntSp & (~7)) - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack, Length, sizeof(QUAD));
+ RtlMoveMemory((PULONG)UserStack, &ContextRecord, sizeof(CONTEXT));
+
+ //
+ // Set the address of the user APC routine, the APC parameters, the
+ // new frame pointer, and the new stack pointer in the current trap
+ // frame. Set the continuation address so control will be transferred
+ // to the user APC dispatcher.
+ //
+
+ TrapFrame->XIntSp = (LONG)UserStack;
+ TrapFrame->XIntS8 = (LONG)UserStack;
+ TrapFrame->XIntA0 = (LONG)NormalContext;
+ TrapFrame->XIntA1 = (LONG)SystemArgument1;
+ TrapFrame->XIntA2 = (LONG)SystemArgument2;
+ TrapFrame->XIntA3 = (LONG)NormalRoutine;
+ TrapFrame->Fir = KeUserApcDispatcher;
+
+ //
+ // If an exception occurs, then copy the exception information to an
+ // exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Set the address of the exception to the current program address
+ // and raise the exception by calling the exception dispatcher.
+ //
+
+ ExceptionRecord.ExceptionAddress = (PVOID)(TrapFrame->Fir);
+ KiDispatchException(&ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ UserMode,
+ TRUE);
+ }
+
+ return;
+}
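+
+//
+// Illustrative sketch (not part of the original module): the user mode
+// routine that ultimately runs as a result of the register setup above.
+// The argument layout matches the KiUserApcDispatcher prototype declared
+// in ki.h (a0 = NormalContext, a1/a2 = system arguments, a3 = routine
+// address); the routine name below is hypothetical.
+//
+// VOID
+// SampleNormalRoutine (
+//     IN PVOID NormalContext,
+//     IN PVOID SystemArgument1,
+//     IN PVOID SystemArgument2
+//     )
+// {
+//     //
+//     // Perform the user mode work requested when the APC was queued.
+//     //
+// }
+//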
diff --git a/private/ntos/ke/mips/branchem.c b/private/ntos/ke/mips/branchem.c
new file mode 100644
index 000000000..c9479deba
--- /dev/null
+++ b/private/ntos/ke/mips/branchem.c
@@ -0,0 +1,311 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ branchem.c
+
+Abstract:
+
+ This module implements the code necessary to emulate branches when an
+ alignment or floating exception occurs in the delay slot of a branch
+ instruction.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+ULONG
+KiEmulateBranch (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate the branch instruction specified by
+ the fault instruction address in the specified trap frame. The resultant
+ branch destination address is computed and returned as the function value.
+
+Arguments:
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The resultant target branch destination is returned as the function value.
+
+--*/
+
+{
+
+ MIPS_INSTRUCTION BranchInstruction;
+ ULONG BranchTaken;
+ ULONG BranchNotTaken;
+ ULONG RsValue;
+ ULONG RtValue;
+
+ //
+ // Get the branch instruction at the fault address.
+ //
+
+ BranchInstruction.Long = *((PULONG)TrapFrame->Fir);
+
+ //
+ // Assume the branch instruction is a conditional branch and get the
+ // Rs and Rt register values. Also compute the branch taken as well
+ // as the branch not taken target addresses.
+ //
+
+ RsValue = KiGetRegisterValue(BranchInstruction.r_format.Rs,
+ ExceptionFrame,
+ TrapFrame);
+
+ RtValue = KiGetRegisterValue(BranchInstruction.r_format.Rt,
+ ExceptionFrame,
+ TrapFrame);
+
+ BranchTaken = (TrapFrame->Fir + 4) +
+ (LONG)(BranchInstruction.i_format.Simmediate << 2);
+ BranchNotTaken = TrapFrame->Fir + 8;
+
+ //
+ // Dispatch on the opcode value.
+ //
+ // N.B. All branch likely instructions are guaranteed to branch since an
+ // exception would not have been generated in the delay slot if the
+ // branch was not going to actually branch.
+ //
+
+ switch (BranchInstruction.r_format.Opcode) {
+
+ //
+ // Special opcode - dispatch on the function subopcode.
+ //
+
+ case SPEC_OP:
+ switch (BranchInstruction.r_format.Function) {
+
+ //
+ // Jalr - jump and link register.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case JALR_OP:
+
+ //
+ // Jr - jump register.
+ //
+
+ case JR_OP:
+ return RsValue;
+
+ //
+ // All other instructions are illegal and should never happen.
+ //
+
+ default:
+ return TrapFrame->Fir;
+ }
+
+ //
+ // Jal - jump and link.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case JAL_OP:
+
+ //
+ // J - jump.
+ //
+
+ case J_OP:
+ return ((TrapFrame->Fir + 4) & 0xf0000000) |
+ (BranchInstruction.j_format.Target << 2);
+
+ //
+ // Beq - branch equal.
+ // Beql - branch equal likely.
+ //
+
+ case BEQ_OP:
+ case BEQL_OP:
+ if ((LONG)RsValue == (LONG)RtValue) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Bne - branch not equal.
+ // Bnel - branch not equal likely.
+ //
+
+ case BNE_OP:
+ case BNEL_OP:
+ if ((LONG)RsValue != (LONG)RtValue) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Blez - branch less than or equal zero.
+ // Blezl - branch less than or equal zero likely.
+ //
+
+ case BLEZ_OP:
+ case BLEZL_OP:
+ if ((LONG)RsValue <= 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Bgtz - branch greater than zero.
+ // Bgtzl - branch greater than zero likely.
+ //
+
+ case BGTZ_OP:
+ case BGTZL_OP:
+ if ((LONG)RsValue > 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Branch conditional opcode - dispatch on the rt field.
+ //
+
+ case BCOND_OP:
+ switch (BranchInstruction.r_format.Rt) {
+
+ //
+ // Bltzal - branch on less than zero and link.
+ // Bltzall - branch on less than zero and link likely.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case BLTZAL_OP:
+ case BLTZALL_OP:
+
+ //
+ // Bltz - branch less than zero.
+ // Bltzl - branch less than zero likely.
+ //
+
+ case BLTZ_OP:
+ case BLTZL_OP:
+ if ((LONG)RsValue < 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // Bgezal - branch on greater than or equal zero and link.
+ // Bgezall - branch on greater than or equal zero and link likely.
+ //
+ // N.B. Ra has already been loaded by the hardware before the
+ // exception condition occurred.
+ //
+
+ case BGEZAL_OP:
+ case BGEZALL_OP:
+
+ //
+ // Bgez - branch greater than or equal zero.
+ // Bgezl - branch greater than or equal zero likely.
+ //
+
+ case BGEZ_OP:
+ case BGEZL_OP:
+ if ((LONG)RsValue >= 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ //
+ // All other instructions are illegal and should not happen.
+ //
+
+ default:
+ return TrapFrame->Fir;
+ }
+
+ //
+ // Cop1 - coprocessor 1 branch operation.
+ //
+ // Bczf - Branch coprocessor z false.
+ // Bczfl - Branch coprocessor z false likely.
+ // Bczt - Branch coprocessor z true.
+ // Bcztl - Branch coprocessor z true likely.
+ //
+
+ case COP1_OP:
+ if ((BranchInstruction.Long & COPz_BC_MASK) == COPz_BF) {
+
+ //
+ // Branch on coprocessor 1 condition code false.
+ //
+
+ if (((PFSR)(&TrapFrame->Fsr))->CC == 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ } else if ((BranchInstruction.Long & COPz_BC_MASK) == COPz_BT) {
+
+ //
+ // Branch on coprocessor 1 condition code true.
+ //
+
+ if (((PFSR)(&TrapFrame->Fsr))->CC != 0) {
+ return BranchTaken;
+
+ } else {
+ return BranchNotTaken;
+ }
+
+ }
+
+ //
+ // All other instructions are illegal and should not happen.
+ //
+
+ default:
+ return TrapFrame->Fir;
+ }
+}
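+
+//
+// Worked example (illustrative, not part of the original module): for a
+// conditional branch at address 0x00401000 with a signed immediate of
+// -4, the taken target is (0x00401000 + 4) + (-4 << 2) = 0x00400ff4 and
+// the not-taken target is 0x00401000 + 8 = 0x00401008.
+//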
diff --git a/private/ntos/ke/mips/buserror.c b/private/ntos/ke/mips/buserror.c
new file mode 100644
index 000000000..4c4b7ef72
--- /dev/null
+++ b/private/ntos/ke/mips/buserror.c
@@ -0,0 +1,309 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ buserror.c
+
+Abstract:
+
+ This module implements the code necessary to process data and instruction
+ bus errors and to set the address of the cache error routine.
+
+Author:
+
+ David N. Cutler (davec) 31-Oct-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+BOOLEAN
+KeBusError (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PVOID VirtualAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function provides the default bus error handling routine for NT.
+
+ N.B. There is no return from this routine.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ VirtualAddress - Supplies the virtual address of the bus error.
+
+ PhysicalAddress - Supplies the physical address of the bus error.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Bug check specifying the exception code, the virtual address, the
+ // low part of the physical address, the current processor state, and
+ // the exception PC.
+ //
+
+ KeBugCheckEx(ExceptionRecord->ExceptionCode & 0xffff,
+ (ULONG)VirtualAddress,
+ PhysicalAddress.LowPart,
+ TrapFrame->Psr,
+ TrapFrame->Fir);
+
+ return FALSE;
+}
+
+PHYSICAL_ADDRESS
+KiGetPhysicalAddress (
+ IN PVOID VirtualAddress
+ )
+
+/*++
+
+Routine Description:
+
+ This function computes the physical address for a given virtual address.
+
+Arguments:
+
+ VirtualAddress - Supplies the virtual address whose physical address is
+ to be computed.
+
+Return Value:
+
+ The physical address that corresponds to the specified virtual address.
+
+--*/
+
+{
+ PHYSICAL_ADDRESS PhysicalAddress;
+
+ //
+ // If the address is a KSEG0 or KSEG1 address, then mask off the high
+ // three address bits and return the result as the physical address.
+ // Otherwise, call memory management to convert the virtual address to
+ // a physical address.
+ //
+
+ if (((ULONG)VirtualAddress >= KSEG0_BASE) &&
+ ((ULONG)VirtualAddress < (KSEG1_BASE + 0x20000000))) {
+ PhysicalAddress.LowPart = (ULONG)VirtualAddress & 0x1fffffff;
+ PhysicalAddress.HighPart = 0;
+ return PhysicalAddress;
+
+ } else {
+ return MmGetPhysicalAddress(VirtualAddress);
+ }
+}
+
+VOID
+KiDataBusError (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to process a data bus error. The virtual and
+ physical address of the error are computed and the data bus error
+ processing routine is called indirectly through the PCR. NT provides
+ a standard routine to process the error and shutdown the system. A
+ vendor, however, can replace the standard NT routine and do additional
+ processing if necessary via the HAL.
+
+ N.B. There is no return from this routine.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PVOID VirtualAddress;
+ PHYSICAL_ADDRESS PhysicalAddress;
+ MIPS_INSTRUCTION FaultInstruction;
+
+ //
+ // Any exception that occurs during the attempted calculation of the
+ // virtual address causes the virtual address calculation to be
+ // aborted and the virtual address of the instruction itself is used
+ // instead.
+ //
+
+ try {
+
+ //
+ // Compute the effective address of the reference.
+ //
+
+ FaultInstruction.Long = *((PULONG)ExceptionRecord->ExceptionAddress);
+ VirtualAddress = (PVOID)(KiGetRegisterValue(FaultInstruction.i_format.Rs,
+ ExceptionFrame,
+ TrapFrame) +
+ FaultInstruction.i_format.Simmediate);
+
+ //
+ // If an exception occurs, then abort the calculation of the virtual
+ // address and set the virtual address equal to the instruction address.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ VirtualAddress = ExceptionRecord->ExceptionAddress;
+ }
+
+ //
+ // Compute the physical address that corresponds to the data address.
+ //
+
+ PhysicalAddress = KiGetPhysicalAddress(VirtualAddress);
+
+ //
+ // If a value of FALSE is returned by the data bus error handling routine,
+ // then bug check. Otherwise, assume that the error has been handled and
+ // return.
+ //
+
+ if ((PCR->DataBusError)(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ VirtualAddress,
+ PhysicalAddress) == FALSE) {
+
+ KeBugCheck(DATA_BUS_ERROR);
+ }
+
+ return;
+}
+
+VOID
+KiInstructionBusError (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to process an instruction bus error. The virtual
+ and physical address of the error are computed and the instruction bus
+ error processing routine is called indirectly through the PCR. NT provides
+ a standard routine to process the error and shutdown the system. A vendor,
+ however, can replace the standard NT routine and do additional processing
+ if necessary via the HAL.
+
+ N.B. There is no return from this routine.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PVOID VirtualAddress;
+ PHYSICAL_ADDRESS PhysicalAddress;
+
+ //
+ // Compute the physical address that corresponds to the data address.
+ //
+
+ VirtualAddress = ExceptionRecord->ExceptionAddress;
+ PhysicalAddress = KiGetPhysicalAddress(VirtualAddress);
+
+ //
+ // If a value of FALSE is returned by the instruction bus error handling
+ // routine, then bug check. Otherwise, assume that the error has been
+ // handled and return.
+ //
+
+ if ((PCR->InstructionBusError)(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ VirtualAddress,
+ PhysicalAddress) == FALSE) {
+
+ KeBugCheck(INSTRUCTION_BUS_ERROR);
+ }
+
+ return;
+}
+
+VOID
+KeSetCacheErrorRoutine (
+ IN PKCACHE_ERROR_ROUTINE Routine
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the address of the cache error routine.
+ The cache error routine is called whenever a cache error occurs.
+
+Arguments:
+
+ Routine - Supplies a pointer to the cache error routine.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set the address of the cache error routine.
+ //
+
+ *((PULONG)CACHE_ERROR_VECTOR) = (ULONG)Routine | KSEG1_BASE;
+ return;
+}
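+
+//
+// Illustrative sketch (not part of the original module): the shape of a
+// vendor supplied data bus error routine of the kind the HAL can install
+// in place of KeBusError. The routine name is hypothetical; returning
+// TRUE indicates the error was handled and execution continues, while
+// returning FALSE causes KiDataBusError to bug check.
+//
+// BOOLEAN
+// SampleDataBusError (
+//     IN PEXCEPTION_RECORD ExceptionRecord,
+//     IN PKEXCEPTION_FRAME ExceptionFrame,
+//     IN PKTRAP_FRAME TrapFrame,
+//     IN PVOID VirtualAddress,
+//     IN PHYSICAL_ADDRESS PhysicalAddress
+//     )
+// {
+//     //
+//     // Record the error using platform specific registers and decide
+//     // whether the faulting operation can be retried or ignored.
+//     //
+//
+//     return FALSE;
+// }
+//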
diff --git a/private/ntos/ke/mips/callback.c b/private/ntos/ke/mips/callback.c
new file mode 100644
index 000000000..75b29c55e
--- /dev/null
+++ b/private/ntos/ke/mips/callback.c
@@ -0,0 +1,237 @@
+/*++
+
+Copyright (c) 1994 Microsoft Corporation
+
+Module Name:
+
+ callback.c
+
+Abstract:
+
+ This module implements user mode call back services.
+
+Author:
+
+ David N. Cutler (davec) 29-Oct-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+KeUserModeCallback (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ IN PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+    This function calls out from kernel mode to a user mode function.
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied
+ to the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+    OutputBuffer - Supplies a pointer to a variable that receives
+        the address of the output buffer.
+
+    OutputLength - Supplies a pointer to a variable that receives
+        the length of the output buffer.
+
+Return Value:
+
+ If the callout cannot be executed, then an error status is
+ returned. Otherwise, the status returned by the callback function
+ is returned.
+
+--*/
+
+{
+
+ PUCALLOUT_FRAME CalloutFrame;
+ ULONG Length;
+ ULONG OldStack;
+ NTSTATUS Status;
+ PKTRAP_FRAME TrapFrame;
+ PULONG UserStack;
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // Get the user mode stack pointer and attempt to copy input buffer
+ // to the user stack.
+ //
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+ OldStack = (ULONG)TrapFrame->XIntSp;
+ try {
+
+ //
+ // Compute new user mode stack address, probe for writability,
+ // and copy the input buffer to the user stack.
+ //
+
+ Length = (InputLength +
+ sizeof(QUAD) - 1 + sizeof(UCALLOUT_FRAME)) & ~(sizeof(QUAD) - 1);
+
+ CalloutFrame = (PUCALLOUT_FRAME)(OldStack - Length);
+ ProbeForWrite(CalloutFrame, Length, sizeof(QUAD));
+ RtlMoveMemory(CalloutFrame + 1, InputBuffer, InputLength);
+
+ //
+        // Allocate stack frame and fill in callout arguments.
+ //
+
+ CalloutFrame->Buffer = (PVOID)(CalloutFrame + 1);
+ CalloutFrame->Length = InputLength;
+ CalloutFrame->ApiNumber = ApiNumber;
+ CalloutFrame->Pad = 0;
+ CalloutFrame->Sp = TrapFrame->XIntSp;
+ CalloutFrame->Ra = TrapFrame->XIntRa;
+
+ //
+ // If an exception occurs during the probe of the user stack, then
+ // always handle the exception and return the exception code as the
+ // status value.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call user mode.
+ //
+
+ TrapFrame->XIntSp = (LONG)CalloutFrame;
+ Status = KiCallUserMode(OutputBuffer, OutputLength);
+ TrapFrame->XIntSp = (LONG)OldStack;
+
+ //
+ // If the GDI TEB batch contains any entries, it must be flushed.
+ //
+
+ if (((PTEB)KeGetCurrentThread()->Teb)->GdiBatchCount > 0) {
+ KeGdiFlushUserBatch();
+ }
+
+ return Status;
+}
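
The frame carving in the try block above rounds the input length up to an
8-byte (QUAD) multiple, adds room for the UCALLOUT_FRAME header, and places
the frame just below the caller's user stack pointer. A small stand-alone
sketch of that arithmetic; the 24-byte frame size and the addresses are
assumptions used only for illustration.

    #include <stdio.h>

    #define QUAD_SIZE           8     /* sizeof(QUAD) on MIPS           */
    #define UCALLOUT_FRAME_SIZE 24    /* assumed size of UCALLOUT_FRAME */

    int main(void)
    {
        unsigned long OldStack = 0x7ffdf000UL;   /* example user sp        */
        unsigned long InputLength = 13;          /* arbitrary input length */

        /* Round the input up to a QUAD multiple and add the frame header. */
        unsigned long Length =
            (InputLength + QUAD_SIZE - 1 + UCALLOUT_FRAME_SIZE) &
            ~(unsigned long)(QUAD_SIZE - 1);

        unsigned long CalloutFrame = OldStack - Length;

        /* Prints: frame at 0x7ffdefd8, length 40 */
        printf("frame at %#lx, length %lu\n", CalloutFrame, Length);
        return 0;
    }
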
+
+NTSTATUS
+NtW32Call (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ OUT PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls a W32 function.
+
+ N.B. ************** This is a temporary service *****************
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied to
+ the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+    OutputBuffer - Supplies a pointer to a variable that receives the
+        output buffer address.
+
+    OutputLength - Supplies a pointer to a variable that receives the
+        output buffer length.
+
+Return Value:
+
+    If the callout cannot be executed, then an error status is returned.
+    Otherwise, the status returned by the callback function is returned.
+
+--*/
+
+{
+
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+ NTSTATUS Status;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // If the current thread is not a GUI thread, then fail the service
+ // since the thread does not have a large stack.
+ //
+
+ if (KeGetCurrentThread()->Win32Thread == (PVOID)&KeServiceDescriptorTable[0]) {
+ return STATUS_NOT_IMPLEMENTED;
+ }
+
+ //
+ // Probe the output buffer address and length for writeability.
+ //
+
+ try {
+ ProbeForWriteUlong((PULONG)OutputBuffer);
+ ProbeForWriteUlong(OutputLength);
+
+ //
+ // If an exception occurs during the probe of the output buffer or
+ // length, then always handle the exception and return the exception
+ // code as the status value.
+ //
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call out to user mode specifying the input buffer and API number.
+ //
+
+ Status = KeUserModeCallback(ApiNumber,
+ InputBuffer,
+ InputLength,
+ &ValueBuffer,
+ &ValueLength);
+
+ //
+    // If the callout is successful, then return the output buffer address
+    // and length to the caller.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *OutputBuffer = ValueBuffer;
+ *OutputLength = ValueLength;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ }
+ }
+
+ return Status;
+}
diff --git a/private/ntos/ke/mips/callout.s b/private/ntos/ke/mips/callout.s
new file mode 100644
index 000000000..d40dd5b48
--- /dev/null
+++ b/private/ntos/ke/mips/callout.s
@@ -0,0 +1,411 @@
+// TITLE("Call Out to User Mode")
+//++
+//
+// Copyright (c) 1994 Microsoft Corporation
+//
+// Module Name:
+//
+// callout.s
+//
+// Abstract:
+//
+// This module implements the code necessary to call out from kernel
+// mode to user mode.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Oct-1994
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeUserCallbackDispatcher 4
+
+ SBTTL("Call User Mode Function")
+//++
+//
+// NTSTATUS
+// KiCallUserMode (
+// IN PVOID *OutputBuffer,
+// IN PULONG OutputLength
+// )
+//
+// Routine Description:
+//
+// This function calls a user mode function.
+//
+// N.B. This function calls out to user mode and the NtCallbackReturn
+// function returns back to the caller of this function. Therefore,
+// the stack layout must be consistent between the two routines.
+//
+// Arguments:
+//
+//    OutputBuffer (a0) - Supplies a pointer to the variable that receives
+// the address of the output buffer.
+//
+// OutputLength (a1) - Supplies a pointer to a variable that receives
+// the length of the output buffer.
+//
+// Return Value:
+//
+// The final status of the call out function is returned as the status
+// of the function.
+//
+// N.B. This function does not return to its caller. A return to the
+// caller is executed when a NtCallbackReturn system service is
+// executed.
+//
+// N.B. This function does return to its caller if a kernel stack
+// expansion is required and the attempted expansion fails.
+//
+//--
+
+ NESTED_ENTRY(KiCallUserMode, CuFrameLength, zero)
+
+ subu sp,sp,CuFrameLength // allocate stack frame
+ sw ra,CuRa(sp) // save return address
+
+//
+// Save nonvolatile integer registers.
+//
+
+ sw s0,CuS0(sp) // save integer registers s0-s8
+ sw s1,CuS1(sp) //
+ sw s2,CuS2(sp) //
+ sw s3,CuS3(sp) //
+ sw s4,CuS4(sp) //
+ sw s5,CuS5(sp) //
+ sw s6,CuS6(sp) //
+ sw s7,CuS7(sp) //
+ sw s8,CuS8(sp) //
+
+//
+// Save nonvolatile floating registers.
+//
+
+ sdc1 f20,CuF20(sp) // save floating registers f20-f31
+ sdc1 f22,CuF22(sp) //
+ sdc1 f24,CuF24(sp) //
+ sdc1 f26,CuF26(sp) //
+ sdc1 f28,CuF28(sp) //
+ sdc1 f30,CuF30(sp) //
+
+ PROLOGUE_END
+
+//
+// Save argument registers.
+//
+
+ sw a0,CuA0(sp) // save output buffer address
+ sw a1,CuA1(sp) // save output length address
+
+//
+// Check if sufficient room is available on the kernel stack for another
+// system call.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,KiPcr + PcInitialStack(zero) // get initial stack address
+ lw t2,ThStackLimit(t0) // get current stack limit
+ subu t3,sp,KERNEL_LARGE_STACK_COMMIT // compute bottom address
+ sltu t4,t3,t2 // check if limit exceeded
+ beq zero,t4,10f // if eq, limit not exceeded
+ move a0,sp // set current kernel stack address
+ jal MmGrowKernelStack // attempt to grow the kernel stack
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,KiPcr + PcInitialStack(zero) // get initial stack address
+ lw t2,ThStackLimit(t0) // get expanded stack limit
+ bne zero,v0,20f // if ne, attempt to grow failed
+ sw t2,KiPcr + PcStackLimit(zero) // set expanded stack limit
+
+//
+// Get the address of the current thread and save the previous trap frame
+// and callback stack addresses in the current frame. Also save the new
+// callback stack address in the thread object.
+//
+
+10: lw s8,ThTrapFrame(t0) // get trap frame address
+ lw t2,ThCallbackStack(t0) // get callback stack address
+ sw t1,CuInStk(sp) // save initial stack address
+ sw s8,CuTrFr(sp) // save trap frame address
+ sw t2,CuCbStk(sp) // save callback stack address
+ sw sp,ThCallbackStack(t0) // set callback stack address
+
+//
+// Restore state and callback to user mode.
+//
+
+ lw t2,TrFsr(s8) // get previous floating status
+ li t3,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ cfc1 t4,fsr // get current floating status
+        mtc0    t3,psr                  // disable interrupts - 3 cycle hazard
+ ctc1 t2,fsr // restore previous floating status
+ lw t3,TrPsr(s8) // get previous processor status
+ sw sp,ThInitialStack(t0) // reset initial stack address
+ sw sp,KiPcr + PcInitialStack(zero) //
+ sw t4,CuFsr(sp) // save current floating status
+ lw t4,KeUserCallbackDispatcher // set continuation address
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+
+ lbu t1,ThApcState + AsUserApcPending(t0) // get user APC pending
+ sb zero,ThAlerted(t0) // clear kernel mode alerted
+ mfc0 t2,cause // get exception cause register
+ sll t1,t1,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+        or      t2,t2,t1                // merge possible APC interrupt request
+ mtc0 t2,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB
+// miss is not possible, then restore the volatile register state.
+//
+
+ sw t3,KiPcr + PcSavedT7(zero) // save processor status
+ j KiServiceExit // join common code
+ sw t4,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+//
+// An attempt to grow the kernel stack failed.
+//
+
+20: lw ra,CuRa(sp) // restore return address
+ addu sp,sp,CuFrameLength // deallocate stack frame
+ j ra // return
+
+        .end    KiCallUserMode
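
The prologue above checks whether another system call would run the kernel
stack below its committed limit and, if so, calls MmGrowKernelStack before the
callout proceeds. The same test expressed in C, as a sketch only (the wrapper
function is invented; the constant, field, and routine names are the ones used
above).

    NTSTATUS
    KiEnsureCalloutStackRoom (
        IN PKTHREAD Thread,
        IN ULONG CurrentSp
        )
    {
        //
        // If the bottom of another large system call frame would fall
        // below the current stack limit, commit more of the large stack.
        //

        if ((CurrentSp - KERNEL_LARGE_STACK_COMMIT) < (ULONG)Thread->StackLimit) {
            return MmGrowKernelStack((PVOID)CurrentSp);
        }

        return STATUS_SUCCESS;
    }
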
+
+ SBTTL("Switch Kernel Stack")
+//++
+//
+// PVOID
+// KeSwitchKernelStack (
+// IN PVOID StackBase,
+// IN PVOID StackLimit
+// )
+//
+// Routine Description:
+//
+// This function switches to the specified large kernel stack.
+//
+// N.B. This function can ONLY be called when there are no variables
+// in the stack that refer to other variables in the stack, i.e.,
+// there are no pointers into the stack.
+//
+// Arguments:
+//
+// StackBase (a0) - Supplies a pointer to the base of the new kernel
+// stack.
+//
+//    StackLimit (a1) - Supplies a pointer to the limit of the new kernel
+// stack.
+//
+// Return Value:
+//
+// The old kernel stack is returned as the function value.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument register save area
+SsRa: .space 4 // saved return address
+SsSp: .space 4 // saved new stack pointer
+ .space 2 * 4 // fill
+SsFrameLength: // length of stack frame
+SsA0: .space 4 // saved argument registers a0-a1
+SsA1: .space 4 //
+
+ NESTED_ENTRY(KeSwitchKernelStack, SsFrameLength, zero)
+
+ subu sp,sp,SsFrameLength // allocate stack frame
+ sw ra,SsRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the address of the new stack and copy the old stack to the new
+// stack.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ sw a0,SsA0(sp) // save new kernel stack base address
+ sw a1,SsA1(sp) // save new kernel stack limit address
+ lw a2,ThStackBase(t0) // get current stack base address
+ lw a3,ThTrapFrame(t0) // get current trap frame address
+ addu a3,a3,a0 // relocate current trap frame address
+ subu a3,a3,a2 //
+ sw a3,ThTrapFrame(t0) //
+ move a1,sp // set source address of copy
+ subu a2,a2,sp // compute length of copy
+ subu a0,a0,a2 // set destination address of copy
+ sw a0,SsSp(sp) // save new stack pointer address
+ jal RtlMoveMemory // copy old stack to new stack
+
+//
+// Switch to new kernel stack and return the address of the old kernel
+// stack.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+
+ DISABLE_INTERRUPTS(t1) // disable interrupts
+
+ lw v0,ThStackBase(t0) // get old kernel stack base address
+ lw a0,SsA0(sp) // get new kernel stack base address
+ lw a1,SsA1(sp) // get new kernel stack limit address
+ sw a0,ThInitialStack(t0) // set new initial stack address
+ sw a0,ThStackBase(t0) // set new stack base address
+ sw a1,ThStackLimit(t0) // set new stack limit address
+ li v1,TRUE // set large kernel stack TRUE
+ sb v1,ThLargeStack(t0) //
+        sw      a0,KiPcr + PcInitialStack(zero) // set initial stack address
+ sw a1,KiPcr + PcStackLimit(zero) // set stack limit
+ lw sp,SsSp(sp) // switch to new kernel stack
+
+ ENABLE_INTERRUPTS(t1) // enable interrupts
+
+ lw ra,SsRa(sp) // restore return address
+ addu sp,sp,SsFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KeSwitchKernelStack
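
The subtle step above is relocating the thread's trap frame pointer by the
distance between the old and new stack bases before RtlMoveMemory copies the
stack contents. The same relocation in C, as an illustrative sketch (the
helper name is invented).

    PKTRAP_FRAME
    KiRelocateTrapFramePointer (
        IN PKTRAP_FRAME OldTrapFrame,
        IN PVOID OldStackBase,
        IN PVOID NewStackBase
        )
    {
        //
        // The trap frame lies inside the region being copied, so it moves
        // by the same displacement as the stack base itself.
        //

        return (PKTRAP_FRAME)((ULONG)OldTrapFrame +
                              (ULONG)NewStackBase -
                              (ULONG)OldStackBase);
    }
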
+
+ SBTTL("Return from User Mode Callback")
+//++
+//
+// NTSTATUS
+// NtCallbackReturn (
+// IN PVOID OutputBuffer OPTIONAL,
+// IN ULONG OutputLength,
+// IN NTSTATUS Status
+// )
+//
+// Routine Description:
+//
+// This function returns from a user mode callout to the kernel
+// mode caller of the user mode callback function.
+//
+// N.B. This function returns to the function that called out to user
+// mode and the KiCallUserMode function calls out to user mode.
+// Therefore, the stack layout must be consistent between the
+// two routines.
+//
+// Arguments:
+//
+// OutputBuffer - Supplies an optional pointer to an output buffer.
+//
+// OutputLength - Supplies the length of the output buffer.
+//
+// Status - Supplies the status value returned to the caller of the
+// callback function.
+//
+// Return Value:
+//
+// If the callback return cannot be executed, then an error status is
+// returned. Otherwise, the specified callback status is returned to
+// the caller of the callback function.
+//
+// N.B. This function returns to the function that called out to user
+//         mode if a callout is currently active.
+//
+//--
+
+ LEAF_ENTRY(NtCallbackReturn)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,ThCallbackStack(t0) // get callback stack address
+ beq zero,t1,10f // if eq, no callback stack present
+
+//
+// Restore nonvolatile integer registers.
+//
+
+ lw s0,CuS0(t1) // restore integer registers s0-s8
+ lw s1,CuS1(t1) //
+ lw s2,CuS2(t1) //
+ lw s3,CuS3(t1) //
+ lw s4,CuS4(t1) //
+ lw s5,CuS5(t1) //
+ lw s6,CuS6(t1) //
+ lw s7,CuS7(t1) //
+ lw s8,CuS8(t1) //
+
+//
+// Restore nonvolatile floating registers.
+//
+
+ ldc1 f20,CuF20(t1) // restore floating registers f20-f31
+ ldc1 f22,CuF22(t1) //
+ ldc1 f24,CuF24(t1) //
+ ldc1 f26,CuF26(t1) //
+ ldc1 f28,CuF28(t1) //
+ ldc1 f30,CuF30(t1) //
+
+//
+// Restore the trap frame and callback stacks addresses, store the output
+// buffer address and length, restore the floating status, and set the
+// service status.
+//
+
+ lw t2,CuTrFr(t1) // get previous trap frame address
+ lw t3,CuCbStk(t1) // get previous callback stack address
+ lw t4,CuA0(t1) // get address to store output address
+ lw t5,CuA1(t1) // get address to store output length
+ lw t6,CuFsr(t1) // get previous floating status
+ sw t2,ThTrapFrame(t0) // restore trap frame address
+ sw t3,ThCallbackStack(t0) // restore callback stack address
+ sw a0,0(t4) // store output buffer address
+ sw a1,0(t5) // store output buffer length
+
+ .set noreorder
+ .set noat
+ ctc1 t6,fsr // restore previous floating status
+ .set at
+ .set reorder
+
+ move v0,a2 // set callback service status
+
+//
+// Restore initial stack pointer, trim stack back to the callback frame,
+// deallocate callback stack frame, and return to callback caller.
+//
+
+ lw t2,CuInStk(t1) // get previous initial stack
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ sw t2,ThInitialStack(t0) // restore initial stack address
+ sw t2,KiPcr + PcInitialStack(zero) //
+        move    sp,t1                   // trim stack back to callback frame
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ lw ra,CuRa(sp) // restore return address
+ addu sp,sp,CuFrameLength // deallocate stack frame
+ j ra // return
+
+//
+// No callback is currently active.
+//
+
+10: li v0,STATUS_NO_CALLBACK_ACTIVE // set service status
+ j ra // return
+
+ .end NtCallbackReturn
diff --git a/private/ntos/ke/mips/dmpstate.c b/private/ntos/ke/mips/dmpstate.c
new file mode 100644
index 000000000..2215df217
--- /dev/null
+++ b/private/ntos/ke/mips/dmpstate.c
@@ -0,0 +1,713 @@
+/*++
+
+Copyright (c) 1992 Microsoft Corporation
+
+Module Name:
+
+ dmpstate.c
+
+Abstract:
+
+ This module implements the architecture specific routine that dumps
+ the machine state when a bug check occurs and no debugger is hooked
+ to the system. It is assumed that it is called from bug check.
+
+Author:
+
+ David N. Cutler (davec) 17-Jan-1992
+
+Environment:
+
+ Kernel mode.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiDisplayString (
+ IN ULONG Column,
+ IN ULONG Row,
+ IN PCHAR Buffer
+ );
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ );
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ );
+
+//
+// Define external data.
+//
+
+extern LIST_ENTRY PsLoadedModuleList;
+
+VOID
+KeDumpMachineState (
+ IN PKPROCESSOR_STATE ProcessorState,
+ IN PCHAR Buffer,
+ IN PULONG BugCheckParameters,
+ IN ULONG NumberOfParameters,
+ IN PKE_BUGCHECK_UNICODE_TO_ANSI UnicodeToAnsiRoutine
+ )
+
+/*++
+
+Routine Description:
+
+ This function formats and displays the machine state at the time of the
+    call to bug check.
+
+Arguments:
+
+ ProcessorState - Supplies a pointer to a processor state record.
+
+ Buffer - Supplies a pointer to a buffer to be used to output machine
+ state information.
+
+ BugCheckParameters - Supplies a pointer to an array of additional
+ bug check information.
+
+    NumberOfParameters - Supplies the size of the bug check parameters
+ array.
+
+ UnicodeToAnsiRoutine - Supplies a pointer to a routine to convert Unicode strings
+ to Ansi strings without touching paged translation tables.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PCONTEXT ContextRecord;
+ ULONG ControlPc;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ ULONG DisplayColumn;
+ ULONG DisplayHeight;
+ ULONG DisplayRow;
+ ULONG DisplayWidth;
+ UNICODE_STRING DllName;
+ ULONG EstablisherFrame;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PVOID ImageBase;
+ ULONG Index;
+ BOOLEAN InFunction;
+ ULONG LastStack;
+ PLIST_ENTRY ModuleListHead;
+ PLIST_ENTRY NextEntry;
+ ULONG NextPc;
+ ULONG StackLimit;
+ UCHAR AnsiBuffer[ 32 ];
+ ULONG DateStamp;
+
+ //
+ // Query display parameters.
+ //
+
+ HalQueryDisplayParameters(&DisplayWidth,
+ &DisplayHeight,
+ &DisplayColumn,
+ &DisplayRow);
+
+ //
+ // Display any addresses that fall within the range of any module in
+ // the loaded module list.
+ //
+
+ for (Index = 0; Index < NumberOfParameters; Index += 1) {
+ ImageBase = KiPcToFileHeader((PVOID)*BugCheckParameters,
+ &ImageBase,
+ &DataTableEntry);
+
+ if (ImageBase != NULL) {
+ sprintf(Buffer,
+ "*** %08lX has base at %08lX - %s\n",
+ *BugCheckParameters,
+ ImageBase,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+
+ HalDisplayString(Buffer);
+ }
+
+ BugCheckParameters += 1;
+ }
+
+ //
+ // Virtually unwind to the caller of bug check.
+ //
+
+ ContextRecord = &ProcessorState->ContextFrame;
+ LastStack = (ULONG)ContextRecord->XIntSp;
+ ControlPc = (ULONG)(ContextRecord->XIntRa - 4);
+ NextPc = ControlPc;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if (FunctionEntry != NULL) {
+ NextPc = RtlVirtualUnwind(ControlPc | 1,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL);
+ }
+
+ //
+ // At this point the context record contains the machine state at the
+ // call to bug check.
+ //
+ // Put out the machine state at the time of the bugcheck.
+ //
+
+ sprintf(Buffer,
+ "\nMachine State at Call to Bug Check PC : %08lX PSR : %08lX\n\n",
+ (ULONG)ContextRecord->XIntRa,
+ ContextRecord->Psr);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Format and output the integer registers.
+ //
+
+ sprintf(Buffer,
+ "AT :%8lX V0 :%8lX V1 :%8lX A0 :%8lX\n",
+ (ULONG)ContextRecord->XIntAt,
+ (ULONG)ContextRecord->XIntV0,
+ (ULONG)ContextRecord->XIntV1,
+ (ULONG)ContextRecord->XIntA0);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "A1 :%8lX A2 :%8lX A3 :%8lX T0 :%8lX\n",
+ (ULONG)ContextRecord->XIntA1,
+ (ULONG)ContextRecord->XIntA2,
+ (ULONG)ContextRecord->XIntA3,
+ (ULONG)ContextRecord->XIntT0);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T1 :%8lX T2 :%8lX T3 :%8lX T4 :%8lX\n",
+ (ULONG)ContextRecord->XIntT1,
+ (ULONG)ContextRecord->XIntT2,
+ (ULONG)ContextRecord->XIntT3,
+ (ULONG)ContextRecord->XIntT4);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T5 :%8lX T6 :%8lX T7 :%8lX T8 :%8lX\n",
+ (ULONG)ContextRecord->XIntT5,
+ (ULONG)ContextRecord->XIntT6,
+ (ULONG)ContextRecord->XIntT7,
+ (ULONG)ContextRecord->XIntT8);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "T9 :%8lX S0 :%8lX S1 :%8lX S2 :%8lX\n",
+ (ULONG)ContextRecord->XIntT9,
+ (ULONG)ContextRecord->XIntS0,
+ (ULONG)ContextRecord->XIntS1,
+ (ULONG)ContextRecord->XIntS2);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "S3 :%8lX S4 :%8lX S5 :%8lX S6 :%8lX\n",
+ (ULONG)ContextRecord->XIntS3,
+ (ULONG)ContextRecord->XIntS4,
+ (ULONG)ContextRecord->XIntS5,
+ (ULONG)ContextRecord->XIntS6);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "S7 :%8lX S8 :%8lX GP :%8lX SP :%8lX\n",
+ (ULONG)ContextRecord->XIntS7,
+ (ULONG)ContextRecord->XIntS8,
+ (ULONG)ContextRecord->XIntGp,
+ (ULONG)ContextRecord->XIntSp);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "RA :%8lX LO :%8lX HI :%8lX FSR:%8lX\n",
+ (ULONG)ContextRecord->XIntRa,
+ (ULONG)ContextRecord->XIntLo,
+ (ULONG)ContextRecord->XIntHi,
+ (ULONG)ContextRecord->Fsr);
+
+ HalDisplayString(Buffer);
+
+ //
+    // Format and output the first four floating registers.
+ //
+
+ sprintf(Buffer,
+ "F0 :%8lX F1 :%8lX F2 :%8lX F3 :%8lX\n",
+ ContextRecord->FltF0,
+ ContextRecord->FltF1,
+ ContextRecord->FltF2,
+ ContextRecord->FltF3);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F4 :%8lX F5 :%8lX F6 :%8lX F7 :%8lX\n",
+ ContextRecord->FltF4,
+ ContextRecord->FltF5,
+ ContextRecord->FltF6,
+ ContextRecord->FltF7);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F8 :%8lX F9 :%8lX F10:%8lX F11:%8lX\n",
+ ContextRecord->FltF8,
+ ContextRecord->FltF9,
+ ContextRecord->FltF10,
+ ContextRecord->FltF11);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F12:%8lX F13:%8lX F14:%8lX F15:%8lX\n",
+ ContextRecord->FltF12,
+ ContextRecord->FltF13,
+ ContextRecord->FltF14,
+ ContextRecord->FltF15);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F16:%8lX F17:%8lX F18:%8lX F19:%8lX\n",
+ ContextRecord->FltF16,
+ ContextRecord->FltF17,
+ ContextRecord->FltF18,
+ ContextRecord->FltF19);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F20:%8lX F21:%8lX F22:%8lX F23:%8lX\n",
+ ContextRecord->FltF20,
+ ContextRecord->FltF21,
+ ContextRecord->FltF22,
+ ContextRecord->FltF23);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F24:%8lX F25:%8lX F26:%8lX F27:%8lX\n",
+ ContextRecord->FltF24,
+ ContextRecord->FltF25,
+ ContextRecord->FltF26,
+ ContextRecord->FltF27);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "F28:%8lX F29:%8lX F30:%8lX F31:%8lX\n\n",
+ ContextRecord->FltF28,
+ ContextRecord->FltF29,
+ ContextRecord->FltF30,
+ ContextRecord->FltF31);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Output short stack back trace with base address.
+ //
+
+ DllName.Length = 0;
+ DllName.Buffer = L"";
+ if (FunctionEntry != NULL) {
+ StackLimit = (ULONG)KeGetCurrentThread()->KernelStack;
+ HalDisplayString("Callee-Sp Return-Ra Dll Base - Name\n");
+ for (Index = 0; Index < 8; Index += 1) {
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ sprintf(Buffer,
+ " %08lX %08lX : %08lX - %s\n",
+ (ULONG)ContextRecord->XIntSp,
+ NextPc + 4,
+ ImageBase,
+ (*UnicodeToAnsiRoutine)( (ImageBase != NULL) ? &DataTableEntry->BaseDllName : &DllName,
+ AnsiBuffer, sizeof( AnsiBuffer )));
+
+ HalDisplayString(Buffer);
+ if ((NextPc != ControlPc) || ((ULONG)ContextRecord->XIntSp != LastStack)) {
+ ControlPc = NextPc;
+ LastStack = (ULONG)ContextRecord->XIntSp;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if ((FunctionEntry != NULL) && (LastStack < StackLimit)) {
+ NextPc = RtlVirtualUnwind(ControlPc | 1,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL);
+ } else {
+ NextPc = (ULONG)ContextRecord->XIntRa;
+ }
+
+ } else {
+ break;
+ }
+ }
+ }
+
+ //
+ // Output the build number and other useful information.
+ //
+
+ sprintf(Buffer,
+ "\nIRQL : %d, DPC Active : %s, SYSVER 0x%08x\n",
+ KeGetCurrentIrql(),
+ KeIsExecutingDpc() ? "TRUE" : "FALSE",
+ NtBuildNumber);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Output the processor id and the primary cache sizes.
+ //
+
+ sprintf(Buffer,
+ "Processor Id %d.%d, Icache : %d, Dcache : %d\n",
+ (PCR->ProcessorId >> 8) & 0xff,
+ PCR->ProcessorId & 0xff,
+ PCR->FirstLevelIcacheSize,
+ PCR->FirstLevelDcacheSize);
+
+ HalDisplayString(Buffer);
+
+ //
+ // If the display width is greater than 80 + 24 (the size of a DLL
+ // name and base address), then display all the modules loaded in
+ // the system.
+ //
+
+ HalQueryDisplayParameters(&DisplayWidth,
+ &DisplayHeight,
+ &DisplayColumn,
+ &DisplayRow);
+
+ if (DisplayWidth > (80 + 24)) {
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ //
+ // Output display headers.
+ //
+
+ Index = 1;
+ KiDisplayString(80, Index, "Dll Base DateStmp - Name");
+ NextEntry = ModuleListHead->Flink;
+ if (NextEntry != NULL) {
+
+ //
+ // Scan the list of loaded modules and display their base
+ // address and name.
+ //
+
+ while (NextEntry != ModuleListHead) {
+ Index += 1;
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ if (MmDbgReadCheck(DataTableEntry->DllBase) != NULL) {
+ PIMAGE_NT_HEADERS NtHeaders;
+
+ NtHeaders = RtlImageNtHeader(DataTableEntry->DllBase);
+ DateStamp = NtHeaders->FileHeader.TimeDateStamp;
+
+ } else {
+ DateStamp = 0;
+ }
+ sprintf(Buffer,
+ "%08lX %08lx - %s",
+ DataTableEntry->DllBase,
+ DateStamp,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+
+ KiDisplayString(80, Index, Buffer);
+ NextEntry = NextEntry->Flink;
+ if (Index > DisplayHeight) {
+ break;
+ }
+ }
+ }
+ }
+
+ //
+ // Reset the current display position.
+ //
+
+ HalSetDisplayParameters(DisplayColumn, DisplayRow);
+ return;
+}
+
+VOID
+KiDisplayString (
+ IN ULONG Column,
+ IN ULONG Row,
+ IN PCHAR Buffer
+ )
+
+/*++
+
+Routine Description:
+
+    This function displays a string starting at the specified column and row
+ position on the screen.
+
+Arguments:
+
+ Column - Supplies the starting column of where the string is displayed.
+
+ Row - Supplies the starting row of where the string is displayed.
+
+    Buffer - Supplies a pointer to the string that is displayed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Position the cursor and display the string.
+ //
+
+ HalSetDisplayParameters(Column, Row);
+ HalDisplayString(Buffer);
+ return;
+}
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ )
+
+/*++
+
+Routine Description:
+
+ This function searches the currently active function tables for an entry
+ that corresponds to the specified PC value.
+
+Arguments:
+
+ ControlPc - Supplies the address of an instruction within the specified
+ function.
+
+Return Value:
+
+ If there is no entry in the function table for the specified PC, then
+ NULL is returned. Otherwise, the address of the function table entry
+ that corresponds to the specified PC is returned.
+
+--*/
+
+{
+
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PRUNTIME_FUNCTION FunctionTable;
+ ULONG SizeOfExceptionTable;
+ LONG High;
+ PVOID ImageBase;
+ LONG Low;
+ LONG Middle;
+
+ //
+ // Search for the image that includes the specified PC value.
+ //
+
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ //
+ // If an image is found that includes the specified PC, then locate the
+ // function table for the image.
+ //
+
+ if (ImageBase != NULL) {
+ FunctionTable = (PRUNTIME_FUNCTION)RtlImageDirectoryEntryToData(
+ ImageBase, TRUE, IMAGE_DIRECTORY_ENTRY_EXCEPTION,
+ &SizeOfExceptionTable);
+
+ //
+ // If a function table is located, then search the function table
+ // for a function table entry for the specified PC.
+ //
+
+ if (FunctionTable != NULL) {
+
+ //
+            // Initialize search indices.
+ //
+
+ Low = 0;
+ High = (SizeOfExceptionTable / sizeof(RUNTIME_FUNCTION)) - 1;
+
+ //
+ // Perform binary search on the function table for a function table
+ // entry that subsumes the specified PC.
+ //
+
+ while (High >= Low) {
+
+ //
+ // Compute next probe index and test entry. If the specified PC
+                // is greater than or equal to the beginning address and less
+ // than the ending address of the function table entry, then
+ // return the address of the function table entry. Otherwise,
+ // continue the search.
+ //
+
+ Middle = (Low + High) >> 1;
+ FunctionEntry = &FunctionTable[Middle];
+ if (ControlPc < FunctionEntry->BeginAddress) {
+ High = Middle - 1;
+
+ } else if (ControlPc >= FunctionEntry->EndAddress) {
+ Low = Middle + 1;
+
+ } else {
+
+ //
+ // The capability exists for more than one function entry
+ // to map to the same function. This permits a function to
+ // have discontiguous code segments described by separate
+ // function table entries. If the ending prologue address
+                    // is not within the limits of the beginning and ending
+                    // address of the function table entry, then the prologue
+ // ending address is the address of a function table entry
+ // that accurately describes the ending prologue address.
+ //
+
+ if ((FunctionEntry->PrologEndAddress < FunctionEntry->BeginAddress) ||
+ (FunctionEntry->PrologEndAddress >= FunctionEntry->EndAddress)) {
+ FunctionEntry = (PRUNTIME_FUNCTION)FunctionEntry->PrologEndAddress;
+ }
+
+ return FunctionEntry;
+ }
+ }
+ }
+ }
+
+ //
+ // A function table entry for the specified PC was not found.
+ //
+
+ return NULL;
+}
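
The indirection handled in the final else clause above can be pictured with
two entries describing one function whose code is split across regions: the
secondary entry's PrologEndAddress holds the address of the primary entry
rather than a code address. The sketch below uses invented addresses and
assumes the usual five-field MIPS RUNTIME_FUNCTION layout.

    VOID
    KiFunctionEntryExample (
        VOID
        )
    {
        RUNTIME_FUNCTION Primary;
        RUNTIME_FUNCTION Secondary;

        Primary.BeginAddress = 0x80011000;        // start of the function
        Primary.EndAddress = 0x80011200;          // end of the main region
        Primary.ExceptionHandler = NULL;          // assumed field
        Primary.HandlerData = NULL;               // assumed field
        Primary.PrologEndAddress = 0x80011020;    // inside [Begin, End)

        Secondary.BeginAddress = 0x80018000;      // split-off code region
        Secondary.EndAddress = 0x80018100;
        Secondary.ExceptionHandler = NULL;
        Secondary.HandlerData = NULL;

        //
        // Outside [Begin, End), so KiLookupFunctionEntry treats this as
        // the address of the primary entry and returns that entry instead.
        //

        Secondary.PrologEndAddress = (ULONG)&Primary;
    }
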
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the base of an image that contains the
+ specified PcValue. An image contains the PcValue if the PcValue
+ is within the ImageBase, and the ImageBase plus the size of the
+ virtual image.
+
+Arguments:
+
+ PcValue - Supplies a PcValue.
+
+ BaseOfImage - Returns the base address for the image containing the
+ PcValue. This value must be added to any relative addresses in
+ the headers to locate portions of the image.
+
+    DataTableEntry - Supplies a pointer to a variable that receives the
+ address of the data table entry that describes the image.
+
+Return Value:
+
+ NULL - No image was found that contains the PcValue.
+
+    NON-NULL - Returns the base address of the image that contains the
+ PcValue.
+
+--*/
+
+{
+
+ PLIST_ENTRY ModuleListHead;
+ PLDR_DATA_TABLE_ENTRY Entry;
+ PLIST_ENTRY Next;
+ ULONG Bounds;
+ PVOID ReturnBase, Base;
+
+ //
+ // If the module list has been initialized, then scan the list to
+ // locate the appropriate entry.
+ //
+
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ ReturnBase = NULL;
+ Next = ModuleListHead->Flink;
+ if (Next != NULL) {
+ while (Next != ModuleListHead) {
+ Entry = CONTAINING_RECORD(Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ Next = Next->Flink;
+ Base = Entry->DllBase;
+ Bounds = (ULONG)Base + Entry->SizeOfImage;
+ if ((ULONG)PcValue >= (ULONG)Base && (ULONG)PcValue < Bounds) {
+ *DataTableEntry = Entry;
+ ReturnBase = Base;
+ break;
+ }
+ }
+ }
+
+ *BaseOfImage = ReturnBase;
+ return ReturnBase;
+}
diff --git a/private/ntos/ke/mips/exceptn.c b/private/ntos/ke/mips/exceptn.c
new file mode 100644
index 000000000..315d2bda7
--- /dev/null
+++ b/private/ntos/ke/mips/exceptn.c
@@ -0,0 +1,896 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ exceptn.c
+
+Abstract:
+
+    This module implements the code necessary to dispatch exceptions to the
+ proper mode and invoke the exception dispatcher.
+
+Author:
+
+ David N. Cutler (davec) 3-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#define HEADER_FILE
+#include "kxmips.h"
+
+//
+// Define multiply overflow and divide by zero breakpoint instruction values.
+//
+
+#define KDDEBUG_BREAKPOINT ((SPEC_OP << 26) | (BREAKIN_BREAKPOINT << 16) | BREAK_OP)
+#define DIVIDE_BREAKPOINT ((SPEC_OP << 26) | (DIVIDE_BY_ZERO_BREAKPOINT << 16) | BREAK_OP)
+#define MULTIPLY_BREAKPOINT ((SPEC_OP << 26) | (MULTIPLY_OVERFLOW_BREAKPOINT << 16) | BREAK_OP)
+#define OVERFLOW_BREAKPOINT ((SPEC_OP << 26) | (DIVIDE_OVERFLOW_BREAKPOINT << 16) | BREAK_OP)
+
+//
+// Define external kernel breakpoint instruction value.
+//
+
+#define KERNEL_BREAKPOINT_INSTRUCTION 0x16000d
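
The values above are complete 32-bit MIPS break instructions with a software
code placed at bit 16 and up; KiDispatchException later compares the faulting
instruction, reported in ExceptionInformation[0], against them. A short sketch
of composing such a value and recovering the code again (the helper macros are
illustrative, not part of this source).

    #define MAKE_BREAK_INSTRUCTION(Code) \
        ((SPEC_OP << 26) | ((Code) << 16) | BREAK_OP)

    #define BREAK_INSTRUCTION_CODE(Instruction) \
        (((Instruction) >> 16) & 0x3ff)

    //
    // MAKE_BREAK_INSTRUCTION(DIVIDE_BY_ZERO_BREAKPOINT) reproduces the
    // DIVIDE_BREAKPOINT value defined above, and
    // BREAK_INSTRUCTION_CODE(DIVIDE_BREAKPOINT) recovers the code field.
    //
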
+
+VOID
+KeContextFromKframes (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PCONTEXT ContextFrame
+ )
+
+/*++
+
+Routine Description:
+
+    This routine moves the selected contents of the specified trap and exception
+ frames into the specified context frame according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame from which context
+ should be copied into the context record.
+
+ ContextFrame - Supplies a pointer to the context frame that receives the
+ context copied from the trap and exception frames.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG ContextFlags;
+
+ //
+ // Set control information if specified.
+ //
+
+ ContextFlags = ContextFrame->ContextFlags;
+ if ((ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set integer register gp, ra, sp, FIR, and PSR.
+ //
+
+ ContextFrame->XIntGp = TrapFrame->XIntGp;
+ ContextFrame->XIntSp = TrapFrame->XIntSp;
+ ContextFrame->Fir = TrapFrame->Fir;
+ ContextFrame->Psr = TrapFrame->Psr;
+ ContextFrame->XIntRa = TrapFrame->XIntRa;
+ }
+
+ //
+ // Set integer register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+        // Set integer registers zero, at - t9, k0, k1, lo, and hi.
+ //
+
+ ContextFrame->XIntZero = 0;
+ ContextFrame->XIntAt = TrapFrame->XIntAt;
+ ContextFrame->XIntV0 = TrapFrame->XIntV0;
+ ContextFrame->XIntV1 = TrapFrame->XIntV1;
+ ContextFrame->XIntA0 = TrapFrame->XIntA0;
+ ContextFrame->XIntA1 = TrapFrame->XIntA1;
+ ContextFrame->XIntA2 = TrapFrame->XIntA2;
+ ContextFrame->XIntA3 = TrapFrame->XIntA3;
+ ContextFrame->XIntT0 = TrapFrame->XIntT0;
+ ContextFrame->XIntT1 = TrapFrame->XIntT1;
+ ContextFrame->XIntT2 = TrapFrame->XIntT2;
+ ContextFrame->XIntT3 = TrapFrame->XIntT3;
+ ContextFrame->XIntT4 = TrapFrame->XIntT4;
+ ContextFrame->XIntT5 = TrapFrame->XIntT5;
+ ContextFrame->XIntT6 = TrapFrame->XIntT6;
+ ContextFrame->XIntT7 = TrapFrame->XIntT7;
+ ContextFrame->XIntT8 = TrapFrame->XIntT8;
+ ContextFrame->XIntT9 = TrapFrame->XIntT9;
+ ContextFrame->XIntK0 = 0;
+ ContextFrame->XIntK1 = 0;
+ ContextFrame->XIntLo = TrapFrame->XIntLo;
+ ContextFrame->XIntHi = TrapFrame->XIntHi;
+
+ //
+ // Set integer registers s0 - s7, and s8.
+ //
+
+ ContextFrame->XIntS0 = TrapFrame->XIntS0;
+ ContextFrame->XIntS1 = TrapFrame->XIntS1;
+ ContextFrame->XIntS2 = TrapFrame->XIntS2;
+ ContextFrame->XIntS3 = TrapFrame->XIntS3;
+ ContextFrame->XIntS4 = TrapFrame->XIntS4;
+ ContextFrame->XIntS5 = TrapFrame->XIntS5;
+ ContextFrame->XIntS6 = TrapFrame->XIntS6;
+ ContextFrame->XIntS7 = TrapFrame->XIntS7;
+ ContextFrame->XIntS8 = TrapFrame->XIntS8;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Set floating registers f0 - f19.
+ //
+
+ RtlMoveMemory(&ContextFrame->FltF0, &TrapFrame->FltF0,
+ sizeof(ULONG) * (20));
+
+ //
+ // Set floating registers f20 - f31.
+ //
+
+ RtlMoveMemory(&ContextFrame->FltF20, &ExceptionFrame->FltF20,
+ sizeof(ULONG) * (12));
+
+ //
+ // Set floating status register.
+ //
+
+ ContextFrame->Fsr = TrapFrame->Fsr;
+ }
+
+ return;
+}
+
+VOID
+KeContextToKframes (
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN PCONTEXT ContextFrame,
+ IN ULONG ContextFlags,
+ IN KPROCESSOR_MODE PreviousMode
+ )
+
+/*++
+
+Routine Description:
+
+ This routine moves the selected contents of the specified context frame into
+ the specified trap and exception frames according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame that receives the volatile
+ context from the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame that receives
+ the nonvolatile context from the context record.
+
+ ContextFrame - Supplies a pointer to a context frame that contains the
+ context that is to be copied into the trap and exception frames.
+
+ ContextFlags - Supplies the set of flags that specify which parts of the
+ context frame are to be copied into the trap and exception frames.
+
+ PreviousMode - Supplies the processor mode for which the trap and exception
+ frames are being built.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set integer register gp, sp, ra, FIR, and PSR.
+ //
+
+ TrapFrame->XIntGp = ContextFrame->XIntGp;
+ TrapFrame->XIntSp = ContextFrame->XIntSp;
+ TrapFrame->Fir = ContextFrame->Fir;
+ TrapFrame->Psr = SANITIZE_PSR(ContextFrame->Psr, PreviousMode);
+ TrapFrame->XIntRa = ContextFrame->XIntRa;
+ }
+
+ //
+    // Set integer register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Set integer registers at - t9, lo, and hi.
+ //
+
+ TrapFrame->XIntAt = ContextFrame->XIntAt;
+ TrapFrame->XIntV0 = ContextFrame->XIntV0;
+ TrapFrame->XIntV1 = ContextFrame->XIntV1;
+ TrapFrame->XIntA0 = ContextFrame->XIntA0;
+ TrapFrame->XIntA1 = ContextFrame->XIntA1;
+ TrapFrame->XIntA2 = ContextFrame->XIntA2;
+ TrapFrame->XIntA3 = ContextFrame->XIntA3;
+ TrapFrame->XIntT0 = ContextFrame->XIntT0;
+ TrapFrame->XIntT1 = ContextFrame->XIntT1;
+ TrapFrame->XIntT2 = ContextFrame->XIntT2;
+ TrapFrame->XIntT3 = ContextFrame->XIntT3;
+ TrapFrame->XIntT4 = ContextFrame->XIntT4;
+ TrapFrame->XIntT5 = ContextFrame->XIntT5;
+ TrapFrame->XIntT6 = ContextFrame->XIntT6;
+ TrapFrame->XIntT7 = ContextFrame->XIntT7;
+ TrapFrame->XIntT8 = ContextFrame->XIntT8;
+ TrapFrame->XIntT9 = ContextFrame->XIntT9;
+ TrapFrame->XIntLo = ContextFrame->XIntLo;
+ TrapFrame->XIntHi = ContextFrame->XIntHi;
+
+ //
+ // Set integer registers s0 - s7, and s8.
+ //
+
+ TrapFrame->XIntS0 = ContextFrame->XIntS0;
+ TrapFrame->XIntS1 = ContextFrame->XIntS1;
+ TrapFrame->XIntS2 = ContextFrame->XIntS2;
+ TrapFrame->XIntS3 = ContextFrame->XIntS3;
+ TrapFrame->XIntS4 = ContextFrame->XIntS4;
+ TrapFrame->XIntS5 = ContextFrame->XIntS5;
+ TrapFrame->XIntS6 = ContextFrame->XIntS6;
+ TrapFrame->XIntS7 = ContextFrame->XIntS7;
+ TrapFrame->XIntS8 = ContextFrame->XIntS8;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Set floating registers f0 - f19.
+ //
+
+ RtlMoveMemory(&TrapFrame->FltF0, &ContextFrame->FltF0,
+ sizeof(ULONG) * (20));
+
+ //
+ // Set floating registers f20 - f31.
+ //
+
+ RtlMoveMemory(&ExceptionFrame->FltF20, &ContextFrame->FltF20,
+ sizeof(ULONG) * (12));
+
+ //
+ // Set floating status register.
+ //
+
+ TrapFrame->Fsr = SANITIZE_FSR(ContextFrame->Fsr, PreviousMode);
+ }
+
+ return;
+}
+
+VOID
+KiDispatchException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN BOOLEAN FirstChance
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to dispatch an exception to the proper mode and
+ to cause the exception dispatcher to be called.
+
+ If the exception is a data misalignment, the previous mode is user, this
+ is the first chance for handling the exception, and the current thread
+ has enabled automatic alignment fixup, then an attempt is made to emulate
+ the unaligned reference. Data misalignment exceptions are never emulated
+ for kernel mode.
+
+ If the exception is a floating exception (N.B. the pseudo status
+ STATUS_FLOAT_STACK_CHECK is used to signify this and is converted to the
+ proper code by the floating emulation routine), then an attempt is made
+ to emulate the floating operation if it is not implemented.
+
+ If the exception is neither a data misalignment nor a floating point
+    exception and the previous mode is kernel, then the exception
+ dispatcher is called directly to process the exception. Otherwise the
+ exception record, exception frame, and trap frame contents are copied
+ to the user mode stack. The contents of the exception frame and trap
+ are then modified such that when control is returned, execution will
+ commense in user mode in a routine which will call the exception
+ dispatcher.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ PreviousMode - Supplies the previous processor mode.
+
+ FirstChance - Supplies a boolean variable that specifies whether this
+ is the first (TRUE) or second (FALSE) time that this exception has
+ been processed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextFrame;
+ PULONG Destination;
+ EXCEPTION_RECORD ExceptionRecord1;
+ ULONG Index;
+ LONG Length;
+ PULONGLONG Source;
+ BOOLEAN UserApcPending;
+ ULONG UserStack1;
+ ULONG UserStack2;
+
+ //
+ // If the exception is an access violation, and the previous mode is
+ // user mode, then attempt to emulate a load or store operation if
+ // the exception address is at the end of a page.
+ //
+ // N.B. The following is a workaround for a r4000 chip bug where an
+    //      address privilege violation is reported as an access violation
+ // on a load or store instruction that is the last instruction
+ // in a page.
+ //
+
+ if ((ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
+ (((ULONG)ExceptionRecord->ExceptionAddress & 0xffc) == 0xffc) &&
+ (PreviousMode != KernelMode) &&
+ (KiEmulateReference(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE)) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+
+ //
+ // If the exception is a data bus error, then process the error.
+ //
+ // N.B. A special exception code is used to signal a data bus error.
+ // This code is equivalent to the bug check code merged with a
+ // reserved facility code and the reserved bit set.
+ //
+ // N.B. If control returns, then it is assumed that the error has been
+ // corrected.
+ //
+
+ if (ExceptionRecord->ExceptionCode == (DATA_BUS_ERROR | 0xdfff0000)) {
+
+ //
+ // N.B. The following is a workaround for a r4000 chip bug where an
+ // address privilege violation is reported as a data bus error
+ // on a load or store instruction that is the last instruction
+ // in a page.
+ //
+
+ if ((ExceptionRecord->ExceptionInformation[1] < 0x80000000) &&
+ (((ULONG)ExceptionRecord->ExceptionAddress & 0xffc) == 0xffc) &&
+ (PreviousMode != KernelMode)) {
+ if (KiEmulateReference(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+ }
+
+ KiDataBusError(ExceptionRecord, ExceptionFrame, TrapFrame);
+ goto Handled2;
+ }
+
+ //
+ // If the exception is an instruction bus error, then process the error.
+ //
+ // N.B. A special exception code is used to signal an instruction bus
+ // error. This code is equivalent to the bug check code merged
+ // with a reserved facility code and the reserved bit set.
+ //
+    // N.B. If control returns, then it is assumed that the error has been
+ // corrected.
+ //
+
+ if (ExceptionRecord->ExceptionCode == (INSTRUCTION_BUS_ERROR | 0xdfff0000)) {
+ KiInstructionBusError(ExceptionRecord, ExceptionFrame, TrapFrame);
+ goto Handled2;
+ }
+
+ //
+    // If the exception is a data misalignment, this is the first chance for
+ // handling the exception, and the current thread has enabled automatic
+ // alignment fixup, then attempt to emulate the unaligned reference.
+ //
+
+ if ((ExceptionRecord->ExceptionCode == STATUS_DATATYPE_MISALIGNMENT) &&
+ (FirstChance != FALSE) &&
+ ((KeGetCurrentThread()->AutoAlignment != FALSE) ||
+ (KeGetCurrentThread()->ApcState.Process->AutoAlignment != FALSE) ||
+ (((ExceptionRecord->ExceptionInformation[1] & 0x7fff0000) == 0x7fff0000) &&
+ (PreviousMode != KernelMode))) &&
+ (KiEmulateReference(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE)) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+
+ //
+ // If the exception is a floating exception, then attempt to emulate the
+ // operation.
+ //
+ // N.B. The pseudo status STATUS_FLOAT_STACK_CHECK is used to signify
+    //      that the exception is a floating exception and that this is the
+ // first chance for handling the exception. The floating emulation
+ // routine converts the status code to the proper floating status
+ // value.
+ //
+
+ if ((ExceptionRecord->ExceptionCode == STATUS_FLOAT_STACK_CHECK) &&
+ (KiEmulateFloating(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE)) {
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, PreviousMode);
+ goto Handled2;
+ }
+
+ //
+ // If the exception is a breakpoint, then translate it to an appropriate
+ // exception code if it is a division by zero or an integer overflow
+ // caused by multiplication.
+ //
+
+ if (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) {
+ if (ExceptionRecord->ExceptionInformation[0] == DIVIDE_BREAKPOINT) {
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_DIVIDE_BY_ZERO;
+
+ } else if ((ExceptionRecord->ExceptionInformation[0] == MULTIPLY_BREAKPOINT) ||
+ (ExceptionRecord->ExceptionInformation[0] == OVERFLOW_BREAKPOINT)) {
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_OVERFLOW;
+
+ } else if (ExceptionRecord->ExceptionInformation[0] == KDDEBUG_BREAKPOINT) {
+ TrapFrame->Fir += 4;
+ }
+ }
+
+ //
+ // Move machine state from trap and exception frames to a context frame,
+ // and increment the number of exceptions dispatched.
+ //
+
+ ContextFrame.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextFrame);
+ KeGetCurrentPrcb()->KeExceptionDispatchCount += 1;
+
+ //
+ // Select the method of handling the exception based on the previous mode.
+ //
+
+ if (PreviousMode == KernelMode) {
+
+ //
+ // Previous mode was kernel.
+ //
+ // If this is the first chance, the kernel debugger is active, and
+ // the exception is a kernel breakpoint, then give the kernel debugger
+ // a chance to handle the exception.
+ //
+ // If this is the first chance and the kernel debugger is not active
+ // or does not handle the exception, then attempt to find a frame
+ // handler to handle the exception.
+ //
+ // If this is the second chance or the exception is not handled, then
+ // if the kernel debugger is active, then give the kernel debugger a
+ // second chance to handle the exception. If the kernel debugger does
+ // not handle the exception, then bug check.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // If the kernel debugger is active, the exception is a breakpoint,
+ // and the breakpoint is handled by the kernel debugger, then give
+ // the kernel debugger a chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ KernelMode) != FALSE)) {
+
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ KernelMode,
+ FALSE)) != FALSE) {
+
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (RtlDispatchException(ExceptionRecord, &ContextFrame) != FALSE) {
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ if (KiDebugRoutine != NULL) {
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ TRUE)) != FALSE) {
+ goto Handled1;
+ }
+ }
+
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+
+ } else {
+
+ //
+ // Previous mode was user.
+ //
+ // If this is the first chance, the kernel debugger is active, the
+ // exception is a kernel breakpoint, and the current process is not
+        // being debugged, or the current process is being debugged, but
+ // the breakpoint is not a kernel breakpoint instruction, then give
+ // the kernel debugger a chance to handle the exception.
+ //
+ // If this is the first chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // transfer the exception information to the user stack, transition to
+ // user mode, and attempt to dispatch the exception to a frame based
+ // handler. If a frame based handler handles the exception, then continue
+ // execution. Otherwise, execute the raise exception system service
+ // which will call this routine a second time to process the exception.
+ //
+ // If this is the second chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // if the current process has a subsystem port, then send a message to
+ // the subsystem port and wait for a reply. If the subsystem handles the
+ // exception, then continue execution. Else terminate the thread.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // If the kernel debugger is active, the exception is a kernel
+ // breakpoint, and the current process is not being debugged,
+ // or the current process is being debugged, but the breakpoint
+ // is not a kernel breakpoint instruction, then give the kernel
+ // debugger a chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ UserMode) != FALSE) &&
+ ((PsGetCurrentProcess()->DebugPort == NULL) ||
+ ((PsGetCurrentProcess()->DebugPort != NULL) &&
+ (ExceptionRecord->ExceptionInformation[0] !=
+ KERNEL_BREAKPOINT_INSTRUCTION)))) {
+
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ UserMode,
+ FALSE)) != FALSE) {
+
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (DbgkForwardException(ExceptionRecord, TRUE, FALSE)) {
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, UserMode);
+ goto Handled2;
+ }
+
+ //
+ // Transfer exception information to the user stack, transition
+ // to user mode, and attempt to dispatch the exception to a frame
+ // based handler.
+ //
+
+ repeat:
+ try {
+
+ //
+ // Coerce the 64-bit integer register context to 32-bits
+ // and store in the 32-bit context area of the context
+ // record.
+ //
+                // N.B. This only works because the 32- and 64-bit integer
+ // register context does not overlap in the context
+ // record.
+ //
+
+ Destination = &ContextFrame.IntZero;
+ Source = &ContextFrame.XIntZero;
+ for (Index = 0; Index < 32; Index += 1) {
+ *Destination++ = (ULONG)*Source++;
+ }
+
+ //
+ // Compute length of exception record and new aligned stack
+ // address.
+ //
+
+ Length = (sizeof(EXCEPTION_RECORD) + 7) & (~7);
+ UserStack1 = (ULONG)(ContextFrame.XIntSp & (~7)) - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // exception record to the user stack area.
+ //
+
+ ProbeForWrite((PCHAR)UserStack1, Length, sizeof(QUAD));
+ RtlMoveMemory((PVOID)UserStack1, ExceptionRecord, Length);
+
+ //
+ // Compute length of context record and new aligned user stack
+ // pointer.
+ //
+
+ Length = sizeof(CONTEXT);
+ UserStack2 = UserStack1 - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack2, Length, sizeof(QUAD));
+ RtlMoveMemory((PVOID)UserStack2, &ContextFrame, sizeof(CONTEXT));
+
+ //
+ // Set address of exception record, context record, and the
+                // new stack pointer in the current trap frame.
+ //
+
+ TrapFrame->XIntSp = (LONG)UserStack2;
+ TrapFrame->XIntS8 = (LONG)UserStack2;
+ TrapFrame->XIntS0 = (LONG)UserStack1;
+ TrapFrame->XIntS1 = (LONG)UserStack2;
+
+ //
+ // Sanitize the floating status register so a recursive
+ // exception will not occur.
+ //
+
+ TrapFrame->Fsr = SANITIZE_FSR(ContextFrame.Fsr, UserMode);
+
+ //
+ // Set the address of the exception routine that will call the
+ // exception dispatcher and then return to the trap handler.
+ // The trap handler will restore the exception and trap frame
+ // context and continue execution in the routine that will
+ // call the exception dispatcher.
+ //
+
+ TrapFrame->Fir = KeUserExceptionDispatcher;
+ return;
+
+ //
+ // If an exception occurs, then copy the new exception information
+ // to an exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord1,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // If the exception is a stack overflow, then attempt
+ // to raise the stack overflow exception. Otherwise,
+ // the user's stack is not accessible, or is misaligned,
+ // and second chance processing is performed.
+ //
+
+ if (ExceptionRecord1.ExceptionCode == STATUS_STACK_OVERFLOW) {
+ ExceptionRecord1.ExceptionAddress = ExceptionRecord->ExceptionAddress;
+ RtlMoveMemory((PVOID)ExceptionRecord,
+ &ExceptionRecord1, sizeof(EXCEPTION_RECORD));
+ goto repeat;
+ }
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ UserApcPending = KeGetCurrentThread()->ApcState.UserApcPending;
+ if (DbgkForwardException(ExceptionRecord, TRUE, TRUE)) {
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, UserMode);
+ goto Handled2;
+
+ } else if (DbgkForwardException(ExceptionRecord, FALSE, TRUE)) {
+
+ //
+ // If a user APC was not previously pending and one is now
+ // pending, then the thread has been terminated and the PC
+ // must be forced to a legal address so an infinite loop does
+ // not occur for the case where a jump to an unmapped address
+            // occurred.
+ //
+
+ if ((UserApcPending == FALSE) &&
+ (KeGetCurrentThread()->ApcState.UserApcPending != FALSE)) {
+ TrapFrame->Fir = (ULONG)USPCR;
+ }
+
+ TrapFrame->Fsr = SANITIZE_FSR(TrapFrame->Fsr, UserMode);
+ goto Handled2;
+
+ } else {
+ ZwTerminateProcess(NtCurrentProcess(), ExceptionRecord->ExceptionCode);
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+ }
+ }
+
+ //
+ // Move machine state from context frame to trap and exception frames and
+ // then return to continue execution with the restored state.
+ //
+
+Handled1:
+ KeContextToKframes(TrapFrame, ExceptionFrame, &ContextFrame,
+ ContextFrame.ContextFlags, PreviousMode);
+
+ //
+ // Exception was handled by the debugger or the associated subsystem
+ // and state was modified, if necessary, using the get state and set
+ // state capabilities. Therefore the context frame does not need to
+ // be transfered to the trap and exception frames.
+ //
+
+Handled2:
+ return;
+}
+
+ULONG
+KiCopyInformation (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord1,
+ IN PEXCEPTION_RECORD ExceptionRecord2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called from an exception filter to copy the exception
+ information from one exception record to another when an exception occurs.
+
+Arguments:
+
+ ExceptionRecord1 - Supplies a pointer to the destination exception record.
+
+ ExceptionRecord2 - Supplies a pointer to the source exception record.
+
+Return Value:
+
+ A value of EXCEPTION_EXECUTE_HANDLER is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Copy one exception record to another and return a value that causes
+ // an exception handler to be executed.
+ //
+
+ RtlMoveMemory((PVOID)ExceptionRecord1,
+ (PVOID)ExceptionRecord2,
+ sizeof(EXCEPTION_RECORD));
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+
+NTSTATUS
+KeRaiseUserException(
+ IN NTSTATUS ExceptionCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function causes an exception to be raised in the calling thread's
+ user-mode context. This is accomplished by editing the trap frame the
+ kernel was entered with to point to trampoline code that raises the
+ requested exception.
+
+Arguments:
+
+ ExceptionCode - Supplies the status value to be used as the exception
+ code for the exception that is to be raised.
+
+Return Value:
+
+ The status value that should be returned by the caller.
+
+--*/
+
+{
+
+ PKTRAP_FRAME TrapFrame;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+ TrapFrame->Fir = KeRaiseUserExceptionDispatcher;
+ return ExceptionCode;
+}
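+
+//
+// N.B. The following fragment is an illustrative sketch of the intended
+// usage pattern for KeRaiseUserException. It is excluded from compilation
+// and the service name is hypothetical.
+//
+
+#if 0
+
+NTSTATUS
+NtExampleService (
+ VOID
+ )
+
+{
+
+ //
+ // Rather than returning the failure status directly, arrange for an
+ // exception with that status to be raised in the caller's user-mode
+ // context when the service returns.
+ //
+
+ return KeRaiseUserException(STATUS_INVALID_PARAMETER);
+}
+
+#endif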
diff --git a/private/ntos/ke/mips/floatem.c b/private/ntos/ke/mips/floatem.c
new file mode 100644
index 000000000..139a89c4b
--- /dev/null
+++ b/private/ntos/ke/mips/floatem.c
@@ -0,0 +1,4599 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ floatem.c
+
+Abstract:
+
+ This module implements a software emulation of the IEEE single and
+ double floating operations. It is required on MIPS processors since
+ the hardware does not fully support all of the operations required
+ by the IEEE standard. In particular, infinities and NaNs are not
+ handled by the hardware, but rather cause an exception. On receipt
+ of the exception, a software emulation of the floating operation
+ is performed to determine the real result of the operation and if
+ an exception will actually be raised.
+
+ Since floating exceptions are rather rare events, this routine is
+ written in C. Should a higher performance implementation be required,
+ then the algorithms contained herein, can be used to guide a higher
+ performance assembly language implementation.
+
+ N.B. This routine does not emulate floating loads, floating stores,
+ control to/from floating, or move to/from floating instructions.
+ These instructions either do not fault or are emulated elsewhere.
+
+ Floating point operations are carried out by unpacking the operands,
+ normalizing denormalized numbers, checking for NaNs, interpreting
+ infinities, and computing results.
+
+ Floating operands are converted to a format that has a value with the
+ appropriate number of leading zeros, an overflow bit, the mantissa, a
+ guard bit, a round bit, and a set of sticky bits.
+
+ The overflow bit is needed for addition and is also used for multiply.
+ The mantissa is 24 bits for single operations and 53 bits for double
+ operations. The guard bit and round bit are used to hold precise values
+ for normalization and rounding.
+
+ If the result of an operation is normalized, then the guard bit becomes
+ the round bit and the round bit is accumulated with the sticky bits. If
+ the result of an operation needs to be shifted left one bit for purposes
+ of normalization, then the guard bit becomes part of the mantissa and the
+ round bit is used for rounding.
+
+ The round bit plus the sticky bits are used to determine how rounding is
+ performed.
+
+Author:
+
+ David N. Cutler (davec) 16-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define signaling NaN mask values.
+//
+
+#define DOUBLE_SIGNAL_NAN_MASK (1 << (53 - 32))
+#define SINGLE_SIGNAL_NAN_MASK (1 << 24)
+
+//
+// Define quiet NaN mask values.
+//
+
+#define DOUBLE_QUIET_NAN_MASK (1 << (51 - 32))
+#define SINGLE_QUIET_NAN_MASK (1 << 22)
+
+//
+// Define quiet NaN prefix values.
+//
+
+#define DOUBLE_QUIET_NAN_PREFIX 0x7ff00000
+#define SINGLE_QUIET_NAN_PREFIX 0x7f800000
+
+//
+// Define compare function masks.
+//
+
+#define COMPARE_UNORDERED_MASK (1 << 0)
+#define COMPARE_EQUAL_MASK (1 << 1)
+#define COMPARE_LESS_MASK (1 << 2)
+#define COMPARE_ORDERED_MASK (1 << 3)
+
+//
+// Define context block structure.
+//
+
+typedef struct _FP_CONTEXT_BLOCK {
+ ULONG Fd;
+ ULONG BranchAddress;
+ PEXCEPTION_RECORD ExceptionRecord;
+ PKEXCEPTION_FRAME ExceptionFrame;
+ PKTRAP_FRAME TrapFrame;
+ ULONG Round;
+} FP_CONTEXT_BLOCK, *PFP_CONTEXT_BLOCK;
+
+//
+// Define single and double operand value structures.
+//
+
+typedef struct _FP_DOUBLE_OPERAND {
+ union {
+ struct {
+ ULONG MantissaLow;
+ LONG MantissaHigh;
+ };
+
+ LONGLONG Mantissa;
+ };
+
+ LONG Exponent;
+ LONG Sign;
+ BOOLEAN Infinity;
+ BOOLEAN Nan;
+} FP_DOUBLE_OPERAND, *PFP_DOUBLE_OPERAND;
+
+typedef struct _FP_SINGLE_OPERAND {
+ LONG Mantissa;
+ LONG Exponent;
+ LONG Sign;
+ BOOLEAN Infinity;
+ BOOLEAN Nan;
+} FP_SINGLE_OPERAND, *PFP_SINGLE_OPERAND;
+
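+//
+// N.B. The following fragment is an illustrative sketch of how a packed
+// IEEE single value maps onto the unpacked operand layout described in
+// the module abstract (hidden bit restored and mantissa shifted left two
+// places to provide the guard and round bit positions). It is excluded
+// from compilation and the routine name is hypothetical; the actual
+// unpacking, including denormalized, infinite, and NaN operands, is
+// performed by KiUnpackSingle.
+//
+
+#if 0
+
+VOID
+SketchUnpackSingle (
+ IN ULONG Value,
+ OUT PFP_SINGLE_OPERAND SingleOperand
+ )
+
+{
+
+ SingleOperand->Sign = (LONG)(Value >> 31);
+ SingleOperand->Exponent = (LONG)((Value >> 23) & 0xff);
+ SingleOperand->Mantissa = (LONG)(Value & 0x7fffff);
+
+ //
+ // Restore the hidden bit for normalized values and shift the mantissa
+ // left two places to make room for the guard and round bits.
+ //
+
+ if (SingleOperand->Exponent != 0) {
+ SingleOperand->Mantissa |= 0x800000;
+ }
+
+ SingleOperand->Mantissa <<= 2;
+
+ //
+ // Infinity and NaN classification (biased exponent of 255) is omitted
+ // from this sketch.
+ //
+
+ SingleOperand->Infinity = FALSE;
+ SingleOperand->Nan = FALSE;
+}
+
+#endif
+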
+//
+// Define forward referenced function prototypes.
+//
+
+BOOLEAN
+KiDivideByZeroDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiDivideByZeroSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+BOOLEAN
+KiInvalidCompareDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiInvalidCompareSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+BOOLEAN
+KiInvalidOperationDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ );
+
+BOOLEAN
+KiInvalidOperationLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ );
+
+BOOLEAN
+KiInvalidOperationQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ );
+
+BOOLEAN
+KiInvalidOperationSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ );
+
+BOOLEAN
+KiNormalizeDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ );
+
+BOOLEAN
+KiNormalizeLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ );
+
+BOOLEAN
+KiNormalizeQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ );
+
+BOOLEAN
+KiNormalizeSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ );
+
+ULONG
+KiSquareRootDouble (
+ IN PULARGE_INTEGER DoubleValue
+ );
+
+ULONG
+KiSquareRootSingle (
+ IN PULONG SingleValue
+ );
+
+VOID
+KiUnpackDouble (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_DOUBLE_OPERAND DoubleOperand
+ );
+
+VOID
+KiUnpackSingle (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_SINGLE_OPERAND SingleOperand
+ );
+
+BOOLEAN
+KiEmulateFloating (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate a floating operation and convert the
+ exception status to the proper value. If the exception is an unimplemented
+ operation, then the operation is emulated. Otherwise, the status code is
+ just converted to its proper value.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ A value of TRUE is returned if the floating exception is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULARGE_INTEGER AhighBhigh;
+ ULARGE_INTEGER AhighBlow;
+ ULARGE_INTEGER AlowBhigh;
+ ULARGE_INTEGER AlowBlow;
+ ULONG Carry1;
+ ULONG Carry2;
+ BOOLEAN CompareEqual;
+ ULONG CompareFunction;
+ BOOLEAN CompareLess;
+ FP_CONTEXT_BLOCK ContextBlock;
+ LARGE_INTEGER DoubleDividend;
+ LARGE_INTEGER DoubleDivisor;
+ ULARGE_INTEGER DoubleValue;
+ ULONG DoubleMantissaLow;
+ LONG DoubleMantissaHigh;
+ FP_DOUBLE_OPERAND DoubleOperand1;
+ FP_DOUBLE_OPERAND DoubleOperand2;
+ FP_DOUBLE_OPERAND DoubleOperand3;
+ LARGE_INTEGER DoubleQuotient;
+ PVOID ExceptionAddress;
+ ULONG ExponentDifference;
+ ULONG ExponentSum;
+ ULONG Format;
+ ULONG Fs;
+ ULONG Ft;
+ ULONG Function;
+ ULONG Index;
+ MIPS_INSTRUCTION Instruction;
+ ULARGE_INTEGER LargeResult;
+ LONG Longword;
+ LONG Negation;
+ union {
+ LONGLONG Quadword;
+ LARGE_INTEGER LargeValue;
+ }u;
+
+ LONG SingleMantissa;
+ FP_SINGLE_OPERAND SingleOperand1;
+ FP_SINGLE_OPERAND SingleOperand2;
+ FP_SINGLE_OPERAND SingleOperand3;
+ ULONG SingleValue;
+ ULONG StickyBits;
+
+ //
+ // Save the original exception address in case another exception
+ // occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation of the
+ // floating operation causes the emulation to be aborted. The new
+ // exception code and information is copied to the original exception
+ // record and a value of FALSE is returned.
+ //
+
+ try {
+
+ //
+ // If the exception PC is equal to the fault instruction address
+ // plus four, then the floating exception occurred in the delay
+ // slot of a branch instruction and the continuation address must
+ // be computed by emulating the branch instruction. Note that it
+ // is possible for an exception to occur when the branch instruction
+ // is read from user memory.
+ //
+
+ if ((TrapFrame->Fir + 4) == (ULONG)ExceptionRecord->ExceptionAddress) {
+ ContextBlock.BranchAddress = KiEmulateBranch(ExceptionFrame,
+ TrapFrame);
+
+ } else {
+ ContextBlock.BranchAddress = TrapFrame->Fir + 4;
+ }
+
+ //
+ // Increment the floating emulation count.
+ //
+
+ KeGetCurrentPrcb()->KeFloatingEmulationCount += 1;
+
+ //
+ // Initialize the address of the exception record, exception frame,
+ // and trap frame in the context block used during the emulation of
+ // the floating point operation.
+ //
+
+ ContextBlock.ExceptionRecord = ExceptionRecord;
+ ContextBlock.ExceptionFrame = ExceptionFrame;
+ ContextBlock.TrapFrame = TrapFrame;
+ ContextBlock.Round = ((PFSR)&TrapFrame->Fsr)->RM;
+
+ //
+ // Initialize the number of exception information parameters, set
+ // the branch address, and clear the IEEE exception value.
+ //
+
+ ExceptionRecord->NumberParameters = 6;
+ ExceptionRecord->ExceptionInformation[0] = 0;
+ ExceptionRecord->ExceptionInformation[1] = ContextBlock.BranchAddress;
+ ExceptionRecord->ExceptionInformation[2] = 0;
+ ExceptionRecord->ExceptionInformation[3] = 0;
+ ExceptionRecord->ExceptionInformation[4] = 0;
+ ExceptionRecord->ExceptionInformation[5] = 0;
+
+ //
+ // Clear all exception flags and emulate the floating point operation.
+ // The return value depends on the results of the emulation.
+ //
+
+ TrapFrame->Fsr &= ~(0x3f << 12);
+ Instruction = *((PMIPS_INSTRUCTION)ExceptionRecord->ExceptionAddress);
+ Function = Instruction.c_format.Function;
+ ContextBlock.Fd = Instruction.c_format.Fd;
+ Fs = Instruction.c_format.Fs;
+ Ft = Instruction.c_format.Ft;
+ Format = Instruction.c_format.Format;
+ Negation = 0;
+
+ //
+ // Check for illegal register specification or format code.
+ //
+
+ if (((ContextBlock.Fd & 0x1) != 0) || ((Fs & 0x1) != 0) || ((Ft & 0x1) != 0) ||
+ ((Format != FORMAT_LONGWORD) && (Format != FORMAT_QUADWORD) && (Format > FORMAT_DOUBLE))) {
+ Function = FLOAT_ILLEGAL;
+ }
+
+ //
+ // Decode operand values and dispose with NaNs.
+ //
+
+ if ((Function <= FLOAT_DIVIDE) || (Function >= FLOAT_COMPARE)) {
+
+ //
+ // The function has two operand values.
+ //
+
+ if (Format == FORMAT_SINGLE) {
+ KiUnpackSingle(Fs, &ContextBlock, &SingleOperand1);
+ KiUnpackSingle(Ft, &ContextBlock, &SingleOperand2);
+
+ //
+ // If either operand is a NaN, then check to determine if a
+ // compare instruction or other dyadic operation is being
+ // performed.
+ //
+
+ if ((SingleOperand1.Nan != FALSE) || (SingleOperand2.Nan != FALSE)) {
+ if (Function < FLOAT_COMPARE) {
+
+ //
+ // Dyadic operation.
+ //
+ // Store a quiet NaN if the invalid operation trap
+ // is disabled, or raise an exception if the invalid
+ // operation trap is enabled and either of the NaNs
+ // is a signaling NaN.
+ //
+
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ } else {
+
+ //
+ // Compare operation.
+ //
+ // Set the condition based on the predicate of
+ // the floating comparison.
+ //
+ // If the compare is a signaling compare, then
+ // raise an exception if the invalid operation
+ // trap is enabled. Otherwise, raise an exception
+ // if one of the operands is a signaling NaN.
+ //
+
+ if ((Function & COMPARE_UNORDERED_MASK) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ if ((Function & COMPARE_ORDERED_MASK) != 0) {
+ return KiInvalidCompareSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ } else {
+ return KiInvalidCompareSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ }
+ }
+
+ } else if (Function >= FLOAT_COMPARE) {
+ CompareFunction = Function;
+ Function = FLOAT_COMPARE_SINGLE;
+ }
+
+ } else if (Format == FORMAT_DOUBLE) {
+ KiUnpackDouble(Fs, &ContextBlock, &DoubleOperand1);
+ KiUnpackDouble(Ft, &ContextBlock, &DoubleOperand2);
+
+ //
+ // If either operand is a NaN, then check to determine if a
+ // compare instruction or other dyadic operation is being
+ // performed.
+ //
+
+ if ((DoubleOperand1.Nan != FALSE) || (DoubleOperand2.Nan != FALSE)) {
+ if (Function < FLOAT_COMPARE) {
+
+ //
+ // Dyadic operation.
+ //
+ // Store a quiet NaN if the invalid operation trap
+ // is disabled, or raise an exception if the invalid
+ // operation trap is enabled and either of the NaNs
+ // is a signaling NaN.
+ //
+
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else {
+
+ //
+ // Compare operation.
+ //
+ // Set the condition based on the predicate of
+ // the floating comparison.
+ //
+ // If the compare is a signaling compare, then
+ // raise an exception if the invalid operation
+ // trap is enabled. Otherwise, raise an exception
+ // if one of the operands is a signaling NaN.
+ //
+
+ if ((Function & COMPARE_UNORDERED_MASK) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ if ((Function & COMPARE_ORDERED_MASK) != 0) {
+ return KiInvalidCompareDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else {
+ return KiInvalidCompareDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ }
+ }
+
+ } else if (Function >= FLOAT_COMPARE) {
+ CompareFunction = Function;
+ Function = FLOAT_COMPARE_DOUBLE;
+ }
+
+ } else {
+ Function = FLOAT_ILLEGAL;
+ }
+
+ } else {
+
+ //
+ // The function has one operand value.
+ //
+
+ if (Format == FORMAT_SINGLE) {
+ KiUnpackSingle(Fs, &ContextBlock, &SingleOperand1);
+
+ //
+ // If the operand is a NaN and the function is not a convert
+ // operation, then store a quiet NaN if the invalid operation
+ // trap is disabled, or raise an exception if the invalid
+ // operation trap is enabled and the operand is a signaling
+ // NaN.
+ //
+
+ if ((SingleOperand1.Nan != FALSE) &&
+ ((Function < FLOAT_ROUND_QUADWORD) ||
+ (Function > FLOAT_CONVERT_QUADWORD) ||
+ ((Function > FLOAT_FLOOR_LONGWORD) &&
+ (Function < FLOAT_CONVERT_SINGLE)))) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand1);
+
+ }
+
+ } else if (Format == FORMAT_DOUBLE) {
+ KiUnpackDouble(Fs, &ContextBlock, &DoubleOperand1);
+
+ //
+ // If the operand is a NaN and the function is not a convert
+ // operation, then store a quiet NaN if the invalid operation
+ // trap is disabled, or raise an exception if the invalid
+ // operation trap is enabled and the operand is a signaling
+ // NaN.
+ //
+
+ if ((DoubleOperand1.Nan != FALSE) &&
+ ((Function < FLOAT_ROUND_QUADWORD) ||
+ (Function > FLOAT_CONVERT_QUADWORD) ||
+ ((Function > FLOAT_FLOOR_LONGWORD) &&
+ (Function < FLOAT_CONVERT_SINGLE)))) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand1);
+ }
+
+ } else if ((Format == FORMAT_LONGWORD) &&
+ (Function >= FLOAT_CONVERT_SINGLE)) {
+ Longword = KiGetRegisterValue(Fs + 32,
+ ContextBlock.ExceptionFrame,
+ ContextBlock.TrapFrame);
+
+ } else if ((Format == FORMAT_QUADWORD) &&
+ (Function >= FLOAT_CONVERT_SINGLE)) {
+ u.LargeValue.LowPart = KiGetRegisterValue(Fs + 32,
+ ContextBlock.ExceptionFrame,
+ ContextBlock.TrapFrame);
+
+ u.LargeValue.HighPart = KiGetRegisterValue(Fs + 33,
+ ContextBlock.ExceptionFrame,
+ ContextBlock.TrapFrame);
+
+ } else {
+ Function = FLOAT_ILLEGAL;
+ }
+ }
+
+ //
+ // Case to the proper function routine to emulate the operation.
+ //
+
+ switch (Function) {
+
+ //
+ // Floating subtract operation.
+ //
+ // Floating subtract is accomplished by complementing the sign
+ // of the second operand and then performing an add operation.
+ //
+
+ case FLOAT_SUBTRACT:
+ Negation = 0x1;
+
+ //
+ // Floating add operation.
+ //
+ // Floating add is accomplished using signed magnitude addition.
+ //
+ // The exponent difference is calculated and the smaller number
+ // is right shifted by the specified amount, but no more than
+ // the width of the operand values (i.e., 26 for single and 55
+ // for double). The shifted out value is saved for rounding.
+ //
+ // If the signs of the two operands are the same, then they
+ // are added together after having performed the alignment
+ // shift.
+ //
+ // If the signs of the two operands are different, then the
+ // sign of the result is the sign of the larger operand and
+ // the smaller operand is subtracted from the larger operand.
+ // In order to avoid making a double level test (i.e., one on
+ // the exponents, and one on the mantissas if the exponents
+ // are equal), it is possible that the result of the subtract
+ // could be negative (if the exponents are equal). If this
+ // occurs, then the result sign and mantissa are complemented
+ // to obtain the correct result.
+ //
+
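+ //
+ // N.B. The following fragment is an illustrative, condensed restatement
+ // of the alignment and signed magnitude combination described above,
+ // shown for the single format. It assumes that the first operand has
+ // the greater or equal exponent and that neither operand is infinite
+ // or NaN. It is excluded from compilation and the routine name is
+ // hypothetical.
+ //
+
+#if 0
+
+ VOID
+ SketchAddSingle (
+ IN OUT PFP_SINGLE_OPERAND Larger,
+ IN PFP_SINGLE_OPERAND Smaller,
+ OUT PULONG StickyBits
+ )
+
+ {
+
+ ULONG Shift;
+ LONG AlignedMantissa;
+
+ //
+ // Align the smaller operand with the larger operand, capping the
+ // shift at the 26-bit operand width and accumulating the shifted
+ // out bits as sticky bits.
+ //
+
+ Shift = Larger->Exponent - Smaller->Exponent;
+ if (Shift > 26) {
+ Shift = 26;
+ }
+
+ *StickyBits = Smaller->Mantissa & ((1 << Shift) - 1);
+ AlignedMantissa = Smaller->Mantissa >> Shift;
+
+ //
+ // Like signs add the magnitudes; unlike signs subtract the aligned
+ // magnitude and correct the sign if the result is negative.
+ //
+
+ if ((Larger->Sign ^ Smaller->Sign) == 0) {
+ Larger->Mantissa += AlignedMantissa;
+
+ } else {
+ if (*StickyBits != 0) {
+ Larger->Mantissa -= 1;
+ }
+
+ Larger->Mantissa -= AlignedMantissa;
+ if (Larger->Mantissa < 0) {
+ Larger->Mantissa = -Larger->Mantissa;
+ Larger->Sign ^= 0x1;
+ }
+ }
+ }
+
+#endif
+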
+ case FLOAT_ADD:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Complement the sign of the second operand if the operation
+ // is subtraction.
+ //
+
+ SingleOperand2.Sign ^= Negation;
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ SingleOperand3 = SingleOperand2;
+ SingleOperand2 = SingleOperand1;
+ SingleOperand1 = SingleOperand3;
+ }
+
+ //
+ // Compute the exponent difference and shift the smaller
+ // mantissa right by the difference value or 26, whichever
+ // is smaller. The bits shifted out are termed the sticky
+ // bits and are used later in the rounding operation.
+ //
+
+ ExponentDifference =
+ SingleOperand1.Exponent - SingleOperand2.Exponent;
+
+ if (ExponentDifference > 26) {
+ ExponentDifference = 26;
+ }
+
+ StickyBits =
+ SingleOperand2.Mantissa & ((1 << ExponentDifference) - 1);
+ SingleMantissa = SingleOperand2.Mantissa >> ExponentDifference;
+
+ //
+ // If the operands both have the same sign, then perform the
+ // operation by adding the values together. Otherwise, perform
+ // the operation by subtracting the second operand from the
+ // first operand.
+ //
+
+ if ((SingleOperand1.Sign ^ SingleOperand2.Sign) == 0) {
+ SingleOperand1.Mantissa += SingleMantissa;
+
+ } else {
+ if ((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity != FALSE)) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ } else if (SingleOperand1.Infinity == FALSE) {
+ if (StickyBits != 0) {
+ SingleOperand1.Mantissa -= 1;
+ }
+
+ SingleOperand1.Mantissa -= SingleMantissa;
+ if (SingleOperand1.Mantissa < 0) {
+ SingleOperand1.Mantissa = -SingleOperand1.Mantissa;
+ SingleOperand1.Sign ^= 0x1;
+ }
+ }
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Complement the sign of the second operand if the operation
+ // is subtraction.
+ //
+
+ DoubleOperand2.Sign ^= Negation;
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ DoubleOperand3 = DoubleOperand2;
+ DoubleOperand2 = DoubleOperand1;
+ DoubleOperand1 = DoubleOperand3;
+ }
+
+ //
+ // Compute the exponent difference and shift the smaller
+ // mantissa right by the difference value or 55, whichever
+ // is smaller. The bits shifted out are termed the sticky
+ // bits and are used later in the rounding operation.
+ //
+
+ ExponentDifference =
+ DoubleOperand1.Exponent - DoubleOperand2.Exponent;
+
+ if (ExponentDifference > 55) {
+ ExponentDifference = 55;
+ }
+
+ if (ExponentDifference >= 32) {
+ ExponentDifference -= 32;
+ StickyBits = (DoubleOperand2.MantissaLow) |
+ (DoubleOperand2.MantissaHigh & ((1 << ExponentDifference) - 1));
+
+ DoubleMantissaLow =
+ DoubleOperand2.MantissaHigh >> ExponentDifference;
+
+ DoubleMantissaHigh = 0;
+
+ } else if (ExponentDifference > 0) {
+ StickyBits =
+ DoubleOperand2.MantissaLow & ((1 << ExponentDifference) - 1);
+
+ DoubleMantissaLow =
+ (DoubleOperand2.MantissaLow >> ExponentDifference) |
+ (DoubleOperand2.MantissaHigh << (32 - ExponentDifference));
+
+ DoubleMantissaHigh =
+ DoubleOperand2.MantissaHigh >> ExponentDifference;
+
+ } else {
+ StickyBits = 0;
+ DoubleMantissaLow = DoubleOperand2.MantissaLow;
+ DoubleMantissaHigh = DoubleOperand2.MantissaHigh;
+ }
+
+ //
+ // If the operands both have the same sign, then perform the
+ // operation by adding the values together. Otherwise, perform
+ // the operation by subtracting the second operand from the
+ // first operand.
+ //
+
+ if ((DoubleOperand1.Sign ^ DoubleOperand2.Sign) == 0) {
+ DoubleOperand1.MantissaLow += DoubleMantissaLow;
+ DoubleOperand1.MantissaHigh += DoubleMantissaHigh;
+ if (DoubleOperand1.MantissaLow < DoubleMantissaLow) {
+ DoubleOperand1.MantissaHigh += 1;
+ }
+
+ } else {
+ if ((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity != FALSE)) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } else if (DoubleOperand1.Infinity == FALSE) {
+ if (StickyBits != 0) {
+ if (DoubleOperand1.MantissaLow < 1) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.MantissaLow -= 1;
+ }
+
+ if (DoubleOperand1.MantissaLow < DoubleMantissaLow) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.MantissaLow -= DoubleMantissaLow;
+ DoubleOperand1.MantissaHigh -= DoubleMantissaHigh;
+ if (DoubleOperand1.MantissaHigh < 0) {
+ DoubleOperand1.MantissaLow = ~DoubleOperand1.MantissaLow + 1;
+ DoubleOperand1.MantissaHigh = -DoubleOperand1.MantissaHigh;
+ if (DoubleOperand1.MantissaLow != 0) {
+ DoubleOperand1.MantissaHigh -= 1;
+ }
+
+ DoubleOperand1.Sign ^= 0x1;
+ }
+ }
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating multiply operation.
+ //
+ // Floating multiply is accomplished using unsigned multiplies
+ // of the mantissa values and adding the partial results together
+ // to form the total product.
+ //
+ // The two mantissa values are preshifted such that the final
+ // result is properly aligned.
+ //
+
+ case FLOAT_MULTIPLY:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ SingleOperand3 = SingleOperand2;
+ SingleOperand2 = SingleOperand1;
+ SingleOperand1 = SingleOperand3;
+ }
+
+ //
+ // If the first operand is infinite and the second operand is
+ // zero, then an invalid operation is specified.
+ //
+
+ if ((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ }
+
+ //
+ // Preshift the operand mantissas so the result will be a
+ // properly aligned 64-bit value and then unsigned multiply
+ // the two mantissa values. The single result is the high part
+ // of the 64-bit product and the sticky bits are the low part
+ // of the 64-bit product.
+ //
+
+ LargeResult.QuadPart = UInt32x32To64(SingleOperand1.Mantissa << (32 - 26),
+ SingleOperand2.Mantissa << 1);
+
+ SingleOperand1.Mantissa = LargeResult.HighPart;
+ StickyBits = LargeResult.LowPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ SingleOperand1.Exponent +=
+ SingleOperand2.Exponent - SINGLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Reorder the operands according to their exponent value.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ DoubleOperand3 = DoubleOperand2;
+ DoubleOperand2 = DoubleOperand1;
+ DoubleOperand1 = DoubleOperand3;
+ }
+
+ //
+ // If the first operand is infinite and the second operand is
+ // zero, then an invalid operation is specified.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ }
+
+ //
+ // Preshift the operand mantissas so the result will be a
+ // properly aligned 128-bit value and then unsigned multiply
+ // the two mantissa values. The double result is the high part
+ // of the 128-bit product and the sticky bits are the low part
+ // of the 128-bit product.
+ //
+
+ DoubleOperand1.MantissaHigh =
+ (DoubleOperand1.MantissaHigh << 1) |
+ (DoubleOperand1.MantissaLow >> 31);
+
+ DoubleOperand1.MantissaLow <<= 1;
+ DoubleOperand2.MantissaHigh =
+ (DoubleOperand2.MantissaHigh << (64 - 55)) |
+ (DoubleOperand2.MantissaLow >> (32 - (64 -55)));
+
+ DoubleOperand2.MantissaLow <<= (64 - 55);
+
+ //
+ // The 128-bit product is formed by multiplying and adding
+ // all the cross product values.
+ //
+ // Consider the operands (A and B) as being composed of two
+ // parts Ahigh, Alow, Bhigh, and Blow. The cross product sum
+ // is then:
+ //
+ // Ahigh * Bhigh * 2^64 +
+ // Ahigh * Blow * 2^32 +
+ // Alow * Bhigh * 2^32 +
+ // Alow * Blow
+ //
+
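+ //
+ // N.B. The following fragment is an illustrative, condensed restatement
+ // of the cross product sum above using 64-bit arithmetic. It is
+ // excluded from compilation and the routine and parameter names are
+ // hypothetical.
+ //
+
+#if 0
+
+ VOID
+ SketchMultiply64x64 (
+ IN ULARGE_INTEGER MultiplicandA,
+ IN ULARGE_INTEGER MultiplicandB,
+ OUT PULARGE_INTEGER ProductHigh,
+ OUT PULARGE_INTEGER ProductLow
+ )
+
+ {
+
+ ULONGLONG AhighBhigh;
+ ULONGLONG AhighBlow;
+ ULONGLONG AlowBhigh;
+ ULONGLONG AlowBlow;
+ ULONGLONG Middle;
+
+ //
+ // Form the four 32x32 partial products.
+ //
+
+ AhighBhigh = UInt32x32To64(MultiplicandA.HighPart, MultiplicandB.HighPart);
+ AhighBlow = UInt32x32To64(MultiplicandA.HighPart, MultiplicandB.LowPart);
+ AlowBhigh = UInt32x32To64(MultiplicandA.LowPart, MultiplicandB.HighPart);
+ AlowBlow = UInt32x32To64(MultiplicandA.LowPart, MultiplicandB.LowPart);
+
+ //
+ // Sum the terms that contribute to bits 32 through 63 of the product;
+ // the sum of three 32-bit values cannot overflow 64 bits.
+ //
+
+ Middle = (AhighBlow & 0xffffffff) + (AlowBhigh & 0xffffffff) + (AlowBlow >> 32);
+ ProductLow->LowPart = (ULONG)AlowBlow;
+ ProductLow->HighPart = (ULONG)Middle;
+
+ //
+ // Accumulate the high 64 bits of the product, including the carry out
+ // of the middle sum.
+ //
+
+ ProductHigh->QuadPart = AhighBhigh + (AhighBlow >> 32) +
+ (AlowBhigh >> 32) + (Middle >> 32);
+ }
+
+#endif
+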
+ AhighBhigh.QuadPart = UInt32x32To64(DoubleOperand1.MantissaHigh,
+ DoubleOperand2.MantissaHigh);
+
+ AhighBlow.QuadPart = UInt32x32To64(DoubleOperand1.MantissaHigh,
+ DoubleOperand2.MantissaLow);
+
+ AlowBhigh.QuadPart = UInt32x32To64(DoubleOperand1.MantissaLow,
+ DoubleOperand2.MantissaHigh);
+
+ AlowBlow.QuadPart = UInt32x32To64(DoubleOperand1.MantissaLow,
+ DoubleOperand2.MantissaLow);
+
+ AlowBlow.HighPart += AhighBlow.LowPart;
+ if (AlowBlow.HighPart < AhighBlow.LowPart) {
+ Carry1 = 1;
+
+ } else {
+ Carry1 = 0;
+ }
+
+ AlowBlow.HighPart += AlowBhigh.LowPart;
+ if (AlowBlow.HighPart < AlowBhigh.LowPart) {
+ Carry1 += 1;
+ }
+
+ DoubleOperand1.MantissaLow = AhighBlow.HighPart + Carry1;
+ if (DoubleOperand1.MantissaLow < Carry1) {
+ Carry2 = 1;
+
+ } else {
+ Carry2 = 0;
+ }
+
+ DoubleOperand1.MantissaLow += AlowBhigh.HighPart;
+ if (DoubleOperand1.MantissaLow < AlowBhigh.HighPart) {
+ Carry2 += 1;
+ }
+
+ DoubleOperand1.MantissaLow += AhighBhigh.LowPart;
+ if (DoubleOperand1.MantissaLow < AhighBhigh.LowPart) {
+ Carry2 += 1;
+ }
+
+ DoubleOperand1.MantissaHigh = AhighBhigh.HighPart + Carry2;
+ StickyBits = AlowBlow.HighPart | AlowBlow.LowPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ DoubleOperand1.Exponent +=
+ DoubleOperand2.Exponent - DOUBLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating divide operation.
+ //
+ // Floating division is accomplished by repeated subtraction using
+ // a one-bit-at-a-time algorithm. The number of division
+ // steps performed is equal to the mantissa size plus one guard
+ // bit.
+ //
+ // The sticky bits are the remainder after the specified number
+ // of division steps.
+ //
+
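+ //
+ // N.B. The following fragment is an illustrative sketch of the
+ // one-bit-at-a-time restoring division described above, shown for the
+ // single format (26 quotient bits); the double case is identical except
+ // that 55 steps are performed over a 64-bit dividend. It is excluded
+ // from compilation and the routine name is hypothetical.
+ //
+
+#if 0
+
+ ULONG
+ SketchDivideMantissa (
+ IN ULONG Dividend,
+ IN ULONG Divisor,
+ OUT PULONG Remainder
+ )
+
+ {
+
+ ULONG Index;
+ ULONG Quotient;
+
+ //
+ // Develop one quotient bit per step; whatever dividend is left over
+ // after the final step supplies the sticky bits for rounding.
+ //
+
+ Quotient = 0;
+ for (Index = 0; Index < 26; Index += 1) {
+ Quotient <<= 1;
+ if (Dividend >= Divisor) {
+ Dividend -= Divisor;
+ Quotient |= 1;
+ }
+
+ Dividend <<= 1;
+ }
+
+ *Remainder = Dividend;
+ return Quotient;
+ }
+
+#endif
+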
+ case FLOAT_DIVIDE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the first operand is infinite and the second operand
+ // is infinite, or both operands are zero, then an invalid
+ // operation is specified.
+ //
+
+ if (((SingleOperand1.Infinity != FALSE) &&
+ (SingleOperand2.Infinity != FALSE)) ||
+ ((SingleOperand1.Infinity == FALSE) &&
+ (SingleOperand1.Mantissa == 0) &&
+ (SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0))) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand2);
+
+ }
+
+ //
+ // If the second operand is zero, then a divide by zero
+ // operation is specified.
+ //
+
+ if ((SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ return KiDivideByZeroSingle(&ContextBlock,
+ &SingleOperand1,
+ &SingleOperand2);
+ }
+
+ //
+ // If the first operand is infinite, then the result is
+ // infinite. Otherwise, if the second operand is infinite,
+ // then the result is zero (note that both operands cannot
+ // be infinite).
+ //
+
+ if (SingleOperand1.Infinity != FALSE) {
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (SingleOperand2.Infinity != FALSE) {
+ SingleOperand1.Sign ^= SingleOperand2.Sign;
+ SingleOperand1.Exponent = 0;
+ SingleOperand1.Mantissa = 0;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ }
+
+ //
+ // Perform divide operation by repeating a single bit
+ // divide step 26 iterations.
+ //
+
+ SingleOperand3.Mantissa = 0;
+ for (Index = 0; Index < 26; Index += 1) {
+ SingleOperand3.Mantissa <<=1;
+ if (SingleOperand1.Mantissa >= SingleOperand2.Mantissa) {
+ SingleOperand1.Mantissa -= SingleOperand2.Mantissa;
+ SingleOperand3.Mantissa |= 1;
+ }
+
+ SingleOperand1.Mantissa <<= 1;
+ }
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ SingleOperand3.Sign = SingleOperand1.Sign ^ SingleOperand2.Sign;
+ SingleOperand3.Exponent = SingleOperand1.Exponent -
+ SingleOperand2.Exponent + SINGLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ SingleOperand3.Infinity = FALSE;
+ SingleOperand3.Nan = FALSE;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand3,
+ SingleOperand1.Mantissa);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the first operand is infinite and the second operand
+ // is infinite, or both operands are zero, then an invalid
+ // operation is specified.
+ //
+
+ if (((DoubleOperand1.Infinity != FALSE) &&
+ (DoubleOperand2.Infinity != FALSE)) ||
+ ((DoubleOperand1.Infinity == FALSE) &&
+ (DoubleOperand1.MantissaHigh == 0) &&
+ (DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0))) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ }
+
+ //
+ // If the second operand is zero, then a divide by zero
+ // operation is specified.
+ //
+
+ if ((DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ return KiDivideByZeroDouble(&ContextBlock,
+ &DoubleOperand1,
+ &DoubleOperand2);
+ }
+
+ //
+ // If the first operand is infinite, then the result is
+ // infinite. Otherwise, if the second operand is infinite,
+ // then the result is zero (note that both operands cannot
+ // be infinite).
+ //
+
+ if (DoubleOperand1.Infinity != FALSE) {
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else if (DoubleOperand2.Infinity != FALSE) {
+ DoubleOperand1.Sign ^= DoubleOperand2.Sign;
+ DoubleOperand1.Exponent = 0;
+ DoubleOperand1.MantissaHigh = 0;
+ DoubleOperand1.MantissaLow = 0;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ }
+
+ //
+ // Perform divide operation by repeating a single bit
+ // divide step 55 iterations.
+ //
+
+ DoubleDividend.LowPart = DoubleOperand1.MantissaLow;
+ DoubleDividend.HighPart = DoubleOperand1.MantissaHigh;
+ DoubleDivisor.LowPart = DoubleOperand2.MantissaLow;
+ DoubleDivisor.HighPart = DoubleOperand2.MantissaHigh;
+ DoubleQuotient.LowPart = 0;
+ DoubleQuotient.HighPart = 0;
+ for (Index = 0; Index < 55; Index += 1) {
+ DoubleQuotient.HighPart =
+ (DoubleQuotient.HighPart << 1) |
+ DoubleQuotient.LowPart >> 31;
+
+ DoubleQuotient.LowPart <<= 1;
+ if (DoubleDividend.QuadPart >= DoubleDivisor.QuadPart) {
+ DoubleDividend.QuadPart -= DoubleDivisor.QuadPart;
+ DoubleQuotient.LowPart |= 1;
+ }
+
+ DoubleDividend.HighPart =
+ (DoubleDividend.HighPart << 1) |
+ DoubleDividend.LowPart >> 31;
+
+ DoubleDividend.LowPart <<= 1;
+ }
+
+ DoubleOperand3.MantissaLow = DoubleQuotient.LowPart;
+ DoubleOperand3.MantissaHigh = DoubleQuotient.HighPart;
+
+ //
+ // Compute the sign and exponent of the result.
+ //
+
+ DoubleOperand3.Sign = DoubleOperand1.Sign ^ DoubleOperand2.Sign;
+ DoubleOperand3.Exponent = DoubleOperand1.Exponent -
+ DoubleOperand2.Exponent + DOUBLE_EXPONENT_BIAS;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ DoubleOperand3.Infinity = FALSE;
+ DoubleOperand3.Nan = FALSE;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand3,
+ DoubleDividend.LowPart | DoubleDividend.HighPart);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating square root.
+ //
+
+ case FLOAT_SQUARE_ROOT:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is plus infinity, then the result is
+ // plus infinity, or if the operand is plus or minus
+ // zero, then the result is plus or minus zero.
+ //
+
+ if (((SingleOperand1.Sign == 0) &&
+ (SingleOperand1.Infinity != FALSE)) ||
+ (SingleOperand1.Mantissa == 0)) {
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+ }
+
+ //
+ // If the operand is negative, then the operation is
+ // invalid.
+ //
+
+ if (SingleOperand1.Sign != 0) {
+ return KiInvalidOperationSingle(&ContextBlock,
+ FALSE,
+ &SingleOperand1,
+ &SingleOperand1);
+ }
+
+ //
+ // The only case remaining that could cause an exception
+ // is a denormalized source value. The square root of a
+ // denormalized value is computed by:
+ //
+ // 1. Converting the value to a normalized value with
+ // an exponent equal to the denormalization shift count
+ // plus the bias of the exponent plus one.
+ //
+ // 2. Computing the square root of the value and unpacking
+ // the result.
+ //
+ // 3. Converting the shift count back to a normalization
+ // shift count.
+ //
+ // 4. Rounding and packing the resultant value.
+ //
+ // N.B. The square root of all denormalized numbers is a
+ // normalized number.
+ //
+
+ SingleOperand1.Exponent = (SINGLE_EXPONENT_BIAS + 1 +
+ SingleOperand1.Exponent) << 23;
+
+ SingleValue = (SingleOperand1.Mantissa & ~(1 << 25)) >> 2;
+ SingleValue |= SingleOperand1.Exponent;
+ StickyBits = KiSquareRootSingle(&SingleValue);
+ SingleOperand1.Exponent = (SingleValue >> 23) -
+ ((SINGLE_EXPONENT_BIAS + 1) / 2);
+
+ SingleOperand1.Mantissa = ((SingleValue &
+ 0x7fffff) | 0x800000) << 2;
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is plus infinity, then the result is
+ // plus infinity, or if the operand is plus or minus
+ // zero, then the result is plus or minus zero.
+ //
+
+ if (((DoubleOperand1.Sign == 0) &&
+ (DoubleOperand1.Infinity != FALSE)) ||
+ (DoubleOperand1.MantissaHigh == 0)) {
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+ }
+
+ //
+ // If the operand is negative, then the operation is
+ // invalid.
+ //
+
+ if (DoubleOperand1.Sign != 0) {
+ return KiInvalidOperationDouble(&ContextBlock,
+ FALSE,
+ &DoubleOperand1,
+ &DoubleOperand1);
+ }
+
+ //
+ // The only case remaining that could cause an exception
+ // is a denormalized source value. The square root of a
+ // denormalized value is computed by:
+ //
+ // 1. Converting the value to a normalized value with
+ // an exponent equal to the denormalization shift count
+ // plus the bias of the exponent plus one.
+ //
+ // 2. Computing the square root of the value and unpacking
+ // the result.
+ //
+ // 3. Converting the shift count back to a normalization
+ // shift count.
+ //
+ // 4. Rounding and packing the resultant value.
+ //
+ // N.B. The square root of all denormalized numbers is a
+ // normalized number.
+ //
+
+ DoubleOperand1.Exponent = (DOUBLE_EXPONENT_BIAS + 1 +
+ DoubleOperand1.Exponent) << 20;
+
+ DoubleValue.HighPart = (DoubleOperand1.MantissaHigh & ~(1 << 22)) >> 2;
+ DoubleValue.LowPart = (DoubleOperand1.MantissaHigh << 30) |
+ (DoubleOperand1.MantissaLow >> 2);
+
+ DoubleValue.HighPart |= DoubleOperand1.Exponent;
+ StickyBits = KiSquareRootDouble(&DoubleValue);
+ DoubleOperand1.Exponent = (DoubleValue.HighPart >> 20) -
+ ((DOUBLE_EXPONENT_BIAS + 1) / 2);
+
+ DoubleOperand1.MantissaLow = DoubleValue.LowPart << 2;
+ DoubleOperand1.MantissaHigh = ((DoubleValue.HighPart &
+ 0xfffff) | 0x100000) << 2;
+
+ DoubleOperand1.MantissaHigh |= (DoubleValue.LowPart >> 30);
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating absolute operation.
+ //
+ // Floating absolute is accomplished by clearing the sign
+ // of the floating value.
+ //
+
+ case FLOAT_ABSOLUTE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Clear the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ SingleOperand1.Sign = 0;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Clear the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ DoubleOperand1.Sign = 0;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating move operation.
+ //
+ // Floating move is accomplished by moving the source operand
+ // to the destination register.
+ //
+
+ case FLOAT_MOVE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Normalize the result and store in the destination
+ // register.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Normalize the result and store in the destination
+ // register.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating negate operation.
+ //
+ // Floating negate is accomplished by complementing the sign
+ // of the floating value.
+ //
+
+ case FLOAT_NEGATE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // Complement the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ SingleOperand1.Sign ^= 0x1;
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // Complement the sign, normalize the result, and store in the
+ // destination register.
+ //
+
+ DoubleOperand1.Sign ^= 0x1;
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating compare single.
+ //
+ // This operation is performed after having separated out NaNs,
+ // and therefore the only comparison predicates left are equal
+ // and less.
+ //
+ // Floating compare single is accomplished by comparing signs,
+ // then exponents, and finally the mantissa if necessary.
+ //
+ // N.B. The sign of zero is ignored.
+ //
+
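+ //
+ // N.B. The following fragment is an illustrative sketch of the ordered
+ // comparison described above for unpacked single operands. It assumes
+ // that both operands are finite, that neither is a NaN, and that zero
+ // operands have already been forced positive as in the code below. It
+ // is excluded from compilation and the routine name is hypothetical.
+ //
+
+#if 0
+
+ LONG
+ SketchCompareSingle (
+ IN PFP_SINGLE_OPERAND Operand1,
+ IN PFP_SINGLE_OPERAND Operand2
+ )
+
+ {
+
+ LONG Order;
+
+ //
+ // A nonnegative value is greater than a negative value.
+ //
+
+ if (Operand1->Sign != Operand2->Sign) {
+ return (Operand1->Sign == 0) ? 1 : -1;
+ }
+
+ //
+ // For operands of equal sign, compare exponents and then mantissas;
+ // if both operands are negative, the sense of the comparison is
+ // reversed.
+ //
+
+ if (Operand1->Exponent != Operand2->Exponent) {
+ Order = (Operand1->Exponent > Operand2->Exponent) ? 1 : -1;
+
+ } else if (Operand1->Mantissa != Operand2->Mantissa) {
+ Order = (Operand1->Mantissa > Operand2->Mantissa) ? 1 : -1;
+
+ } else {
+ Order = 0;
+ }
+
+ return (Operand1->Sign == 0) ? Order : -Order;
+ }
+
+#endif
+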
+ case FLOAT_COMPARE_SINGLE:
+
+ //
+ // If either operand is zero, then set the sign of the operand
+ // positive.
+ //
+
+ if ((SingleOperand1.Infinity == FALSE) &&
+ (SingleOperand1.Mantissa == 0)) {
+ SingleOperand1.Sign = 0;
+ SingleOperand1.Exponent = - 23;
+ }
+
+ if ((SingleOperand2.Infinity == FALSE) &&
+ (SingleOperand2.Mantissa == 0)) {
+ SingleOperand2.Sign = 0;
+ SingleOperand2.Exponent = - 23;
+ }
+
+ //
+ // Compare signs first.
+ //
+
+ if (SingleOperand1.Sign < SingleOperand2.Sign) {
+
+ //
+ // The first operand is greater than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand1.Sign > SingleOperand2.Sign) {
+
+ //
+ // The first operand is less than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+
+ //
+ // The operand signs are equal.
+ //
+ // If the sign of the operand is negative, then the sense of
+ // the comparison is reversed.
+ //
+
+ if (SingleOperand1.Sign == 0) {
+
+ //
+ // Compare positive operand with positive operand.
+ //
+
+ if (SingleOperand1.Exponent > SingleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand1.Exponent < SingleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (SingleOperand1.Mantissa > SingleOperand2.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand1.Mantissa < SingleOperand2.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+
+ } else {
+
+ //
+ // Compare negative operand with negative operand.
+ //
+
+ if (SingleOperand2.Exponent > SingleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand2.Exponent < SingleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (SingleOperand2.Mantissa > SingleOperand1.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (SingleOperand2.Mantissa < SingleOperand1.Mantissa) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+ }
+
+ //
+ // Form the condition code using the comparison information
+ // and the compare function predicate bits.
+ //
+
+ if (((CompareLess != FALSE) &&
+ ((CompareFunction & COMPARE_LESS_MASK) != 0)) ||
+ ((CompareEqual != FALSE) &&
+ ((CompareFunction & COMPARE_EQUAL_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ TrapFrame->Fir = ContextBlock.BranchAddress;
+ return TRUE;
+
+ //
+ // Floating compare double.
+ //
+ // This operation is performed after having separated out NaNs,
+ // and therefore the only comparison predicates left are equal
+ // and less.
+ //
+ // Floating compare double is accomplished by comparing signs,
+ // then exponents, and finally the mantissa if necessary.
+ //
+ // N.B. The sign of zero is ignored.
+ //
+
+ case FLOAT_COMPARE_DOUBLE:
+
+ //
+ // If either operand is zero, then set the sign of the operand
+ // positive.
+ //
+
+ if ((DoubleOperand1.Infinity == FALSE) &&
+ (DoubleOperand1.MantissaHigh == 0)) {
+ DoubleOperand1.Sign = 0;
+ DoubleOperand1.Exponent = - 52;
+ }
+
+ if ((DoubleOperand2.Infinity == FALSE) &&
+ (DoubleOperand2.MantissaHigh == 0)) {
+ DoubleOperand2.Sign = 0;
+ DoubleOperand2.Exponent = - 52;
+ }
+
+ //
+ // Compare signs first.
+ //
+
+ if (DoubleOperand1.Sign < DoubleOperand2.Sign) {
+
+ //
+ // The first operand is greater than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.Sign > DoubleOperand2.Sign) {
+
+ //
+ // The first operand is less than the second operand.
+ //
+
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+
+ //
+ // The operand signs are equal.
+ //
+ // If the sign of the operand is negative, then the sense of
+ // the comparison is reversed.
+ //
+
+ if (DoubleOperand1.Sign == 0) {
+
+ //
+ // Compare positive operand with positive operand.
+ //
+
+ if (DoubleOperand1.Exponent > DoubleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.Exponent < DoubleOperand2.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand1.MantissaHigh >
+ DoubleOperand2.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.MantissaHigh <
+ DoubleOperand2.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand1.MantissaLow >
+ DoubleOperand2.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand1.MantissaLow <
+ DoubleOperand2.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+
+ } else {
+
+ //
+ // Compare negative operand with negative operand.
+ //
+
+ if (DoubleOperand2.Exponent > DoubleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.Exponent < DoubleOperand1.Exponent) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand2.MantissaHigh >
+ DoubleOperand1.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.MantissaHigh <
+ DoubleOperand1.MantissaHigh) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ if (DoubleOperand2.MantissaLow >
+ DoubleOperand1.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = FALSE;
+
+ } else if (DoubleOperand2.MantissaLow <
+ DoubleOperand1.MantissaLow) {
+ CompareEqual = FALSE;
+ CompareLess = TRUE;
+
+ } else {
+ CompareEqual = TRUE;
+ CompareLess = FALSE;
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // Form the condition code using the comparison information
+ // and the compare function predicate bits.
+ //
+
+ if (((CompareLess != FALSE) &&
+ ((CompareFunction & COMPARE_LESS_MASK) != 0)) ||
+ ((CompareEqual != FALSE) &&
+ ((CompareFunction & COMPARE_EQUAL_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->CC = 1;
+
+ } else {
+ ((PFSR)&TrapFrame->Fsr)->CC = 0;
+ }
+
+ TrapFrame->Fir = ContextBlock.BranchAddress;
+ return TRUE;
+
+ //
+ // Floating convert to single.
+ //
+ // This operation is only legal for conversion from quadword,
+ // longword, and double formats to single format. This operation
+ // cannot be used to convert from a single format to a single format.
+ //
+ // Floating conversion to single is accomplished by forming a
+ // single floating operand and then normalizing and storing the
+ // result value.
+ //
+
+ case FLOAT_CONVERT_SINGLE:
+ if (Format == FORMAT_SINGLE) {
+ break;
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is a NaN, then store a quiet NaN if the
+ // invalid operation trap is disabled, or raise an exception
+ // if the invalid operation trap is enabled and the operand
+ // is a signaling NaN.
+ //
+
+ if (DoubleOperand1.Nan != FALSE) {
+ SingleOperand1.Mantissa =
+ (DoubleOperand1.MantissaHigh << (26 - (55 - 32))) |
+ (DoubleOperand1.MantissaLow >> (32 - (26 - (55 - 32))));
+ SingleOperand1.Exponent = SINGLE_MAXIMUM_EXPONENT;
+ SingleOperand1.Sign = DoubleOperand1.Sign;
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = TRUE;
+ return KiInvalidOperationSingle(&ContextBlock,
+ TRUE,
+ &SingleOperand1,
+ &SingleOperand1);
+
+ }
+
+ //
+ // Transform the double operand to single format.
+ //
+
+ SingleOperand1.Mantissa =
+ (DoubleOperand1.MantissaHigh << (26 - (55 - 32))) |
+ (DoubleOperand1.MantissaLow >> (32 - (26 - (55 - 32))));
+ StickyBits = DoubleOperand1.MantissaLow << (26 - (55 - 32));
+ SingleOperand1.Exponent = DoubleOperand1.Exponent +
+ SINGLE_EXPONENT_BIAS - DOUBLE_EXPONENT_BIAS;
+ SingleOperand1.Sign = DoubleOperand1.Sign;
+ SingleOperand1.Infinity = DoubleOperand1.Infinity;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_LONGWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (Longword < 0) {
+ SingleOperand1.Sign = 0x1;
+ Longword = -Longword;
+
+ } else {
+ SingleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the longword
+ // value.
+ //
+
+ if (Longword != 0) {
+ SingleOperand1.Exponent = SINGLE_EXPONENT_BIAS + 31;
+ while (Longword > 0) {
+ Longword <<= 1;
+ SingleOperand1.Exponent -= 1;
+ }
+
+ SingleOperand1.Mantissa = (ULONG)Longword >> (32 - 26);
+ StickyBits = Longword << 26;
+
+ } else {
+ SingleOperand1.Mantissa = 0;
+ StickyBits = 0;
+ SingleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else if (Format == FORMAT_QUADWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (u.Quadword < 0) {
+ SingleOperand1.Sign = 0x1;
+ u.Quadword = -u.Quadword;
+
+ } else {
+ SingleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ SingleOperand1.Infinity = FALSE;
+ SingleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the quadword
+ // value.
+ //
+
+ if (u.Quadword != 0) {
+ SingleOperand1.Exponent = SINGLE_EXPONENT_BIAS + 63;
+ while (u.Quadword > 0) {
+ u.Quadword <<= 1;
+ SingleOperand1.Exponent -= 1;
+ }
+
+ SingleOperand1.Mantissa = (LONG)((ULONGLONG)u.Quadword >> (64 - 26));
+ StickyBits = (u.Quadword << 26) ? 1 : 0;
+
+ } else {
+ SingleOperand1.Mantissa = 0;
+ StickyBits = 0;
+ SingleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeSingle(&ContextBlock,
+ &SingleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating convert to double.
+ //
+ // This operation is only legal for conversion from quadword,
+ // longword, and single formats to double format. This operation
+ // cannot be used to convert from a double format to a double
+ // format.
+ //
+ // Floating conversion to double is accomplished by forming
+ // a double floating operand and then normalizing and storing
+ // the result value.
+ //
+
+ case FLOAT_CONVERT_DOUBLE:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is a NaN, then store a quiet NaN if the
+ // invalid operation trap is disabled, or raise an exception
+ // if the invalid operation trap is enabled and the operand
+ // is a signaling NaN.
+ //
+
+ if (SingleOperand1.Nan != FALSE) {
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow = (0xffffffff >> (26 - 2 - (55 - 32))) |
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = DOUBLE_MAXIMUM_EXPONENT;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = TRUE;
+ return KiInvalidOperationDouble(&ContextBlock,
+ TRUE,
+ &DoubleOperand1,
+ &DoubleOperand1);
+
+ }
+
+ //
+ // Transform the single operand to double format.
+ //
+
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow =
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = SingleOperand1.Exponent +
+ DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = SingleOperand1.Infinity;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_DOUBLE) {
+ break;
+
+ } else if (Format == FORMAT_LONGWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (Longword < 0) {
+ DoubleOperand1.Sign = 0x1;
+ Longword = -Longword;
+
+ } else {
+ DoubleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the longword
+ // value.
+ //
+
+ if (Longword != 0) {
+ DoubleOperand1.Exponent = DOUBLE_EXPONENT_BIAS + 31;
+ while (Longword > 0) {
+ Longword <<= 1;
+ DoubleOperand1.Exponent -= 1;
+ }
+
+ DoubleOperand1.Mantissa = (ULONGLONG)Longword >> (64 - 55);
+
+ } else {
+ DoubleOperand1.Mantissa = 0;
+ DoubleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ 0);
+
+ } else if (Format == FORMAT_QUADWORD) {
+
+ //
+ // Compute the sign of the result.
+ //
+
+ if (u.Quadword < 0) {
+ DoubleOperand1.Sign = 0x1;
+ u.Quadword = -u.Quadword;
+
+ } else {
+ DoubleOperand1.Sign = 0;
+ }
+
+ //
+ // Initialize the infinity and NaN values.
+ //
+
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Compute the exponent value and normalize the quadword
+ // value.
+ //
+
+ if (u.Quadword != 0) {
+ DoubleOperand1.Exponent = DOUBLE_EXPONENT_BIAS + 63;
+ while (u.Quadword > 0) {
+ u.Quadword <<= 1;
+ DoubleOperand1.Exponent -= 1;
+ }
+
+ DoubleOperand1.Mantissa = (ULONGLONG)u.Quadword >> (64 - 55);
+ StickyBits = (u.Quadword << 55) ? 1 : 0;
+
+ } else {
+ DoubleOperand1.Mantissa = 0;
+ StickyBits = 0;
+ DoubleOperand1.Exponent = 0;
+ }
+
+ //
+ // Normalize and store the result value.
+ //
+
+ return KiNormalizeDouble(&ContextBlock,
+ &DoubleOperand1,
+ StickyBits);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating convert to quadword.
+ //
+ // This operation is only legal for conversion from double
+ // and single formats to quadword format. This operation
+ // cannot be used to convert from a quadword format to a
+ // longword or quadword format.
+ //
+ // Floating conversion to quadword is accomplished by forming
+ // a quadword value from a single or double floating value.
+ //
+ // There is one general conversion operation and four directed
+ // rounding operations.
+ //
+
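+ //
+ // N.B. The following fragment is an illustrative sketch of the rounding
+ // decision implied by the retained low bit, the round bit, and the
+ // sticky bits for each of the four rounding modes; it assumes that the
+ // rounding mode constants select the IEEE modes their names suggest.
+ // It is excluded from compilation and the routine name is hypothetical.
+ //
+
+#if 0
+
+ BOOLEAN
+ SketchRoundUp (
+ IN ULONG RoundMode,
+ IN LONG Sign,
+ IN ULONG LowBit,
+ IN ULONG RoundBit,
+ IN ULONG StickyBits
+ )
+
+ {
+
+ switch (RoundMode) {
+
+ //
+ // Round to nearest, with ties going to the even result.
+ //
+
+ case ROUND_TO_NEAREST:
+ return (BOOLEAN)((RoundBit != 0) &&
+ ((StickyBits != 0) || (LowBit != 0)));
+
+ //
+ // Round toward zero never increments a truncated result.
+ //
+
+ case ROUND_TO_ZERO:
+ return FALSE;
+
+ //
+ // The directed roundings increment only results of the matching sign
+ // when any bits were discarded.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ return (BOOLEAN)((Sign == 0) &&
+ ((RoundBit != 0) || (StickyBits != 0)));
+
+ case ROUND_TO_MINUS_INFINITY:
+ return (BOOLEAN)((Sign != 0) &&
+ ((RoundBit != 0) || (StickyBits != 0)));
+ }
+
+ return FALSE;
+ }
+
+#endif
+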
+ case FLOAT_ROUND_QUADWORD:
+ ContextBlock.Round = ROUND_TO_NEAREST;
+ goto ConvertQuadword;
+
+ case FLOAT_TRUNC_QUADWORD:
+ ContextBlock.Round = ROUND_TO_ZERO;
+ goto ConvertQuadword;
+
+ case FLOAT_CEIL_QUADWORD:
+ ContextBlock.Round = ROUND_TO_PLUS_INFINITY;
+ goto ConvertQuadword;
+
+ case FLOAT_FLOOR_QUADWORD:
+ ContextBlock.Round = ROUND_TO_MINUS_INFINITY;
+ goto ConvertQuadword;
+
+ case FLOAT_CONVERT_QUADWORD:
+ ConvertQuadword:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((SingleOperand1.Infinity != FALSE) ||
+ (SingleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationQuadword(&ContextBlock,
+ SingleOperand1.Infinity,
+ SingleOperand1.Sign);
+ }
+
+ //
+ // Transform the single operand to double format.
+ //
+
+ DoubleOperand1.Mantissa = (LONGLONG)SingleOperand1.Mantissa << (55 - 26);
+ DoubleOperand1.Exponent = SingleOperand1.Exponent +
+ DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS;
+
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Convert double to quadword and store the result value.
+ //
+
+ return KiNormalizeQuadword(&ContextBlock, &DoubleOperand1);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) ||
+ (DoubleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationQuadword(&ContextBlock,
+ DoubleOperand1.Infinity,
+ DoubleOperand1.Sign);
+ }
+
+ //
+ // Convert double to quadword and store the result value.
+ //
+
+ return KiNormalizeQuadword(&ContextBlock, &DoubleOperand1);
+
+ } else {
+ break;
+ }
+
+ //
+ // Floating convert to longword.
+ //
+ // This operation is only legal for conversion from double
+ // and single formats to longword format. This operation
+ // cannot be used to convert from a longword format to a
+ // longword format.
+ //
+ // Floating conversion to longword is accomplished by forming
+ // a longword value from a single or double floating value.
+ //
+ // There is one general conversion operation and four directed
+ // rounding operations.
+ //
+
+ case FLOAT_ROUND_LONGWORD:
+ ContextBlock.Round = ROUND_TO_NEAREST;
+ goto ConvertLongword;
+
+ case FLOAT_TRUNC_LONGWORD:
+ ContextBlock.Round = ROUND_TO_ZERO;
+ goto ConvertLongword;
+
+ case FLOAT_CEIL_LONGWORD:
+ ContextBlock.Round = ROUND_TO_PLUS_INFINITY;
+ goto ConvertLongword;
+
+ case FLOAT_FLOOR_LONGWORD:
+ ContextBlock.Round = ROUND_TO_MINUS_INFINITY;
+ goto ConvertLongword;
+
+ case FLOAT_CONVERT_LONGWORD:
+ ConvertLongword:
+ if (Format == FORMAT_SINGLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((SingleOperand1.Infinity != FALSE) ||
+ (SingleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationLongword(&ContextBlock,
+ SingleOperand1.Infinity,
+ SingleOperand1.Sign);
+ }
+
+ //
+ // Transform the single operand to double format.
+ //
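+ //
+ // N.B. The shift count 26 - (55 - 32) is 3, so the right shift places
+ // the single hidden bit (bit 25) at bit 22 of MantissaHigh, which is
+ // bit 54 of the combined 64-bit value, and the companion left shift of
+ // 32 - 3 = 29 bits moves the three displaced low bits into the top of
+ // MantissaLow.
+ //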
+
+ DoubleOperand1.MantissaHigh =
+ SingleOperand1.Mantissa >> (26 - (55 - 32));
+ DoubleOperand1.MantissaLow =
+ SingleOperand1.Mantissa << (32 - (26 - (55 - 32)));
+ DoubleOperand1.Exponent = SingleOperand1.Exponent +
+ DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS;
+ DoubleOperand1.Sign = SingleOperand1.Sign;
+ DoubleOperand1.Infinity = FALSE;
+ DoubleOperand1.Nan = FALSE;
+
+ //
+ // Convert double to longword and store the result value.
+ //
+
+ return KiNormalizeLongword(&ContextBlock, &DoubleOperand1);
+
+ } else if (Format == FORMAT_DOUBLE) {
+
+ //
+ // If the operand is infinite or is a NaN, then store a
+ // quiet NaN or an appropriate infinity if the invalid
+ // operation trap is disabled, or raise an exception if
+ // the invalid trap is enabled.
+ //
+
+ if ((DoubleOperand1.Infinity != FALSE) ||
+ (DoubleOperand1.Nan != FALSE)) {
+ return KiInvalidOperationLongword(&ContextBlock,
+ DoubleOperand1.Infinity,
+ DoubleOperand1.Sign);
+ }
+
+ //
+ // Convert double to longword and store the result value.
+ //
+
+ return KiNormalizeLongword(&ContextBlock, &DoubleOperand1);
+
+ } else {
+ break;
+ }
+
+ //
+ // An illegal function, format value, or field value.
+ //
+
+ default :
+ break;
+ }
+
+ //
+ // An illegal function, format value, or field value was encountered.
+ // Generate an illegal instruction exception.
+ //
+
+ ExceptionRecord->ExceptionCode = STATUS_ILLEGAL_INSTRUCTION;
+ return FALSE;
+
+ //
+ // If an exception occurs, then copy the new exception information to the
+ // original exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address and branch destination.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+ return FALSE;
+ }
+}
+
+BOOLEAN
+KiDivideByZeroDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN or properly signed infinity for a divide by zero double
+ floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+ DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the divide by zero trap is enabled and the dividend is not infinite,
+ then a value of FALSE is returned. Otherwise, a quiet NaN or a properly
+ signed infinity is stored as the destination result and a value of TRUE
+ is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultSign;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // The result value is a properly signed infinity.
+ //
+
+ ResultSign = DoubleOperand1->Sign ^ DoubleOperand2->Sign;
+ ResultValueHigh = DOUBLE_INFINITY_VALUE_HIGH | (ResultSign << 31);
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+
+ //
+ // If the first operand is not infinite and the divide by zero trap is
+ // enabled, then store the proper exception code and exception flags
+ // and return a value of FALSE. Otherwise, store the appropriately signed
+ // infinity and return a value of TRUE.
+ //
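+ //
+ // N.B. Judging from the usage in this module, the FSR fields follow the
+ // pattern S<x> for the sticky flag, E<x> for the trap enable, and X<x>
+ // for the exception cause, where <x> is Z for divide by zero, V for
+ // invalid operation, O for overflow, U for underflow, and I for inexact.
+ // The helpers set the sticky flag when the condition is detected and
+ // raise an exception only when the corresponding enable bit is also set.
+ //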
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (DoubleOperand1->Infinity == FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SZ = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EZ != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ ((PFSR)&TrapFrame->Fsr)->XZ = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValueLow,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ ResultValueHigh,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiDivideByZeroSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN or properly signed infinity for a divide by zero single
+ floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+ SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the divide by zero trap is enabled and the dividend is not infinite,
+ then a value of FALSE is returned. Otherwise, a quiet NaN or a properly
+ signed infinity is stored as the destination result and a value of TRUE
+ is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultSign;
+ ULONG ResultValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // The result value is a properly signed infinity.
+ //
+
+ ResultSign = SingleOperand1->Sign ^ SingleOperand2->Sign;
+ ResultValue = SINGLE_INFINITY_VALUE | (ResultSign << 31);
+
+ //
+ // If the first operand is not infinite and the divide by zero trap is
+ // enabled, then store the proper exception code and exception flags
+ // and return a value of FALSE. Otherwise, store the appropriately signed
+ // infinity and return a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (SingleOperand1->Infinity == FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SZ = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EZ != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ ((PFSR)&TrapFrame->Fsr)->XZ = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidCompareDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to determine whether an invalid operation
+ exception should be raised for a double compare operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+ DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, no operation is performed and a value
+ of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, perform no operation and return a
+ // value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((DoubleOperand1->Nan != FALSE) &&
+ ((DoubleOperand1->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((DoubleOperand2->Nan != FALSE) &&
+ ((DoubleOperand2->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.CompareValue = FpCompareUnordered;
+ return FALSE;
+ }
+ }
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidCompareSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to determine whether an invalid operation
+ exception should be raised for a single compare operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+ SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, no operation is performed and a value
+ of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, perform no operation and return a
+ // value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((SingleOperand1->Nan != FALSE) &&
+ ((SingleOperand1->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((SingleOperand2->Nan != FALSE) &&
+ ((SingleOperand2->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.CompareValue = FpCompareUnordered;
+ return FALSE;
+ }
+ }
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidOperationDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_DOUBLE_OPERAND DoubleOperand1,
+ IN PFP_DOUBLE_OPERAND DoubleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid double floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ DoubleOperand1 - Supplies a pointer to the first operand value.
+
+ DoubleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, a quiet NaN is stored as the destination
+ result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG MantissaHigh;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the first operand is a NaN, then compute a quiet NaN from its
+ // value. Otherwise, if the second operand is a NaN, then compute a
+ // quiet NaN from its value. Otherwise, the result value is a quiet
+ // NaN.
+ //
+
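+ //
+ // N.B. The payload is shifted right two bits below because KiUnpackDouble
+ // appends a guard bit and a round bit when the operand is unpacked; the
+ // shift restores the stored mantissa alignment before the quiet NaN
+ // prefix is applied.
+ //
+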
+ if (DoubleOperand1->Nan != FALSE) {
+ MantissaHigh = DoubleOperand1->MantissaHigh & ~DOUBLE_SIGNAL_NAN_MASK;
+ if ((DoubleOperand1->MantissaLow | MantissaHigh) != 0) {
+ ResultValueLow = DoubleOperand1->MantissaLow >> 2;
+ ResultValueLow |= DoubleOperand1->MantissaHigh << 30;
+ ResultValueHigh = DoubleOperand1->MantissaHigh >> 2;
+ ResultValueHigh |= DOUBLE_QUIET_NAN_PREFIX;
+ ResultValueHigh &= ~DOUBLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValueLow = DOUBLE_NAN_LOW;
+ ResultValueHigh = DOUBLE_QUIET_NAN;
+ }
+
+ } else if (DoubleOperand2->Nan != FALSE) {
+ MantissaHigh = DoubleOperand2->MantissaHigh & ~DOUBLE_SIGNAL_NAN_MASK;
+ if ((DoubleOperand2->MantissaLow | MantissaHigh) != 0) {
+ ResultValueLow = DoubleOperand2->MantissaLow >> 2;
+ ResultValueLow |= DoubleOperand2->MantissaHigh << 30;
+ ResultValueHigh = DoubleOperand2->MantissaHigh >> 2;
+ ResultValueHigh |= DOUBLE_QUIET_NAN_PREFIX;
+ ResultValueHigh &= ~DOUBLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValueLow = DOUBLE_NAN_LOW;
+ ResultValueHigh = DOUBLE_QUIET_NAN;
+ }
+
+ } else {
+ ResultValueLow = DOUBLE_NAN_LOW;
+ ResultValueHigh = DOUBLE_QUIET_NAN;
+ }
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, store a quiet NaN as the destination
+ // result and return a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((DoubleOperand1->Nan != FALSE) &&
+ ((DoubleOperand1->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((DoubleOperand2->Nan != FALSE) &&
+ ((DoubleOperand2->MantissaHigh & DOUBLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValueLow,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ ResultValueHigh,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiInvalidOperationLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid conversion to longword.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ Infinity - Supplies a boolean variable that specifies whether the
+ invalid operand is infinite.
+
+ Sign - Supplies the infinity sign if the invalid operand is infinite.
+
+Return Value:
+
+ If the invalid operation trap is enabled, then a value of FALSE is
+ returned. Otherwise, an appropriate longword value is stored as the
+ destination result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the value is infinite, then the result is a properly signed value
+ // whose magnitude is the largest that will fit in 32-bits. Otherwise,
+ // the result is an integer NaN.
+ //
+
+ if (Infinity != FALSE) {
+ if (Sign == 0) {
+ ResultValue = 0x7fffffff;
+
+ } else {
+ ResultValue = 0x80000000;
+ }
+
+ } else {
+ ResultValue = SINGLE_INTEGER_NAN;
+ }
+
+ //
+ // If the invalid operation trap is enabled then store the proper
+ // exception code and exception flags and return a value of FALSE.
+ // Otherwise, store the appropriate longword result and return
+ // a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U32Value = ResultValue;
+ return FALSE;
+
+ } else {
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+}
+
+BOOLEAN
+KiInvalidOperationQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN Infinity,
+ IN LONG Sign
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid conversion to quadword.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ Infinity - Supplies a boolean variable that specifies whether the
+ invalid operand is infinite.
+
+ Sign - Supplies the infinity sign if the invalid operand is infinite.
+
+Return Value:
+
+ If the invalid operation trap is enabled, then a value of FALSE is
+ returned. Otherwise, an appropriate quadword value is stored as the
+ destination result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ union {
+ ULONGLONG ResultValue;
+ ULARGE_INTEGER LargeValue;
+ }u;
+
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the value is infinite, then the result is a properly signed value
+ // whose magnitude is the largest that will fit in 64-bits. Otherwise,
+ // the result is an integer NaN.
+ //
+
+ if (Infinity != FALSE) {
+ if (Sign == 0) {
+ u.ResultValue = 0x7fffffffffffffff;
+
+ } else {
+ u.ResultValue = 0x8000000000000000;
+ }
+
+ } else {
+ u.ResultValue = DOUBLE_INTEGER_NAN;
+ }
+
+ //
+ // If the invalid operation trap is enabled then store the proper
+ // exception code and exception flags and return a value of FALSE.
+ // Otherwise, store the appropriate quadword result and return
+ // a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U64Value.QuadPart = u.ResultValue;
+ return FALSE;
+
+ } else {
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ u.LargeValue.LowPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 33,
+ u.LargeValue.HighPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+}
+
+BOOLEAN
+KiInvalidOperationSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN BOOLEAN CheckForNan,
+ IN PFP_SINGLE_OPERAND SingleOperand1,
+ IN PFP_SINGLE_OPERAND SingleOperand2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to either raise an exception or store a
+ quiet NaN for an invalid single floating operation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ CheckForNan - Supplies a boolean value that determines whether the
+ operand values should be checked for a signaling NaN.
+
+ SingleOperand1 - Supplies a pointer to the first operand value.
+
+ SingleOperand2 - Supplies a pointer to the second operand value.
+
+Return Value:
+
+ If the invalid operation trap is enabled and either the operation is
+ invalid or one of the operands is a signaling NaN, then a value of
+ FALSE is returned. Otherwise, a quiet NaN is stored as the destination
+ result and a value of TRUE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ PFP_IEEE_VALUE IeeeValue;
+ ULONG ResultValue;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // If the first operand is a NaN, then compute a quiet NaN from its
+ // value. Otherwise, if the second operand is a NaN, then compute a
+ // quiet NaN from its value. Otherwise, the result value is a quiet
+ // NaN.
+ //
+
+ if (SingleOperand1->Nan != FALSE) {
+ if ((SingleOperand1->Mantissa & ~SINGLE_SIGNAL_NAN_MASK) != 0) {
+ ResultValue = SingleOperand1->Mantissa >> 2;
+ ResultValue |= SINGLE_QUIET_NAN_PREFIX;
+ ResultValue &= ~SINGLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValue = SINGLE_QUIET_NAN;
+ }
+
+ } else if (SingleOperand2->Nan != FALSE) {
+ if ((SingleOperand2->Mantissa & ~SINGLE_SIGNAL_NAN_MASK) != 0) {
+ ResultValue = SingleOperand2->Mantissa >> 2;
+ ResultValue |= SINGLE_QUIET_NAN_PREFIX;
+ ResultValue &= ~SINGLE_QUIET_NAN_MASK;
+
+ } else {
+ ResultValue = SINGLE_QUIET_NAN;
+ }
+
+ } else {
+ ResultValue = SINGLE_QUIET_NAN;
+ }
+
+ //
+ // If an invalid operation is specified or one of the operands is a
+ // signaling NaN and the invalid operation trap is enabled, then
+ // store the proper exception code and exception flags and return
+ // a value of FALSE. Otherwise, store a quiet NaN as the destination
+ // result and return a value of TRUE.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if ((CheckForNan == FALSE) ||
+ ((SingleOperand1->Nan != FALSE) &&
+ ((SingleOperand1->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0)) ||
+ ((SingleOperand2->Nan != FALSE) &&
+ ((SingleOperand2->Mantissa & SINGLE_SIGNAL_NAN_MASK) != 0))) {
+ ((PFSR)&TrapFrame->Fsr)->SV = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EV != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ ((PFSR)&TrapFrame->Fsr)->XV = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+ }
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeDouble (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to normalize a double floating result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and a possible overflow bit.
+ The result format is:
+
+ <63:56> - zero
+ <55> - overflow bit
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ The sticky bits specify bits that were lost during the computation.
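+
+ N.B. In the code below the 64-bit intermediate value is held in the
+ MantissaHigh/MantissaLow pair, so bit 55 of the value is tested as
+ bit 55 - 32 = 23 of MantissaHigh and bit 54 as bit 22.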
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+ StickyBits - Supplies the value of the sticky bits.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG DenormalizeShift;
+ PEXCEPTION_RECORD ExceptionRecord;
+ ULONG ExceptionResultHigh;
+ ULONG ExceptionResultLow;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ ULONG ResultValueHigh;
+ ULONG ResultValueLow;
+ ULONG RoundBit;
+ PKTRAP_FRAME TrapFrame;
+ BOOLEAN Underflow;
+
+ //
+ // If the result is infinite, then store a properly signed infinity
+ // in the destination register and return a value of TRUE. Otherwise,
+ // round and normalize the result and check for overflow and underflow.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (ResultOperand->Infinity != FALSE) {
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ DOUBLE_INFINITY_VALUE_LOW,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ DOUBLE_INFINITY_VALUE_HIGH | (ResultOperand->Sign << 31),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+
+ //
+ // If the overflow bit is set, then right shift the mantissa one bit,
+ // accumulate the lost bit with the sticky bits, and adjust the exponent
+ // value.
+ //
+
+ if ((ResultOperand->MantissaHigh & (1 << (55 - 32))) != 0) {
+ StickyBits |= (ResultOperand->MantissaLow & 0x1);
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the mantissa is not zero, then normalize the mantissa by left
+ // shifting one bit at a time until there is a one bit in bit 54.
+ //
+
+ if ((ResultOperand->MantissaLow != 0) || (ResultOperand->MantissaHigh != 0)) {
+ while ((ResultOperand->MantissaHigh & (1 << (54 - 32))) == 0) {
+ ResultOperand->MantissaHigh =
+ (ResultOperand->MantissaHigh << 1) |
+ (ResultOperand->MantissaLow >> 31);
+
+ ResultOperand->MantissaLow <<= 1;
+ ResultOperand->Exponent -= 1;
+ }
+ }
+
+ //
+ // Right shift the mantissa one bit and accumulate the lost bit with the
+ // sticky bits.
+ //
+
+ StickyBits |= (ResultOperand->MantissaLow & 0x1);
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+
+ //
+ // Round the result value using the mantissa and the sticky bits.
+ //
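+ //
+ // N.B. At this point bit 0 of the mantissa is the round bit and bit 1 is
+ // the least significant bit that will be kept. Under round to nearest,
+ // the mantissa is incremented by 2 (one unit in the kept position) when
+ // the round bit is set and either a sticky bit or the kept bit is also
+ // set; an exact tie with the kept bit clear is left unchanged so that
+ // the kept result stays even.
+ //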
+
+ RoundBit = ResultOperand->MantissaLow & 0x1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((ResultOperand->MantissaLow & 0x2) != 0)) {
+ ResultOperand->MantissaLow += 2;
+ if (ResultOperand->MantissaLow < 2) {
+ ResultOperand->MantissaHigh += 1;
+ }
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->MantissaLow += 2;
+ if (ResultOperand->MantissaLow < 2) {
+ ResultOperand->MantissaHigh += 1;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->MantissaLow += 2;
+ if (ResultOperand->MantissaLow < 2) {
+ ResultOperand->MantissaHigh += 1;
+ }
+ }
+
+ break;
+ }
+
+ //
+ // If rounding resulted in a carry into bit 54, then right shift the
+ // mantissa one bit and adjust the exponent.
+ //
+
+ if ((ResultOperand->MantissaHigh & (1 << (54 - 32))) != 0) {
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // Right shift the mantissa one bit to normalize the final result.
+ //
+
+ StickyBits |= ResultOperand->MantissaLow & 0x1;
+ ResultOperand->MantissaLow =
+ (ResultOperand->MantissaLow >> 1) |
+ (ResultOperand->MantissaHigh << 31);
+
+ ResultOperand->MantissaHigh >>= 1;
+
+ //
+ // If the exponent value is greater than or equal to the maximum
+ // exponent value, then overflow has occurred. This results in both
+ // the inexact and overflow sticky bits being set in FSR.
+ //
+ // If the exponent value is less than or equal to the minimum exponent
+ // value, the mantissa is nonzero, and the result is inexact or the
+ // denormalized result causes loss of accuracy, then underflow has
+ // occurred. If denormals are being flushed to zero, then a result of
+ // zero is returned. Otherwise, both the inexact and underflow sticky
+ // bits are set in FSR.
+ //
+ // Otherwise, a normal result can be delivered, but it may be inexact.
+ // If the result is inexact, then the inexact sticky bit is set in FSR.
+ //
+
+ if (ResultOperand->Exponent >= DOUBLE_MAXIMUM_EXPONENT) {
+ Inexact = TRUE;
+ Overflow = TRUE;
+ Underflow = FALSE;
+
+ //
+ // The overflow value is dependent on the rounding mode.
+ //
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+ // The result value is infinity with the sign of the result.
+ //
+
+ case ROUND_TO_NEAREST:
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+ ResultValueHigh =
+ DOUBLE_INFINITY_VALUE_HIGH | (ResultOperand->Sign << 31);
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+ // The result is the maximum number with the sign of the result.
+ //
+
+ case ROUND_TO_ZERO:
+ ResultValueLow = DOUBLE_MAXIMUM_VALUE_LOW;
+ ResultValueHigh =
+ DOUBLE_MAXIMUM_VALUE_HIGH | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+ // If the sign of the result is positive, then the result is
+ // plus infinity. Otherwise, the result is the maximum negative
+ // number.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if (ResultOperand->Sign == 0) {
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+ ResultValueHigh = DOUBLE_INFINITY_VALUE_HIGH;
+
+ } else {
+ ResultValueLow = DOUBLE_MAXIMUM_VALUE_LOW;
+ ResultValueHigh = (ULONG)(DOUBLE_MAXIMUM_VALUE_HIGH | (1 << 31));
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+ // If the sign of the result is negative, then the result is
+ // negative infinity. Otherwise, the result is the maximum
+ // positive number.
+ //
+
+
+ case ROUND_TO_MINUS_INFINITY:
+ if (ResultOperand->Sign != 0) {
+ ResultValueLow = DOUBLE_INFINITY_VALUE_LOW;
+ ResultValueHigh = (ULONG)(DOUBLE_INFINITY_VALUE_HIGH | (1 << 31));
+
+ } else {
+ ResultValueLow = DOUBLE_MAXIMUM_VALUE_LOW;
+ ResultValueHigh = DOUBLE_MAXIMUM_VALUE_HIGH;
+ }
+
+ break;
+ }
+
+ //
+ // Compute the overflow exception result value by subtracting 1536
+ // from the exponent.
+ //
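+ //
+ // N.B. The value 1536 matches the IEEE 754 recommended exponent bias
+ // adjustment for double format (3 * 2**(k - 2) with an 11-bit exponent
+ // field), which wraps an overflowed result back into range for delivery
+ // with the exception; the single precision code uses 192 in the same way.
+ //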
+
+ ExceptionResultLow = ResultOperand->MantissaLow;
+ ExceptionResultHigh = ResultOperand->MantissaHigh & ((1 << (52 - 32)) - 1);
+ ExceptionResultHigh |= ((ResultOperand->Exponent - 1536) << (52 - 32));
+ ExceptionResultHigh |= (ResultOperand->Sign << 31);
+
+ } else {
+ if ((ResultOperand->Exponent <= DOUBLE_MINIMUM_EXPONENT) &&
+ (ResultOperand->MantissaHigh != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->FS == 0) {
+ DenormalizeShift = 1 - ResultOperand->Exponent;
+ if (DenormalizeShift >= 53) {
+ DenormalizeShift = 53;
+ }
+
+ if (DenormalizeShift >= 32) {
+ DenormalizeShift -= 32;
+ StickyBits |= ResultOperand->MantissaLow |
+ (ResultOperand->MantissaHigh & ((1 << DenormalizeShift) - 1));
+
+ ResultValueLow = ResultOperand->MantissaHigh >> DenormalizeShift;
+ ResultValueHigh = 0;
+
+ } else if (DenormalizeShift > 0) {
+ StickyBits |=
+ ResultOperand->MantissaLow & ((1 << DenormalizeShift) - 1);
+
+ ResultValueLow =
+ (ResultOperand->MantissaLow >> DenormalizeShift) |
+ (ResultOperand->MantissaHigh << (32 - DenormalizeShift));
+
+ ResultValueHigh =
+ (ResultOperand->MantissaHigh >> DenormalizeShift);
+
+ } else {
+ ResultValueLow = ResultOperand->MantissaLow;
+ ResultValueHigh = ResultOperand->MantissaHigh;
+ }
+
+ ResultValueHigh |= (ResultOperand->Sign << 31);
+ if (StickyBits != 0) {
+ Inexact = TRUE;
+ Overflow = FALSE;
+ Underflow = TRUE;
+
+ //
+ // Compute the underflow exception result value by adding
+ // 1536 to the exponent.
+ //
+
+ ExceptionResultLow = ResultOperand->MantissaLow;
+ ExceptionResultHigh = ResultOperand->MantissaHigh & ((1 << (52 - 32)) - 1);
+ ExceptionResultHigh |= ((ResultOperand->Exponent + 1536) << (52 - 32));
+ ExceptionResultHigh |= (ResultOperand->Sign << 31);
+
+ } else {
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ ResultValueLow = 0;
+ ResultValueHigh = 0;
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ if (ResultOperand->MantissaHigh == 0) {
+ ResultOperand->Exponent = 0;
+ }
+
+ ResultValueLow = ResultOperand->MantissaLow;
+ ResultValueHigh = ResultOperand->MantissaHigh & ((1 << (52 - 32)) - 1);
+ ResultValueHigh |= (ResultOperand->Exponent << (52 - 32));
+ ResultValueHigh |= (ResultOperand->Sign << 31);
+ Inexact = StickyBits ? TRUE : FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SO = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EO != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EO != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ExceptionResultLow;
+ IeeeValue->Value.Fp64Value.W[1] = ExceptionResultHigh;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XO = 1;
+ return FALSE;
+ }
+
+ } else if (Underflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SU = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EU != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EU != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ExceptionResultLow;
+ IeeeValue->Value.Fp64Value.W[1] = ExceptionResultHigh;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XU = 1;
+ return FALSE;
+ }
+
+ } else if (Inexact != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp64Value.W[0] = ResultValueLow;
+ IeeeValue->Value.Fp64Value.W[1] = ResultValueHigh;
+ return FALSE;
+ }
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValueLow,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 32 + 1,
+ ResultValueHigh,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeLongword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to convert a result value to a longword result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and an overflow bit of zero.
+ The result format is:
+
+ <63:55> - zero
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ There are no sticky bits.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ LONG ExponentShift;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ ULONG ResultValue;
+ ULONG RoundBit;
+ ULONG StickyBits;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // Subtract out the exponent bias and divide the cases into right
+ // and left shifts.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ExponentShift = ResultOperand->Exponent - DOUBLE_EXPONENT_BIAS;
+ if (ExponentShift < 23) {
+
+ //
+ // The integer value is less than 2**23 and a right shift must
+ // be performed.
+ //
+
+ ExponentShift = 22 - ExponentShift;
+ if (ExponentShift > 24) {
+ ExponentShift = 24;
+ }
+
+ StickyBits =
+ (ResultOperand->MantissaLow >> 2) |
+ (ResultOperand->MantissaHigh << (32 - ExponentShift));
+
+ ResultValue = ResultOperand->MantissaHigh >> ExponentShift;
+ Overflow = FALSE;
+
+ } else {
+
+ //
+ // The integer value is two or greater and a left shift must be
+ // performed.
+ //
+
+ ExponentShift -= 22;
+ if (ExponentShift <= (31 - 22)) {
+ StickyBits = ResultOperand->MantissaLow << ExponentShift;
+ ResultValue =
+ (ResultOperand->MantissaHigh << ExponentShift) |
+ (ResultOperand->MantissaLow >> (32 - ExponentShift));
+
+ Overflow = FALSE;
+
+ } else {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Round the result value using the mantissa and the sticky bits.
+ //
+
+ RoundBit = StickyBits >> 31;
+ StickyBits <<= 1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((ResultValue & 0x1) != 0)) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) && (StickyBits != 0)) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) && (StickyBits != 0)) {
+ ResultValue += 1;
+ if (ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+ }
+
+ //
+ // If the sign of the result is positive and the result value is negative,
+ // then overflow has occurred. Otherwise, negate the result value and
+ // check whether the negated value is negative. If it is positive, then
+ // overflow has occurred.
+ //
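+ //
+ // For example, a magnitude of 0x80000000 with a negative sign negates to
+ // 0x80000000, which still has the sign bit set and is accepted as the
+ // most negative longword, while a magnitude of 0x80000001 negates to
+ // 0x7fffffff, which is positive, so overflow is detected.
+ //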
+
+ if (ResultOperand->Sign == 0) {
+ if ((ResultValue >> 31) != 0) {
+ Overflow = TRUE;
+ }
+
+ } else {
+ ResultValue = ~ResultValue + 1;
+ if ((ResultValue >> 31) == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ return KiInvalidOperationLongword(ContextBlock,
+ FALSE,
+ 0);
+
+ } else if ((StickyBits | RoundBit) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U32Value = ResultValue;
+ return FALSE;
+ }
+
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeQuadword (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_DOUBLE_OPERAND ResultOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to convert a result value to a quadword result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and an overflow bit of zero.
+ The result format is:
+
+ <63:55> - zero
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ There are no sticky bits.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+ LONG ExponentShift;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ union {
+ ULONGLONG ResultValue;
+ ULARGE_INTEGER LargeValue;
+ }u;
+
+ ULONG RoundBit;
+ ULONG StickyBits;
+ PKTRAP_FRAME TrapFrame;
+
+ //
+ // Subtract out the exponent bias and divide the cases into right
+ // and left shifts.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ ExponentShift = ResultOperand->Exponent - DOUBLE_EXPONENT_BIAS;
+ if (ExponentShift < 54) {
+
+ //
+ // The integer value is less than 2**52 and a right shift must
+ // be performed.
+ //
+
+ ExponentShift = 54 - ExponentShift;
+ if (ExponentShift > 54) {
+ ExponentShift = 54;
+ }
+
+ StickyBits = (ULONG)(ResultOperand->Mantissa << (32 - ExponentShift));
+ u.ResultValue = ResultOperand->Mantissa >> ExponentShift;
+ Overflow = FALSE;
+
+ } else {
+
+ //
+ // The integer value is two or greater and a left shift must be
+ // performed.
+ //
+
+ ExponentShift -= 54;
+ if (ExponentShift <= (63 - 54)) {
+ StickyBits = 0;
+ u.ResultValue = ResultOperand->Mantissa << ExponentShift;
+ Overflow = FALSE;
+
+ } else {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Round the result value using the mantissa and the sticky bits.
+ //
+
+ RoundBit = StickyBits >> 31;
+ StickyBits <<= 1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((u.ResultValue & 0x1) != 0)) {
+ u.ResultValue += 1;
+ if (u.ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) && (StickyBits != 0)) {
+ u.ResultValue += 1;
+ if (u.ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) && (StickyBits != 0)) {
+ u.ResultValue += 1;
+ if (u.ResultValue == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ break;
+ }
+
+ //
+ // If the sign of the result is positive and the result value is negative,
+ // then overflow has occurred. Otherwise, negate the result value and
+ // check whether the negated value is negative. If it is positive, then
+ // overflow has occurred.
+ //
+
+ if (ResultOperand->Sign == 0) {
+ if ((u.ResultValue >> 63) != 0) {
+ Overflow = TRUE;
+ }
+
+ } else {
+ u.ResultValue = ~u.ResultValue + 1;
+ if ((u.ResultValue >> 63) == 0) {
+ Overflow = TRUE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ return KiInvalidOperationQuadword(ContextBlock,
+ FALSE,
+ 0);
+
+ } else if ((StickyBits | RoundBit) != 0) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.U64Value.QuadPart = u.ResultValue;
+ return FALSE;
+ }
+
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ u.LargeValue.LowPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ KiSetRegisterValue(ContextBlock->Fd + 33,
+ u.LargeValue.HighPart,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+BOOLEAN
+KiNormalizeSingle (
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ IN PFP_SINGLE_OPERAND ResultOperand,
+ IN ULONG StickyBits
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to normalize a single floating result.
+
+ N.B. The result value is specified with a guard bit on the right,
+ the hidden bit (if appropriate), and a possible overflow bit.
+ The result format is:
+
+ <31:27> - zero
+ <26> - overflow bit
+ <25> - hidden bit
+ <24:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+ The sticky bits specify bits that were lost during the computation.
+
+Arguments:
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ ResultOperand - Supplies a pointer to the result operand value.
+
+ StickyBits - Supplies the value of the sticky bits.
+
+Return Value:
+
+ If there is not an exception, or the exception is handled, then a proper
+ result is stored in the destination result, the continuation address is
+ set, and a value of TRUE is returned. Otherwise, no value is stored and
+ a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG DenormalizeShift;
+ PEXCEPTION_RECORD ExceptionRecord;
+ ULONG ExceptionResult;
+ PFP_IEEE_VALUE IeeeValue;
+ BOOLEAN Inexact;
+ BOOLEAN Overflow;
+ ULONG ResultValue;
+ ULONG RoundBit;
+ PKTRAP_FRAME TrapFrame;
+ BOOLEAN Underflow;
+
+ //
+ // If the result is infinite, then store a properly signed infinity
+ // in the destination register and return a value of TRUE. Otherwise,
+ // round and normalize the result and check for overflow and underflow.
+ //
+
+ ExceptionRecord = ContextBlock->ExceptionRecord;
+ TrapFrame = ContextBlock->TrapFrame;
+ if (ResultOperand->Infinity != FALSE) {
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ SINGLE_INFINITY_VALUE | (ResultOperand->Sign << 31),
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+ }
+
+ //
+ // If the overflow bit is set, then right shift the mantissa one bit,
+ // accumulate the lost bit with the sticky bits, and adjust the exponent
+ // value.
+ //
+
+ if ((ResultOperand->Mantissa & (1 << 26)) != 0) {
+ StickyBits |= (ResultOperand->Mantissa & 0x1);
+ ResultOperand->Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // If the mantissa is not zero, then normalize the mantissa by left
+ // shifting one bit at a time until there is a one bit in bit 25.
+ //
+
+ if (ResultOperand->Mantissa != 0) {
+ while ((ResultOperand->Mantissa & (1 << 25)) == 0) {
+ ResultOperand->Mantissa <<= 1;
+ ResultOperand->Exponent -= 1;
+ }
+ }
+
+ //
+ // Right shift the mantissa one bit and accumulate the lost bit with the
+ // sticky bits.
+ //
+
+ StickyBits |= (ResultOperand->Mantissa & 0x1);
+ ResultOperand->Mantissa >>= 1;
+
+ //
+ // Round the result value using the mantissa, the round bit, and the
+ // sticky bits.
+ //
+
+ RoundBit = ResultOperand->Mantissa & 0x1;
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+
+ case ROUND_TO_NEAREST:
+ if (RoundBit != 0) {
+ if ((StickyBits != 0) || ((ResultOperand->Mantissa & 0x2) != 0)) {
+ ResultOperand->Mantissa += 2;
+ }
+ }
+
+ break;
+
+ //
+ // Round toward zero.
+ //
+
+ case ROUND_TO_ZERO:
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if ((ResultOperand->Sign == 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->Mantissa += 2;
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+
+ case ROUND_TO_MINUS_INFINITY:
+ if ((ResultOperand->Sign != 0) &&
+ ((StickyBits != 0) || (RoundBit != 0))) {
+ ResultOperand->Mantissa += 2;
+ }
+
+ break;
+ }
+
+ //
+ // If rounding resulted in a carry into bit 25, then right shift the
+ // mantissa one bit and adjust the exponent.
+ //
+
+ if ((ResultOperand->Mantissa & (1 << 25)) != 0) {
+ ResultOperand->Mantissa >>= 1;
+ ResultOperand->Exponent += 1;
+ }
+
+ //
+ // Right shift the mantissa one bit to normalize the final result.
+ //
+
+ StickyBits |= RoundBit;
+ ResultOperand->Mantissa >>= 1;
+
+ //
+ // If the exponent value is greater than or equal to the maximum
+ // exponent value, then overflow has occurred. This results in both
+ // the inexact and overflow sticky bits being set in FSR.
+ //
+ // If the exponent value is less than or equal to the minimum exponent
+ // value, the mantissa is nonzero, and the result is inexact or the
+ // denormalized result causes loss of accuracy, then underflow has
+ // occurred. If denormals are being flushed to zero, then a result of
+ // zero is returned. Otherwise, both the inexact and underflow sticky
+ // bits are set in FSR.
+ //
+ // Otherwise, a normal result can be delivered, but it may be inexact.
+ // If the result is inexact, then the inexact sticky bit is set in FSR.
+ //
+
+ if (ResultOperand->Exponent >= SINGLE_MAXIMUM_EXPONENT) {
+ Inexact = TRUE;
+ Overflow = TRUE;
+ Underflow = FALSE;
+
+ //
+ // The overflow value is dependent on the rounding mode.
+ //
+
+ switch (ContextBlock->Round) {
+
+ //
+ // Round to nearest representable number.
+ //
+ // The result value is infinity with the sign of the result.
+ //
+
+ case ROUND_TO_NEAREST:
+ ResultValue = SINGLE_INFINITY_VALUE | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward zero.
+ //
+ // The result is the maximum number with the sign of the result.
+ //
+
+ case ROUND_TO_ZERO:
+ ResultValue = SINGLE_MAXIMUM_VALUE | (ResultOperand->Sign << 31);
+ break;
+
+ //
+ // Round toward plus infinity.
+ //
+ // If the sign of the result is positive, then the result is
+ // plus infinity. Otherwise, the result is the maximum negative
+ // number.
+ //
+
+ case ROUND_TO_PLUS_INFINITY:
+ if (ResultOperand->Sign == 0) {
+ ResultValue = SINGLE_INFINITY_VALUE;
+
+ } else {
+ ResultValue = (ULONG)(SINGLE_MAXIMUM_VALUE | (1 << 31));
+ }
+
+ break;
+
+ //
+ // Round toward minus infinity.
+ //
+ // If the sign of the result is negative, then the result is
+ // negative infinity. Otherwise, the result is the maximum
+ // positive number.
+ //
+
+
+ case ROUND_TO_MINUS_INFINITY:
+ if (ResultOperand->Sign != 0) {
+ ResultValue = (ULONG)(SINGLE_INFINITY_VALUE | (1 << 31));
+
+ } else {
+ ResultValue = SINGLE_MAXIMUM_VALUE;
+ }
+
+ break;
+ }
+
+ //
+ // Compute the overflow exception result value by subtracting 192
+ // from the exponent.
+ //
+
+ ExceptionResult = ResultOperand->Mantissa & ((1 << 23) - 1);
+ ExceptionResult |= ((ResultOperand->Exponent - 192) << 23);
+ ExceptionResult |= (ResultOperand->Sign << 31);
+
+ } else {
+ if ((ResultOperand->Exponent <= SINGLE_MINIMUM_EXPONENT) &&
+ (ResultOperand->Mantissa != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->FS == 0) {
+ DenormalizeShift = 1 - ResultOperand->Exponent;
+ if (DenormalizeShift >= 24) {
+ DenormalizeShift = 24;
+ }
+
+ ResultValue = ResultOperand->Mantissa >> DenormalizeShift;
+ ResultValue |= (ResultOperand->Sign << 31);
+ if ((StickyBits != 0) ||
+ ((ResultOperand->Mantissa & ((1 << DenormalizeShift) - 1)) != 0)) {
+ Inexact = TRUE;
+ Overflow = FALSE;
+ Underflow = TRUE;
+
+ //
+ // Compute the underflow exception result value by adding
+ // 192 to the exponent.
+ //
+
+ ExceptionResult = ResultOperand->Mantissa & ((1 << 23) - 1);
+ ExceptionResult |= ((ResultOperand->Exponent + 192) << 23);
+ ExceptionResult |= (ResultOperand->Sign << 31);
+
+ } else {
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ ResultValue = 0;
+ Inexact = FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+
+ } else {
+ if (ResultOperand->Mantissa == 0) {
+ ResultOperand->Exponent = 0;
+ }
+
+ ResultValue = ResultOperand->Mantissa & ((1 << 23) - 1);
+ ResultValue |= (ResultOperand->Exponent << 23);
+ ResultValue |= (ResultOperand->Sign << 31);
+ Inexact = StickyBits ? TRUE : FALSE;
+ Overflow = FALSE;
+ Underflow = FALSE;
+ }
+ }
+
+ //
+ // Check to determine if an exception should be delivered or the result
+ // should be written to the destination register.
+ //
+
+ if (Overflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SO = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EO != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EO != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XO = 1;
+ return FALSE;
+ }
+
+ } else if (Underflow != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ ((PFSR)&TrapFrame->Fsr)->SU = 1;
+ if ((((PFSR)&TrapFrame->Fsr)->EU != 0) ||
+ (((PFSR)&TrapFrame->Fsr)->EI != 0)) {
+ if (((PFSR)&TrapFrame->Fsr)->EU != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+
+ } else {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ }
+
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ExceptionResult;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ ((PFSR)&TrapFrame->Fsr)->XU = 1;
+ return FALSE;
+ }
+
+ } else if (Inexact != FALSE) {
+ ((PFSR)&TrapFrame->Fsr)->SI = 1;
+ if (((PFSR)&TrapFrame->Fsr)->EI != 0) {
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ ((PFSR)&TrapFrame->Fsr)->XI = 1;
+ IeeeValue = (PFP_IEEE_VALUE)&ExceptionRecord->ExceptionInformation[2];
+ IeeeValue->Value.Fp32Value.W[0] = ResultValue;
+ return FALSE;
+ }
+
+ }
+
+ //
+ // Set the destination register value, update the return address,
+ // and return a value of TRUE.
+ //
+
+ KiSetRegisterValue(ContextBlock->Fd + 32,
+ ResultValue,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ TrapFrame->Fir = ContextBlock->BranchAddress;
+ return TRUE;
+}
+
+VOID
+KiUnpackDouble (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_DOUBLE_OPERAND DoubleOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to unpack a double floating value from the
+ specified source register.
+
+ N.B. The unpacked mantissa value is returned with a guard bit and a
+ round bit on the right and the hidden bit inserted if appropriate.
+ The format of the returned value is:
+
+ <63:55> - zero
+ <54> - hidden bit
+ <53:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+Arguments:
+
+ Source - Supplies the number of the register that contains the operand.
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ DoubleOperand - Supplies a pointer to a structure that is to receive the
+ operand value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Value1;
+ ULONG Value2;
+
+ //
+ // Get the source register value and unpack the sign, exponent, and
+ // mantissa value.
+ //
+
+ Value1 = KiGetRegisterValue(Source + 32,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ Value2 = KiGetRegisterValue(Source + 32 + 1,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ DoubleOperand->Sign = Value2 >> 31;
+ DoubleOperand->Exponent = (Value2 >> (52 - 32)) & 0x7ff;
+ DoubleOperand->MantissaHigh = Value2 & 0xfffff;
+ DoubleOperand->MantissaLow = Value1;
+
+ //
+ // If the exponent is the largest possible value, then the number is
+ // either a NaN or an infinity.
+ //
+
+ if (DoubleOperand->Exponent == DOUBLE_MAXIMUM_EXPONENT) {
+ if ((DoubleOperand->MantissaLow | DoubleOperand->MantissaHigh) != 0) {
+ DoubleOperand->Infinity = FALSE;
+ DoubleOperand->Nan = TRUE;
+
+ } else {
+ DoubleOperand->Infinity = TRUE;
+ DoubleOperand->Nan = FALSE;
+ }
+
+ } else {
+ DoubleOperand->Infinity = FALSE;
+ DoubleOperand->Nan = FALSE;
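+
+ //
+ // If the exponent is the minimum value and the mantissa is nonzero, then
+ // the value is denormalized. Normalize it by shifting the mantissa left
+ // until the hidden bit position (bit 20 of the high word) is set,
+ // decrementing the exponent for each shift; the initial exponent
+ // increment accounts for denormalized values sharing the smallest
+ // effective exponent without a hidden bit. Otherwise, insert the hidden
+ // bit for a normalized value.
+ //
+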
+ if (DoubleOperand->Exponent == DOUBLE_MINIMUM_EXPONENT) {
+ if ((DoubleOperand->MantissaHigh | DoubleOperand->MantissaLow) != 0) {
+ DoubleOperand->Exponent += 1;
+ while ((DoubleOperand->MantissaHigh & (1 << 20)) == 0) {
+ DoubleOperand->MantissaHigh =
+ (DoubleOperand->MantissaHigh << 1) |
+ (DoubleOperand->MantissaLow >> 31);
+ DoubleOperand->MantissaLow <<= 1;
+ DoubleOperand->Exponent -= 1;
+ }
+ }
+
+ } else {
+ DoubleOperand->MantissaHigh |= (1 << 20);
+ }
+ }
+
+ //
+ // Left shift the mantissa 2-bits to provide for a guard bit and a round
+ // bit.
+ //
+
+ DoubleOperand->MantissaHigh =
+ (DoubleOperand->MantissaHigh << 2) | (DoubleOperand->MantissaLow >> 30);
+ DoubleOperand->MantissaLow <<= 2;
+ return;
+}
+
+VOID
+KiUnpackSingle (
+ IN ULONG Source,
+ IN PFP_CONTEXT_BLOCK ContextBlock,
+ OUT PFP_SINGLE_OPERAND SingleOperand
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to unpack a single floating value from the
+ specified source register.
+
+ N.B. The unpacked mantissa value is returned with a guard bit and a
+ round bit on the right and the hidden bit inserted if appropriate.
+ The format of the returned value is:
+
+ <31:26> - zero
+ <25> - hidden bit
+ <24:2> - mantissa
+ <1> - guard bit
+ <0> - round bit
+
+Arguments:
+
+ Source - Supplies the number of the register that contains the operand.
+
+ ContextBlock - Supplies a pointer to the emulation context block.
+
+ SingleOperand - Supplies a pointer to a structure that is to receive the
+ operand value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Value;
+
+ //
+ // Get the source register value and unpack the sign, exponent, and
+ // mantissa value.
+ //
+
+ Value = KiGetRegisterValue(Source + 32,
+ ContextBlock->ExceptionFrame,
+ ContextBlock->TrapFrame);
+
+ SingleOperand->Sign = Value >> 31;
+ SingleOperand->Exponent = (Value >> 23) & 0xff;
+ SingleOperand->Mantissa = Value & 0x7fffff;
+
+ //
+ // If the exponent is the largest possible value, then the number is
+ // either a Nan or an infinity.
+ //
+
+ if (SingleOperand->Exponent == SINGLE_MAXIMUM_EXPONENT) {
+ if (SingleOperand->Mantissa != 0) {
+ SingleOperand->Infinity = FALSE;
+ SingleOperand->Nan = TRUE;
+
+ } else {
+ SingleOperand->Infinity = TRUE;
+ SingleOperand->Nan = FALSE;
+ }
+
+ } else {
+ SingleOperand->Infinity = FALSE;
+ SingleOperand->Nan = FALSE;
+ if (SingleOperand->Exponent == SINGLE_MINIMUM_EXPONENT) {
+ if (SingleOperand->Mantissa != 0) {
+ SingleOperand->Exponent += 1;
+ while ((SingleOperand->Mantissa & (1 << 23)) == 0) {
+ SingleOperand->Mantissa <<= 1;
+ SingleOperand->Exponent -= 1;
+ }
+ }
+
+ } else {
+ SingleOperand->Mantissa |= (1 << 23);
+ }
+ }
+
+ //
+ // Left shift the mantissa 2-bits to provide for a guard bit and a round
+ // bit.
+ //
+
+ SingleOperand->Mantissa <<= 2;
+ return;
+}
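+
+//
+// For illustration, unpacking the single value +1.0, whose register image
+// is 0x3f800000, proceeds as follows:
+//
+//     Sign     = 0x3f800000 >> 31          = 0
+//     Exponent = (0x3f800000 >> 23) & 0xff = 0x7f
+//     Mantissa = 0x3f800000 & 0x7fffff     = 0, then |= (1 << 23)
+//
+// The final left shift by two leaves Mantissa = 0x2000000, which places the
+// hidden bit at bit <25> with the guard and round bits clear, matching the
+// format described in the routine header.
+//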
diff --git a/private/ntos/ke/mips/flush.c b/private/ntos/ke/mips/flush.c
new file mode 100644
index 000000000..4aa394099
--- /dev/null
+++ b/private/ntos/ke/mips/flush.c
@@ -0,0 +1,820 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ flush.c
+
+Abstract:
+
+ This module implements MIPS machine dependent kernel functions to flush
+ the data and instruction caches and to flush I/O buffers.
+
+Author:
+
+ David N. Cutler (davec) 26-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+ULONG ChangeColor;
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiChangeColorPageTarget (
+ IN PULONG SignalDone,
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN PVOID PageFrame
+ );
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiSweepIcacheRangeTarget (
+ IN PULONG SignalDone,
+ IN PVOID BaseAddress,
+ IN PVOID Length,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ );
+
+VOID
+KeChangeColorPage (
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN ULONG PageFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine changes the color of a page.
+
+Arguments:
+
+    NewColor - Supplies the page aligned virtual address of the new color
+        of the page to change.
+
+ OldColor - Supplies the page aligned virtual address of the old color
+ of the page to change.
+
+ PageFrame - Supplies the page frame number of the page that is changed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ ChangeColor += 1;
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the change color
+ // parameters to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiChangeColorPageTarget,
+ (PVOID)NewColor,
+ (PVOID)OldColor,
+ (PVOID)PageFrame);
+ }
+
+#endif
+
+ //
+ // Change the color of the page on the current processor.
+ //
+
+ HalChangeColorPage(NewColor, OldColor, PageFrame);
+
+ //
+ // Wait until all target processors have finished changing the color
+ // of the page.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiChangeColorPageTarget (
+ IN PULONG SignalDone,
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN PVOID PageFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for changing the color of a page.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+    NewColor - Supplies the page aligned virtual address of the new color
+        of the page to change.
+
+ OldColor - Supplies the page aligned virtual address of the old color
+ of the page to change.
+
+ PageFrame - Supplies the page frame number of the page that is changed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+
+ //
+    // Change the color of the page on the current processor and clear
+    // the change color packet address to signal the source to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalChangeColorPage(NewColor, OldColor, (ULONG)PageFrame);
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
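+
+//
+// The source/target handshake used above is the multiprocessor pattern that
+// the remaining routines in this module follow as well; as far as can be
+// seen from this file, the sequence is roughly:
+//
+//     source processor                     each target processor
+//     -------------------------------      --------------------------------
+//     KeRaiseIrqlToSynchLevel()
+//     KiIpiSendPacket(targets, fn, ...)
+//     perform the operation locally        fn(SignalDone, ...) is invoked
+//                                          perform the operation locally
+//     KiIpiStallOnPacketTargets()          KiIpiSignalPacketDone(SignalDone)
+//     KeLowerIrql(OldIrql)
+//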
+
+VOID
+KeSweepDcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the data cache on all processors that are currently
+ running threads which are children of the current process or flushes the
+ data cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which data
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepDcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Sweep the data cache on the current processor.
+ //
+
+ HalSweepDcache();
+
+ //
+    // Wait until all target processors have finished sweeping their
+    // data caches.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the data cache on target
+ processors.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the data cache on the current processor and clear the sweep
+ // data cache packet address to signal the source to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalSweepDcache();
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the instruction cache on all processors that are
+ currently running threads which are children of the current process or
+ flushes the instruction cache on all processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+    // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepIcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Sweep the instruction cache on the current processor.
+ //
+
+ HalSweepIcache();
+ HalSweepDcache();
+
+ //
+    // Wait until all target processors have finished sweeping their
+    // instruction caches.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the instruction cache on
+ target processors.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the instruction cache on the current processor and clear
+ // the sweep instruction cache packet address to signal the source
+ // to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalSweepIcache();
+ HalSweepDcache();
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcacheRange (
+ IN BOOLEAN AllProcessors,
+ IN PVOID BaseAddress,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+    This function flushes a range of virtual addresses from the primary
+ instruction cache on all processors that are currently running threads
+ which are children of the current process or flushes the range of virtual
+ addresses from the primary instruction cache on all processors in the host
+ configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Offset;
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // If the length of the range is greater than the size of the primary
+ // instruction cache, then set the length of the flush to the size of
+    // the primary instruction cache and set the base address to zero.
+ //
+    // N.B. It is assumed that the sizes of the primary instruction and
+    //      data caches are the same.
+ //
+
+ if (Length > PCR->FirstLevelIcacheSize) {
+ BaseAddress = (PVOID)0;
+ Length = PCR->FirstLevelIcacheSize;
+ }
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors, and send the sweep range
+ // parameters to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepIcacheRangeTarget,
+ (PVOID)BaseAddress,
+ (PVOID)Length,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush the specified range of virtual addresses from the primary
+ // instruction cache.
+ //
+
+ Offset = (ULONG)BaseAddress & PCR->IcacheAlignment;
+ HalSweepIcacheRange((PVOID)((ULONG)BaseAddress & ~PCR->IcacheAlignment),
+ (Offset + Length + PCR->IcacheAlignment) & ~PCR->IcacheAlignment);
+
+ Offset = (ULONG)BaseAddress & PCR->DcacheAlignment;
+ HalSweepDcacheRange((PVOID)((ULONG)BaseAddress & ~PCR->DcacheAlignment),
+ (Offset + Length + PCR->DcacheAlignment) & ~PCR->DcacheAlignment);
+
+ //
+ // Wait until all target processors have finished sweeping the specified
+ // range of addresses from the instruction cache.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
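+
+//
+// For illustration, assume a hypothetical 32-byte instruction cache line so
+// that PCR->IcacheAlignment is 0x1f, and a call with BaseAddress = 0x1005
+// and Length = 0x10. The rounding above yields:
+//
+//     Offset = 0x1005 & 0x1f               = 0x5
+//     start  = 0x1005 & ~0x1f              = 0x1000
+//     length = (0x5 + 0x10 + 0x1f) & ~0x1f = 0x20
+//
+// so the call made is HalSweepIcacheRange((PVOID)0x1000, 0x20), which covers
+// every cache line touched by the original range.
+//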
+
+VOID
+KiSweepIcacheRangeTarget (
+ IN PULONG SignalDone,
+ IN PVOID BaseAddress,
+ IN PVOID Length,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping a range of addresses from the
+    instruction cache on target processors.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Offset;
+
+ //
+ // Sweep the specified instruction cache range on the current processor.
+ //
+
+#if !defined(NT_UP)
+
+ Offset = (ULONG)(BaseAddress) & PCR->IcacheAlignment;
+ HalSweepIcacheRange((PVOID)((ULONG)(BaseAddress) & ~PCR->IcacheAlignment),
+ (Offset + (ULONG)Length + PCR->IcacheAlignment) & ~PCR->IcacheAlignment);
+
+ Offset = (ULONG)(BaseAddress) & PCR->DcacheAlignment;
+ HalSweepDcacheRange((PVOID)((ULONG)(BaseAddress) & ~PCR->DcacheAlignment),
+ (Offset + (ULONG)Length + PCR->DcacheAlignment) & ~PCR->DcacheAlignment);
+
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeFlushIoBuffers (
+ IN PMDL Mdl,
+ IN BOOLEAN ReadOperation,
+ IN BOOLEAN DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the I/O buffer specified by the memory descriptor
+ list from the data cache on all processors.
+
+Arguments:
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= KiSynchIrql);
+
+ //
+ // If the operation is a DMA operation, then check if the flush
+ // can be avoided because the host system supports the right set
+ // of cache coherency attributes. Otherwise, the flush can also
+    // be avoided if the operation is programmed I/O and not a page
+ // read.
+ //
+
+ if (DmaOperation != FALSE) {
+ if (ReadOperation != FALSE) {
+ if ((KiDmaIoCoherency & DMA_READ_ICACHE_INVALIDATE) != 0) {
+
+ ASSERT((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0);
+
+ return;
+
+ } else if (((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) &&
+ ((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0)) {
+ return;
+ }
+
+ } else if ((KiDmaIoCoherency & DMA_WRITE_DCACHE_SNOOP) != 0) {
+ return;
+ }
+
+ } else if ((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) {
+ return;
+ }
+
+ //
+ // Either the operation is a DMA operation and the right coherency
+    // attributes are not supported by the host system, or the operation
+ // is programmed I/O and a page read.
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors, and send the flush I/O
+ // parameters to the target processors, if any, for execution.
+ //
+
+#if !defined(NT_UP)
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushIoBuffersTarget,
+ (PVOID)Mdl,
+ (PVOID)((ULONG)ReadOperation),
+ (PVOID)((ULONG)DmaOperation));
+ }
+
+#endif
+
+ //
+ // Flush I/O buffer on current processor.
+ //
+
+ HalFlushIoBuffers(Mdl, ReadOperation, DmaOperation);
+
+ //
+ // Wait until all target processors have finished flushing the
+ // specified I/O buffer.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+ return;
+}
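+
+//
+// Summarizing the checks above, the flush is skipped entirely when the
+// transfer is:
+//
+//     - a DMA read and DMA_READ_ICACHE_INVALIDATE is supported, or
+//     - a DMA read that is not a page read and DMA_READ_DCACHE_INVALIDATE
+//       is supported, or
+//     - a DMA write and DMA_WRITE_DCACHE_SNOOP is supported, or
+//     - programmed I/O that is not a page read.
+//
+// In every other case the buffer is flushed on the current processor and,
+// on multiprocessor systems, on all other active processors as well.
+//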
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing an I/O buffer on target
+ processors.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the specified I/O buffer on the current processor.
+ //
+
+#if !defined(NT_UP)
+
+ HalFlushIoBuffers((PMDL)Mdl,
+ (BOOLEAN)((ULONG)ReadOperation),
+ (BOOLEAN)((ULONG)DmaOperation));
+
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
diff --git a/private/ntos/ke/mips/genmips.c b/private/ntos/ke/mips/genmips.c
new file mode 100644
index 000000000..81003ea32
--- /dev/null
+++ b/private/ntos/ke/mips/genmips.c
@@ -0,0 +1,1015 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ genmips.c
+
+Abstract:
+
+ This module implements a program which generates MIPS machine dependent
+ structure offset definitions for kernel structures that are accessed in
+ assembly code.
+
+Author:
+
+ David N. Cutler (davec) 27-Mar-1990
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#define HEADER_FILE
+#include "excpt.h"
+#include "ntdef.h"
+#include "ntkeapi.h"
+#include "ntmips.h"
+#include "ntimage.h"
+#include "ntseapi.h"
+#include "ntobapi.h"
+#include "ntlpcapi.h"
+#include "ntioapi.h"
+#include "ntmmapi.h"
+#include "ntldr.h"
+#include "ntpsapi.h"
+#include "ntexapi.h"
+#include "ntnls.h"
+#include "nturtl.h"
+#include "ntcsrmsg.h"
+#include "ntcsrsrv.h"
+#include "ntxcapi.h"
+#include "arc.h"
+#include "ntstatus.h"
+#include "kxmips.h"
+#include "stdarg.h"
+#include "setjmp.h"
+
+//
+// Define architecture specific generation macros.
+//
+
+#define genAlt(Name, Type, Member) \
+ dumpf("#define " #Name " 0x%lx\n", OFFSET(Type, Member))
+
+#define genCom(Comment) \
+ dumpf("\n"); \
+ dumpf("//\n"); \
+ dumpf("// " Comment "\n"); \
+ dumpf("//\n"); \
+ dumpf("\n")
+
+#define genDef(Prefix, Type, Member) \
+ dumpf("#define " #Prefix #Member " 0x%lx\n", OFFSET(Type, Member))
+
+#define genVal(Name, Value) \
+ dumpf("#define " #Name " 0x%lx\n", Value)
+
+#define genSpc() dumpf("\n");
+
+//
+// Define member offset computation macro.
+//
+
+#define OFFSET(type, field) ((LONG)(&((type *)0)->field))
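+
+//
+// For illustration, genDef(Pb, KPRCB, CurrentThread) expands to
+//
+//     dumpf("#define PbCurrentThread 0x%lx\n", OFFSET(KPRCB, CurrentThread))
+//
+// which emits a line such as "#define PbCurrentThread 0x4" (the value shown
+// here is hypothetical; the real value is whatever OFFSET computes for the
+// current KPRCB layout).  Similarly, genVal(PAGE_SIZE, PAGE_SIZE) emits
+// "#define PAGE_SIZE 0x1000", assuming the usual 0x1000-byte page.
+//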
+
+FILE *KsMips;
+FILE *HalMips;
+
+//
+// EnableInc(a) - Enables output to the specified include file
+//
+
+#define EnableInc(a) OutputEnabled |= a;
+
+//
+// DisableInc(a) - Disables output to the specified include file
+//
+
+#define DisableInc(a) OutputEnabled &= ~a;
+
+ULONG OutputEnabled;
+
+#define KSMIPS 0x01
+#define HALMIPS 0x02
+
+#define KERNEL KSMIPS
+#define HAL HALMIPS
+
+VOID dumpf (const char *format, ...);
+
+
+//
+// This routine returns the bit number, counting right to left, of the
+// lowest set bit in a field.
+//
+
+LONG
+t (
+ IN ULONG z
+ )
+
+{
+ LONG i;
+
+ for (i = 0; i < 32; i += 1) {
+ if ((z >> i) & 1) {
+ break;
+ }
+ }
+ return i;
+}
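+
+//
+// For illustration, t(0x40) returns 6 and t(1) returns 0; if no bit of the
+// argument is set, the loop runs to completion and 32 is returned.
+//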
+
+//
+// This program generates the MIPS machine dependent assembler offset
+// definitions.
+//
+
+VOID
+main (argc, argv)
+ int argc;
+ char *argv[];
+{
+
+ char *outName;
+ LONG EventOffset;
+
+ //
+ // Create file for output.
+ //
+
+    outName = argc >= 2 ? argv[1] : "\\nt\\public\\sdk\\inc\\ksmips.h";
+ KsMips = fopen( outName, "w" );
+ if (KsMips == NULL) {
+ fprintf( stderr, "GENMIPS: Cannot open %s for writing.\n", outName);
+ perror("GENMIPS");
+ exit(1);
+ }
+
+ fprintf(stderr, "GENMIPS: Writing %s header file.\n", outName);
+ outName = argc >= 3 ? argv[2] : "\\nt\\private\\ntos\\inc\\halmips.h";
+ HalMips = fopen( outName, "w" );
+ if (HalMips == NULL) {
+ fprintf( stderr, "GENMIPS: Cannot open %s for writing.\n", outName);
+ perror("GENMIPS");
+ exit(1);
+ }
+
+ fprintf(stderr, "GENMIPS: Writing %s header file.\n", outName);
+
+ //
+ // Include statement for MIPS architecture static definitions.
+ //
+
+ EnableInc (KSMIPS | HALMIPS);
+ dumpf("#include \"kxmips.h\"\n");
+ DisableInc (HALMIPS);
+
+ //
+ // Include architecture independent definitions.
+ //
+
+#include "..\genxx.inc"
+
+ //
+ // Generate architecture dependent definitions.
+ //
+ // Processor block structure definitions.
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Processor Block Structure Offset Definitions");
+
+ genVal(PRCB_MINOR_VERSION, PRCB_MINOR_VERSION);
+ genVal(PRCB_MAJOR_VERSION, PRCB_MAJOR_VERSION);
+
+ genSpc();
+
+ genDef(Pb, KPRCB, MinorVersion);
+ genDef(Pb, KPRCB, MajorVersion);
+ genDef(Pb, KPRCB, CurrentThread);
+ genDef(Pb, KPRCB, NextThread);
+ genDef(Pb, KPRCB, IdleThread);
+ genDef(Pb, KPRCB, Number);
+ genDef(Pb, KPRCB, SetMember);
+ genDef(Pb, KPRCB, RestartBlock);
+ genDef(Pb, KPRCB, SystemReserved);
+ genDef(Pb, KPRCB, HalReserved);
+
+ DisableInc(HALMIPS);
+
+ genDef(Pb, KPRCB, DpcTime);
+ genDef(Pb, KPRCB, InterruptTime);
+ genDef(Pb, KPRCB, KernelTime);
+ genDef(Pb, KPRCB, UserTime);
+ genDef(Pb, KPRCB, AdjustDpcThreshold);
+ genDef(Pb, KPRCB, InterruptCount);
+ genDef(Pb, KPRCB, ApcBypassCount);
+ genDef(Pb, KPRCB, DpcBypassCount);
+ genDef(Pb, KPRCB, IpiFrozen);
+ genDef(Pb, KPRCB, ProcessorState);
+ genAlt(PbAlignmentFixupCount, KPRCB, KeAlignmentFixupCount);
+ genAlt(PbContextSwitches, KPRCB, KeContextSwitches);
+ genAlt(PbDcacheFlushCount, KPRCB, KeDcacheFlushCount);
+ genAlt(PbExceptionDispatchCount, KPRCB, KeExceptionDispatchCount);
+ genAlt(PbFirstLevelTbFills, KPRCB, KeFirstLevelTbFills);
+ genAlt(PbFloatingEmulationCount, KPRCB, KeFloatingEmulationCount);
+ genAlt(PbIcacheFlushCount, KPRCB, KeIcacheFlushCount);
+ genAlt(PbSecondLevelTbFills, KPRCB, KeSecondLevelTbFills);
+ genAlt(PbSystemCalls, KPRCB, KeSystemCalls);
+ genDef(Pb, KPRCB, CurrentPacket);
+ genDef(Pb, KPRCB, TargetSet);
+ genDef(Pb, KPRCB, WorkerRoutine);
+ genDef(Pb, KPRCB, RequestSummary);
+ genDef(Pb, KPRCB, SignalDone);
+ genDef(Pb, KPRCB, DpcInterruptRequested);
+ genDef(Pb, KPRCB, MaximumDpcQueueDepth);
+ genDef(Pb, KPRCB, MinimumDpcRate);
+ genDef(Pb, KPRCB, IpiCounts);
+ genDef(Pb, KPRCB, StartCount);
+ genDef(Pb, KPRCB, DpcLock);
+ genDef(Pb, KPRCB, DpcListHead);
+ genDef(Pb, KPRCB, DpcQueueDepth);
+ genDef(Pb, KPRCB, DpcCount);
+ genDef(Pb, KPRCB, DpcLastCount);
+ genDef(Pb, KPRCB, DpcRequestRate);
+ genDef(Pb, KPRCB, DpcRoutineActive);
+ genVal(ProcessorBlockLength, ((sizeof(KPRCB) + 15) & ~15));
+
+ //
+ // Processor control register structure definitions.
+ //
+
+#if defined(_MIPS_)
+
+ EnableInc(HALMIPS);
+
+ genCom("Processor Control Registers Structure Offset Definitions");
+
+ genVal(PCR_MINOR_VERSION, PCR_MINOR_VERSION);
+ genVal(PCR_MAJOR_VERSION, PCR_MAJOR_VERSION);
+
+ genSpc();
+
+ genDef(Pc, KPCR, MinorVersion);
+ genDef(Pc, KPCR, MajorVersion);
+ genDef(Pc, KPCR, InterruptRoutine);
+ genDef(Pc, KPCR, XcodeDispatch);
+ genDef(Pc, KPCR, FirstLevelDcacheSize);
+ genDef(Pc, KPCR, FirstLevelDcacheFillSize);
+ genDef(Pc, KPCR, FirstLevelIcacheSize);
+ genDef(Pc, KPCR, FirstLevelIcacheFillSize);
+ genDef(Pc, KPCR, SecondLevelDcacheSize);
+ genDef(Pc, KPCR, SecondLevelDcacheFillSize);
+ genDef(Pc, KPCR, SecondLevelIcacheSize);
+ genDef(Pc, KPCR, SecondLevelIcacheFillSize);
+ genDef(Pc, KPCR, Prcb);
+ genDef(Pc, KPCR, Teb);
+ genDef(Pc, KPCR, TlsArray);
+ genDef(Pc, KPCR, DcacheFillSize);
+ genDef(Pc, KPCR, IcacheAlignment);
+ genDef(Pc, KPCR, IcacheFillSize);
+ genDef(Pc, KPCR, ProcessorId);
+ genDef(Pc, KPCR, ProfileInterval);
+ genDef(Pc, KPCR, ProfileCount);
+ genDef(Pc, KPCR, StallExecutionCount);
+ genDef(Pc, KPCR, StallScaleFactor);
+ genDef(Pc, KPCR, Number);
+ genDef(Pc, KPCR, DataBusError);
+ genDef(Pc, KPCR, InstructionBusError);
+ genDef(Pc, KPCR, CachePolicy);
+ genDef(Pc, KPCR, IrqlMask);
+ genDef(Pc, KPCR, IrqlTable);
+ genDef(Pc, KPCR, CurrentIrql);
+ genDef(Pc, KPCR, SetMember);
+ genDef(Pc, KPCR, CurrentThread);
+ genDef(Pc, KPCR, AlignedCachePolicy);
+ genDef(Pc, KPCR, NotMember);
+ genDef(Pc, KPCR, SystemReserved);
+ genDef(Pc, KPCR, DcacheAlignment);
+ genDef(Pc, KPCR, HalReserved);
+
+ DisableInc(HALMIPS);
+
+ genDef(Pc, KPCR, FirstLevelActive);
+ genDef(Pc, KPCR, DpcRoutineActive);
+ genDef(Pc, KPCR, CurrentPid);
+ genDef(Pc, KPCR, OnInterruptStack);
+ genDef(Pc, KPCR, SavedInitialStack);
+ genDef(Pc, KPCR, SavedStackLimit);
+ genDef(Pc, KPCR, SystemServiceDispatchStart);
+ genDef(Pc, KPCR, SystemServiceDispatchEnd);
+ genDef(Pc, KPCR, InterruptStack);
+ genDef(Pc, KPCR, PanicStack);
+ genDef(Pc, KPCR, BadVaddr);
+ genDef(Pc, KPCR, InitialStack);
+ genDef(Pc, KPCR, StackLimit);
+ genDef(Pc, KPCR, SavedEpc);
+ genDef(Pc, KPCR, SavedT7);
+ genDef(Pc, KPCR, SavedT8);
+ genDef(Pc, KPCR, SavedT9);
+ genDef(Pc, KPCR, SystemGp);
+ genDef(Pc, KPCR, QuantumEnd);
+ genVal(ProcessorControlRegisterLength, ((sizeof(KPCR) + 15) & ~15));
+
+ genSpc();
+
+ genDef(Pc2, KUSER_SHARED_DATA, TickCountLow);
+ genDef(Pc2, KUSER_SHARED_DATA, TickCountMultiplier);
+ genDef(Pc2, KUSER_SHARED_DATA, InterruptTime);
+ genDef(Pc2, KUSER_SHARED_DATA, SystemTime);
+
+#endif
+
+ //
+ // TB entry structure offset definitions.
+ //
+
+#if defined(_MIPS_)
+
+ genCom("TB Entry Structure Offset Definitions");
+
+ genDef(Tb, TB_ENTRY, Entrylo0);
+ genDef(Tb, TB_ENTRY, Entrylo1);
+ genDef(Tb, TB_ENTRY, Entryhi);
+ genDef(Tb, TB_ENTRY, Pagemask);
+
+#endif
+
+    //
+ // Interprocessor command definitions.
+ //
+
+ genCom("Immediate Interprocessor Command Definitions");
+
+ genVal(IPI_APC, IPI_APC);
+ genVal(IPI_DPC, IPI_DPC);
+ genVal(IPI_FREEZE, IPI_FREEZE);
+ genVal(IPI_PACKET_READY, IPI_PACKET_READY);
+
+ //
+ // Interprocessor interrupt count structure offset definitions.
+ //
+
+ genCom("Interprocessor Interrupt Count Structure Offset Definitions");
+
+ genDef(Ic, KIPI_COUNTS, Freeze);
+ genDef(Ic, KIPI_COUNTS, Packet);
+ genDef(Ic, KIPI_COUNTS, DPC);
+ genDef(Ic, KIPI_COUNTS, APC);
+ genDef(Ic, KIPI_COUNTS, FlushSingleTb);
+ genDef(Ic, KIPI_COUNTS, FlushMultipleTb);
+ genDef(Ic, KIPI_COUNTS, FlushEntireTb);
+ genDef(Ic, KIPI_COUNTS, GenericCall);
+ genDef(Ic, KIPI_COUNTS, ChangeColor);
+ genDef(Ic, KIPI_COUNTS, SweepDcache);
+ genDef(Ic, KIPI_COUNTS, SweepIcache);
+ genDef(Ic, KIPI_COUNTS, SweepIcacheRange);
+ genDef(Ic, KIPI_COUNTS, FlushIoBuffers);
+ genDef(Ic, KIPI_COUNTS, GratuitousDPC);
+
+ //
+ // Context frame offset definitions and flag definitions.
+ //
+
+ EnableInc (HALMIPS);
+
+ genCom("Context Frame Offset and Flag Definitions");
+
+ genVal(CONTEXT_FULL, CONTEXT_FULL);
+ genVal(CONTEXT_CONTROL, CONTEXT_CONTROL);
+ genVal(CONTEXT_FLOATING_POINT, CONTEXT_FLOATING_POINT);
+ genVal(CONTEXT_INTEGER, CONTEXT_INTEGER);
+ genVal(CONTEXT_EXTENDED_FLOAT, CONTEXT_EXTENDED_FLOAT);
+ genVal(CONTEXT_EXTENDED_INTEGER, CONTEXT_EXTENDED_INTEGER);
+
+ genCom("32-bit Context Frame Offset Definitions");
+
+ genDef(Cx, CONTEXT, FltF0);
+ genDef(Cx, CONTEXT, FltF1);
+ genDef(Cx, CONTEXT, FltF2);
+ genDef(Cx, CONTEXT, FltF3);
+ genDef(Cx, CONTEXT, FltF4);
+ genDef(Cx, CONTEXT, FltF5);
+ genDef(Cx, CONTEXT, FltF6);
+ genDef(Cx, CONTEXT, FltF7);
+ genDef(Cx, CONTEXT, FltF8);
+ genDef(Cx, CONTEXT, FltF9);
+ genDef(Cx, CONTEXT, FltF10);
+ genDef(Cx, CONTEXT, FltF11);
+ genDef(Cx, CONTEXT, FltF12);
+ genDef(Cx, CONTEXT, FltF13);
+ genDef(Cx, CONTEXT, FltF14);
+ genDef(Cx, CONTEXT, FltF15);
+ genDef(Cx, CONTEXT, FltF16);
+ genDef(Cx, CONTEXT, FltF17);
+ genDef(Cx, CONTEXT, FltF18);
+ genDef(Cx, CONTEXT, FltF19);
+ genDef(Cx, CONTEXT, FltF20);
+ genDef(Cx, CONTEXT, FltF21);
+ genDef(Cx, CONTEXT, FltF22);
+ genDef(Cx, CONTEXT, FltF23);
+ genDef(Cx, CONTEXT, FltF24);
+ genDef(Cx, CONTEXT, FltF25);
+ genDef(Cx, CONTEXT, FltF26);
+ genDef(Cx, CONTEXT, FltF27);
+ genDef(Cx, CONTEXT, FltF28);
+ genDef(Cx, CONTEXT, FltF29);
+ genDef(Cx, CONTEXT, FltF30);
+ genDef(Cx, CONTEXT, FltF31);
+ genDef(Cx, CONTEXT, IntZero);
+ genDef(Cx, CONTEXT, IntAt);
+ genDef(Cx, CONTEXT, IntV0);
+ genDef(Cx, CONTEXT, IntV1);
+ genDef(Cx, CONTEXT, IntA0);
+ genDef(Cx, CONTEXT, IntA1);
+ genDef(Cx, CONTEXT, IntA2);
+ genDef(Cx, CONTEXT, IntA3);
+ genDef(Cx, CONTEXT, IntT0);
+ genDef(Cx, CONTEXT, IntT1);
+ genDef(Cx, CONTEXT, IntT2);
+ genDef(Cx, CONTEXT, IntT3);
+ genDef(Cx, CONTEXT, IntT4);
+ genDef(Cx, CONTEXT, IntT5);
+ genDef(Cx, CONTEXT, IntT6);
+ genDef(Cx, CONTEXT, IntT7);
+ genDef(Cx, CONTEXT, IntS0);
+ genDef(Cx, CONTEXT, IntS1);
+ genDef(Cx, CONTEXT, IntS2);
+ genDef(Cx, CONTEXT, IntS3);
+ genDef(Cx, CONTEXT, IntS4);
+ genDef(Cx, CONTEXT, IntS5);
+ genDef(Cx, CONTEXT, IntS6);
+ genDef(Cx, CONTEXT, IntS7);
+ genDef(Cx, CONTEXT, IntT8);
+ genDef(Cx, CONTEXT, IntT9);
+ genDef(Cx, CONTEXT, IntK0);
+ genDef(Cx, CONTEXT, IntK1);
+ genDef(Cx, CONTEXT, IntGp);
+ genDef(Cx, CONTEXT, IntSp);
+ genDef(Cx, CONTEXT, IntS8);
+ genDef(Cx, CONTEXT, IntRa);
+ genDef(Cx, CONTEXT, IntLo);
+ genDef(Cx, CONTEXT, IntHi);
+ genDef(Cx, CONTEXT, Fsr);
+ genDef(Cx, CONTEXT, Fir);
+ genDef(Cx, CONTEXT, Psr);
+ genDef(Cx, CONTEXT, ContextFlags);
+
+ genCom("64-bit Context Frame Offset Definitions");
+
+ genDef(Cx, CONTEXT, XFltF0);
+ genDef(Cx, CONTEXT, XFltF1);
+ genDef(Cx, CONTEXT, XFltF2);
+ genDef(Cx, CONTEXT, XFltF3);
+ genDef(Cx, CONTEXT, XFltF4);
+ genDef(Cx, CONTEXT, XFltF5);
+ genDef(Cx, CONTEXT, XFltF6);
+ genDef(Cx, CONTEXT, XFltF7);
+ genDef(Cx, CONTEXT, XFltF8);
+ genDef(Cx, CONTEXT, XFltF9);
+ genDef(Cx, CONTEXT, XFltF10);
+ genDef(Cx, CONTEXT, XFltF11);
+ genDef(Cx, CONTEXT, XFltF12);
+ genDef(Cx, CONTEXT, XFltF13);
+ genDef(Cx, CONTEXT, XFltF14);
+ genDef(Cx, CONTEXT, XFltF15);
+ genDef(Cx, CONTEXT, XFltF16);
+ genDef(Cx, CONTEXT, XFltF17);
+ genDef(Cx, CONTEXT, XFltF18);
+ genDef(Cx, CONTEXT, XFltF19);
+ genDef(Cx, CONTEXT, XFltF20);
+ genDef(Cx, CONTEXT, XFltF21);
+ genDef(Cx, CONTEXT, XFltF22);
+ genDef(Cx, CONTEXT, XFltF23);
+ genDef(Cx, CONTEXT, XFltF24);
+ genDef(Cx, CONTEXT, XFltF25);
+ genDef(Cx, CONTEXT, XFltF26);
+ genDef(Cx, CONTEXT, XFltF27);
+ genDef(Cx, CONTEXT, XFltF28);
+ genDef(Cx, CONTEXT, XFltF29);
+ genDef(Cx, CONTEXT, XFltF30);
+ genDef(Cx, CONTEXT, XFltF31);
+ genDef(Cx, CONTEXT, XFsr);
+ genDef(Cx, CONTEXT, XFir);
+ genDef(Cx, CONTEXT, XPsr);
+ genDef(Cx, CONTEXT, XContextFlags);
+ genDef(Cx, CONTEXT, XIntZero);
+ genDef(Cx, CONTEXT, XIntAt);
+ genDef(Cx, CONTEXT, XIntV0);
+ genDef(Cx, CONTEXT, XIntV1);
+ genDef(Cx, CONTEXT, XIntA0);
+ genDef(Cx, CONTEXT, XIntA1);
+ genDef(Cx, CONTEXT, XIntA2);
+ genDef(Cx, CONTEXT, XIntA3);
+ genDef(Cx, CONTEXT, XIntT0);
+ genDef(Cx, CONTEXT, XIntT1);
+ genDef(Cx, CONTEXT, XIntT2);
+ genDef(Cx, CONTEXT, XIntT3);
+ genDef(Cx, CONTEXT, XIntT4);
+ genDef(Cx, CONTEXT, XIntT5);
+ genDef(Cx, CONTEXT, XIntT6);
+ genDef(Cx, CONTEXT, XIntT7);
+ genDef(Cx, CONTEXT, XIntS0);
+ genDef(Cx, CONTEXT, XIntS1);
+ genDef(Cx, CONTEXT, XIntS2);
+ genDef(Cx, CONTEXT, XIntS3);
+ genDef(Cx, CONTEXT, XIntS4);
+ genDef(Cx, CONTEXT, XIntS5);
+ genDef(Cx, CONTEXT, XIntS6);
+ genDef(Cx, CONTEXT, XIntS7);
+ genDef(Cx, CONTEXT, XIntT8);
+ genDef(Cx, CONTEXT, XIntT9);
+ genDef(Cx, CONTEXT, XIntK0);
+ genDef(Cx, CONTEXT, XIntK1);
+ genDef(Cx, CONTEXT, XIntGp);
+ genDef(Cx, CONTEXT, XIntSp);
+ genDef(Cx, CONTEXT, XIntS8);
+ genDef(Cx, CONTEXT, XIntRa);
+ genDef(Cx, CONTEXT, XIntLo);
+ genDef(Cx, CONTEXT, XIntHi);
+ genVal(ContextFrameLength, sizeof(CONTEXT));
+
+ //
+ // Exception frame offset definitions.
+ //
+
+ genCom("Exception Frame Offset Definitions and Length");
+
+ genAlt(ExArgs, KEXCEPTION_FRAME, Argument);
+
+ genCom("32-bit Nonvolatile Floating State");
+
+ genDef(Ex, KEXCEPTION_FRAME, FltF20);
+ genDef(Ex, KEXCEPTION_FRAME, FltF21);
+ genDef(Ex, KEXCEPTION_FRAME, FltF22);
+ genDef(Ex, KEXCEPTION_FRAME, FltF23);
+ genDef(Ex, KEXCEPTION_FRAME, FltF24);
+ genDef(Ex, KEXCEPTION_FRAME, FltF25);
+ genDef(Ex, KEXCEPTION_FRAME, FltF26);
+ genDef(Ex, KEXCEPTION_FRAME, FltF27);
+ genDef(Ex, KEXCEPTION_FRAME, FltF28);
+ genDef(Ex, KEXCEPTION_FRAME, FltF29);
+ genDef(Ex, KEXCEPTION_FRAME, FltF30);
+ genDef(Ex, KEXCEPTION_FRAME, FltF31);
+
+ genCom("64-bit Nonvolatile Floating State");
+
+ genDef(Ex, KEXCEPTION_FRAME, XFltF20);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF22);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF24);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF26);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF28);
+ genDef(Ex, KEXCEPTION_FRAME, XFltF30);
+
+ genCom("32-bit Nonvolatile Integer State");
+
+ genDef(Ex, KEXCEPTION_FRAME, IntS0);
+ genDef(Ex, KEXCEPTION_FRAME, IntS1);
+ genDef(Ex, KEXCEPTION_FRAME, IntS2);
+ genDef(Ex, KEXCEPTION_FRAME, IntS3);
+ genDef(Ex, KEXCEPTION_FRAME, IntS4);
+ genDef(Ex, KEXCEPTION_FRAME, IntS5);
+ genDef(Ex, KEXCEPTION_FRAME, IntS6);
+ genDef(Ex, KEXCEPTION_FRAME, IntS7);
+ genDef(Ex, KEXCEPTION_FRAME, IntS8);
+ genDef(Ex, KEXCEPTION_FRAME, SwapReturn);
+ genDef(Ex, KEXCEPTION_FRAME, IntRa);
+ genVal(ExceptionFrameLength, sizeof(KEXCEPTION_FRAME));
+
+ //
+ // Jump buffer offset definitions.
+ //
+
+ DisableInc (HALMIPS);
+
+ genCom("Jump Offset Definitions and Length");
+
+ genDef(Jb, _JUMP_BUFFER, FltF20);
+ genDef(Jb, _JUMP_BUFFER, FltF21);
+ genDef(Jb, _JUMP_BUFFER, FltF22);
+ genDef(Jb, _JUMP_BUFFER, FltF23);
+ genDef(Jb, _JUMP_BUFFER, FltF24);
+ genDef(Jb, _JUMP_BUFFER, FltF25);
+ genDef(Jb, _JUMP_BUFFER, FltF26);
+ genDef(Jb, _JUMP_BUFFER, FltF27);
+ genDef(Jb, _JUMP_BUFFER, FltF28);
+ genDef(Jb, _JUMP_BUFFER, FltF29);
+ genDef(Jb, _JUMP_BUFFER, FltF30);
+ genDef(Jb, _JUMP_BUFFER, FltF31);
+ genDef(Jb, _JUMP_BUFFER, IntS0);
+ genDef(Jb, _JUMP_BUFFER, IntS1);
+ genDef(Jb, _JUMP_BUFFER, IntS2);
+ genDef(Jb, _JUMP_BUFFER, IntS3);
+ genDef(Jb, _JUMP_BUFFER, IntS4);
+ genDef(Jb, _JUMP_BUFFER, IntS5);
+ genDef(Jb, _JUMP_BUFFER, IntS6);
+ genDef(Jb, _JUMP_BUFFER, IntS7);
+ genDef(Jb, _JUMP_BUFFER, IntS8);
+ genDef(Jb, _JUMP_BUFFER, IntSp);
+ genDef(Jb, _JUMP_BUFFER, Type);
+ genDef(Jb, _JUMP_BUFFER, Fir);
+
+ //
+ // Trap frame offset definitions.
+ //
+
+ EnableInc (HALMIPS);
+
+ genCom("Trap Frame Offset Definitions and Length");
+
+ genAlt(TrArgs, KTRAP_FRAME, Argument);
+
+ genCom("32-bit Volatile Floating State");
+
+ genDef(Tr, KTRAP_FRAME, FltF0);
+ genDef(Tr, KTRAP_FRAME, FltF1);
+ genDef(Tr, KTRAP_FRAME, FltF2);
+ genDef(Tr, KTRAP_FRAME, FltF3);
+ genDef(Tr, KTRAP_FRAME, FltF4);
+ genDef(Tr, KTRAP_FRAME, FltF5);
+ genDef(Tr, KTRAP_FRAME, FltF6);
+ genDef(Tr, KTRAP_FRAME, FltF7);
+ genDef(Tr, KTRAP_FRAME, FltF8);
+ genDef(Tr, KTRAP_FRAME, FltF9);
+ genDef(Tr, KTRAP_FRAME, FltF10);
+ genDef(Tr, KTRAP_FRAME, FltF11);
+ genDef(Tr, KTRAP_FRAME, FltF12);
+ genDef(Tr, KTRAP_FRAME, FltF13);
+ genDef(Tr, KTRAP_FRAME, FltF14);
+ genDef(Tr, KTRAP_FRAME, FltF15);
+ genDef(Tr, KTRAP_FRAME, FltF16);
+ genDef(Tr, KTRAP_FRAME, FltF17);
+ genDef(Tr, KTRAP_FRAME, FltF18);
+ genDef(Tr, KTRAP_FRAME, FltF19);
+
+ genCom("64-bit Volatile Floating State");
+
+ genDef(Tr, KTRAP_FRAME, XFltF0);
+ genDef(Tr, KTRAP_FRAME, XFltF1);
+ genDef(Tr, KTRAP_FRAME, XFltF2);
+ genDef(Tr, KTRAP_FRAME, XFltF3);
+ genDef(Tr, KTRAP_FRAME, XFltF4);
+ genDef(Tr, KTRAP_FRAME, XFltF5);
+ genDef(Tr, KTRAP_FRAME, XFltF6);
+ genDef(Tr, KTRAP_FRAME, XFltF7);
+ genDef(Tr, KTRAP_FRAME, XFltF8);
+ genDef(Tr, KTRAP_FRAME, XFltF9);
+ genDef(Tr, KTRAP_FRAME, XFltF10);
+ genDef(Tr, KTRAP_FRAME, XFltF11);
+ genDef(Tr, KTRAP_FRAME, XFltF12);
+ genDef(Tr, KTRAP_FRAME, XFltF13);
+ genDef(Tr, KTRAP_FRAME, XFltF14);
+ genDef(Tr, KTRAP_FRAME, XFltF15);
+ genDef(Tr, KTRAP_FRAME, XFltF16);
+ genDef(Tr, KTRAP_FRAME, XFltF17);
+ genDef(Tr, KTRAP_FRAME, XFltF18);
+ genDef(Tr, KTRAP_FRAME, XFltF19);
+ genDef(Tr, KTRAP_FRAME, XFltF21);
+ genDef(Tr, KTRAP_FRAME, XFltF23);
+ genDef(Tr, KTRAP_FRAME, XFltF25);
+ genDef(Tr, KTRAP_FRAME, XFltF27);
+ genDef(Tr, KTRAP_FRAME, XFltF29);
+ genDef(Tr, KTRAP_FRAME, XFltF31);
+
+ genCom("64-bit Volatile Integer State");
+
+ genDef(Tr, KTRAP_FRAME, XIntZero);
+ genDef(Tr, KTRAP_FRAME, XIntAt);
+ genDef(Tr, KTRAP_FRAME, XIntV0);
+ genDef(Tr, KTRAP_FRAME, XIntV1);
+ genDef(Tr, KTRAP_FRAME, XIntA0);
+ genDef(Tr, KTRAP_FRAME, XIntA1);
+ genDef(Tr, KTRAP_FRAME, XIntA2);
+ genDef(Tr, KTRAP_FRAME, XIntA3);
+ genDef(Tr, KTRAP_FRAME, XIntT0);
+ genDef(Tr, KTRAP_FRAME, XIntT1);
+ genDef(Tr, KTRAP_FRAME, XIntT2);
+ genDef(Tr, KTRAP_FRAME, XIntT3);
+ genDef(Tr, KTRAP_FRAME, XIntT4);
+ genDef(Tr, KTRAP_FRAME, XIntT5);
+ genDef(Tr, KTRAP_FRAME, XIntT6);
+ genDef(Tr, KTRAP_FRAME, XIntT7);
+ genDef(Tr, KTRAP_FRAME, XIntS0);
+ genDef(Tr, KTRAP_FRAME, XIntS1);
+ genDef(Tr, KTRAP_FRAME, XIntS2);
+ genDef(Tr, KTRAP_FRAME, XIntS3);
+ genDef(Tr, KTRAP_FRAME, XIntS4);
+ genDef(Tr, KTRAP_FRAME, XIntS5);
+ genDef(Tr, KTRAP_FRAME, XIntS6);
+ genDef(Tr, KTRAP_FRAME, XIntS7);
+ genDef(Tr, KTRAP_FRAME, XIntT8);
+ genDef(Tr, KTRAP_FRAME, XIntT9);
+ genDef(Tr, KTRAP_FRAME, XIntGp);
+ genDef(Tr, KTRAP_FRAME, XIntSp);
+ genDef(Tr, KTRAP_FRAME, XIntS8);
+ genDef(Tr, KTRAP_FRAME, XIntRa);
+ genDef(Tr, KTRAP_FRAME, XIntLo);
+ genDef(Tr, KTRAP_FRAME, XIntHi);
+
+ genSpc();
+
+ genDef(Tr, KTRAP_FRAME, Fir);
+ genDef(Tr, KTRAP_FRAME, Fsr);
+ genDef(Tr, KTRAP_FRAME, Psr);
+ genDef(Tr, KTRAP_FRAME, ExceptionRecord);
+ genDef(Tr, KTRAP_FRAME, OldIrql);
+ genDef(Tr, KTRAP_FRAME, PreviousMode);
+ genDef(Tr, KTRAP_FRAME, SavedFlag);
+ genAlt(TrOnInterruptStack, KTRAP_FRAME, u.OnInterruptStack);
+ genAlt(TrTrapFrame, KTRAP_FRAME, u.TrapFrame);
+
+ genVal(TrapFrameLength, sizeof(KTRAP_FRAME));
+ genVal(TrapFrameArguments, KTRAP_FRAME_ARGUMENTS);
+
+ //
+ // Usermode callout kernel frame definitions
+ //
+
+ DisableInc(HALMIPS);
+
+ genCom("Usermode callout kernel frame definitions");
+
+ genDef(Cu, KCALLOUT_FRAME, F20);
+ genDef(Cu, KCALLOUT_FRAME, F21);
+ genDef(Cu, KCALLOUT_FRAME, F22);
+ genDef(Cu, KCALLOUT_FRAME, F23);
+ genDef(Cu, KCALLOUT_FRAME, F24);
+ genDef(Cu, KCALLOUT_FRAME, F25);
+ genDef(Cu, KCALLOUT_FRAME, F26);
+ genDef(Cu, KCALLOUT_FRAME, F27);
+ genDef(Cu, KCALLOUT_FRAME, F28);
+ genDef(Cu, KCALLOUT_FRAME, F29);
+ genDef(Cu, KCALLOUT_FRAME, F30);
+ genDef(Cu, KCALLOUT_FRAME, F31);
+ genDef(Cu, KCALLOUT_FRAME, S0);
+ genDef(Cu, KCALLOUT_FRAME, S1);
+ genDef(Cu, KCALLOUT_FRAME, S2);
+ genDef(Cu, KCALLOUT_FRAME, S3);
+ genDef(Cu, KCALLOUT_FRAME, S4);
+ genDef(Cu, KCALLOUT_FRAME, S5);
+ genDef(Cu, KCALLOUT_FRAME, S6);
+ genDef(Cu, KCALLOUT_FRAME, S7);
+ genDef(Cu, KCALLOUT_FRAME, S8);
+ genDef(Cu, KCALLOUT_FRAME, CbStk);
+ genDef(Cu, KCALLOUT_FRAME, TrFr);
+ genDef(Cu, KCALLOUT_FRAME, Fsr);
+ genDef(Cu, KCALLOUT_FRAME, InStk);
+ genDef(Cu, KCALLOUT_FRAME, Ra);
+ genVal(CuFrameLength, OFFSET(KCALLOUT_FRAME, A0));
+ genDef(Cu, KCALLOUT_FRAME, A0);
+ genDef(Cu, KCALLOUT_FRAME, A1);
+
+ //
+ // Usermode callout user frame definitions.
+ //
+
+ genCom("Usermode callout user frame definitions");
+
+ genDef(Ck, UCALLOUT_FRAME, Buffer);
+ genDef(Ck, UCALLOUT_FRAME, Length);
+ genDef(Ck, UCALLOUT_FRAME, ApiNumber);
+ genDef(Ck, UCALLOUT_FRAME, Sp);
+ genDef(Ck, UCALLOUT_FRAME, Ra);
+
+ EnableInc(HALMIPS);
+
+ //
+ // Loader Parameter Block offset definitions.
+ //
+
+ dumpf("\n");
+ dumpf("//\n");
+ dumpf("// Loader Parameter Block Offset Definitions\n");
+ dumpf("//\n");
+ dumpf("\n");
+
+ dumpf("#define LpbLoadOrderListHead 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, LoadOrderListHead));
+
+ dumpf("#define LpbMemoryDescriptorListHead 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, MemoryDescriptorListHead));
+
+ dumpf("#define LpbKernelStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, KernelStack));
+
+ dumpf("#define LpbPrcb 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Prcb));
+
+ dumpf("#define LpbProcess 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Process));
+
+ dumpf("#define LpbThread 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, Thread));
+
+ dumpf("#define LpbInterruptStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.InterruptStack));
+
+ dumpf("#define LpbFirstLevelDcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelDcacheSize));
+
+ dumpf("#define LpbFirstLevelDcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelDcacheFillSize));
+
+ dumpf("#define LpbFirstLevelIcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelIcacheSize));
+
+ dumpf("#define LpbFirstLevelIcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.FirstLevelIcacheFillSize));
+
+ dumpf("#define LpbGpBase 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.GpBase));
+
+ dumpf("#define LpbPanicStack 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PanicStack));
+
+ dumpf("#define LpbPcrPage 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PcrPage));
+
+ dumpf("#define LpbPdrPage 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PdrPage));
+
+ dumpf("#define LpbSecondLevelDcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelDcacheSize));
+
+ dumpf("#define LpbSecondLevelDcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelDcacheFillSize));
+
+ dumpf("#define LpbSecondLevelIcacheSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelIcacheSize));
+
+ dumpf("#define LpbSecondLevelIcacheFillSize 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.SecondLevelIcacheFillSize));
+
+ dumpf("#define LpbPcrPage2 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, u.Mips.PcrPage2));
+
+ dumpf("#define LpbRegistryLength 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, RegistryLength));
+
+ dumpf("#define LpbRegistryBase 0x%lx\n",
+ OFFSET(LOADER_PARAMETER_BLOCK, RegistryBase));
+
+ DisableInc (HALMIPS);
+
+ //
+ // Define Client/Server data structure definitions.
+ //
+
+ genCom("Client/Server Structure Definitions");
+
+ genDef(Cid, CLIENT_ID, UniqueProcess);
+ genDef(Cid, CLIENT_ID, UniqueThread);
+
+ //
+ // Address space layout definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Address Space Layout Definitions");
+
+ genVal(KUSEG_BASE, KUSEG_BASE);
+ genVal(KSEG0_BASE, KSEG0_BASE);
+ genVal(KSEG1_BASE, KSEG1_BASE);
+ genVal(KSEG2_BASE, KSEG2_BASE);
+
+ DisableInc(HALMIPS);
+
+ genVal(CACHE_ERROR_VECTOR, CACHE_ERROR_VECTOR);
+ genVal(SYSTEM_BASE, SYSTEM_BASE);
+ genVal(PDE_BASE, PDE_BASE);
+ genVal(PTE_BASE, PTE_BASE);
+
+ //
+ // Page table and page directory entry definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Page Table and Directory Entry Definitions");
+
+ genVal(PAGE_SIZE, PAGE_SIZE);
+ genVal(PAGE_SHIFT, PAGE_SHIFT);
+ genVal(PDI_SHIFT, PDI_SHIFT);
+ genVal(PTI_SHIFT, PTI_SHIFT);
+
+ //
+ // Software interrupt request mask definitions
+ //
+
+ genCom("Software Interrupt Request Mask Definitions");
+
+ genVal(APC_INTERRUPT, (1 << (APC_LEVEL + CAUSE_INTPEND - 1)));
+ genVal(DISPATCH_INTERRUPT, (1 << (DISPATCH_LEVEL + CAUSE_INTPEND - 1)));
+
+ DisableInc(HALMIPS);
+
+ //
+ // Breakpoint instruction definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Breakpoint Definitions");
+
+ genVal(USER_BREAKPOINT, USER_BREAKPOINT);
+ genVal(KERNEL_BREAKPOINT, KERNEL_BREAKPOINT);
+ genVal(BREAKIN_BREAKPOINT, BREAKIN_BREAKPOINT);
+
+ DisableInc(HALMIPS);
+
+ genVal(BRANCH_TAKEN_BREAKPOINT, BRANCH_TAKEN_BREAKPOINT);
+ genVal(BRANCH_NOT_TAKEN_BREAKPOINT, BRANCH_NOT_TAKEN_BREAKPOINT);
+ genVal(SINGLE_STEP_BREAKPOINT, SINGLE_STEP_BREAKPOINT);
+ genVal(DIVIDE_OVERFLOW_BREAKPOINT, DIVIDE_OVERFLOW_BREAKPOINT);
+ genVal(DIVIDE_BY_ZERO_BREAKPOINT, DIVIDE_BY_ZERO_BREAKPOINT);
+ genVal(RANGE_CHECK_BREAKPOINT, RANGE_CHECK_BREAKPOINT);
+ genVal(STACK_OVERFLOW_BREAKPOINT, STACK_OVERFLOW_BREAKPOINT);
+ genVal(MULTIPLY_OVERFLOW_BREAKPOINT, MULTIPLY_OVERFLOW_BREAKPOINT);
+ genVal(DEBUG_PRINT_BREAKPOINT, DEBUG_PRINT_BREAKPOINT);
+ genVal(DEBUG_PROMPT_BREAKPOINT, DEBUG_PROMPT_BREAKPOINT);
+ genVal(DEBUG_STOP_BREAKPOINT, DEBUG_STOP_BREAKPOINT);
+ genVal(DEBUG_LOAD_SYMBOLS_BREAKPOINT, DEBUG_LOAD_SYMBOLS_BREAKPOINT);
+ genVal(DEBUG_UNLOAD_SYMBOLS_BREAKPOINT, DEBUG_UNLOAD_SYMBOLS_BREAKPOINT);
+
+ //
+ // Miscellaneous definitions
+ //
+
+ EnableInc(HALMIPS);
+
+ genCom("Miscellaneous Definitions");
+
+ genVal(Executive, Executive);
+ genVal(KernelMode, KernelMode);
+ genVal(FALSE, FALSE);
+ genVal(TRUE, TRUE);
+ genVal(UNCACHED_POLICY, UNCACHED_POLICY);
+ genVal(KiPcr, KIPCR);
+ genVal(KiPcr2, KIPCR2);
+
+ DisableInc(HALMIPS);
+
+ genVal(UsPcr, USPCR);
+ genVal(UsPcr2, USPCR2);
+ genVal(BASE_PRIORITY_THRESHOLD, BASE_PRIORITY_THRESHOLD);
+ genVal(EVENT_PAIR_INCREMENT, EVENT_PAIR_INCREMENT);
+ genVal(LOW_REALTIME_PRIORITY, LOW_REALTIME_PRIORITY);
+ genVal(KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
+ genVal(KERNEL_LARGE_STACK_COMMIT, KERNEL_LARGE_STACK_COMMIT);
+ genVal(XCODE_VECTOR_LENGTH, XCODE_VECTOR_LENGTH);
+ genVal(MM_USER_PROBE_ADDRESS, MM_USER_PROBE_ADDRESS);
+ genVal(ROUND_TO_NEAREST, ROUND_TO_NEAREST);
+ genVal(ROUND_TO_ZERO, ROUND_TO_ZERO);
+ genVal(ROUND_TO_PLUS_INFINITY, ROUND_TO_PLUS_INFINITY);
+ genVal(ROUND_TO_MINUS_INFINITY, ROUND_TO_MINUS_INFINITY);
+ genVal(CLOCK_QUANTUM_DECREMENT, CLOCK_QUANTUM_DECREMENT);
+ genVal(READY_SKIP_QUANTUM, READY_SKIP_QUANTUM);
+ genVal(THREAD_QUANTUM, THREAD_QUANTUM);
+ genVal(WAIT_QUANTUM_DECREMENT, WAIT_QUANTUM_DECREMENT);
+ genVal(ROUND_TRIP_DECREMENT_COUNT, ROUND_TRIP_DECREMENT_COUNT);
+
+ //
+    // Close the header files.
+    //
+
+    fclose(KsMips);
+    fclose(HalMips);
+
+ fprintf(stderr, " Finished\n");
+ return;
+}
+
+VOID
+dumpf(
+ const char *format,
+ ...
+ )
+
+{
+
+    va_list arglist;
+
+ va_start(arglist, format);
+
+ if (OutputEnabled & KSMIPS) {
+ vfprintf (KsMips, format, arglist);
+ }
+
+ if (OutputEnabled & HALMIPS) {
+ vfprintf (HalMips, format, arglist);
+ }
+
+ va_end(arglist);
+}
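+
+//
+// dumpf writes each formatted line to whichever header files are currently
+// enabled, so a sequence such as
+//
+//     EnableInc(KSMIPS | HALMIPS);
+//     genVal(PAGE_SHIFT, PAGE_SHIFT);
+//     DisableInc(HALMIPS);
+//     genVal(PDE_BASE, PDE_BASE);
+//
+// would place PAGE_SHIFT in both ksmips.h and halmips.h but PDE_BASE in
+// ksmips.h only.  (This fragment is illustrative; the actual enable/disable
+// sequence is the one in main above.)
+//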
diff --git a/private/ntos/ke/mips/getsetrg.c b/private/ntos/ke/mips/getsetrg.c
new file mode 100644
index 000000000..88e967454
--- /dev/null
+++ b/private/ntos/ke/mips/getsetrg.c
@@ -0,0 +1,1179 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ getsetrg.c
+
+Abstract:
+
+ This module implement the code necessary to get and set register values.
+ These routines are used during the emulation of unaligned data references
+ and floating point exceptions.
+
+Author:
+
+ David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+ULONG
+KiGetRegisterValue (
+ IN ULONG Register,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to get the 32-bit value of a register from the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ returned. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The value of the specified register is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ if (Register == 0) {
+ return 0;
+
+ } else if (Register < 32) {
+ return (ULONG)(&TrapFrame->XIntZero)[Register];
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ return TrapFrame->FltF0;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ return TrapFrame->FltF1;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ return TrapFrame->FltF2;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ return TrapFrame->FltF3;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ return TrapFrame->FltF4;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ return TrapFrame->FltF5;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ return TrapFrame->FltF6;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ return TrapFrame->FltF7;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ return TrapFrame->FltF8;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ return TrapFrame->FltF9;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ return TrapFrame->FltF10;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ return TrapFrame->FltF11;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ return TrapFrame->FltF12;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ return TrapFrame->FltF13;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ return TrapFrame->FltF14;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ return TrapFrame->FltF15;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ return TrapFrame->FltF16;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ return TrapFrame->FltF17;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ return TrapFrame->FltF18;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ return TrapFrame->FltF19;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ return ExceptionFrame->FltF20;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ return ExceptionFrame->FltF21;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ return ExceptionFrame->FltF22;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ return ExceptionFrame->FltF23;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ return ExceptionFrame->FltF24;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ return ExceptionFrame->FltF25;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ return ExceptionFrame->FltF26;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ return ExceptionFrame->FltF27;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ return ExceptionFrame->FltF28;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ return ExceptionFrame->FltF29;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ return ExceptionFrame->FltF30;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ return ExceptionFrame->FltF31;
+ }
+ }
+}
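+
+//
+// For illustration, KiGetRegisterValue(4, ExceptionFrame, TrapFrame) returns
+// the low 32 bits of integer register a0, which is element 4 of the 64-bit
+// integer register array that begins at XIntZero in the trap frame, while
+// KiGetRegisterValue(52, ExceptionFrame, TrapFrame) returns f20, which is
+// read from the exception frame because f20 - f31 are nonvolatile registers
+// preserved there rather than in the trap frame.
+//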
+
+ULONGLONG
+KiGetRegisterValue64 (
+ IN ULONG Register,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to get the 64-bit value of a register from the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ returned. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The value of the specified register is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ if (Register == 0) {
+ return 0;
+
+ } else if (Register < 32) {
+ return (&TrapFrame->XIntZero)[Register];
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ return TrapFrame->XFltF0;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ return TrapFrame->XFltF1;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ return TrapFrame->XFltF2;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ return TrapFrame->XFltF3;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ return TrapFrame->XFltF4;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ return TrapFrame->XFltF5;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ return TrapFrame->XFltF6;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ return TrapFrame->XFltF7;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ return TrapFrame->XFltF8;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ return TrapFrame->XFltF9;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ return TrapFrame->XFltF10;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ return TrapFrame->XFltF11;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ return TrapFrame->XFltF12;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ return TrapFrame->XFltF13;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ return TrapFrame->XFltF14;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ return TrapFrame->XFltF15;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ return TrapFrame->XFltF16;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ return TrapFrame->XFltF17;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ return TrapFrame->XFltF18;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ return TrapFrame->XFltF19;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ return ExceptionFrame->XFltF20;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ return TrapFrame->XFltF21;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ return ExceptionFrame->XFltF22;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ return TrapFrame->XFltF23;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ return ExceptionFrame->XFltF24;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ return TrapFrame->XFltF25;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ return ExceptionFrame->XFltF26;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ return TrapFrame->XFltF27;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ return ExceptionFrame->XFltF28;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ return TrapFrame->XFltF29;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ return ExceptionFrame->XFltF30;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ return TrapFrame->XFltF31;
+ }
+ }
+}
+
+VOID
+KiSetRegisterValue (
+ IN ULONG Register,
+ IN ULONG Value,
+ OUT PKEXCEPTION_FRAME ExceptionFrame,
+ OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the 32-bit value of a register in the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ stored. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ Value - Supplies the value to be stored in the specified register.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ if (Register < 32) {
+ (&TrapFrame->XIntZero)[Register] = (LONG)Value;
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ TrapFrame->FltF0 = Value;
+ return;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ TrapFrame->FltF1 = Value;
+ return;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ TrapFrame->FltF2 = Value;
+ return;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ TrapFrame->FltF3 = Value;
+ return;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ TrapFrame->FltF4 = Value;
+ return;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ TrapFrame->FltF5 = Value;
+ return;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ TrapFrame->FltF6 = Value;
+ return;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ TrapFrame->FltF7 = Value;
+ return;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ TrapFrame->FltF8 = Value;
+ return;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ TrapFrame->FltF9 = Value;
+ return;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ TrapFrame->FltF10 = Value;
+ return;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ TrapFrame->FltF11 = Value;
+ return;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ TrapFrame->FltF12 = Value;
+ return;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ TrapFrame->FltF13 = Value;
+ return;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ TrapFrame->FltF14 = Value;
+ return;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ TrapFrame->FltF15 = Value;
+ return;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ TrapFrame->FltF16 = Value;
+ return;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ TrapFrame->FltF17 = Value;
+ return;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ TrapFrame->FltF18 = Value;
+ return;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ TrapFrame->FltF19 = Value;
+ return;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ ExceptionFrame->FltF20 = Value;
+ return;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ ExceptionFrame->FltF21 = Value;
+ return;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ ExceptionFrame->FltF22 = Value;
+ return;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ ExceptionFrame->FltF23 = Value;
+ return;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ ExceptionFrame->FltF24 = Value;
+ return;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ ExceptionFrame->FltF25 = Value;
+ return;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ ExceptionFrame->FltF26 = Value;
+ return;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ ExceptionFrame->FltF27 = Value;
+ return;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ ExceptionFrame->FltF28 = Value;
+ return;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ ExceptionFrame->FltF29 = Value;
+ return;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ ExceptionFrame->FltF30 = Value;
+ return;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ ExceptionFrame->FltF31 = Value;
+ return;
+ }
+ }
+}
+
+VOID
+KiSetRegisterValue64 (
+ IN ULONG Register,
+ IN ULONGLONG Value,
+ OUT PKEXCEPTION_FRAME ExceptionFrame,
+ OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the 64-bit value of a register in the
+ specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ stored. Integer registers are specified as 0 - 31 and floating
+ registers are specified as 32 - 63.
+
+ Value - Supplies the value to be stored in the specified register.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the register number.
+ //
+
+ if (Register < 32) {
+ (&TrapFrame->XIntZero)[Register] = Value;
+
+ } else {
+ switch (Register) {
+
+ //
+ // Floating register F0.
+ //
+
+ case 32:
+ TrapFrame->XFltF0 = Value;
+ return;
+
+ //
+ // Floating register F1.
+ //
+
+ case 33:
+ TrapFrame->XFltF1 = Value;
+ return;
+
+ //
+ // Floating register F2.
+ //
+
+ case 34:
+ TrapFrame->XFltF2 = Value;
+ return;
+
+ //
+ // Floating register F3.
+ //
+
+ case 35:
+ TrapFrame->XFltF3 = Value;
+ return;
+
+ //
+ // Floating register F4.
+ //
+
+ case 36:
+ TrapFrame->XFltF4 = Value;
+ return;
+
+ //
+ // Floating register F5.
+ //
+
+ case 37:
+ TrapFrame->XFltF5 = Value;
+ return;
+
+ //
+ // Floating register F6.
+ //
+
+ case 38:
+ TrapFrame->XFltF6 = Value;
+ return;
+
+ //
+ // Floating register F7.
+ //
+
+ case 39:
+ TrapFrame->XFltF7 = Value;
+ return;
+
+ //
+ // Floating register F8.
+ //
+
+ case 40:
+ TrapFrame->XFltF8 = Value;
+ return;
+
+ //
+ // Floating register F9.
+ //
+
+ case 41:
+ TrapFrame->XFltF9 = Value;
+ return;
+
+ //
+ // Floating register F10.
+ //
+
+ case 42:
+ TrapFrame->XFltF10 = Value;
+ return;
+
+ //
+ // Floating register F11.
+ //
+
+ case 43:
+ TrapFrame->XFltF11 = Value;
+ return;
+
+ //
+ // Floating register F12.
+ //
+
+ case 44:
+ TrapFrame->XFltF12 = Value;
+ return;
+
+ //
+ // Floating register F13.
+ //
+
+ case 45:
+ TrapFrame->XFltF13 = Value;
+ return;
+
+ //
+ // Floating register F14.
+ //
+
+ case 46:
+ TrapFrame->XFltF14 = Value;
+ return;
+
+ //
+ // Floating register F15.
+ //
+
+ case 47:
+ TrapFrame->XFltF15 = Value;
+ return;
+
+ //
+ // Floating register F16.
+ //
+
+ case 48:
+ TrapFrame->XFltF16 = Value;
+ return;
+
+ //
+ // Floating register F17.
+ //
+
+ case 49:
+ TrapFrame->XFltF17 = Value;
+ return;
+
+ //
+ // Floating register F18.
+ //
+
+ case 50:
+ TrapFrame->XFltF18 = Value;
+ return;
+
+ //
+ // Floating register F19.
+ //
+
+ case 51:
+ TrapFrame->XFltF19 = Value;
+ return;
+
+ //
+ // Floating register F20.
+ //
+
+ case 52:
+ ExceptionFrame->XFltF20 = Value;
+ return;
+
+ //
+ // Floating register F21.
+ //
+
+ case 53:
+ TrapFrame->XFltF21 = Value;
+ return;
+
+ //
+ // Floating register F22.
+ //
+
+ case 54:
+ ExceptionFrame->XFltF22 = Value;
+ return;
+
+ //
+ // Floating register F23.
+ //
+
+ case 55:
+ TrapFrame->XFltF23 = Value;
+ return;
+
+ //
+ // Floating register F24.
+ //
+
+ case 56:
+ ExceptionFrame->XFltF24 = Value;
+ return;
+
+ //
+ // Floating register F25.
+ //
+
+ case 57:
+ TrapFrame->XFltF25 = Value;
+ return;
+
+ //
+ // Floating register F26.
+ //
+
+ case 58:
+ ExceptionFrame->XFltF26 = Value;
+ return;
+
+ //
+ // Floating register F27.
+ //
+
+ case 59:
+ TrapFrame->XFltF27 = Value;
+ return;
+
+ //
+ // Floating register F28.
+ //
+
+ case 60:
+ ExceptionFrame->XFltF28 = Value;
+ return;
+
+ //
+ // Floating register F29.
+ //
+
+ case 61:
+ TrapFrame->XFltF29 = Value;
+ return;
+
+ //
+ // Floating register F30.
+ //
+
+ case 62:
+ ExceptionFrame->XFltF30 = Value;
+ return;
+
+ //
+ // Floating register F31.
+ //
+
+ case 63:
+ TrapFrame->XFltF31 = Value;
+ return;
+ }
+ }
+}
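+
+//
+// N.B. The register numbering convention used by the accessors in this module
+//      is 0 - 31 for the integer registers, which are indexed directly off the
+//      contiguous XInt fields of the trap frame, and 32 - 63 for the floating
+//      registers, which are split between the trap and exception frames as
+//      shown in the switch statements above. As an illustration, a caller that
+//      needed to store floating register F20 as a 64-bit value would call
+//
+//          KiSetRegisterValue64(20 + 32, Value, ExceptionFrame, TrapFrame);
+//
+//      which stores through ExceptionFrame->XFltF20.
+//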
diff --git a/private/ntos/ke/mips/initkr.c b/private/ntos/ke/mips/initkr.c
new file mode 100644
index 000000000..1c6f11baf
--- /dev/null
+++ b/private/ntos/ke/mips/initkr.c
@@ -0,0 +1,463 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ initkr.c
+
+Abstract:
+
+ This module contains the code to initialize the kernel data structures
+ and to initialize the idle thread, its process, and the processor control
+ block.
+
+Author:
+
+ David N. Cutler (davec) 11-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Put all code for kernel initialization in the INIT section. It will be
+// deallocated by memory management when phase 1 initialization is completed.
+//
+
+#if defined(ALLOC_PRAGMA)
+
+#pragma alloc_text(INIT, KiInitializeKernel)
+
+#endif
+
+VOID
+KiInitializeKernel (
+ IN PKPROCESS Process,
+ IN PKTHREAD Thread,
+ IN PVOID IdleStack,
+ IN PKPRCB Prcb,
+ IN CCHAR Number,
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function gains control after the system has been bootstrapped and
+ before the system has been initialized. Its function is to initialize
+ the kernel data structures, initialize the idle thread and process objects,
+ initialize the processor control block, call the executive initialization
+ routine, and then return to the system startup routine. This routine is
+ also called to initialize the processor specific structures when a new
+ processor is brought on line.
+
+Arguments:
+
+ Process - Supplies a pointer to a control object of type process for
+ the specified processor.
+
+ Thread - Supplies a pointer to a dispatcher object of type thread for
+ the specified processor.
+
+ IdleStack - Supplies a pointer to the base of the real kernel stack for
+ the idle thread on the specified processor.
+
+ Prcb - Supplies a pointer to a processor control block for the specified
+ processor.
+
+ Number - Supplies the number of the processor that is being
+ initialized.
+
+ LoaderBlock - Supplies a pointer to the loader parameter block.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Index;
+ KIRQL OldIrql;
+ PRESTART_BLOCK RestartBlock;
+
+ //
+ // Perform platform dependent processor initialization.
+ //
+
+ HalInitializeProcessor(Number);
+
+ //
+ // Save the address of the loader parameter block.
+ //
+
+ KeLoaderBlock = LoaderBlock;
+
+ //
+ // Initialize the processor block.
+ //
+
+ Prcb->MinorVersion = PRCB_MINOR_VERSION;
+ Prcb->MajorVersion = PRCB_MAJOR_VERSION;
+ Prcb->BuildType = 0;
+
+#if DBG
+
+ Prcb->BuildType |= PRCB_BUILD_DEBUG;
+
+#endif
+
+#if defined(NT_UP)
+
+ Prcb->BuildType |= PRCB_BUILD_UNIPROCESSOR;
+
+#endif
+
+ Prcb->CurrentThread = Thread;
+ Prcb->NextThread = (PKTHREAD)NULL;
+ Prcb->IdleThread = Thread;
+ Prcb->Number = Number;
+ Prcb->SetMember = 1 << Number;
+ Prcb->PcrPage = LoaderBlock->u.Mips.PcrPage;
+
+#if !defined(NT_UP)
+
+ Prcb->TargetSet = 0;
+ Prcb->WorkerRoutine = NULL;
+ Prcb->RequestSummary = 0;
+ Prcb->IpiFrozen = 0;
+
+#if NT_INST
+
+ Prcb->IpiCounts = &KiIpiCounts[Number];
+
+#endif
+
+#endif
+
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+
+ //
+ // Initialize DPC listhead and lock.
+ //
+
+ InitializeListHead(&Prcb->DpcListHead);
+ KeInitializeSpinLock(&Prcb->DpcLock);
+
+ //
+ // Set address of processor block.
+ //
+
+ KiProcessorBlock[Number] = Prcb;
+
+ //
+ // Set global processor architecture, level and revision. The
+ // latter two are the least common denominator on an MP system.
+ //
+
+ KeProcessorArchitecture = PROCESSOR_ARCHITECTURE_MIPS;
+ KeFeatureBits = 0;
+ if (KeProcessorLevel == 0 ||
+ KeProcessorLevel > (USHORT)(PCR->ProcessorId >> 8)) {
+ KeProcessorLevel = (USHORT)(PCR->ProcessorId >> 8);
+ }
+
+ if (KeProcessorRevision == 0 ||
+ KeProcessorRevision > (USHORT)(PCR->ProcessorId & 0xff)) {
+ KeProcessorRevision = (USHORT)(PCR->ProcessorId & 0xff);
+ }
+
+ //
+ // Initialize the address of the bus error routines.
+ //
+
+ PCR->DataBusError = KeBusError;
+ PCR->InstructionBusError = KeBusError;
+
+ //
+ // Initialize the idle thread initial kernel stack and limit address value.
+ //
+
+ PCR->InitialStack = IdleStack;
+ PCR->StackLimit = (PVOID)((ULONG)IdleStack - KERNEL_STACK_SIZE);
+
+ //
+ // Initialize all interrupt vectors to transfer control to the unexpected
+ // interrupt routine.
+ //
+ // N.B. This interrupt object is never actually "connected" to an interrupt
+ // vector via KeConnectInterrupt. It is initialized and then connected
+ // by simply storing the address of the dispatch code in the interrupt
+ // vector.
+ //
+
+ if (Number == 0) {
+
+ //
+ // Initialize the address of the interrupt dispatch routine.
+ //
+
+ KxUnexpectedInterrupt.DispatchAddress = KiUnexpectedInterrupt;
+
+ //
+ // Copy the interrupt dispatch code template into the interrupt object
+ // and flush the dcache on all processors that the current thread can
+ // run on to ensure that the code is actually in memory.
+ //
+
+ for (Index = 0; Index < DISPATCH_LENGTH; Index += 1) {
+ KxUnexpectedInterrupt.DispatchCode[Index] = KiInterruptTemplate[Index];
+ }
+
+ //
+ // Set the default DMA I/O coherency attributes.
+ //
+
+ KiDmaIoCoherency = 0;
+
+ //
+ // Initialize the context swap spinlock.
+ //
+
+ KeInitializeSpinLock(&KiContextSwapLock);
+
+ //
+ // Sweep the data cache to make sure the dispatch code is flushed
+ // to memory on the current processor.
+ //
+
+ HalSweepDcache();
+ }
+
+ for (Index = 0; Index < MAXIMUM_VECTOR; Index += 1) {
+ PCR->InterruptRoutine[Index] =
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+
+ //
+ // Initialize the profile count and interval.
+ //
+
+ PCR->ProfileCount = 0;
+ PCR->ProfileInterval = 0x200000;
+
+ //
+ // Initialize the passive release, APC, and DPC interrupt vectors.
+ //
+
+ PCR->InterruptRoutine[0] = KiPassiveRelease;
+ PCR->InterruptRoutine[APC_LEVEL] = KiApcInterrupt;
+ PCR->InterruptRoutine[DISPATCH_LEVEL] = KiDispatchInterrupt;
+ PCR->ReservedVectors = (1 << PASSIVE_LEVEL) | (1 << APC_LEVEL) |
+ (1 << DISPATCH_LEVEL) | (1 << IPI_LEVEL);
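+
+ //
+ // N.B. KeConnectInterrupt refuses to connect an interrupt object to any
+ // vector whose bit is set in the reserved vector mask, so the passive
+ // release, APC, DPC, and IPI vectors can never be claimed by a device
+ // interrupt object.
+ //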
+
+ //
+ // Initialize the set member for the current processor, set IRQL to
+ // APC_LEVEL, and set the processor number.
+ //
+
+ PCR->CurrentIrql = APC_LEVEL;
+ PCR->SetMember = 1 << Number;
+ PCR->NotMember = ~PCR->SetMember;
+ PCR->Number = Number;
+
+ //
+ // Set the initial stall execution scale factor. This value will be
+ // recomputed later by the HAL.
+ //
+
+ PCR->StallScaleFactor = 50;
+
+ //
+ // Set address of process object in thread object.
+ //
+
+ Thread->ApcState.Process = Process;
+
+ //
+ // Set the appropriate member in the active processors set.
+ //
+
+ SetMember(Number, KeActiveProcessors);
+
+ //
+ // Set the number of processors based on the maximum of the current
+ // number of processors and the current processor number.
+ //
+
+ if ((Number + 1) > KeNumberProcessors) {
+ KeNumberProcessors = Number + 1;
+ }
+
+ //
+ // If the initial processor is being initialized, then initialize the
+ // per system data structures.
+ //
+
+ if (Number == 0) {
+
+ //
+ // Initialize the address of the restart block for the boot master.
+ //
+
+ Prcb->RestartBlock = SYSTEM_BLOCK->RestartBlock;
+
+ //
+ // Initialize the kernel debugger.
+ //
+
+ if (KdInitSystem(LoaderBlock, FALSE) == FALSE) {
+ KeBugCheck(PHASE0_INITIALIZATION_FAILED);
+ }
+
+ //
+ // Initialize processor block array.
+ //
+
+ for (Index = 1; Index < MAXIMUM_PROCESSORS; Index += 1) {
+ KiProcessorBlock[Index] = (PKPRCB)NULL;
+ }
+
+ //
+ // Perform architecture independent initialization.
+ //
+
+ KiInitSystem();
+
+ //
+ // Initialize idle thread process object and then set:
+ //
+ // 1. all the quantum values to the maximum possible.
+ // 2. the process in the balance set.
+ // 3. the active processor mask to the specified processor.
+ //
+
+ KeInitializeProcess(Process,
+ (KPRIORITY)0,
+ (KAFFINITY)(0xffffffff),
+ (PULONG)(PDE_BASE + ((PDE_BASE >> PDI_SHIFT - 2) & 0xffc)),
+ FALSE);
+
+ Process->ThreadQuantum = MAXCHAR;
+
+ }
+
+ //
+ // Initialize idle thread object and then set:
+ //
+ // 1. the initial kernel stack to the specified idle stack.
+ // 2. the next processor number to the specified processor.
+ // 3. the thread priority to the highest possible value.
+ // 4. the state of the thread to running.
+ // 5. the thread affinity to the specified processor.
+ // 6. the specified processor member in the process active processors
+ // set.
+ //
+
+ KeInitializeThread(Thread, (PVOID)((ULONG)IdleStack - PAGE_SIZE),
+ (PKSYSTEM_ROUTINE)NULL, (PKSTART_ROUTINE)NULL,
+ (PVOID)NULL, (PCONTEXT)NULL, (PVOID)NULL, Process);
+
+ Thread->InitialStack = IdleStack;
+ Thread->StackBase = IdleStack;
+ Thread->StackLimit = (PVOID)((ULONG)IdleStack - KERNEL_STACK_SIZE);
+ Thread->NextProcessor = Number;
+ Thread->Priority = HIGH_PRIORITY;
+ Thread->State = Running;
+ Thread->Affinity = (KAFFINITY)(1 << Number);
+ Thread->WaitIrql = DISPATCH_LEVEL;
+
+ //
+ // If the current processor is 0, then set the appropriate bit in the
+ // active summary of the idle process.
+ //
+
+ if (Number == 0) {
+ SetMember(Number, Process->ActiveProcessors);
+ }
+
+ //
+ // Execute the executive initialization.
+ //
+
+ try {
+ ExpInitializeExecutive(Number, LoaderBlock);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ KeBugCheck (PHASE0_EXCEPTION);
+ }
+
+ //
+ // If the initial processor is being initialized, then compute the
+ // timer table reciprocal value and reset the PRCB values for the
+ // controllable DPC behavior in order to reflect any registry
+ // overrides.
+ //
+
+ if (Number == 0) {
+ KiTimeIncrementReciprocal = KiComputeReciprocal((LONG)KeMaximumIncrement,
+ &KiTimeIncrementShiftCount);
+
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+ }
+
+ //
+ // Raise IRQL to dispatch level and set the priority of the idle thread
+ // to zero. This will have the effect of immediately causing the phase
+ // one initialization thread to get scheduled for execution. The idle
+ // thread priority is then set to the lowest realtime priority. This is
+ // necessary so that mutexes acquired at DPC level do not cause the active
+ // matrix to get corrupted.
+ //
+
+ KeRaiseIrql(DISPATCH_LEVEL, &OldIrql);
+ KeSetPriorityThread(Thread, (KPRIORITY)0);
+ Thread->Priority = LOW_REALTIME_PRIORITY;
+
+ //
+ // Raise IRQL to the highest level.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ //
+ // If a restart block exists for the current processor, then set boot
+ // completed.
+ //
+ // N.B. Firmware on uniprocessor machines configured for MP operation
+ // can have a restart block address of NULL.
+ //
+
+#if !defined(NT_UP)
+
+ RestartBlock = Prcb->RestartBlock;
+ if (RestartBlock != NULL) {
+ RestartBlock->BootStatus.BootFinished = 1;
+ }
+
+ //
+ // If the current processor is not 0, then set the appropriate bit in
+ // idle summary.
+ //
+
+ if (Number != 0) {
+ SetMember(Number, KiIdleSummary);
+ }
+
+#endif
+
+ return;
+}
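+
+//
+// N.B. KiInitializeKernel runs once on every processor. The blocks above that
+//      are guarded by "Number == 0" perform the one-time system initialization
+//      (the unexpected interrupt object, the kernel debugger, the processor
+//      block array, the idle process, and the timer reciprocal); everything
+//      else establishes per-processor state such as the PRCB, the PCR
+//      interrupt vectors, and the idle thread for that processor.
+//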
diff --git a/private/ntos/ke/mips/intobj.c b/private/ntos/ke/mips/intobj.c
new file mode 100644
index 000000000..3e97853be
--- /dev/null
+++ b/private/ntos/ke/mips/intobj.c
@@ -0,0 +1,434 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ intobj.c
+
+Abstract:
+
+ This module implements the kernel interrupt object. Functions are provided
+ to initialize, connect, and disconnect interrupt objects.
+
+Author:
+
+ David N. Cutler (davec) 3-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+VOID
+KeInitializeInterrupt (
+ IN PKINTERRUPT Interrupt,
+ IN PKSERVICE_ROUTINE ServiceRoutine,
+ IN PVOID ServiceContext,
+ IN PKSPIN_LOCK SpinLock OPTIONAL,
+ IN ULONG Vector,
+ IN KIRQL Irql,
+ IN KIRQL SynchronizeIrql,
+ IN KINTERRUPT_MODE InterruptMode,
+ IN BOOLEAN ShareVector,
+ IN CCHAR ProcessorNumber,
+ IN BOOLEAN FloatingSave
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel interrupt object. The service routine,
+ service context, spin lock, vector, IRQL, Synchronized IRQL, and floating
+ context save flag are initialized.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+ ServiceRoutine - Supplies a pointer to a function that is to be
+ executed when an interrupt occurs via the specified interrupt
+ vector.
+
+ ServiceContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the ServiceRoutine parameter.
+
+ SpinLock - Supplies an optional pointer to an executive spin lock.
+
+ Vector - Supplies the index of the entry in the Interrupt Dispatch Table
+ that is to be associated with the ServiceRoutine function.
+
+ Irql - Supplies the request priority of the interrupting source.
+
+ SynchronizeIrql - The request priority that the interrupt should be
+ synchronized with.
+
+ InterruptMode - Supplies the mode of the interrupt; LevelSensitive or
+ Latched.
+
+ ShareVector - Supplies a boolean value that specifies whether the
+ vector can be shared with other interrupt objects or not. If FALSE,
+ then the vector may not be shared; if TRUE, it may be.
+
+ ProcessorNumber - Supplies the number of the processor to which the
+ interrupt will be connected.
+
+ FloatingSave - Supplies a boolean value that determines whether the
+ floating point registers and pipe line are to be saved before calling
+ the ServiceRoutine function.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Index;
+
+ //
+ // Initialize standard control object header.
+ //
+
+ Interrupt->Type = InterruptObject;
+ Interrupt->Size = sizeof(KINTERRUPT);
+
+ //
+ // Initialize the address of the service routine, the service context,
+ // the address of the spin lock, the address of the actual spin lock
+ // that will be used, the vector number, the IRQL of the interrupting
+ // source, the Synchronized IRQL of the interrupt object, the interrupt
+ // mode, the processor number, and the floating context save flag.
+ //
+
+ Interrupt->ServiceRoutine = ServiceRoutine;
+ Interrupt->ServiceContext = ServiceContext;
+
+ if (ARGUMENT_PRESENT(SpinLock)) {
+ Interrupt->ActualLock = SpinLock;
+
+ } else {
+ Interrupt->SpinLock = 0;
+ Interrupt->ActualLock = &Interrupt->SpinLock;
+ }
+
+ Interrupt->Vector = Vector;
+ Interrupt->Irql = Irql;
+ Interrupt->SynchronizeIrql = SynchronizeIrql;
+ Interrupt->Mode = InterruptMode;
+ Interrupt->ShareVector = ShareVector;
+ Interrupt->Number = ProcessorNumber;
+ Interrupt->FloatingSave = FloatingSave;
+
+ //
+ // Copy the interrupt dispatch code template into the interrupt object
+ // and flush the dcache on all processors that the current thread can
+ // run on to ensure that the code is actually in memory.
+ //
+
+ for (Index = 0; Index < DISPATCH_LENGTH; Index += 1) {
+ Interrupt->DispatchCode[Index] = KiInterruptTemplate[Index];
+ }
+
+ KeSweepIcache(FALSE);
+
+ //
+ // Set the connected state of the interrupt object to FALSE.
+ //
+
+ Interrupt->Connected = FALSE;
+ return;
+}
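+
+//
+// As an illustration, a driver-style caller might combine the routine above
+// with KeConnectInterrupt roughly as follows. The names SampleServiceRoutine,
+// SampleInterrupt, SampleExtension, SampleVector, and SampleIrql are
+// hypothetical and are not defined in this module; the service routine
+// signature shown is assumed.
+//
+//     BOOLEAN
+//     SampleServiceRoutine (
+//         IN PKINTERRUPT Interrupt,
+//         IN PVOID ServiceContext
+//         )
+//     {
+//         /* acknowledge the device using ServiceContext */
+//         return TRUE;
+//     }
+//
+//     KeInitializeInterrupt(&SampleInterrupt,      // interrupt object storage
+//                           SampleServiceRoutine,  // service routine
+//                           &SampleExtension,      // service context
+//                           NULL,                  // use the built-in spin lock
+//                           SampleVector,          // interrupt vector
+//                           SampleIrql,            // device IRQL
+//                           SampleIrql,            // synchronize at device IRQL
+//                           LevelSensitive,        // interrupt mode
+//                           TRUE,                  // vector may be shared
+//                           0,                     // connect on processor 0
+//                           FALSE);                // no floating state save
+//
+//     if (KeConnectInterrupt(&SampleInterrupt) == FALSE) {
+//         /* vector reserved, already in use with a different mode, etc. */
+//     }
+//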
+
+BOOLEAN
+KeConnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function connects an interrupt object to the interrupt vector
+ specified by the interrupt object. If the interrupt object is already
+ connected, or an attempt is made to connect to an interrupt that cannot
+ be connected, then a value of FALSE is returned. Else the specified
+ interrupt object is connected to the interrupt vector, the connected
+ state is set to TRUE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is already connected or an attempt is made to
+ connect to an interrupt vector that cannot be connected, then a value
+ of FALSE is returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ KIRQL Irql;
+ CHAR Number;
+ KIRQL OldIrql;
+ KIRQL PreviousIrql;
+ ULONG Vector;
+
+ //
+ // If the interrupt object is already connected, the interrupt vector
+ // number is invalid, an attempt is being made to connect to a vector
+ // that cannot be connected, the interrupt request level is invalid,
+ // the processor number is invalid, or the interrupt vector is less
+ // than or equal to the highest level and is not equal to the specified
+ // IRQL, then do not connect the interrupt object. Else connect the interrupt
+ // object to the specified vector and establish the proper interrupt
+ // dispatcher.
+ //
+
+ Connected = FALSE;
+ Irql = Interrupt->Irql;
+ Number = Interrupt->Number;
+ Vector = Interrupt->Vector;
+ if ((((Vector >= MAXIMUM_VECTOR) || (Irql > HIGH_LEVEL) ||
+ ((Vector <= HIGH_LEVEL) &&
+ ((((1 << Vector) & PCR->ReservedVectors) != 0) || (Vector != Irql))) ||
+ (Number >= KeNumberProcessors))) == FALSE) {
+
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1 << Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the specified interrupt vector is not connected, then
+ // connect the interrupt vector to the interrupt object dispatch
+ // code, establish the dispatcher address, and set the new
+ // interrupt mode and enable masks. Else if the interrupt is
+ // already chained, then add the new interrupt object at the end
+ // of the chain. If the interrupt vector is not chained, then
+ // start a chain with the previous interrupt object at the front
+ // of the chain. The interrupt mode of all interrupt objects in
+ // a chain must be the same.
+ //
+
+ if (Interrupt->Connected == FALSE) {
+ if (PCR->InterruptRoutine[Vector] ==
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode)) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ if (Interrupt->FloatingSave != FALSE) {
+ Interrupt->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupt->Irql == Interrupt->SynchronizeIrql) {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+
+ } else {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interrupt->DispatchCode);
+
+ HalEnableSystemInterrupt(Vector, Irql, Interrupt->Mode);
+
+ } else {
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+
+ if (Interrupt->Mode == Interruptx->Mode) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ KeRaiseIrql(max(Irql, (KIRQL)KiSynchIrql), &PreviousIrql);
+ if (Interruptx->DispatchAddress != KiChainedDispatch) {
+ InitializeListHead(&Interruptx->InterruptListEntry);
+ Interruptx->DispatchAddress = KiChainedDispatch;
+ }
+
+ InsertTailList(&Interruptx->InterruptListEntry,
+ &Interrupt->InterruptListEntry);
+
+ KeLowerIrql(PreviousIrql);
+ }
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+ }
+
+ //
+ // Return whether interrupt was connected to the specified vector.
+ //
+
+ return Connected;
+}
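+
+//
+// N.B. When a vector is shared, the first interrupt object connected to the
+//      vector remains the one addressed by PCR->InterruptRoutine[Vector]; its
+//      dispatch address is changed to KiChainedDispatch and each subsequently
+//      connected object is linked onto its interrupt list. KeDisconnectInterrupt
+//      below unwinds this structure in the reverse manner.
+//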
+
+BOOLEAN
+KeDisconnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function disconnects an interrupt object from the interrupt vector
+ specified by the interrupt object. If the interrupt object is not
+ connected, then a value of FALSE is returned. Else the specified interrupt
+ object is disconnected from the interrupt vector, the connected state is
+ set to FALSE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is not connected, then a value of FALSE is
+ returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ PKINTERRUPT Interrupty;
+ KIRQL Irql;
+ KIRQL OldIrql;
+ KIRQL PreviousIrql;
+ ULONG Vector;
+
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1 << Interrupt->Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the interrupt object is connected, then disconnect it from the
+ // specified vector.
+ //
+
+ Connected = Interrupt->Connected;
+ if (Connected != FALSE) {
+ Irql = Interrupt->Irql;
+ Vector = Interrupt->Vector;
+
+ //
+ // If the specified interrupt vector is not connected to the chained
+ // interrupt dispatcher, then disconnect it by setting its dispatch
+ // address to the unexpected interrupt routine. Else remove the
+ // interrupt object from the interrupt chain. If there is only
+ // one entry remaining in the list, then reestablish the dispatch
+ // address.
+ //
+
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+
+ if (Interruptx->DispatchAddress == KiChainedDispatch) {
+ KeRaiseIrql(max(Irql, (KIRQL)KiSynchIrql), &PreviousIrql);
+ if (Interrupt == Interruptx) {
+ Interruptx = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT, InterruptListEntry);
+ Interruptx->DispatchAddress = KiChainedDispatch;
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interruptx->DispatchCode);
+ }
+
+ RemoveEntryList(&Interrupt->InterruptListEntry);
+ Interrupty = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT,
+ InterruptListEntry);
+
+ if (Interruptx == Interrupty) {
+ if (Interrupty->FloatingSave != FALSE) {
+ Interrupty->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupty->Irql == Interrupty->SynchronizeIrql) {
+ Interrupty->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+
+ } else {
+ Interrupty->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&Interrupty->DispatchCode);
+ }
+
+ KeLowerIrql(PreviousIrql);
+
+ } else {
+ HalDisableSystemInterrupt(Vector, Irql);
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+
+ KeSweepIcache(TRUE);
+ Interrupt->Connected = FALSE;
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+
+ //
+ // Return whether interrupt was disconnected from the specified vector.
+ //
+
+ return Connected;
+}
diff --git a/private/ntos/ke/mips/services.stb b/private/ntos/ke/mips/services.stb
new file mode 100644
index 000000000..7c2f19871
--- /dev/null
+++ b/private/ntos/ke/mips/services.stb
@@ -0,0 +1,64 @@
+//++
+//
+// Copyright (c) 1989 Microsoft Corporation
+//
+// Module Name:
+//
+// sysstubs.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch stub procedures.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// User or kernel mode.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+#define STUBS_BEGIN1( t )
+#define STUBS_BEGIN2( t )
+#define STUBS_BEGIN3( t )
+#define STUBS_BEGIN4( t )
+#define STUBS_BEGIN5( t )
+#define STUBS_BEGIN6( t )
+#define STUBS_BEGIN7( t )
+#define STUBS_BEGIN8( t )
+
+#define STUBS_END
+
+#define SYSSTUBS_ENTRY1( ServiceNumber, Name, NumArgs ) SYSTEM_ENTRY(Zw##Name)
+#define SYSSTUBS_ENTRY2( ServiceNumber, Name, NumArgs ) li v0, ServiceNumber
+#define SYSSTUBS_ENTRY3( ServiceNumber, Name, NumArgs ) syscall
+#define SYSSTUBS_ENTRY4( ServiceNumber, Name, NumArgs ) .end Zw##Name ;
+#define SYSSTUBS_ENTRY5( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY6( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY7( ServiceNumber, Name, NumArgs )
+#define SYSSTUBS_ENTRY8( ServiceNumber, Name, NumArgs )
+
+#define USRSTUBS_ENTRY1( ServiceNumber, Name, NumArgs) SYSTEM_ENTRY(Zw##Name)
+#define USRSTUBS_ENTRY2( ServiceNumber, Name, NumArgs) ALTERNATE_ENTRY(Nt##Name)
+#define USRSTUBS_ENTRY3( ServiceNumber, Name, NumArgs) li v0, ServiceNumber
+#define USRSTUBS_ENTRY4( ServiceNumber, Name, NumArgs) syscall
+#define USRSTUBS_ENTRY5( ServiceNumber, Name, NumArgs) .end Zw##Name ;
+#define USRSTUBS_ENTRY6( ServiceNumber, Name, NumArgs)
+#define USRSTUBS_ENTRY7( ServiceNumber, Name, NumArgs)
+#define USRSTUBS_ENTRY8( ServiceNumber, Name, NumArgs)
+
+
+ STUBS_BEGIN1( "System Service Stub Procedures" )
+ STUBS_BEGIN2( "System Service Stub Procedures" )
+ STUBS_BEGIN3( "System Service Stub Procedures" )
+ STUBS_BEGIN4( "System Service Stub Procedures" )
+ STUBS_BEGIN5( "System Service Stub Procedures" )
+ STUBS_BEGIN6( "System Service Stub Procedures" )
+ STUBS_BEGIN7( "System Service Stub Procedures" )
+ STUBS_BEGIN8( "System Service Stub Procedures" )
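+
+//
+// For example, once this template is expanded against the system service list,
+// the SYSSTUBS_ENTRY1 through SYSSTUBS_ENTRY4 macros above produce, for a
+// hypothetical service Xxx with service number n, a stub of the form:
+//
+//     SYSTEM_ENTRY(ZwXxx)
+//     li      v0, n                   // load the system service number
+//     syscall                         // trap to the system service dispatcher
+//     .end    ZwXxx
+//
+// The USRSTUBS_ENTRY variant additionally defines NtXxx as an alternate entry
+// that shares the same body.
+//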
diff --git a/private/ntos/ke/mips/sources b/private/ntos/ke/mips/sources
new file mode 100644
index 000000000..d6a83c820
--- /dev/null
+++ b/private/ntos/ke/mips/sources
@@ -0,0 +1,41 @@
+!IFNDEF USE_CRTDLL
+MIPS_OPTIONS=-nodwalign -float
+GPSIZE=32
+!ENDIF
+
+MSC_WARNING_LEVEL=/W3 /WX
+
+MIPS_SOURCES=..\mips\alignem.c \
+ ..\mips\allproc.c \
+ ..\mips\apcuser.c \
+ ..\mips\branchem.c \
+ ..\mips\buserror.c \
+ ..\mips\callback.c \
+ ..\mips\callout.s \
+ ..\mips\dmpstate.c \
+ ..\mips\exceptn.c \
+ ..\mips\floatem.c \
+ ..\mips\flush.c \
+ ..\mips\getsetrg.c \
+ ..\mips\thredini.c \
+ ..\mips\timindex.s \
+ ..\mips\xxapcint.s \
+ ..\mips\xxclock.s \
+ ..\mips\xxflshtb.c \
+ ..\mips\xxintsup.s \
+ ..\mips\xxirql.s \
+ ..\mips\xxmiscs.s \
+ ..\mips\x4mpipi.s \
+ ..\mips\xxmpipi.c \
+ ..\mips\xxregsv.s \
+ ..\mips\xxspinlk.s \
+ ..\mips\x4ctxsw.s \
+ ..\mips\sysstubs.s \
+ ..\mips\systable.s \
+ ..\mips\x4trap.s \
+ ..\mips\threadbg.s \
+ ..\mips\initkr.c \
+ ..\mips\intobj.c \
+ ..\mips\x4start.s \
+ ..\mips\x4sqrt.s \
+ ..\mips\vdm.c
diff --git a/private/ntos/ke/mips/table.stb b/private/ntos/ke/mips/table.stb
new file mode 100644
index 000000000..2890df74b
--- /dev/null
+++ b/private/ntos/ke/mips/table.stb
@@ -0,0 +1,61 @@
+4 // This is the number of in register arguments
+//++
+//
+// Copyright (c) 1989 Microsoft Corporation
+//
+// Module Name:
+//
+// systable.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch table.
+//
+// Author:
+//
+// David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+//
+// To add a system service simply add the name of the service to the below
+// table. If the system service has in memory arguments, then immediately
+// follow the name of the service with a comma and following that the number
+// of bytes of in memory arguments, e.g. CreateObject,40.
+//
+
+#define TABLE_BEGIN1( t ) .rdata
+#define TABLE_BEGIN2( t ) .align 4
+#define TABLE_BEGIN3( t ) .globl KiServiceTable
+#define TABLE_BEGIN4( t ) KiServiceTable:
+#define TABLE_BEGIN5( t )
+#define TABLE_BEGIN6( t )
+#define TABLE_BEGIN7( t )
+#define TABLE_BEGIN8( t )
+
+#define TABLE_ENTRY( l,bias,numargs ) .word Nt##l+bias
+
+#define TABLE_END( n ) .sdata ; .globl KiServiceLimit ; KiServiceLimit: .word n + 1
+
+#define ARGTBL_BEGIN .rdata ; .align 4 ; .globl KiArgumentTable ; KiArgumentTable:
+
+#define ARGTBL_ENTRY( e0,e1,e2,e3,e4,e5,e6,e7 ) .byte e0,e1,e2,e3,e4,e5,e6,e7
+
+#define ARGTBL_END
+
+
+ TABLE_BEGIN1( "System Service Dispatch Table" )
+ TABLE_BEGIN2( "System Service Dispatch Table" )
+ TABLE_BEGIN3( "System Service Dispatch Table" )
+ TABLE_BEGIN4( "System Service Dispatch Table" )
+ TABLE_BEGIN5( "System Service Dispatch Table" )
+ TABLE_BEGIN6( "System Service Dispatch Table" )
+ TABLE_BEGIN7( "System Service Dispatch Table" )
+ TABLE_BEGIN8( "System Service Dispatch Table" )
+ \ No newline at end of file
diff --git a/private/ntos/ke/mips/threadbg.s b/private/ntos/ke/mips/threadbg.s
new file mode 100644
index 000000000..aaf0bdd49
--- /dev/null
+++ b/private/ntos/ke/mips/threadbg.s
@@ -0,0 +1,128 @@
+// TITLE("Thread Startup")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// threadbg.s
+//
+// Abstract:
+//
+// This module implements the MIPS machine dependent code necessary to
+// startup a thread in kernel mode.
+//
+// Author:
+//
+// David N. Cutler (davec) 28-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only, IRQL APC_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Thread Startup")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through thread startup
+// and to support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiThreadDispatch, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s7
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+ sw s3,ExIntS3(sp) //
+ sw s4,ExIntS4(sp) //
+ sw s5,ExIntS5(sp) //
+ sw s6,ExIntS6(sp) //
+ sw s7,ExIntS7(sp) //
+ sw s8,ExIntS8(sp) //
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f30
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// This routine is called at thread startup. Its function is to call the
+// initial thread procedure. If control returns from the initial thread
+// procedure and a user mode context was established when the thread
+// was initialized, then the user mode context is restored and control
+// is transferred to user mode. Otherwise a bug check will occur.
+//
+//
+// Arguments:
+//
+// s0 (saved) - Supplies a boolean value that specifies whether a user
+// mode thread context was established when the thread was initialized.
+//
+// s1 (saved) - Supplies the starting context parameter for the initial
+// thread procedure.
+//
+// s2 (saved) - Supplies the starting address of the initial thread routine.
+//
+// s3 - Supplies the starting address of the initial system routine.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiThreadStartup)
+
+ lw s0,ExIntS0(sp) // get user context flag
+ lw s1,ExIntS1(sp) // get context parameter value
+ lw s2,ExIntS2(sp) // get initial routine address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ bne zero,s0,10f // if ne, user context specified
+ subu sp,sp,TrapFrameArguments // allocate argument space
+
+ .set noreorder
+ .set noat
+10: ctc1 zero,fsr // clear floating status
+ .set at
+ .set reorder
+
+ li a0,APC_LEVEL // lower IRQL to APC level
+ jal KeLowerIrql //
+ move a0,s2 // set address of thread routine
+ move a1,s1 // set startup context parameter
+ jal s3 // call system startup routine
+ beq zero,s0,20f // if eq, no user context
+
+//
+// Finish in common exception exit code which will restore the nonvolatile
+// registers and exit to user mode.
+//
+
+ j KiExceptionExit // finish in exception exit code
+
+//
+// An attempt was made to enter user mode for a thread that has no user mode
+// context. Generate a bug check.
+//
+
+20: li a0,NO_USER_MODE_CONTEXT // set bug check code
+ jal KeBugCheck // call bug check routine
+
+ .end KiThreadDispatch
diff --git a/private/ntos/ke/mips/thredini.c b/private/ntos/ke/mips/thredini.c
new file mode 100644
index 000000000..24c3a47be
--- /dev/null
+++ b/private/ntos/ke/mips/thredini.c
@@ -0,0 +1,285 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ thredini.c
+
+Abstract:
+
+ This module implements the machine dependent functions to set the initial
+ context and data alignment handling mode for a process or thread object.
+
+Author:
+
+ David N. Cutler (davec) 1-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macros are used to check that an input object is
+// really the proper type.
+//
+
+#define ASSERT_PROCESS(E) { \
+ ASSERT((E)->Header.Type == ProcessObject); \
+}
+
+#define ASSERT_THREAD(E) { \
+ ASSERT((E)->Header.Type == ThreadObject); \
+}
+
+VOID
+KiInitializeContextThread (
+ IN PKTHREAD Thread,
+ IN PKSYSTEM_ROUTINE SystemRoutine,
+ IN PKSTART_ROUTINE StartRoutine OPTIONAL,
+ IN PVOID StartContext OPTIONAL,
+ IN PCONTEXT ContextRecord OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the machine dependent context of a thread object.
+
+ N.B. This function does not check the accessibility of the context record.
+// It is assumed that the caller of this routine is either prepared to
+ handle access violations or has probed and copied the context record
+ as appropriate.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ SystemRoutine - Supplies a pointer to the system function that is to be
+ called when the thread is first scheduled for execution.
+
+ StartRoutine - Supplies an optional pointer to a function that is to be
+ called after the system has finished initializing the thread. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ StartContext - Supplies an optional pointer to an arbitrary data structure
+ which will be passed to the StartRoutine as a parameter. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+// ContextRecord - Supplies an optional pointer to a context frame which contains
+ the initial user mode state of the thread. This parameter is specified
+ if the thread is a user thread and will execute in user mode. If this
+ parameter is not specified, then the Teb parameter is ignored.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKEXCEPTION_FRAME CxFrame;
+ PKEXCEPTION_FRAME ExFrame;
+ ULONG InitialStack;
+ PKTRAP_FRAME TrFrame;
+
+ //
+ // If a context frame is specified, then initialize a trap frame and
+ // an exception frame with the specified user mode context.
+ //
+
+ InitialStack = (LONG)Thread->InitialStack;
+ if (ARGUMENT_PRESENT(ContextRecord)) {
+ TrFrame = (PKTRAP_FRAME)(InitialStack - sizeof(KTRAP_FRAME));
+ ExFrame = (PKEXCEPTION_FRAME)((ULONG)TrFrame - sizeof(KEXCEPTION_FRAME));
+ CxFrame = (PKEXCEPTION_FRAME)((ULONG)ExFrame - sizeof(KEXCEPTION_FRAME));
+
+ //
+ // Zero the exception and trap frames and copy information from the
+ // specified context frame to the trap and exception frames.
+ //
+
+ RtlZeroMemory((PVOID)ExFrame, sizeof(KEXCEPTION_FRAME));
+ RtlZeroMemory((PVOID)TrFrame, sizeof(KTRAP_FRAME));
+ KeContextToKframes(TrFrame,
+ ExFrame,
+ ContextRecord,
+ ContextRecord->ContextFlags | CONTEXT_CONTROL,
+ UserMode);
+
+ //
+ // Set the saved previous processor mode in the trap frame and the
+ // previous processor mode in the thread object to user mode.
+ //
+
+ TrFrame->PreviousMode = UserMode;
+ Thread->PreviousMode = UserMode;
+
+ //
+ // Initialize the return address in the exception frame.
+ //
+
+ ExFrame->IntRa = 0;
+
+ } else {
+ ExFrame = NULL;
+ CxFrame = (PKEXCEPTION_FRAME)(InitialStack - sizeof(KEXCEPTION_FRAME));
+
+ //
+ // Set the previous mode in thread object to kernel.
+ //
+
+ Thread->PreviousMode = KernelMode;
+ }
+
+ //
+ // Initialize context switch frame and set thread start up parameters.
+ //
+
+ CxFrame->SwapReturn = (ULONG)KiThreadStartup;
+ if (ExFrame == NULL) {
+ CxFrame->IntS8 = (ULONG)ExFrame;
+
+ } else {
+ CxFrame->IntS8 = (ULONG)TrFrame;
+ }
+
+ CxFrame->IntS0 = (ULONG)ContextRecord;
+ CxFrame->IntS1 = (ULONG)StartContext;
+ CxFrame->IntS2 = (ULONG)StartRoutine;
+ CxFrame->IntS3 = (ULONG)SystemRoutine;
+ Thread->KernelStack = (PVOID)CxFrame;
+ return;
+}
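+
+//
+// N.B. For a user mode thread the routine above builds the initial kernel
+//      stack, growing downward from Thread->InitialStack, as a trap frame
+//      (the user mode register state), followed by an exception frame (the
+//      nonvolatile register state), followed by a context switch frame.
+//      Thread->KernelStack points at the context switch frame, whose
+//      IntS0 - IntS3 values become the s0 - s3 arguments described for
+//      KiThreadStartup in threadbg.s and whose SwapReturn value is the
+//      address at which the first context switch to the thread resumes.
+//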
+
+BOOLEAN
+KeSetAutoAlignmentProcess (
+ IN PRKPROCESS Process,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ process and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the process. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_PROCESS(Process);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Process->AutoAlignment;
+ Process->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
+
+BOOLEAN
+KeSetAutoAlignmentThread (
+ IN PKTHREAD Thread,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ thread and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the thread. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_THREAD(Thread);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Thread->AutoAlignment;
+ Thread->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
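+
+//
+// N.B. Both routines above return the previous handling mode, so a caller that
+//      needs to temporarily force automatic alignment fixups for a thread can
+//      save and restore it, e.g.
+//
+//          Previous = KeSetAutoAlignmentThread(Thread, TRUE);
+//          ... perform accesses that may be misaligned ...
+//          KeSetAutoAlignmentThread(Thread, Previous);
+//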
diff --git a/private/ntos/ke/mips/timindex.s b/private/ntos/ke/mips/timindex.s
new file mode 100644
index 000000000..cf00f07be
--- /dev/null
+++ b/private/ntos/ke/mips/timindex.s
@@ -0,0 +1,111 @@
+// TITLE("Compute Timer Table Index")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// timindex.s
+//
+// Abstract:
+//
+// This module implements the code necessary to compute the timer table
+// index for a timer.
+//
+// Author:
+//
+// David N. Cutler (davec) 17-May-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KiTimeIncrementReciprocal 2 * 4
+ .extern KiTimeIncrementShiftCount 1
+
+ SBTTL("Compute Timer Table Index")
+//++
+//
+// ULONG
+// KiComputeTimerTableIndex (
+// IN LARGE_INTEGER Interval,
+// IN LARGE_INTEGER CurrentTime,
+// IN PKTIMER Timer
+// )
+//
+// Routine Description:
+//
+// This function computes the timer table index for the specified timer
+// object and stores the due time in the timer object.
+//
+// N.B. The interval parameter is guaranteed to be negative since it is
+// expressed as relative time.
+//
+// The formula for due time calculation is:
+//
+// Due Time = Current time - Interval
+//
+// The formula for the index calculation is:
+//
+// Index = (Due Time / Maximum Time) & (Table Size - 1)
+//
+// The due time division is performed using reciprocal multiplication.
+//
+// Arguments:
+//
+// Interval (a0, a1) - Supplies the relative time at which the timer is
+// to expire.
+//
+// CurrentTime (a2, a3) - Supplies the current interrupt time.
+//
+// Timer (10(sp)) - Supplies a pointer to a dispatch object of type timer.
+//
+// Return Value:
+//
+// The timer table index is returned as the function value and the due
+// time is stored in the timer object.
+//
+//--
+
+ LEAF_ENTRY(KiComputeTimerTableIndex)
+
+ subu t0,a2,a0 // subtract low parts
+ subu t1,a3,a1 // subtract high parts
+ sltu t2,a2,a0 // generate borrow from high part
+ subu t1,t1,t2 // subtract borrow
+ lw a0,4 * 4(sp) // get address of timer object
+ ld t2,KiTimeIncrementReciprocal // get 64-bit magic divisor
+ dsll t0,t0,32 // isolate low 32-bits of due time
+ dsrl t0,t0,32 //
+ dsll t1,t1,32 // isolate high 32-bits of due time
+ or t3,t1,t0 // merge low and high parts of due time
+ sd t3,TiDueTime(a0) // set due time of timer object
+
+//
+// Compute the product of the due time with the magic divisor.
+//
+
+ dmultu t2,t3 // compute 128-bit product
+ lbu v1,KiTimeIncrementShiftCount // get shift count
+ mfhi v0 // get high 32-bits of product
+
+//
+// Right shift the result by the specified shift count and isolate the timer
+// table index.
+//
+
+ dsrl v0,v0,v1 // shift high part right count bits
+ and v0,v0,TIMER_TABLE_SIZE - 1 // compute index value
+ j ra // return
+
+ .end KiComputeTimerTableIndex
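+
+//
+// In C-like terms the computation above is
+//
+//     DueTime = CurrentTime - Interval;
+//     Index   = (ULONG)((HIGH64(DueTime * KiTimeIncrementReciprocal)
+//                           >> KiTimeIncrementShiftCount) &
+//                       (TIMER_TABLE_SIZE - 1));
+//
+// where HIGH64() denotes the upper 64 bits of the unsigned 128-bit product
+// (the dmultu/mfhi pair). The reciprocal and shift count are computed from
+// KeMaximumIncrement by KiComputeReciprocal during phase 0 initialization
+// (see initkr.c in this change), so the divide in the index formula is
+// replaced by a multiply and a shift.
+//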
diff --git a/private/ntos/ke/mips/vdm.c b/private/ntos/ke/mips/vdm.c
new file mode 100644
index 000000000..a92acd9bd
--- /dev/null
+++ b/private/ntos/ke/mips/vdm.c
@@ -0,0 +1,52 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ VDM.C
+
+Abstract:
+
+ This routine has a stub for the x86 only api NtStartVdmExecution.
+
+Author:
+
+ Dave Hastings (daveh) 2 Apr 1991
+
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+NtInitializeVDM(
+ VOID
+ )
+{
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+NtVdmStartExecution (
+ )
+
+/*++
+
+Routine Description:
+
+ This routine returns STATUS_NOT_IMPLEMENTED
+
+Arguments:
+
+Return Value:
+
+ STATUS_NOT_IMPLEMENTED
+--*/
+{
+
+ return STATUS_NOT_IMPLEMENTED;
+
+}
diff --git a/private/ntos/ke/mips/x4ctxsw.s b/private/ntos/ke/mips/x4ctxsw.s
new file mode 100644
index 000000000..fcda60c50
--- /dev/null
+++ b/private/ntos/ke/mips/x4ctxsw.s
@@ -0,0 +1,1497 @@
+// TITLE("Context Swap")
+//++
+//
+// Copyright (c) 1991 - 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// x4ctxswap.s
+//
+// Abstract:
+//
+// This module implements the MIPS machine dependent code necessary to
+// field the dispatch interrupt and to perform kernel initiated context
+// switching.
+//
+// Author:
+//
+// David N. Cutler (davec) 1-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+//#define _COLLECT_SWITCH_DATA_ 1
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeNumberProcessIds 4
+ .extern KeTickCount 3 * 4
+ .extern KiContextSwapLock 4
+ .extern KiDispatcherLock 4
+ .extern KiIdleSummary 4
+ .extern KiReadySummary 4
+ .extern KiSynchIrql 4
+ .extern KiWaitInListHead 2 * 4
+ .extern KiWaitOutListHead 2 * 4
+
+ SBTTL("Switch To Thread")
+//++
+//
+// NTSTATUS
+// KiSwitchToThread (
+// IN PKTHREAD NextThread,
+// IN ULONG WaitReason,
+// IN ULONG WaitMode,
+// IN PKEVENT WaitObject
+// )
+//
+// Routine Description:
+//
+// This function performs an optimal switch to the specified target thread
+// if possible. No timeout is associated with the wait, thus the issuing
+// thread will wait until the wait event is signaled or an APC is delivered.
+//
+// N.B. This routine is called with the dispatcher database locked.
+//
+// N.B. The wait IRQL is assumed to be set for the current thread and the
+// wait status is assumed to be set for the target thread.
+//
+// N.B. It is assumed that if a queue is associated with the target thread,
+// then the concurrency count has been incremented.
+//
+// N.B. Control is returned from this function with the dispatcher database
+// unlocked.
+//
+// Arguments:
+//
+// NextThread - Supplies a pointer to a dispatcher object of type thread.
+//
+// WaitReason - Supplies the reason for the wait operation.
+//
+// WaitMode - Supplies the processor wait mode.
+//
+// WaitObject - Supplies a pointer to a dispatcher object of type event
+// or semaphore.
+//
+// Return Value:
+//
+// The wait completion status. A value of STATUS_SUCCESS is returned if
+// the specified object satisfied the wait. A value of STATUS_USER_APC is
+// returned if the wait was aborted to deliver a user APC to the current
+// thread.
+//--
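+//
+// N.B. The direct dispatch path below is taken only when the target thread's
+//      kernel stack is resident, its process is in memory, its affinity
+//      includes the current processor, no other thread has already been
+//      selected to run on this processor, and the client/server priority
+//      rules described at the individual branches are satisfied. In all
+//      other cases control transfers to LongWay, which readies the target
+//      thread via KiReadyThread and completes the wait through the common
+//      continuation path.
+//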
+
+ NESTED_ENTRY(KiSwitchToThread, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s2
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+
+ PROLOGUE_END
+
+//
+// Save the wait reason, the wait mode, and the wait object address.
+//
+
+ sw a1,ExceptionFrameLength + (1 * 4)(sp) // save wait reason
+ sw a2,ExceptionFrameLength + (2 * 4)(sp) // save wait mode
+ sw a3,ExceptionFrameLength + (3 * 4)(sp) // save wait object address
+
+//
+// If the target thread's kernel stack is resident, the target thread's
+// process is in the balance set, the target thread can run on the
+// current processor, and another thread has not already been selected
+// to run on the current processor, then do a direct dispatch to the
+// target thread bypassing all the general wait logic, thread priorities
+// permitting.
+//
+
+ lw t9,ThApcState + AsProcess(a0) // get target process address
+ lbu v0,ThKernelStackResident(a0) // get kernel stack resident
+ lw s0,KiPcr + PcPrcb(zero) // get address of PRCB
+ lbu v1,PrState(t9) // get target process state
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ beq zero,v0,LongWay // if eq, kernel stack not resident
+ xor v1,v1,ProcessInMemory // check if process in memory
+ move s2,a0 // set target thread address
+ bne zero,v1,LongWay // if ne, process not in memory
+
+#if !defined(NT_UP)
+
+ lw t0,PbNextThread(s0) // get address of next thread
+ lbu t1,ThNextProcessor(s1) // get current processor number
+ lw t2,ThAffinity(s2) // get target thread affinity
+ lw t3,KiPcr + PcSetMember(zero) // get processor set member
+ bne zero,t0,LongWay // if ne, next thread selected
+ and t3,t3,t2 // check for compatible affinity
+ beq zero,t3,LongWay // if eq, affinity not compatible
+
+#endif
+
+//
+// Compute the new thread priority.
+//
+
+ lbu t4,ThPriority(s1) // get client thread priority
+ lbu t5,ThPriority(s2) // get server thread priority
+ sltu v0,t4,LOW_REALTIME_PRIORITY // check if realtime client
+ sltu v1,t5,LOW_REALTIME_PRIORITY // check if realtime server
+ beq zero,v0,60f // if eq, realtime client
+ lbu t6,ThPriorityDecrement(s2) // get priority decrement value
+ lbu t7,ThBasePriority(s2) // get client base priority
+ beq zero,v1,50f // if eq, realtime server
+ addu t8,t7,1 // compute boosted priority
+ bne zero,t6,30f // if ne, server boost active
+
+//
+// Both the client and the server are not realtime and a priority boost
+// is not currently active for the server. Under these conditions an
+// optimal switch to the server can be performed if the base priority
+// of the server is above a minimum threshold or the boosted priority
+// of the server is not less than the client priority.
+//
+
+ sltu v0,t8,t4 // check if high enough boost
+ sltu v1,t8,LOW_REALTIME_PRIORITY // check if less than realtime
+ bne zero,v0,20f // if ne, boosted priority less
+ sb t8,ThPriority(s2) // assume boosted priority is okay
+ bne zero,v1,70f // if ne, less than realtime
+ li t8,LOW_REALTIME_PRIORITY - 1 // set high server priority
+ sb t8,ThPriority(s2) //
+ b 70f //
+
+//
+// The boosted priority of the server is less than the current priority of
+// the client. If the server base priority is above the required threshold,
+// then a optimal switch to the server can be performed by temporarily
+// raising the priority of the server to that of the client.
+//
+
+20: sltu v0,t7,BASE_PRIORITY_THRESHOLD // check if above threshold
+ subu t8,t4,t7 // compute priority decrement value
+ bne zero,v0,LongWay // if ne, priority below threshold
+ li t7,ROUND_TRIP_DECREMENT_COUNT // get system decrement count value
+ sb t8,ThPriorityDecrement(s2) // set priority decrement value
+ sb t4,ThPriority(s2) // set current server priority
+ sb t7,ThDecrementCount(s2) // set server decrement count
+ b 70f //
+
+//
+// A server boost has previously been applied to the server thread. Count
+// down the decrement count to determine if another optimal server switch
+// is allowed.
+//
+
+30: lbu t8,ThDecrementCount(s2) // decrement server count value
+ subu t8,t8,1 //
+ sb t8,ThDecrementCount(s2) // store updated decrement count
+ beq zero,t8,40f // if eq, no more switches allowed
+
+//
+// Another optimal switch to the server is allowed provided that the
+// server priority is not less than the client priority.
+//
+
+ sltu v0,t5,t4 // check if server lower priority
+ beq zero,v0,70f // if eq, server not lower priority
+ b LongWay //
+
+//
+// The server has exhausted the number of times an optimal switch may
+// be performed without reducing its priority. Reduce the priority of
+// the server to its original unboosted value minus one.
+//
+
+40: sb zero,ThPriorityDecrement(s2) // clear server priority decrement
+ sb t7,ThPriority(s2) // set server priority to base
+ b LongWay //
+
+//
+// The client is not realtime and the server is realtime. An optimal switch
+// to the server can be performed.
+//
+
+50: lb t8,PrThreadQuantum(t9) // get process quantum value
+ b 65f //
+
+//
+// The client is realtime. In order for an optimal switch to occur, the
+// server must also be realtime and run at a high or equal priority.
+//
+
+60: sltu v0,t5,t4 // check if server is lower priority
+ lb t8,PrThreadQuantum(t9) // get process quantum value
+ bne zero,v0,LongWay // if ne, server is lower priority
+65: sb t8,ThQuantum(s2) // set server thread quantum
+
+//
+// Set the next processor for the server thread.
+//
+
+70: //
+
+#if !defined(NT_UP)
+
+ sb t1,ThNextProcessor(s2) // set server next processor number
+
+#endif
+
+//
+// Set the address of the wait block list in the client thread, initialize
+// the event wait block, and insert the wait block in the client event wait list.
+//
+
+ addu t0,s1,EVENT_WAIT_BLOCK_OFFSET // compute wait block address
+ sw t0,ThWaitBlockList(s1) // set address of wait block list
+ sw zero,ThWaitStatus(s1) // set initial wait status
+ sw a3,WbObject(t0) // set address of wait object
+ sw t0,WbNextWaitBlock(t0) // set next wait block address
+ lui t1,WaitAny // get wait type and wait key
+ sw t1,WbWaitKey(t0) // set wait key and wait type
+ addu t1,a3,EvWaitListHead // compute wait object listhead address
+ lw t2,LsBlink(t1) // get backward link of listhead
+ addu t3,t0,WbWaitListEntry // compute wait block list entry address
+ sw t3,LsBlink(t1) // set backward link of listhead
+ sw t3,LsFlink(t2) // set forward link in last entry
+ sw t1,LsFlink(t3) // set forward link in wait entry
+ sw t2,LsBlink(t3) // set backward link in wait entry
+
+//
+// Set the client thread wait parameters, set the thread state to Waiting,
+// and insert the thread in the proper wait list.
+//
+
+ sb zero,ThAlertable(s1) // set alertable FALSE.
+ sb a1,ThWaitReason(s1) // set the wait reason
+ sb a2,ThWaitMode(s1) // set the wait mode
+ lb a3,ThEnableStackSwap(s1) // get kernel stack swap enable
+ lw t1,KeTickCount + 0 // get low part of tick count
+ sw t1,ThWaitTime(s1) // set thread wait time
+ li t0,Waiting // set thread state
+ sb t0,ThState(s1) //
+ la t1,KiWaitInListHead // get address of wait in listhead
+ beq zero,a2,75f // if eq, wait mode is kernel
+ beq zero,a3,75f // if eq, kernel stack swap disabled
+ sltu t0,t4,LOW_REALTIME_PRIORITY + 9 // check if priority in range
+ bne zero,t0,76f // if ne, thread priority in range
+75: la t1,KiWaitOutListHead // get address of wait out listhead
+76: lw t2,LsBlink(t1) // get backlink of wait listhead
+ addu t3,s1,ThWaitListEntry // compute wait list entry address
+ sw t3,LsBlink(t1) // set backward link of listhead
+ sw t3,LsFlink(t2) // set forward link in last entry
+ sw t1,LsFlink(t3) // set forward link in wait entry
+ sw t2,LsBlink(t3) // set backward link in wait entry
+
+//
+// If the current thread is processing a queue entry, then attempt to
+// activate another thread that is blocked on the queue object.
+//
+// N.B. The next thread address can change if the routine to activate
+// a queue waiter is called.
+//
+
+77: lw a0,ThQueue(s1) // get queue object address
+ beq zero,a0,78f // if eq, no queue object attached
+ sw s2,PbNextThread(s0) // set next thread address
+ jal KiActivateWaiterQueue // attempt to activate a blocked thread
+ lw s2,PbNextThread(s0) // get next thread address
+ sw zero,PbNextThread(s0) // set next thread address to NULL
+78: sw s2,PbCurrentThread(s0) // set address of current thread object
+ jal SwapContext // swap context
+
+//
+// Lower IRQL to its previous level.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+// N.B. The register s2 contains the address of the new thread on return.
+//
+
+ lw v0,ThWaitStatus(s2) // get wait completion status
+ lbu a0,ThWaitIrql(s2) // get original IRQL
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+//
+// If the wait was not interrupted to deliver a kernel APC, then return the
+// completion status.
+//
+
+ xor v1,v0,STATUS_KERNEL_APC // check if awakened for kernel APC
+ bne zero,v1,90f // if ne, normal wait completion
+
+//
+// Disable interrupts and attempt to acquire the dispatcher database lock.
+//
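+// The load linked/store conditional sequence below is logically the
+// following test-and-set spin (a sketch only; load_linked and
+// store_conditional stand for the ll/sc instructions, not kernel routines,
+// and the code below also reenables interrupts while it spins):
+//
+//     for (;;) {
+//         if (load_linked(&KiDispatcherLock) != 0) {
+//             continue;                       // lock owned, spin
+//         }
+//         if (store_conditional(&KiDispatcherLock, CurrentThread) != 0) {
+//             break;                          // lock acquired
+//         }
+//     }
+//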
+
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu s2,KiSynchIrql // get new IRQL level
+
+79: DISABLE_INTERRUPTS(t4) // disable interrupts
+
+#if !defined(NT_UP)
+
+80: ll t0,KiDispatcherLock // get current lock value
+ move t1,s1 // set ownership value
+ bne zero,t0,85f // if ne, spin lock owned
+ sc t1,KiDispatcherLock // set spin lock owned
+ beq zero,t1,80b // if eq, store conditional failure
+
+#endif
+
+//
+// Raise IRQL to synchronization level and save wait IRQL.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
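+// The inline raise is, in C terms, roughly the following (a sketch only;
+// the per-IRQL interrupt enable masks come from the IRQL translation table
+// in the PCR and the field names are approximate):
+//
+//     WaitIrql = PCR->CurrentIrql;
+//     Psr = (SavedPsr & ~(0xff << PSR_INTMASK)) |
+//           (PCR->IrqlTable[KiSynchIrql] << PSR_INTMASK);
+//     PCR->CurrentIrql = KiSynchIrql;
+//     write_psr(Psr);                         // reenables interrupts
+//     Client->WaitIrql = WaitIrql;
+//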
+
+ lbu t1,KiPcr + PcIrqlTable(s2) // get translation table entry value
+ li t2,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t1,t1,PSR_INTMASK // shift table entry into position
+ lbu t3,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ and t4,t4,t2 // clear current interrupt enables
+ or t4,t4,t1 // set new interrupt enables
+ sb s2,KiPcr + PcCurrentIrql(zero) // set new IRQL level
+
+ ENABLE_INTERRUPTS(t4) // enable interrupts
+
+ sb t3,ThWaitIrql(s1) // set client wait IRQL
+ b ContinueWait //
+
+#if !defined(NT_UP)
+
+85: ENABLE_INTERRUPTS(t4) // enable interrupts
+
+ b 79b // try again
+
+#endif
+
+//
+// Ready the target thread for execution and wait on the specified wait
+// object.
+//
+
+LongWay: //
+ jal KiReadyThread // ready thread for execution
+
+//
+// Continue the wait and return the wait completion status.
+//
+// N.B. The wait continuation routine is called with the dispatcher
+// database locked.
+//
+
+ContinueWait: //
+ lw a0,ExceptionFrameLength + (3 * 4)(sp) // get wait object address
+ lw a1,ExceptionFrameLength + (1 * 4)(sp) // get wait reason
+ lw a2,ExceptionFrameLength + (2 * 4)(sp) // get wait mode
+ jal KiContinueClientWait // continue client wait
+90: lw s0,ExIntS0(sp) // restore register s0 - s2
+ lw s1,ExIntS1(sp) //
+ lw s2,ExIntS2(sp) //
+ lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+ .end KiSwitchToThread
+
+ SBTTL("Unlock Dispatcher Database")
+//++
+//
+// VOID
+// KiUnlockDispatcherDatabase (
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This routine is entered at synchronization level with the dispatcher
+// database locked. Its function is to either unlock the dispatcher
+// database and return or initiate a context switch if another thread
+// has been selected for execution.
+//
+// N.B. This code merges with the following swap context code.
+//
+// N.B. A context switch CANNOT be initiated if the previous IRQL
+// is greater than or equal to DISPATCH_LEVEL.
+//
+// N.B. This routine is carefully written to be a leaf function. If,
+// however, a context swap should be performed, the routine is
+// switched to a nested function.
+//
+// Arguments:
+//
+// OldIrql (a0) - Supplies the IRQL when the dispatcher database
+// lock was acquired.
+//
+// Return Value:
+//
+// None.
+//
+//--
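+
+//
+// In C terms the decision made by this routine is roughly the following
+// (a sketch only; the helper names are descriptive, not actual kernel
+// routines):
+//
+//     if ((Prcb->NextThread != NULL) && (OldIrql < DISPATCH_LEVEL)) {
+//         KxUnlockDispatcherDatabase();       // switch threads; SwapContext
+//                                             //  releases the dispatcher lock
+//     } else {
+//         ReleaseDispatcherLock();
+//         if ((Prcb->NextThread != NULL) && (Prcb->DpcRoutineActive == FALSE)) {
+//             RequestDispatchInterrupt();     // via the cause register
+//         }
+//         LowerIrql(OldIrql);
+//     }
+//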
+
+ LEAF_ENTRY(KiUnlockDispatcherDatabase)
+
+//
+// Check if a thread has been scheduled to execute on the current processor.
+//
+
+ lw t0,KiPcr + PcPrcb(zero) // get address of PRCB
+ and a0,a0,0xff // isolate old IRQL
+ sltu t1,a0,DISPATCH_LEVEL // check if IRQL below dispatch level
+ lw t2,PbNextThread(t0) // get next thread address
+ bne zero,t2,30f // if ne, a new thread selected
+
+//
+// A new thread has not been selected to run on the current processor.
+// Release the dispatcher database lock and restore IRQL to its previous
+// level.
+//
+
+10: //
+
+#if !defined(NT_UP)
+
+ sw zero,KiDispatcherLock // set spin lock not owned
+
+#endif
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+//
+// A new thread has been selected to run on the current processor, but
+// the new IRQL is not below dispatch level. If the current processor is
+// not executing a DPC, then request a dispatch interrupt on the current
+// processor before releasing the dispatcher lock and restoring IRQL.
+//
+
+
+20: bne zero,t3,10b // if ne, DPC routine active
+
+#if !defined(NT_UP)
+
+ sw zero,KiDispatcherLock // set spin lock not owned
+
+#endif
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,cause // get exception cause register
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ or t3,t3,DISPATCH_INTERRUPT // set dispatch interrupt request
+ mtc0 t3,cause // set exception cause register
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+//
+// A new thread has been selected to run on the current processor.
+//
+// If the new IRQL is less than dispatch level, then switch to the new
+// thread.
+//
+// N.B. The jump to the code that switches to the next thread is required.
+//
+
+30: lw t3,PbDpcRoutineActive(t0) // get DPC active flag
+ beq zero,t1,20b // if eq, IRQL not below dispatch
+ j KxUnlockDispatcherDatabase //
+
+ .end KiUnlockDispatcherDatabase
+
+//
+// N.B. This routine is carefully written as a nested function. Control
+// drops into this function from above.
+//
+
+ NESTED_ENTRY(KxUnlockDispatcherDatabase, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s2
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+
+ PROLOGUE_END
+
+ move s0,t0 // set address of PRCB
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ move s2,t2 // set next thread address
+ sb a0,ThWaitIrql(s1) // save previous IRQL
+ sw zero,PbNextThread(s0) // clear next thread address
+
+//
+// Reready current thread for execution and swap context to the selected
+// thread.
+//
+// N.B. The return from the call to swap context is directly to the swap
+// thread exit.
+//
+
+ move a0,s1 // set address of previous thread object
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+ jal KiReadyThread // reready thread for execution
+ la ra,KiSwapThreadExit // set return address
+ j SwapContext // swap context
+
+ .end KxUnlockDispatcherDatabase
+
+ SBTTL("Swap Thread")
+//++
+//
+// VOID
+// KiSwapThread (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This routine is called to select the next thread to run on the
+// current processor and to perform a context switch to the thread.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Wait completion status (v0).
+//
+//--
+
+ NESTED_ENTRY(KiSwapThread, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+ sw s0,ExIntS0(sp) // save integer registers s0 - s2
+ sw s1,ExIntS1(sp) //
+ sw s2,ExIntS2(sp) //
+
+ PROLOGUE_END
+
+ .set noreorder
+ .set noat
+ lw s0,KiPcr + PcPrcb(zero) // get address of PRCB
+ lw t0,KiReadySummary // get ready summary
+ lw s1,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw s2,PbNextThread(s0) // get address of next thread
+
+#if !defined(NT_UP)
+
+ lw t1,KiPcr + PcSetMember(zero) // get processor affinity mask
+ lbu v0,PbNumber(s0) // get current processor number
+ lw v1,KeTickCount + 0 // get low part of tick count
+
+#endif
+
+ srl t3,t0,16 // isolate bits <31:16> of summary
+ li t2,16 // set base bit number
+ bnel zero,s2,120f // if ne, next thread selected
+ sw zero,PbNextThread(s0) // zero address of next thread
+
+//
+// Find the highest nibble in the ready summary that contains a set bit
+// and left justify so the nibble is in bits <31:28>.
+//
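+// The net effect of the scan below is to find the highest set bit in the
+// ready summary, that is, the highest priority level whose ready queue is
+// nonempty; in C terms roughly (sketch only):
+//
+//     Priority = 31;
+//     while ((ReadySummary & (1 << Priority)) == 0) {
+//         Priority -= 1;
+//     }
+//
+// The code narrows the search to the highest nonzero byte and nibble first,
+// then left justifies the summary so the scan loop at label 30 can test one
+// bit per iteration using the sign bit.
+//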
+
+ bne zero,t3,10f // if ne, bits <31:16> are nonzero
+ srl t3,t3,8 // isolate bits <31:24> of summary
+ li t2,0 // set base bit number
+ srl t3,t0,8 // isolate bits <15:8> of summary
+10: bnel zero,t3,20f // if ne, bits <15:8> are nonzero
+ addu t2,t2,8 // add bit offset to nonzero byte
+20: srl t3,t0,t2 // isolate highest nonzero byte
+ addu t2,t2,3 // adjust to high bit in nibble
+ sltu t4,t3,0x10 // check if high nibble nonzero
+ xor t4,t4,1 // complement less than indicator
+ sll t4,t4,2 // multiply by nibble width
+ addu t2,t2,t4 // compute ready queue priority
+ la t3,KiDispatcherReadyListHead // get ready listhead base address
+ nor t4,t2,zero // compute left justify shift count
+ sll t4,t0,t4 // left justify ready summary to nibble
+
+//
+// If the next bit is set in the ready summary, then scan the corresponding
+// dispatcher ready queue.
+//
+
+30: bltz t4,50f // if ltz, queue contains an entry
+ sll t4,t4,1 // position next ready summary bit
+ bne zero,t4,30b // if ne, more queues to scan
+ subu t2,t2,1 // decrement ready queue priority
+
+//
+// All ready queues were scanned without finding a runnable thread so
+// default to the idle thread and set the appropriate bit in idle summary.
+//
+
+40: //
+
+#if defined(_COLLECT_SWITCH_DATA_)
+
+ la t0,KeThreadSwitchCounters // get switch counters address
+ lw v0,TwSwitchToIdle(t0) // increment switch to idle count
+ addu v0,v0,1 //
+ sw v0,TwSwitchToIdle(t0) //
+
+#endif
+
+#if defined(NT_UP)
+
+ li t0,1 // get current idle summary
+#else
+
+ lw t0,KiIdleSummary // get current idle summary
+ or t0,t0,t1 // set member bit in idle summary
+
+#endif
+
+ sw t0,KiIdleSummary // set new idle summary
+ b 120f //
+ lw s2,PbIdleThread(s0) // set address of idle thread
+
+//
+// If the thread can execute on the current processor, then remove it from
+// the dispatcher ready queue.
+//
+
+50: sll t5,t2,3 // compute ready listhead offset
+ addu t5,t5,t3 // compute ready queue address
+ lw t6,LsFlink(t5) // get address of first queue entry
+ subu s2,t6,ThWaitListEntry // compute address of thread object
+
+#if !defined(NT_UP)
+
+60: lw t7,ThAffinity(s2) // get thread affinity
+ lw t8,ThWaitTime(s2) // get time of thread ready
+ lbu t9,ThNextProcessor(s2) // get last processor number
+ and t7,t7,t1 // check for compatible thread affinity
+ bne zero,t7,70f // if ne, thread affinity compatible
+ subu t8,v1,t8 // compute length of wait
+ lw t6,LsFlink(t6) // get address of next entry
+ bne t5,t6,60b // if ne, not end of list
+ subu s2,t6,ThWaitListEntry // compute address of thread object
+ bne zero,t4,30b // if ne, more queues to scan
+ subu t2,t2,1 // decrement ready queue priority
+ b 40b //
+ nop // fill
+
+//
+// If the thread last ran on the current processor, the processor is the
+// ideal processor for the thread, the thread has been waiting for longer
+// than a quantum, or its priority is greater than low realtime plus 9,
+// then select the thread. Otherwise, an attempt is made to find a more
+// appropriate candidate.
+//
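+// In C terms the selection test is roughly the following (a sketch only;
+// the names follow the thread fields referenced below):
+//
+//     if ((Thread->NextProcessor == CurrentProcessor) ||
+//         (Thread->IdealProcessor == CurrentProcessor) ||
+//         ((TickCount - Thread->WaitTime) > READY_SKIP_QUANTUM) ||
+//         (Priority >= LOW_REALTIME_PRIORITY + 9)) {
+//         // select this thread
+//     } else {
+//         // scan forward for a thread whose last or ideal processor matches
+//     }
+//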
+
+70: lbu a0,ThIdealProcessor(s2) // get ideal processor number
+ beq v0,t9,110f // if eq, last processor number match
+ sltu t7,t2,LOW_REALTIME_PRIORITY + 9 // check if priority in range
+ beq v0,a0,100f // if eq, ideal processor number match
+ sltu t8,t8,READY_SKIP_QUANTUM + 1 // check if wait time exceeded
+ and t8,t8,t7 // check if priority and time match
+ beql zero,t8,110f // if eq, priority or time mismatch
+ sb v0,ThNextProcessor(s2) // set next processor number
+
+//
+// Search forward in the ready queue until the end of the list is reached
+// or a more appropriate thread is found.
+//
+
+ lw t7,LsFlink(t6) // get address of next entry
+80: beq t5,t7,100f // if eq, end of list
+ subu a1,t7,ThWaitListEntry // compute address of thread object
+ lw a2,ThAffinity(a1) // get thread affinity
+ lw t8,ThWaitTime(a1) // get time of thread ready
+ lbu t9,ThNextProcessor(a1) // get last processor number
+ lbu a0,ThIdealProcessor(a1) // get ideal processor number
+ and a2,a2,t1 // check for compatible thread affinity
+ subu t8,v1,t8 // compute length of wait
+ beq zero,a2,85f // if eq, thread affinity not compatible
+ sltu t8,t8,READY_SKIP_QUANTUM + 1 // check if wait time exceeded
+ beql v0,t9,90f // if eq, processor number match
+ move s2,a1 // set thread address
+ beql v0,a0,90f // if eq, processor number match
+ move s2,a1 // set thread address
+85: bne zero,t8,80b // if ne, wait time not exceeded
+ lw t7,LsFlink(t7) // get address of next entry
+ b 110f //
+ sb v0,ThNextProcessor(s2) // set next processor number
+
+90: move t6,t7 // set list entry address
+100: sb v0,ThNextProcessor(s2) // set next processor number
+ .set at
+ .set reorder
+
+110: //
+
+#if defined(_COLLECT_SWITCH_DATA_)
+
+ la v1,KeThreadSwitchCounters + TwFindIdeal // get counter address
+ lbu a0,ThIdealProcessor(s2) // get ideal processor number
+ lbu t9,ThLastprocessor(s2) // get last processor number
+ beq v0,a0,115f // if eq, processor number match
+ addu v1,v1,TwFindLast - TwFindIdeal // compute counter address
+ beq v0,t9,115f // if eq, processor number match
+ addu v1,v1,TwFindAny - TwFindLast // compute counter address
+115: lw v0,0(v1) // increment appropriate counter
+ addu v0,v0,1 //
+ sw v0,0(v1) //
+
+#endif
+
+#endif
+
+//
+// Remove the selected thread from the ready queue.
+//
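+// This is the standard doubly linked list removal followed by clearing the
+// priority bit in the ready summary if the queue becomes empty; in C terms
+// roughly (sketch only):
+//
+//     RemoveEntryList(&Thread->WaitListEntry);
+//     if (IsListEmpty(&KiDispatcherReadyListHead[Priority])) {
+//         KiReadySummary &= ~(1 << Priority);
+//     }
+//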
+
+ lw t7,LsFlink(t6) // get list entry forward link
+ lw t8,LsBlink(t6) // get list entry backward link
+ li t1,1 // set bit for mask generation
+ sw t7,LsFlink(t8) // set forward link in previous entry
+ sw t8,LsBlink(t7) // set backward link in next entry
+ bne t7,t8,120f // if ne, list is not empty
+ sll t1,t1,t2 // compute ready summary set member
+ xor t1,t1,t0 // clear ready summary bit
+ sw t1,KiReadySummary //
+
+//
+// Swap context to the next thread.
+//
+
+ .set noreorder
+ .set noat
+120: jal SwapContext // swap context
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+ .set at
+ .set reorder
+
+//
+// Lower IRQL, deallocate context frame, and return wait completion status.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+// N.B. The register v0 contains the kernel APC pending state on return.
+//
+// N.B. The register s2 contains the address of the new thread on return.
+//
+
+ ALTERNATE_ENTRY(KiSwapThreadExit)
+
+ lw s1,ThWaitStatus(s2) // get wait completion status
+ lbu a0,ThWaitIrql(s2) // get original wait IRQL
+ sltu v1,a0,APC_LEVEL // check if wait IRQL is zero
+ and v1,v1,v0 // check if IRQL and APC pending set
+ beq zero,v1,10f // if eq, IRQL or pending not set
+
+//
+// Lower IRQL to APC level and dispatch APC interrupt.
+//
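+// Together with the test above, this implements the kernel APC bypass; in
+// C terms roughly (a sketch only; LowerIrql stands for the inline IRQL
+// sequences used below):
+//
+//     if ((WaitIrql == 0) && KernelApcPending) {
+//         LowerIrql(APC_LEVEL);               // also clears the pending APC
+//         Prcb->ApcBypassCount += 1;          //  software interrupt request
+//         KiDeliverApc(KernelMode, NULL, NULL);
+//         WaitIrql = 0;
+//     }
+//     LowerIrql(WaitIrql);
+//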
+
+ .set noreorder
+ .set noat
+ li a0,APC_LEVEL // set new IRQL level
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ li t2,1 << PSR_CU1 // get coprocessor 1 enable bit
+ mfc0 t3,psr // get current PSR
+ mtc0 t2,psr // disable interrupts
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ mfc0 t4,cause // get exception cause register
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ and t4,t4,DISPATCH_INTERRUPT // clear APC interrupt pending
+ mtc0 t4,cause //
+ mtc0 t3,psr // enable interrupts
+ .set at
+ .set reorder
+
+ lw t0,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t1,PbApcBypassCount(t0) // increment the APC bypass count
+ addu t1,t1,1 //
+ sw t1,PbApcBypassCount(t0) // store result
+ move a0,zero // set previous mode to kernel
+ move a1,zero // set exception frame address
+ move a2,zero // set trap frame address
+ jal KiDeliverApc // deliver kernel mode APC
+ move a0,zero // set original wait IRQL
+
+//
+// Lower IRQL to wait level, set return status, restore registers, and
+// return.
+//
+
+ .set noreorder
+ .set noat
+10: lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ li t2,1 << PSR_CU1 // get coprocessor 1 enable bit
+ mfc0 t3,psr // get current PSR
+ mtc0 t2,psr // disable interrupts
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ mtc0 t3,psr // enable interrupts
+ .set at
+ .set reorder
+
+ move v0,s1 // set return status
+ lw s0,ExIntS0(sp) // restore register s0 - s2
+ lw s1,ExIntS1(sp) //
+ lw s2,ExIntS2(sp) //
+ lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+ .end KiSwapThread
+
+ SBTTL("Dispatch Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a software interrupt generated
+// at DISPATCH_LEVEL. Its function is to process the Deferred Procedure Call
+// (DPC) list, and then perform a context switch if a new thread has been
+// selected for execution on the processor.
+//
+// This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+// database unlocked. When a return to the caller finally occurs, the
+// IRQL remains at DISPATCH_LEVEL, and the dispatcher database is still
+// unlocked.
+//
+// N.B. On entry to this routine all integer registers and the volatile
+// floating registers have been saved.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to the base of a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KiDispatchInterrupt, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate context frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+ lw s0,KiPcr + PcPrcb(zero) // get address of PRCB
+
+//
+// Process the deferred procedure call list.
+//
+
+PollDpcList: //
+
+ DISABLE_INTERRUPTS(s1) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t0,cause // get exception cause register
+ and t0,t0,APC_INTERRUPT // clear dispatch interrupt pending
+ mtc0 t0,cause // set exception cause register
+ .set at
+ .set reorder
+
+ addu a1,s0,PbDpcListHead // compute DPC listhead address
+ lw a0,LsFlink(a1) // get address of next entry
+ beq a0,a1,20f // if eq, DPC list is empty
+
+//
+// Switch to interrupt stack to process the DPC list.
+//
+
+ lw t1,KiPcr + PcInterruptStack(zero) // get interrupt stack address
+ subu t2,t1,ExceptionFrameLength // allocate exception frame
+ sw sp,ExIntS4(t2) // save old stack pointer
+ sw zero,ExIntRa(t2) // clear return address
+ sw t1,KiPcr + PcInitialStack(zero) // set initial stack address
+ subu t1,t1,KERNEL_STACK_SIZE // compute and set stack limit
+ sw t1,KiPcr + PcStackLimit(zero) //
+ move sp,t2 // set new stack pointer
+ sw sp,KiPcr + PcOnInterruptStack(zero) // set stack indicator
+ move v0,s1 // set previous PSR value
+ jal KiRetireDpcList // process the DPC list
+
+//
+// Switch back to previous stack and restore the initial stack limit.
+//
+
+ lw t1,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t2,ThInitialStack(t1) // get initial stack address
+ lw t3,ThStackLimit(t1) // get stack limit
+ lw sp,ExIntS4(sp) // restore stack pointer
+ sw t2,KiPcr + PcInitialStack(zero) // set initial stack address
+ sw t3,KiPcr + PcStackLimit(zero) // set stack limit
+ sw zero,KiPcr + PcOnInterruptStack(zero) // clear stack indicator
+
+20: ENABLE_INTERRUPTS(s1) // enable interrupts
+
+//
+// Check to determine if quantum end has occurred.
+//
+// N.B. If a new thread is selected as a result of processing a quantum
+// end request, then the new thread is returned with the dispatcher
+// database locked. Otherwise, NULL is returned with the dispatcher
+// database unlocked.
+//
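+// In C terms roughly (sketch only):
+//
+//     if (PCR->QuantumEnd != 0) {
+//         PCR->QuantumEnd = 0;
+//         NewThread = KiQuantumEnd();         // returns with the dispatcher
+//         if (NewThread != NULL) {            //  database locked if non-NULL
+//             goto SwapToNewThread;
+//         }
+//         return;
+//     }
+//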
+
+ lw t0,KiPcr + PcQuantumEnd(zero) // get quantum end indicator
+ bne zero,t0,70f // if ne, quantum end request
+
+//
+// Check to determine if a new thread has been selected for execution on
+// this processor.
+//
+
+ lw s2,PbNextThread(s0) // get address of next thread object
+ beq zero,s2,50f // if eq, no new thread selected
+
+//
+// Disable interrupts and attempt to acquire the dispatcher database lock.
+//
+
+ lbu a0,KiSynchIrql // get new IRQL value
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+#if !defined(NT_UP)
+
+30: ll t0,KiDispatcherLock // get current lock value
+ move t1,s2 // set lock ownership value
+ bne zero,t0,60f // if ne, spin lock owned
+ sc t1,KiDispatcherLock // set spin lock owned
+ beq zero,t1,30b // if eq, store conditional failed
+
+#endif
+
+//
+// Raise IRQL to synchronization level.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+40: lw s1,KiPcr + PcCurrentThread(zero) // get current thread object address
+ lw s2,PbNextThread(s0) // get address of next thread object
+ sw zero,PbNextThread(s0) // clear address of next thread object
+
+//
+// Reready current thread for execution and swap context to the selected thread.
+//
+
+ move a0,s1 // set address of previous thread object
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+ jal KiReadyThread // reready thread for execution
+ jal SwapContext // swap context
+
+//
+// Restore saved registers, deallocate stack frame, and return.
+//
+
+50: lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+//
+// Enable interrupts and check DPC queue.
+//
+
+#if !defined(NT_UP)
+
+60: ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ j PollDpcList //
+
+#endif
+
+//
+// Process quantum end event.
+//
+// N.B. If the quantum end code returns a NULL value, then no next thread
+// has been selected for execution. Otherwise, a next thread has been
+// selected and the dispatcher database is locked.
+//
+
+70: sw zero,KiPcr + PcQuantumEnd(zero) // clear quantum end indicator
+ jal KiQuantumEnd // process quantum end request
+ bne zero,v0,40b // if ne, next thread selected
+ lw ra,ExIntRa(sp) // get return address
+ addu sp,sp,ExceptionFrameLength // deallocate context frame
+ j ra // return
+
+ .end KiDispatchInterrupt
+
+ SBTTL("Swap Context to Next Thread")
+//++
+//
+// Routine Description:
+//
+// This routine is called to swap context from one thread to the next.
+//
+// Arguments:
+//
+// s0 - Address of Processor Control Block.
+// s1 - Address of previous thread object.
+// s2 - Address of next thread object.
+// sp - Pointer to a exception frame.
+//
+// Return value:
+//
+// v0 - Kernel APC pending.
+// s0 - Address of Processor Control Block.
+// s2 - Address of current thread object.
+//
+//--
+
+ NESTED_ENTRY(SwapContext, 0, zero)
+
+//
+// Set the thread state to running.
+//
+
+ li t0,Running // set thread state to running
+ sb t0,ThState(s2) //
+
+//
+// Acquire the context swap lock so the address space of the old process
+// cannot be deleted and then release the dispatcher database lock.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
+
+#if !defined(NT_UP)
+
+10: ll t0,KiContextSwapLock // get current lock value
+ move t1,s2 // set ownership value
+ bne zero,t0,10b // if ne, lock already owned
+ sc t1,KiContextSwapLock // set lock ownership value
+ beq zero,t1,10b // if eq, store conditional failed
+ sw zero,KiDispatcherLock // set lock not owned
+
+#endif
+
+//
+// Save old thread nonvolatile context.
+//
+
+ sw ra,ExSwapReturn(sp) // save return address
+ sw s3,ExIntS3(sp) // save integer registers s3 - s8.
+ sw s4,ExIntS4(sp) //
+ sw s5,ExIntS5(sp) //
+ sw s6,ExIntS6(sp) //
+ sw s7,ExIntS7(sp) //
+ sw s8,ExIntS8(sp) //
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+ PROLOGUE_END
+
+//
+// Accumulate the total time spent in a thread.
+//
+
+#if defined(PERF_DATA)
+
+ addu a0,sp,ExFltF20 // compute address of result
+ move a1,zero // set address of optional frequency
+ jal KeQueryPerformanceCounter // query performance counter
+ lw t0,ExFltF20(sp) // get current cycle count
+ lw t1,ExFltF20 + 4(sp) //
+ lw t2,PbStartCount(s0) // get starting cycle count
+ lw t3,PbStartCount + 4(s0) //
+ sw t0,PbStartCount(s0) // set starting cycle count
+ sw t1,PbStartCount + 4(s0) //
+ lw t4,EtPerformanceCountLow(s1) // get accumulated cycle count
+ lw t5,EtPerformanceCountHigh(s1) //
+ subu t6,t0,t2 // subtract low parts
+ subu t7,t1,t3 // subtract high parts
+ sltu v0,t0,t2 // generate borrow from high part
+ subu t7,t7,v0 // subtract borrow
+ addu t6,t6,t4 // add low parts
+ addu t7,t7,t5 // add high parts
+ sltu v0,t6,t4 // generate carry into high part
+ addu t7,t7,v0 // add carry
+ sw t6,EtPerformanceCountLow(s1) // set accumulated cycle count
+ sw t7,EtPerformanceCountHigh(s1) //
+
+#endif
+
+//
+// The following entry point is used to switch from the idle thread to
+// another thread.
+//
+
+ ALTERNATE_ENTRY(SwapFromIdle)
+
+#if DBG
+
+ lw t0,ThInitialStack(s1) // get initial stack address
+ lw t1,ThStackLimit(s1) // get stack limit
+ sltu t2,sp,t0 // stack within limits?
+ sltu t3,sp,t1 //
+ xor t3,t3,t2 //
+ bne zero,t3,5f // if ne, stack within limits
+ li a0,PANIC_STACK_SWITCH // set bug check code
+ move a1,t0 // set initial stack address
+ move a2,t1 // set stack limit
+ move a3,sp // set stack address
+ jal KeBugCheckEx // bug check
+
+#endif
+
+//
+// Get the old and new process object addresses.
+//
+
+5: lw s3,ThApcState + AsProcess(s2) // get new process address
+ lw s4,ThApcState + AsProcess(s1) // get old process address
+
+//
+// Save the processor state, swap stack pointers, and set the new stack
+// limits.
+//
+
+ .set noreorder
+ .set noat
+ mfc0 s7,psr // save current PSR
+ li t1,1 << PSR_CU1 // disable interrupts
+ mtc0 t1,psr // 3 cycle hazard
+ lw t2,ThInitialStack(s2) // get new initial stack pointer
+ lw t3,ThStackLimit(s2) // get new stack limit
+ sw sp,ThKernelStack(s1) // save old kernel stack pointer
+ lw sp,ThKernelStack(s2) // get new kernel stack pointer
+ ld t1,ThTeb(s2) // get user TEB and TLS array addresses
+ sw t2,KiPcr + PcInitialStack(zero) // set initial stack pointer
+ sw t3,KiPcr + PcStackLimit(zero) // set stack limit
+ sd t1,KiPcr + PcTeb(zero) // set user TEB and TLS array addresses
+
+//
+// If the new process is not the same as the old process, then swap the
+// address space to the new process.
+//
+// N.B. The context swap lock cannot be dropped until all references to the
+// old process address space are complete. This includes any possible
+// TB Misses that could occur referencing the new address space while
+// still executing in the old address space.
+//
+// N.B. The process address space swap is executed with interrupts disabled.
+//
+
+#if defined(NT_UP)
+
+ beq s3,s4,20f // if eq, old and new process match
+
+#else
+
+ beql s3,s4,20f // if eq, old and new process match
+ sw zero,KiContextSwapLock // set spin lock not owned
+
+//
+// Update the processor set masks.
+//
+
+ lw t0,KiPcr + PcSetMember(zero) // get processor set member
+ lw t2,PrActiveProcessors(s3) // get new active processor set
+ lw t1,PrActiveProcessors(s4) // get old active processor set
+ or t2,t2,t0 // set processor member in set
+ xor t1,t1,t0 // clear processor member in set
+ sw t2,PrActiveProcessors(s3) // set new active processor set
+ sw t1,PrActiveProcessors(s4) // set old active processor set
+ sw zero,KiContextSwapLock // set spin lock not owned
+
+#endif
+
+ lw s5,PrDirectoryTableBase(s3) // get page directory PDE
+ lw s6,PrDirectoryTableBase + 4(s3) // get hyper space PDE
+ .set at
+ .set reorder
+
+//
+// Allocate a new process PID. If the new PID number is greater than the
+// number of PIDs supported on the host processor, then flush the entire
+// TB and reset the PID number to zero.
+//
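+// The allocation mirrors the instruction sequence below; in C terms roughly
+// (sketch only):
+//
+//     Pid = PCR->CurrentPid + (1 << ENTRYHI_PID);
+//     if (Pid >= KeNumberProcessIds) {
+//         KiFlushRandomTb();
+//         Pid = 0;
+//     }
+//     PCR->CurrentPid = Pid;
+//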
+
+ lw v1,KiPcr + PcCurrentPid(zero) // get current processor PID
+ lw t2,KeNumberProcessIds // get number of process id's
+ addu v1,v1,1 << ENTRYHI_PID // increment master system PID
+ sltu t2,v1,t2 // any more PIDs to allocate
+ bne zero,t2,10f // if ne, more PIDs to allocate
+
+//
+// Flush the random part of the TB.
+//
+
+ jal KiFlushRandomTb // flush random part of TB
+ move v1,zero // set next PID value
+
+//
+// Swap address space to the specified process.
+//
+
+10: sw v1,KiPcr + PcCurrentPid(zero) // set current processor PID
+ li t3,PDE_BASE // get virtual address of PDR
+ or t3,t3,v1 // merge process PID
+ li t4,PDR_ENTRY << INDEX_INDEX // set entry index for PDR
+
+ .set noreorder
+ .set noat
+ mtc0 t3,entryhi // set VPN2 and PID of TB entry
+ mtc0 s5,entrylo0 // set first PDE value
+ mtc0 s6,entrylo1 // set second PDE value
+ mtc0 t4,index // set index of PDR entry
+ nop // 1 cycle hazard
+ tlbwi // write system PDR TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+//
+// If the new thread has a kernel mode APC pending, then request an APC
+// interrupt.
+//
+
+ .set noreorder
+ .set noat
+20: lbu v0,ThApcState + AsKernelApcPending(s2) // get kernel APC pending
+ mfc0 t3,cause // get cause register contents
+ sll t2,v0,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t3,t3,t2 // merge possible APC interrupt request
+ mtc0 t3,cause // write exception cause register
+ mtc0 s7,psr // set new PSR
+ .set at
+ .set reorder
+
+//
+// Update the number of context switches for the current processor and the
+// new thread and save the address of the new thread object in the PCR.
+//
+
+ lw t0,PbContextSwitches(s0) // increment processor context switches
+ addu t0,t0,1 //
+ sw t0,PbContextSwitches(s0) //
+ lw t1,ThContextSwitches(s2) // increment thread context switches
+ addu t1,t1,1 //
+ sw t1,ThContextSwitches(s2) //
+ sw s2,KiPcr + PcCurrentThread(zero) // set address of new thread
+
+//
+// Restore new thread nonvolatile context.
+//
+
+ ldc1 f20,ExFltF20(sp) // restore floating registers f20 - f31
+ ldc1 f22,ExFltF22(sp) //
+ ldc1 f24,ExFltF24(sp) //
+ ldc1 f26,ExFltF26(sp) //
+ ldc1 f28,ExFltF28(sp) //
+ ldc1 f30,ExFltF30(sp) //
+ lw s3,ExIntS3(sp) // restore integer registers s3 - s8.
+ lw s4,ExIntS4(sp) //
+ lw s5,ExIntS5(sp) //
+ lw s6,ExIntS6(sp) //
+ lw s7,ExIntS7(sp) //
+ lw s8,ExIntS8(sp) //
+
+//
+// Set address of current thread object and return.
+//
+// N.B. The register s2 contains the address of the new thread on return.
+//
+
+ lw ra,ExSwapReturn(sp) // get return address
+ j ra // return
+
+ .end SwapContext
+
+ SBTTL("Swap Process")
+//++
+//
+// BOOLEAN
+// KiSwapProcess (
+// IN PKPROCESS NewProcess,
+// IN PKPROCESS OldProcess
+// )
+//
+// Routine Description:
+//
+// This function swaps the address space from one process to another by
+// assigning a new process id, if necessary, and loading the fixed entry
+// in the TB that maps the process page directory page.
+//
+// Arguments:
+//
+// NewProcess (a0) - Supplies a pointer to a control object of type process
+// which represents the new process that is switched to.
+//
+// OldProcess (a1) - Supplies a pointer to a control object of type process
+// which represents the old process that is switched from.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+SpArg: .space 4 * 4 // argument register save area
+ .space 4 * 3 // fill for alignment
+SpRa: .space 4 // saved return address
+SpFrameLength: // length of stack frame
+SpA0: .space 4 // saved argument register a0
+
+ NESTED_ENTRY(KiSwapProcess, SpFrameLength, zero)
+
+ subu sp,sp,SpFrameLength // allocate stack frame
+ sw ra,SpRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Acquire the context swap lock, clear the processor set member in the old
+// process, set the processor member in the new process, and release the
+// context swap lock.
+//
+
+#if !defined(NT_UP)
+
+10: ll t0,KiContextSwapLock // get current lock value
+ move t1,a0 // set ownership value
+ bne zero,t0,10b // if ne, lock already owned
+ sc t1,KiContextSwapLock // set lock ownership value
+ beq zero,t1,10b // if eq, store conditional failed
+ lw t0,KiPcr + PcSetMember(zero) // get processor set member
+ lw t2,PrActiveProcessors(a0) // get new active processor set
+ lw t1,PrActiveProcessors(a1) // get old active processor set
+ or t2,t2,t0 // set processor member in set
+ xor t1,t1,t0 // clear processor member in set
+ sw t2,PrActiveProcessors(a0) // set new active processor set
+ sw t1,PrActiveProcessors(a1) // set old active processor set
+ sw zero,KiContextSwapLock // clear lock value
+
+#endif
+
+//
+// Allocate a new process PID. If the new PID number is greater than the
+// number of PIDs supported on the host processor, then flush the entire
+// TB and reset the PID number to zero.
+//
+
+ lw v1,KiPcr + PcCurrentPid(zero) // get current processor PID
+ lw t2,KeNumberProcessIds // get number of process id's
+ addu v1,v1,1 << ENTRYHI_PID // increment master system PID
+ sltu t2,v1,t2 // any more PIDs to allocate
+ bne zero,t2,15f // if ne, more PIDs to allocate
+
+//
+// Flush the random part of the TB.
+//
+
+ sw a0,SpA0(sp) // save process object address
+ jal KiFlushRandomTb // flush random part of TB
+ lw a0,SpA0(sp) // restore process object address
+ move v1,zero // set next PID value
+
+//
+// Swap address space to the specified process.
+//
+
+15: sw v1,KiPcr + PcCurrentPid(zero) // set current processor PID
+ lw t1,PrDirectoryTableBase(a0) // get page directory PDE
+ lw t2,PrDirectoryTableBase + 4(a0) // get hyper space PDE
+ li t3,PDE_BASE // get virtual address of PDR
+ or t3,t3,v1 // merge process PID
+ li t4,PDR_ENTRY << INDEX_INDEX // set entry index for PDR
+
+ DISABLE_INTERRUPTS(t5) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mtc0 t3,entryhi // set VPN2 and PID of TB entry
+ mtc0 t1,entrylo0 // set first PDE value
+ mtc0 t2,entrylo1 // set second PDE value
+ mtc0 t4,index // set index of PDR entry
+ nop // 1 cycle hazard
+ tlbwi // write system PDR TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t5) // enable interrupts
+
+ lw ra,SpRa(sp) // restore return address
+ addu sp,sp,SpFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiSwapProcess
diff --git a/private/ntos/ke/mips/x4mpipi.s b/private/ntos/ke/mips/x4mpipi.s
new file mode 100644
index 000000000..9b5a4bc7a
--- /dev/null
+++ b/private/ntos/ke/mips/x4mpipi.s
@@ -0,0 +1,451 @@
+// TITLE("Interprocessor Interrupt support routines")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// x4mpipi.s
+//
+// Abstract:
+//
+// This module implements the MIPS specific functions required to
+// support multiprocessor systems.
+//
+// Author:
+//
+// David N. Cutler (davec) 22-Apr-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Interprocess Interrupt Processing")
+//++
+//
+// VOID
+// KeIpiInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// );
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interprocessor interrupt.
+// Its function is to process all interprocessor immediate and packet
+// requests.
+//
+// Arguments:
+//
+// TrapFrame (s8) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KeIpiInterrupt, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Process all interprocessor requests.
+//
+
+ jal KiIpiProcessRequests // process requests
+ andi v1,v0,IPI_FREEZE // check if freeze is requested
+ beq zero,v1,10f // if eq, no freeze requested
+
+//
+// Save the floating state.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Freeze the execution of the current processor.
+//
+
+ move a0,s8 // set address of trap frame
+ move a1,sp // set address of exception frame
+ jal KiFreezeTargetExecution // freeze current processor execution
+
+//
+// Restore the volatile floating state.
+//
+
+ RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+ ldc1 f20,ExFltF20(sp) // restore floating registers f20 - f31
+ ldc1 f22,ExFltF22(sp) //
+ ldc1 f24,ExFltF24(sp) //
+ ldc1 f26,ExFltF26(sp) //
+ ldc1 f28,ExFltF28(sp) //
+ ldc1 f30,ExFltF30(sp) //
+
+10: lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate exception frame
+ j ra // return
+
+ .end KeIpiInterrupt
+
+ SBTTL("Processor Request")
+//++
+//
+// ULONG
+// KiIpiProcessRequests (
+// VOID
+// );
+//
+// Routine Description:
+//
+// This routine processes interprocessor requests and returns a summary
+// of the requests that were processed.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The request summary is returned as the function value.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument save area
+PrS0: .space 4 // saved integer register s0
+PrS1: .space 4 // saved integer register s1
+ .space 4 // fill
+PrRa: .space 4 // saved return address
+PrFrameLength: // frame length
+
+ NESTED_ENTRY(KiIpiProcessRequests, PrFrameLength, zero)
+
+ subu sp,sp,PrFrameLength // allocate exception frame
+ sw s0,PrS0(sp) // save integer register s0
+
+#if NT_INST
+
+ sw s1,PrS1(sp) // save integer register s1
+
+#endif
+
+ sw ra,PrRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Read request summary and write a zero result interlocked.
+//
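+// The request summary doubleword packs the request flags in the low 32 bits
+// and the source processor block address in the high 32 bits; the lld/scd
+// pair below captures and clears it atomically, roughly (a sketch only;
+// load_linked_64/store_conditional_64 stand for the lld/scd instructions):
+//
+//     do {
+//         Packed = load_linked_64(&Prcb->RequestSummary);
+//     } while (store_conditional_64(&Prcb->RequestSummary, 0) == 0);
+//     Source = (PKPRCB)(ULONG)(Packed >> 32);
+//     Summary = (ULONG)Packed;
+//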
+
+ lw t0,KiPcr + PcPrcb(zero) // get current processor block address
+10: lld t1,PbRequestSummary(t0) // get request summary and entry address
+ move t2,zero // set zero value for store
+ scd t2,PbRequestSummary(t0) // zero request summary
+ beq zero,t2,10b // if eq, store conditional failed
+ dsra a0,t1,32 // shift entry address to low 32-bits
+ move s0,t1 // copy request summary
+
+//
+// Check for Packet ready.
+//
+// If a packet is ready, then get the address of the requested function
+// and call the function passing the address of the packet address as a
+// parameter.
+//
+
+ and t1,s0,IPI_PACKET_READY // check for packet ready
+ beq zero,t1,20f // if eq, packet not ready
+ lw t2,PbWorkerRoutine(a0) // get address of worker function
+ lw a1,PbCurrentPacket(a0) // get request parameters
+ lw a2,PbCurrentPacket + 4(a0) //
+ lw a3,PbCurrentPacket + 8(a0) //
+ jal t2 // call work routine
+
+#if NT_INST
+
+ lw s1,PbIpiCounts(t0) // get interrupt count structure
+ lw t1,IcPacket(s1) // increment number of packet requests
+ addu t1,t1,1 //
+ sw t1,IcPacket(s1) //
+
+#endif
+
+//
+// Check for APC interrupt request.
+//
+// If an APC interrupt is requested, then request a software interrupt at
+// APC level on the current processor.
+//
+
+20: and t1,s0,IPI_APC // check for APC interrupt request
+ beq zero,t1,25f // if eq, no APC interrupt requested
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ or t1,t1,APC_INTERRUPT // set APC interrupt request
+ mtc0 t1,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+#if NT_INST
+
+ lw t1,IcAPC(s1) // increment number of APC requests
+ addu t1,t1,1 //
+ sw t1,IcAPC(s1) //
+
+#endif
+
+//
+// Check for DPC interrupt request.
+//
+// If a DPC interrupt is requested, then request a software interrupt at
+// DPC level on the current processor.
+//
+
+25: and t1,s0,IPI_DPC // check for DPC interrupt request
+ beq zero,t1,30f // if eq, no DPC interrupt requested
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ or t1,t1,DISPATCH_INTERRUPT // set dispatch interrupt request
+ mtc0 t1,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+#if NT_INST
+
+ lw t1,IcDPC(s1) // increment number of DPC requests
+ addu t1,t1,1 //
+ sw t1,IcDPC(s1) //
+
+#endif
+
+//
+// Set function return value, restore registers, and return.
+//
+
+30: move v0,s0 // set function return value
+ lw s0,PrS0(sp) // restore integer register s0
+
+#if NT_INST
+
+ and t1,v0,IPI_FREEZE // check if freeze requested
+ beq zero,t1,40f // if eq, no freeze request
+ lw t1,IcFreeze(s1) // increment number of freeze requests
+ addu t1,t1,1 //
+ sw t1,IcFreeze(s1) //
+40: lw s1,PrS1(sp) // restore integer register s1
+
+#endif
+
+ lw ra,PrRa(sp) // restore return address
+ addu sp,sp,PrFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiIpiProcessRequests
+
+ SBTTL("Send Interprocess Request")
+//++
+//
+// VOID
+// KiIpiSend (
+// IN KAFINITY TargetProcessors,
+// IN KIPI_REQUEST IpiRequest
+// );
+//
+// Routine Description:
+//
+// This routine requests the specified operation on the target set of
+// processors.
+//
+// Arguments:
+//
+// TargetProcessors (a0) - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// IpiRequest (a1) - Supplies the request operation mask.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiIpiSend)
+
+#if !defined(NT_UP)
+
+ move v0,a0 // copy target processor set
+ la v1,KiProcessorBlock // get processor block array address
+
+//
+// Loop through the target processors and send the request to the specified
+// recipients.
+//
+
+10: and t1,v0,1 // check if target bit set
+ srl v0,v0,1 // shift out target processor
+ beq zero,t1,30f // if eq, target not specified
+ lw t1,0(v1) // get target processor block address
+20: lld t3,PbRequestSummary(t1) // get request summary of target
+ or t3,t3,a1 // merge current request with summary
+ scd t3,PbRequestSummary(t1) // store request summary and entry address
+ beq zero,t3,20b // if eq, store conditional failed
+30: add v1,v1,4 // advance to next array element
+ bne zero,v0,10b // if ne, more targets requested
+ lw t0,__imp_HalRequestIpi // request IPI interrupt on targets
+ j t0 //
+#else
+
+ j ra // return
+
+#endif
+
+ .end KiIpiSend
+
+ SBTTL("Send Interprocess Request Packet")
+//++
+//
+// VOID
+// KiIpiSendPacket (
+// IN KAFINITY TargetProcessors,
+// IN PKIPI_WORKER WorkerFunction,
+// IN PVOID Parameter1,
+// IN PVOID Parameter2,
+// IN PVOID Parameter3
+// );
+//
+// Routine Description:
+//
+// This routine executes the specified worker function on the specified
+// set of processors.
+//
+// Arguments:
+//
+// TargetProcessors (a0) - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// WorkerFunction (a1) - Supplies the address of the worker function.
+//
+// Parameter1 - Parameter3 (a2, a3, 4 * 4(sp)) - Supplies worker
+// function specific parameters.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiIpiSendPacket)
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcPrcb(zero) // get current processor block address
+ move v0,a0 // copy target processor set
+ la v1,KiProcessorBlock // get processor block array address
+
+//
+// Store function address and parameters in the packet area of the PRCB on
+// the current processor.
+//
+
+ lw t9,4 * 4(sp) // get parameter3 value
+ sw a0,PbTargetSet(t0) // set target processor set
+ sw a1,PbWorkerRoutine(t0) // set worker function address
+ sw a2,PbCurrentPacket(t0) // store worker function parameters
+ sw a3,PbCurrentPacket + 4(t0) //
+ sw t9,PbCurrentPacket + 8(t0) //
+
+//
+// Loop through the target processors and send the packet to the specified
+// recipients.
+//
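+// In C terms the loop is roughly the following (a sketch only; the busy
+// wait retries until the target packet slot is free and the store
+// conditional succeeds, and load_linked_64/store_conditional_64 stand for
+// the lld/scd instructions):
+//
+//     for (Number = 0; Targets != 0; Number += 1, Targets >>= 1) {
+//         if ((Targets & 1) != 0) {
+//             Target = KiProcessorBlock[Number];
+//             do {
+//                 Packed = load_linked_64(&Target->RequestSummary);
+//             } while (((Packed & IPI_PACKET_READY) != 0) ||
+//                      (store_conditional_64(&Target->RequestSummary,
+//                           Packed | ((ULONGLONG)Prcb << 32) |
+//                           IPI_PACKET_READY) == 0));
+//         }
+//     }
+//     HalRequestIpi(TargetProcessors);
+//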
+
+10: and t1,v0,1 // check if target bit set
+ srl v0,v0,1 // shift out target processor
+ beq zero,t1,30f // if eq, target not specified
+ lw t1,0(v1) // get target processor block address
+ dsll t3,t0,32 // shift entry address to upper 32-bits
+ or t3,t3,IPI_PACKET_READY // set packet ready in lower 32-bits
+20: lld t4,PbRequestSummary(t1) // get request summary of target
+ and t5,t4,IPI_PACKET_READY // check if target packet busy
+ or t4,t4,t3 // set entry address in request summary
+ bne zero,t5,20b // if ne, target packet busy
+ scd t4,PbRequestSummary(t1) // store request summary and entry address
+ beq zero,t4,20b // if eq, store conditional failed
+30: addu v1,v1,4 // advance to next array element
+ bne zero,v0,10b // if ne, more targets requested
+ lw t0,__imp_HalRequestIpi // request IPI interrupt on targets
+ j t0 //
+
+#else
+
+ j ra // return
+
+#endif
+
+ .end KiIpiSendPacket
+
+ SBTTL("Signal Packet Done")
+//++
+//
+// VOID
+// KiIpiSignalPacketDone (
+// IN PVOID SignalDone
+// );
+//
+// Routine Description:
+//
+// This routine signals that a processor has completed a packet by
+// clearing the calling processor's set member of the requesting
+// processor's packet.
+//
+// Arguments:
+//
+// SignalDone (a0) - Supplies a pointer to the processor block of the
+// sending processor.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiIpiSignalPacketDone)
+
+ lw a1,KiPcr + PcNotMember(zero) // get processor set member
+10: ll a2,PbTargetSet(a0) // get request target set
+ and a2,a2,a1 // clear processor set member
+ sc a2,PbTargetSet(a0) // store target set
+ beq zero,a2,10b // if eq, store conditional failed
+ j ra // return
+
+ .end KiIpiSignalPacketDone
diff --git a/private/ntos/ke/mips/x4sqrt.s b/private/ntos/ke/mips/x4sqrt.s
new file mode 100644
index 000000000..71061bdf8
--- /dev/null
+++ b/private/ntos/ke/mips/x4sqrt.s
@@ -0,0 +1,113 @@
+// TITLE("Square Root")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// x4sqrt.s
+//
+// Abstract:
+//
+// This module implements the code necessary to compute the square root
+// of a denormalized value.
+//
+// Author:
+//
+// David N. Cutler (davec) 20-Apr-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Double Square Root")
+//++
+//
+// ULONG
+// KiSquareRootDouble (
+// IN PULONG DoubleValue
+// )
+//
+// Routine Description:
+//
+// This routine is called to compute the square root of a double
+// precision denormalized value.
+//
+// N.B. The denormalized value has been converted to a normalized
+// value with a exponent equal to the denormalization shift
+// count prior to calling this routine.
+//
+// Arguments:
+//
+// DoubleValue (a0) - Supplies a pointer to the double denormalized
+// value.
+//
+// Return Value:
+//
+// The inexact bit is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiSquareRootDouble)
+
+ ldc1 f0,0(a0) // get double value
+ cfc1 t0,fsr // get current floating status
+ and t0,t0,0x3 // isolate rounding mode
+ ctc1 t0,fsr // set current floating status
+ sqrt.d f0,f0 // compute double square root
+ cfc1 v0,fsr // get result floating status
+ srl v0,v0,2 // isolate inexact bit
+ and v0,v0,1 //
+ sdc1 f0,0(a0) // store result value
+ j ra //
+
+ .end KiSquareRootDouble
+
+ SBTTL("Single Square Root")
+//++
+//
+// ULONG
+// KiSquareRootSingle (
+// IN PULONG SingleValue
+// )
+//
+// Routine Description:
+//
+// This routine is called to compute the square root of a single
+// precision denormalized value.
+//
+// N.B. The denormalized value has been converted to a normalized
+// value with an exponent equal to the denormalization shift
+// count prior to calling this routine.
+//
+// Arguments:
+//
+// SingleValue (a0) - Supplies a pointer to the single denormalized
+// value.
+//
+// Return Value:
+//
+// The inexact bit is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiSquareRootSingle)
+
+ lwc1 f0,0(a0) // get single value
+ cfc1 t0,fsr // get current floating status
+ and t0,t0,0x3 // isolate rounding mode
+ ctc1 t0,fsr // set current floating status
+ sqrt.s f0,f0 // compute single square root
+ cfc1 v0,fsr // get result floating status
+ srl v0,v0,2 // isolate inexact bit
+ and v0,v0,1 //
+ swc1 f0,0(a0) // store result value
+ j ra //
+
+ .end KiSquareRootSingle
diff --git a/private/ntos/ke/mips/x4start.s b/private/ntos/ke/mips/x4start.s
new file mode 100644
index 000000000..842e9056f
--- /dev/null
+++ b/private/ntos/ke/mips/x4start.s
@@ -0,0 +1,968 @@
+// TITLE("System Initialization")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// x4start.s
+//
+// Abstract:
+//
+// This module implements the code necessary to initially startup the
+// NT system.
+//
+// Author:
+//
+// David N. Cutler (davec) 5-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KdDebuggerEnabled 1
+ .extern KeNumberProcessIds 4
+ .extern KeNumberProcessors 1
+ .extern KeNumberTbEntries 4
+ .extern KiBarrierWait 4
+ .extern KiContextSwapLock 4
+ .extern KiDispatcherLock 4
+ .extern KiSynchIrql 4
+
+ SBTTL("System Initialization")
+//++
+//
+// Routine Description:
+//
+// This routine is called when the NT system begins execution.
+// Its function is to initialize system hardware state, call the
+// kernel initialization routine, and then fall into code that
+// represents the idle thread for all processors.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the loader parameter block.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+SsArgA0:.space 4 // process address argument (a0)
+SsArgA1:.space 4 // thread address argument (a1)
+SsArgA2:.space 4 // idle stack argument (a2)
+SsArgA3:.space 4 // processor block address argument (a3)
+SsPrNum:.space 4 // processor number argument
+SsLdPrm:.space 4 // loader parameter block address
+SsPte: .space 2 * 4 // Pte values
+ .space 4 // fill
+SsRa: .space 4 // saved return address
+
+SsFrameLength: // length of stack frame
+
+ NESTED_ENTRY_S(KiSystemBegin, SsFrameLength, zero, INIT)
+
+ subu sp,sp,SsFrameLength // allocate stack frame
+ sw ra,SsRa(sp) // save return address
+
+ PROLOGUE_END
+
+ ALTERNATE_ENTRY(KiInitializeSystem)
+
+ lw sp,LpbKernelStack(a0) // get address of idle thread stack
+ subu sp,sp,SsFrameLength // allocate stack frame
+ lw gp,LpbGpBase(a0) // get global pointer base address
+ sw zero,SsRa(sp) // zero return address
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+//
+// Get page frame numbers for the PCR and PDR pages that were allocated by
+// the OS loader.
+//
+
+ lw s0,LpbPdrPage(a0) // set PDR page number
+ lw s1,LpbPcrPage(a0) // set PCR page number
+ move s2,a0 // save loader parameter block address
+ lw s3,LpbPrcb(s2) // get processor block address
+ lbu s3,PbNumber(s3) // get processor number
+ lw s6,LpbPcrPage2(a0) // set second PCR page
+
+//
+// Initialize the configuration, context, page mask, watch, and wired
+// registers.
+//
+// N.B. The base virtual address of the page table pages is left shifted by
+// one because of the way VPN2 is inserted into the context register
+// when a TB miss occurs. The TB miss routine right arithmetic shifts
+// the address by one to obtain the real virtual address. Note that it
+// is assumed that bits <31:30> of PTE_BASE are set.
+//
+
+ li t0,PTE_BASE << 1 // set base virtual page table address
+ li t1,FIXED_ENTRIES // set number of fixed TB entries
+ li t2,0xf000 // set frame mask register value
+
+ .set noreorder
+ .set noat
+ mfc0 s7,config // get processor configuration
+ mfc0 s8,prid // get processor id
+ mtc0 t0,context // initialize the context register
+ mtc0 zero,pagemask // initialize the page mask register
+ mtc0 zero,taglo // initialize the tag low register
+ mtc0 zero,watchlo // initialize the watch address register
+ mtc0 zero,watchhi //
+ mtc0 t1,wired // initialize the wired register
+ and s4,s7,0x7 // isolate KSEG0 cache policy
+ and t3,s8,0xff00 // isolate processor id
+ xor t3,t3,0x900 // check if r10000 processor
+ bne zero,t3,5f // if ne, not r10000 processor
+ sll s5,s4,ENTRYLO_C // shift cache policy into position
+ mtc0 t2,framemask // set frame mask register
+ .set at
+ .set reorder
+
+//
+// Clear the translation buffer.
+//
+
+5: bne zero,s3,20f // if ne, not processor zero
+ li t0,48 // set number of TB entries for r4x00
+ and t1,s8,0xff00 // isolate processor id
+ xor t1,t1,0x900 // check if r10000 processor
+ bne zero,t1,10f // if ne, not r10000 processor
+ li t0,64 // set number of TB entries for r10000
+10: sw t0,KeNumberTbEntries // store number of TB entries
+ li t0,256 // set number of process id's
+ sw t0,KeNumberProcessIds //
+20: jal KiFlushFixedTb // flush fixed TB entries
+ jal KiFlushRandomTb // flush random TB entries
+
+//
+// Initialize fixed entries that map the PCR into system and user space.
+//
+
+ sll t0,s6,ENTRYLO_PFN // shift PFN into position
+ or t0,t0,1 << ENTRYLO_G // Set G, V, D, and the cache policy
+ or t0,t0,1 << ENTRYLO_V //
+ or t0,t0,1 << ENTRYLO_D //
+ or t0,t0,s5 //
+ sll t1,s1,ENTRYLO_PFN // shift PFN into position
+ or t1,t1,1 << ENTRYLO_G // Set G, V, D, and the cache policy
+ or t1,t1,1 << ENTRYLO_V //
+ or t1,t1,1 << ENTRYLO_D //
+ or t1,t1,s5 //
+ sw t0,SsPte(sp) // set first PTE value
+ sw t1,SsPte + 4(sp) // set second PTE value
+ addu a0,sp,SsPte // compute address of PTE values
+ li a1,KiPcr & ~(1 << PAGE_SHIFT) // set virtual address/2 of PCR
+ li a2,PCR_ENTRY // set index of system PCR entry
+ jal KeFillFixedEntryTb // fill fixed TB entry
+
+ sll t0,s6,ENTRYLO_PFN // shift PFN into position
+ or t0,t0,1 << ENTRYLO_G // set G, V, and the cache policy
+ or t0,t0,1 << ENTRYLO_V //
+ or t0,t0,s5 //
+ sll t1,s1,ENTRYLO_PFN // shift PFN into position
+ or t1,t1,1 << ENTRYLO_G // set G, V, and cache policy
+ or t1,t1,1 << ENTRYLO_V //
+ or t1,t1,s5 //
+ sw t0,SsPte(sp) // set first PTE value
+ sw t1,SsPte + 4(sp) // set second PTE value
+ addu a0,sp,SsPte // compute address of PTE values
+ li a1,UsPcr & ~(1 << PAGE_SHIFT) // set virtual address/2 of PCR
+ li a2,PCR_ENTRY + 1 // set index of user PCR entry
+ jal KeFillFixedEntryTb // fill fixed TB entry
+
+//
+// Set the cache policy for cached memory.
+//
+
+ li t1,KiPcr // get PCR address
+ sw s4,PcCachePolicy(t1) // set cache policy for cached memory
+ sw s5,PcAlignedCachePolicy(t1) //
+
+//
+// Set the first level data and instruction cache fill size and size.
+//
+
+ lw t2,LpbFirstLevelDcacheSize(s2) //
+ sw t2,PcFirstLevelDcacheSize(t1) //
+ lw t2,LpbFirstLevelDcacheFillSize(s2) //
+ sw t2,PcFirstLevelDcacheFillSize(t1) //
+ lw t2,LpbFirstLevelIcacheSize(s2) //
+ sw t2,PcFirstLevelIcacheSize(t1) //
+ lw t2,LpbFirstLevelIcacheFillSize(s2) //
+ sw t2,PcFirstLevelIcacheFillSize(t1) //
+
+//
+// Set the second level data and instruction cache size and fill size.
+//
+
+ lw t2,LpbSecondLevelDcacheSize(s2) //
+ sw t2,PcSecondLevelDcacheSize(t1) //
+ lw t2,LpbSecondLevelDcacheFillSize(s2) //
+ sw t2,PcSecondLevelDcacheFillSize(t1) //
+ lw t2,LpbSecondLevelIcacheSize(s2) //
+ sw t2,PcSecondLevelIcacheSize(t1) //
+ lw t2,LpbSecondLevelIcacheFillSize(s2) //
+ sw t2,PcSecondLevelIcacheFillSize(t1) //
+
+//
+// Set the data cache fill size and alignment values.
+//
+
+ lw t2,PcSecondLevelDcacheSize(t1) // get second level dcache size
+ lw t3,PcSecondLevelDcacheFillSize(t1) // get second level fill size
+ bne zero,t2,30f // if ne, second level cache present
+ lw t3,PcFirstLevelDcacheFillSize(t1) // get first level fill size
+30: subu t4,t3,1 // compute dcache alignment value
+ sw t3,PcDcacheFillSize(t1) // set dcache fill size
+ sw t4,PcDcacheAlignment(t1) // set dcache alignment value
+
+//
+// Set the instruction cache fill size and alignment values.
+//
+
+ lw t2,PcSecondLevelIcacheSize(t1) // get second level icache size
+ lw t3,PcSecondLevelIcacheFillSize(t1) // get second level fill size
+ bne zero,t2,40f // if ne, second level cache present
+ lw t3,PcFirstLevelIcacheFillSize(t1) // get first level fill size
+40: subu t4,t3,1 // compute icache alignment value
+ sw t3,PcIcacheFillSize(t1) // set icache fill size
+ sw t4,PcIcacheAlignment(t1) // set icache alignment value
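+
+//
+// N.B. Both of the preceding sequences implement the same simple rule. A
+//      rough C equivalent (the structure and field names are illustrative,
+//      and the fill size is assumed to be a power of two) is:
+//
+//          FillSize = (Pcr->SecondLevelDcacheSize != 0) ?
+//                          Pcr->SecondLevelDcacheFillSize :
+//                          Pcr->FirstLevelDcacheFillSize;
+//
+//          Pcr->DcacheFillSize = FillSize;
+//          Pcr->DcacheAlignment = FillSize - 1;    // mask for a power of two
+//
+//      The icache values are computed the same way from the icache fields.
+//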
+
+//
+// Sweep the data and instruction caches.
+//
+
+ jal HalSweepIcache // sweep the instruction cache
+ jal HalSweepDcache // sweep the data cache
+
+//
+// Initialize the fixed entries that map the PDR pages.
+//
+
+ sll t0,s0,ENTRYLO_PFN // shift PFN into position
+ or t0,t0,1 << ENTRYLO_V // set V, D, and cache policy
+ or t0,t0,1 << ENTRYLO_D //
+ or t0,t0,s5 //
+ addu t1,t0,1 << ENTRYLO_PFN // compute PTE for second PDR page
+ sw t0,SsPte(sp) // set first PTE value
+ sw t1,SsPte + 4(sp) // set second PTE value
+ addu a0,sp,SsPte // compute address of PTE values
+ li a1,PDE_BASE // set system virtual address/2 of PDR
+ li a2,PDR_ENTRY // set index of PDR entry
+ jal KeFillFixedEntryTb // fill fixed TB entry
+ li t2,PDE_BASE // set virtual address of PDR
+ lw t0,SsPte(sp) // get first PTE value
+ lw t1,SsPte + 4(sp) // get second PTE value
+ sw t0,((PDE_BASE >> (PDI_SHIFT - 2)) & 0xffc)(t2) // set recursive PDE
+ sw t1,((PDE_BASE >> (PDI_SHIFT - 2)) & 0xffc) + 4(t2) // set hyper PDE
+
+//
+// Initialize the Processor Control Registers (PCR).
+//
+
+ li t1,KiPcr // get PCR address
+
+//
+// Initialize the minor and major version numbers.
+//
+
+ li t2,PCR_MINOR_VERSION // set minor version number
+ sh t2,PcMinorVersion(t1) //
+ li t2,PCR_MAJOR_VERSION // set major version number
+ sh t2,PcMajorVersion(t1) //
+
+//
+// Set address of processor block.
+//
+
+ lw t2,LpbPrcb(s2) // set processor block address
+ sw t2,PcPrcb(t1) //
+
+//
+// Initialize the routine addresses in the exception dispatch table.
+//
+
+ la t2,KiInvalidException // set address of invalid exception
+ li t3,XCODE_VECTOR_LENGTH // set length of dispatch vector
+ la t4,PcXcodeDispatch(t1) // compute address of dispatch vector
+50: sw t2,0(t4) // fill dispatch vector
+ subu t3,t3,1 // decrement number of entries
+ addu t4,t4,4 // advance to next vector entry
+ bgtz t3,50b // if gtz, more to fill
+
+ la t2,KiInterruptException // Initialize exception dispatch table
+ sw t2,PcXcodeDispatch + XCODE_INTERRUPT(t1) //
+ la t2,KiModifyException //
+ sw t2,PcXcodeDispatch + XCODE_MODIFY(t1) //
+ la t2,KiReadMissException // set read miss address for r4x00
+ and t3,s8,0xff00 // isolate processor id
+ xor t3,t3,0x900 // check if r10000 processor
+ bne zero,t3,55f // if ne, not r10000 processor
+ la t2,KiReadMissException9.x // set read miss address for r10000
+55: sw t2,PcXcodeDispatch + XCODE_READ_MISS(t1) //
+ la t2,KiWriteMissException //
+ sw t2,PcXcodeDispatch + XCODE_WRITE_MISS(t1) //
+ la t2,KiReadAddressErrorException //
+ sw t2,PcXcodeDispatch + XCODE_READ_ADDRESS_ERROR(t1) //
+ la t2,KiWriteAddressErrorException //
+ sw t2,PcXcodeDispatch + XCODE_WRITE_ADDRESS_ERROR(t1) //
+ la t2,KiInstructionBusErrorException //
+ sw t2,PcXcodeDispatch + XCODE_INSTRUCTION_BUS_ERROR(t1) //
+ la t2,KiDataBusErrorException //
+ sw t2,PcXcodeDispatch + XCODE_DATA_BUS_ERROR(t1) //
+ la t2,KiSystemServiceException //
+ sw t2,PcXcodeDispatch + XCODE_SYSTEM_CALL(t1) //
+ la t2,KiBreakpointException //
+ sw t2,PcXcodeDispatch + XCODE_BREAKPOINT(t1) //
+ la t2,KiIllegalInstructionException //
+ sw t2,PcXcodeDispatch + XCODE_ILLEGAL_INSTRUCTION(t1) //
+ la t2,KiCoprocessorUnusableException //
+ sw t2,PcXcodeDispatch + XCODE_COPROCESSOR_UNUSABLE(t1) //
+ la t2,KiIntegerOverflowException //
+ sw t2,PcXcodeDispatch + XCODE_INTEGER_OVERFLOW(t1) //
+ la t2,KiTrapException //
+ sw t2,PcXcodeDispatch + XCODE_TRAP(t1) //
+ la t2,KiInstructionCoherencyException //
+ sw t2,PcXcodeDispatch + XCODE_VIRTUAL_INSTRUCTION(t1) //
+ la t2,KiFloatingException //
+ sw t2,PcXcodeDispatch + XCODE_FLOATING_EXCEPTION(t1) //
+ la t2,KiUserAddressErrorException //
+ sw t2,PcXcodeDispatch + XCODE_INVALID_USER_ADDRESS(t1)
+ la t2,KiPanicException //
+ sw t2,PcXcodeDispatch + XCODE_PANIC(t1) //
+ la t2,KiDataCoherencyException //
+ sw t2,PcXcodeDispatch + XCODE_VIRTUAL_DATA(t1) //
+
+//
+// Initialize the addresses of various data structures that are referenced
+// from the exception and interrupt handling code.
+//
+// N.B. The panic stack is a separate stack that is used when the current
+// kernel stack overflows.
+//
+// N.B. The interrupt stack is a separate stack and is used to process all
+// interrupts that run at IRQL 3 and above.
+//
+
+ lw t2,LpbKernelStack(s2) // set initial stack address
+ sw t2,PcInitialStack(t1) //
+ lw t2,LpbPanicStack(s2) // set panic stack address
+ sw t2,PcPanicStack(t1) //
+ lw t2,LpbInterruptStack(s2) // set interrupt stack address
+ sw t2,PcInterruptStack(t1) //
+ sw gp,PcSystemGp(t1) // set system global pointer address
+ lw t2,LpbThread(s2) // set current thread address
+ sw t2,PcCurrentThread(t1) //
+
+//
+// Set current IRQL to highest value.
+//
+
+ li t2,HIGH_LEVEL // set current IRQL
+ sb t2,PcCurrentIrql(t1) //
+
+//
+// Set processor id and configuration.
+//
+
+ sw s7,PcSystemReserved(t1) // save processor configuration
+ sw s8,PcProcessorId(t1) // save processor id
+
+//
+// Clear floating status and zero the count and compare registers.
+//
+
+ .set noreorder
+ .set noat
+ ctc1 zero,fsr // clear floating status
+ mtc0 zero,count // initialize the count register
+ mtc0 zero,compare // initialize the compare register
+ .set at
+ .set reorder
+
+//
+// Set system dispatch address limits used by get and set context.
+//
+
+ la t2,KiSystemServiceDispatchStart // set starting address of range
+ sw t2,PcSystemServiceDispatchStart(t1) //
+ la t2,KiSystemServiceDispatchEnd // set ending address of range
+ sw t2,PcSystemServiceDispatchEnd(t1) //
+
+//
+// Copy the TB miss, XTB miss, cache parity, and general exception handlers to
+// low memory.
+//
+
+ bne zero,s3,100f // if ne, not processor zero
+
+//
+// Copy TB Miss Handler.
+//
+
+ la t2,KiTbMissStartAddress2.x // get user TB miss start address
+ la t3,KiTbMissEndAddress3.x // get user TB miss end address
+ and a0,s8,0xfff0 // isolate id and major chip version
+ xor a0,a0,0x420 // test if id 4 and version 2.0 chip
+ beq zero,a0,60f // if eq, version 2.0 chip
+ la t2,KiTbMissStartAddress3.x // get user TB miss start address
+ and a0,s8,0xff00 // isolate processor id
+ xor a0,a0,0x900 // check if r10000 processor
+ bne zero,a0,60f // if ne, not r10000 processor
+ la t2,KiTbMissStartAddress9.x // get user TB miss start address
+ la t3,KiTbMissEndAddress9.x // get user TB miss end address
+60: li t4,KSEG0_BASE // get copy address
+70: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,70b // if ne, more to copy
+
+//
+// Copy XTB Miss Handler.
+//
+
+ la t2,KiXTbMissStartAddress2.x // get user TB miss start address
+ la t3,KiXTbMissEndAddress3.x // get user TB miss end address
+ and a0,s8,0xfff0 // isolate id and major chip version
+ xor a0,a0,0x420 // test if id 4 and version 2.0 chip
+ beq zero,a0,73f // if eq, version 2.0 chip
+ la t2,KiXTbMissStartAddress3.x // get user TB miss start address
+ and a0,s8,0xff00 // isolate processor id
+ xor a0,a0,0x900 // check if r10000 processor
+ bne zero,a0,73f // if ne, not r10000 processor
+ la t2,KiXTbMissStartAddress9.x // get user TB miss start address
+ la t3,KiXTbMissEndAddress9.x // get user TB miss end address
+73: li t4,KSEG0_BASE + 0x80 // get copy address
+77: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,77b // if ne, more to copy
+
+//
+// Copy Cache Error Handler.
+//
+
+ la t2,KiCacheErrorStartAddress // get cache error start address
+ la t3,KiCacheErrorEndAddress // get cache error end address
+ li t4,KSEG1_BASE + 0x100 // get copy address
+80: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,80b // if ne, more to copy
+
+//
+// Copy General Exception Handler.
+//
+
+ la t2,KiGeneralExceptionStartAddress // get general exception start address
+ la t3,KiGeneralExceptionEndAddress // get general exception end address
+ li t4,KSEG0_BASE + 0x180 // get copy address
+90: lw t5,0(t2) // copy code to low memory
+ sw t5,0(t4) //
+ addu t2,t2,4 // advance copy pointers
+ addu t4,t4,4 //
+ bne t2,t3,90b // if ne, more to copy
+
+//
+// Set the default cache error routine address.
+//
+
+ la t0,SOFT_RESET_VECTOR // get soft reset vector address
+ la t1,CACHE_ERROR_VECTOR // get cache error vector address
+ sw t0,0(t1) // set default cache error routine
+
+//
+// Sweep the data and instruction caches.
+//
+
+100: jal HalSweepIcache // sweep the instruction cache
+ jal HalSweepDcache // sweep the data cache
+
+// ****** temp ******
+//
+// Set up watch registers to catch writes to location 0.
+//
+// ****** temp ******
+
+// .set noreorder
+// .set noat
+// li t0,1 // set to watch writes to location 0
+// mtc0 t0,watchlo //
+// mtc0 zero,watchhi //
+// .set at
+// .set reorder
+
+//
+// Set up arguments and call the kernel initialization routine.
+//
+
+ lw s0,LpbProcess(s2) // get idle process address
+ lw s1,LpbThread(s2) // get idle thread address
+ move a0,s0 // set idle process address
+ move a1,s1 // set idle thread address
+ lw a2,LpbKernelStack(s2) // set idle thread stack address
+ lw a3,LpbPrcb(s2) // get processor block address
+ sw s3,SsPrNum(sp) // set processor number
+ sw s2,SsLdPrm(sp) // set loader parameter block address
+ jal KiInitializeKernel // initialize system data structures
+
+//
+// Control is returned to the idle thread with IRQL at HIGH_LEVEL. Lower IRQL
+// to DISPATCH_LEVEL, set wait IRQL of idle thread, load global register values,
+// and enter idle loop.
+//
+
+ move s7,s3 // set processor number
+ lw s0,KiPcr + PcPrcb(zero) // get processor control block address
+ addu s3,s0,PbDpcListHead // compute DPC listhead address
+ li a0,DISPATCH_LEVEL // get dispatch level IRQL
+ sb a0,ThWaitIrql(s1) // set wait IRQL of idle thread
+ jal KeLowerIrql // lower IRQL
+
+ DISABLE_INTERRUPTS(s8) // disable interrupts
+
+ or s8,s8,1 << PSR_IE // set interrupt enable bit
+ subu s6,s8,1 << PSR_IE // clear interrupt enable bit
+
+ ENABLE_INTERRUPTS(s8) // enable interrupts
+
+ move s4,zero // clear breakin loop counter
+ lbu a0,KiSynchIrql // get new IRQL value
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ and s5,s8,t1 // clear current interrupt enables
+ or s5,s5,t0 // set new interrupt enables
+
+//
+// In a multiprocessor system the boot processor proceeds directly into
+// the idle loop. As other processors start executing, however, they do
+// not directly enter the idle loop and spin until all processors have
+// been started and the boot master allows them to proceed.
+//
+
+#if !defined(NT_UP)
+
+110: lw t0,KiBarrierWait // get the current barrier wait value
+ bne zero,t0,110b // if ne, spin until allowed to proceed
+ lbu t1,KiPcr + PcNumber(zero) // get current processor number
+ beq zero,t1,120f // if eq, processor zero
+ jal HalAllProcessorsStarted // perform platform specific operations
+ bne zero,v0,120f // if ne, initialization succeeded
+ li a0,HAL1_INITIALIZATION_FAILED // set bug check reason
+ jal KeBugCheck // bug check
+
+#endif
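+
+//
+// N.B. A rough C sketch of the startup barrier above (the spin flag would
+//      need to be volatile in real C; the control flow otherwise follows
+//      the code):
+//
+//          while (KiBarrierWait != 0) {            // spin until the boot
+//              ;                                   // processor releases us
+//          }
+//
+//          if ((Pcr->Number != 0) &&
+//              (HalAllProcessorsStarted() == FALSE)) {
+//              KeBugCheck(HAL1_INITIALIZATION_FAILED);
+//          }
+//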
+
+//
+// Allocate an exception frame and store the nonvolatile registers and
+// return address in the frame so that when a context switch from the idle
+// thread to another thread occurs, context does not have to be saved
+// and the special switch from idle entry point in the context swap
+// code can be called.
+//
+// Registers s0 - s8 have the following contents:
+//
+// s0 - Address of the current processor block.
+// s1 - Not used.
+// s2 - Not used.
+// s3 - Address of DPC listhead for current processor.
+// s4 - Debugger breakin poll counter.
+// s5 - Saved PSR with interrupts enabled and IRQL of synchronization level.
+// s6 - Saved PSR with interrupts disabled and an IRQL of DISPATCH_LEVEL.
+// s7 - Number of the current processor.
+// s8 - Saved PSR with interrupts enabled and IRQL of DISPATCH_LEVEL.
+//
+
+120: subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw s3,ExIntS3(sp) // save register s3 - s8
+ sw s4,ExIntS4(sp) //
+ sw s5,ExIntS5(sp) //
+ sw s6,ExIntS6(sp) //
+ sw s7,ExIntS7(sp) //
+ sw s8,ExIntS8(sp) //
+ la ra,KiIdleLoop // set address of swap return
+ sw ra,ExSwapReturn(sp) //
+ j KiIdleLoop //
+
+ .end KiSystemBegin
+
+//
+// The following code represents the idle thread for a processor. The idle
+// thread executes at IRQL DISPATCH_LEVEL and continually polls for work to
+// do. Control may be given to this loop either as a result of a return from
+// the system initialize routine or as the result of starting up another
+// processor in a multiprocessor configuration.
+//
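+// A compressed C sketch of the loop (PollDebuggerBreakin and SwapToNextThread
+// are illustrative placeholders; the other names appear in the code below):
+//
+//          for (;;) {
+//              PollDebuggerBreakin();              // DBG builds, processor 0
+//
+//              if (!IsListEmpty(&Prcb->DpcListHead)) {
+//                  KiRetireDpcList();              // drain the DPC list
+//              }
+//
+//              if (Prcb->NextThread != NULL) {
+//                  SwapToNextThread();             // acquire locks and switch
+//              } else {
+//                  HalProcessorIdle();             // let the HAL manage power
+//              }
+//          }
+//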
+
+ LEAF_ENTRY(KiIdleLoop)
+
+#if DBG
+
+ move s4,zero // clear breakin loop counter
+
+#endif
+
+//
+// Lower IRQL to DISPATCH_LEVEL and enable interrupts.
+//
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ li a0,DISPATCH_LEVEL // get new IRQL value
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(s8) // enable interrupts
+
+//
+// Check if the debugger is enabled, the current processor is zero, and
+// it is time to poll for a debugger breakin.
+//
+
+KiIdleTop: //
+
+#if DBG
+
+#if !defined(NT_UP)
+
+ bne zero,s7,CheckDpcList // if ne, not processor zero
+
+#endif
+
+ subu s4,s4,1 // decrement poll counter
+ bgtz s4,CheckDpcList // if gtz, then not time to poll
+ lbu t0,KdDebuggerEnabled // check if debugger is enabled
+ li s4,200 * 1000 // set breakin loop counter
+ beq zero,t0,CheckDpcList // if eq, debugger not enabled
+ jal KdPollBreakIn // check if breakin is requested
+ beq zero,v0,CheckDpcList // if eq, no breakin requested
+ li a0,DBG_STATUS_CONTROL_C // break in and send
+ jal DbgBreakPointWithStatus // status to the debugger
+
+#endif
+
+//
+// Enable interrupts to allow any outstanding interrupts to occur, then
+// disable interrupts and check if there is any work in the DPC list of
+// the current processor.
+//
+
+CheckDpcList: //
+
+//
+// N.B. The following code enables interrupts for a few cycles, then
+// disables them again for the subsequent DPC and next thread
+// checks.
+//
+
+ .set noreorder
+ .set noat
+ mtc0 s8,psr // enable interrupts
+ nop //
+ nop //
+ nop //
+ nop // allow interrupts to occur
+ nop //
+ mtc0 s6,psr // disable interrupts
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+//
+// Process the deferred procedure call list for the current processor.
+//
+
+ lw a0,LsFlink(s3) // get address of next entry
+ beq a0,s3,CheckNextThread // if eq, DPC list is empty
+
+ .set noreorder
+ .set noat
+ mfc0 t0,cause // get exception cause register
+ and t0,t0,APC_INTERRUPT // clear dispatch interrupt pending
+ mtc0 t0,cause // set exception cause register
+ .set at
+ .set reorder
+
+ move v0,s8 // set previous PSR value
+ jal KiRetireDpcList // process the DPC list
+
+#if DBG
+
+ move s4,zero // clear breakin loop counter
+
+#endif
+
+//
+// Check if a thread has been selected to run on the current processor.
+//
+
+CheckNextThread: //
+ lw s2,PbNextThread(s0) // get address of next thread object
+ beq zero,s2,20f // if eq, no thread selected
+
+//
+// A thread has been selected for execution on this processor. Acquire
+// dispatcher database lock, get the thread address again (it may have
+// changed), clear the address of the next thread in the processor block,
+// and call swap context to start execution of the selected thread.
+//
+// N.B. If the dispatcher database lock cannot be obtained immediately,
+// then attempt to process another DPC rather than spinning on the
+// dispatcher database lock.
+//
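+// In C terms the acquisition below is a single try-acquire rather than a
+// spin (InterlockedCompareExchange is used only as an illustrative stand-in
+// for the ll/sc sequence):
+//
+//          if (InterlockedCompareExchange(&KiDispatcherLock,
+//                                         (LONG)NextThread, 0) != 0) {
+//              goto CheckDpcList;                  // lock busy - do other work
+//          }
+//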
+
+ lbu a0,KiSynchIrql // get new IRQL value
+
+#if !defined(NT_UP)
+
+10: ll t0,KiDispatcherLock // get current lock value
+ move t1,s2 // set lock ownership value
+ bne zero,t0,CheckDpcList // if ne, spin lock owned
+ sc t1,KiDispatcherLock // set spin lock owned
+ beq zero,t1,10b // if eq, store conditional failed
+
+#endif
+
+//
+// Raise IRQL to synchronization level and enable interrupts.
+//
+
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(s5) // enable interrupts
+
+ lw s1,PbCurrentThread(s0) // get address of current thread
+ lw s2,PbNextThread(s0) // get address of next thread object
+ sw zero,PbNextThread(s0) // clear next thread address
+ sw s2,PbCurrentThread(s0) // set address of current thread object
+
+//
+// Set the thread state to running.
+//
+
+ li t0,Running // set thread state to running
+ sb t0,ThState(s2) //
+
+//
+// Acquire the context swap lock so the address space of the old process
+// cannot be deleted and then release the dispatcher database lock. In
+// this case the old process is the system process, but the context swap
+// code releases the context swap lock so it must be acquired.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
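+// In C terms the ordering below is (the interlocked helper is again only an
+// illustrative stand-in for the ll/sc sequence):
+//
+//          while (InterlockedCompareExchange(&KiContextSwapLock,
+//                                            (LONG)NextThread, 0) != 0) {
+//              ;                                   // spin - lock held briefly
+//          }
+//
+//          KiDispatcherLock = 0;                   // then drop dispatcher lock
+//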
+
+#if !defined(NT_UP)
+
+15: ll t0,KiContextSwapLock // get current lock value
+ move t1,s2 // set ownership value
+ bne zero,t0,15b // if ne, lock already owned
+ sc t1,KiContextSwapLock // set lock ownership value
+ beq zero,t1,15b // if eq, store conditional failed
+ sw zero,KiDispatcherLock // set lock not owned
+
+#endif
+
+ j SwapFromIdle // swap context to new thread
+
+//
+// There are no entries in the DPC list and a thread has not been selected
+// for execution on this processor. Call the HAL so power management can be
+// performed.
+//
+// N.B. The HAL is called with interrupts disabled. The HAL will return
+// with interrupts enabled.
+//
+
+20: la ra,KiIdleTop // set return address
+ j HalProcessorIdle // notify HAL of idle state
+
+ .end KiIdleLoop
+
+ SBTTL("Retire Deferred Procedure Call List")
+//++
+//
+// Routine Description:
+//
+// This routine is called to retire the specified deferred procedure
+// call list. DPC routines are called using the idle thread (current)
+// stack.
+//
+// N.B. Interrupts must be disabled on entry to this routine. Control
+// is returned to the caller with the same conditions true.
+//
+// Arguments:
+//
+// v0 - Previous PSR value.
+// s0 - Address of the current PRCB.
+//
+// Return value:
+//
+// None.
+//
+//--
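+
+//
+// N.B. A condensed C sketch of the retire loop (the DBG-only timing checks
+//      and the final empty-list recheck are omitted here, and the locking,
+//      list, and interrupt helpers are illustrative):
+//
+//          Prcb->DpcRoutineActive = TRUE;
+//
+//          while (!IsListEmpty(&Prcb->DpcListHead)) {
+//              AcquireDpcLock(Prcb);
+//              Entry = RemoveHeadList(&Prcb->DpcListHead);
+//              Dpc = CONTAINING_RECORD(Entry, KDPC, DpcListEntry);
+//              Dpc->Lock = NULL;                   // clear inserted state
+//              Prcb->DpcQueueDepth -= 1;
+//              ReleaseDpcLock(Prcb);
+//
+//              EnableInterrupts();                 // interrupts on during DPC
+//              Dpc->DeferredRoutine(Dpc,
+//                                   Dpc->DeferredContext,
+//                                   Dpc->SystemArgument1,
+//                                   Dpc->SystemArgument2);
+//              DisableInterrupts();                // interrupts off again
+//          }
+//
+//          Prcb->DpcRoutineActive = FALSE;
+//          Prcb->DpcInterruptRequested = FALSE;
+//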
+
+ .struct 0
+ .space 4 * 4 // argument save area
+DpRa: .space 4 // return address
+ .space 4 // fill
+
+#if DBG
+
+DpStart:.space 4 // DPC start time in ticks
+DpFunct:.space 4 // DPC function address
+DpCount:.space 4 // interrupt count at start of DPC
+DpTime: .space 4 // interrupt time at start of DPC
+
+#endif
+
+DpcFrameLength: // DPC frame length
+
+ NESTED_ENTRY(KiRetireDpcList, DpcFrameLength, zero)
+
+ subu sp,sp,DpcFrameLength // allocate stack frame
+ sw ra,DpRa(sp) // save return address
+
+ PROLOGUE_END
+
+5: sw sp,PbDpcRoutineActive(s0) // set DPC routine active
+ sw sp,KiPcr + PcDpcRoutineActive(zero) //
+
+//
+// Process the DPC list.
+//
+
+10: addu a1,s0,PbDpcListHead // compute DPC listhead address
+ lw a0,LsFlink(a1) // get address of next entry
+ beq a0,a1,60f // if eq, DPC list is empty
+
+#if !defined(NT_UP)
+
+20: ll t1,PbDpcLock(s0) // get current lock value
+ move t2,s0 // set lock ownership value
+ bne zero,t1,20b // if ne, spin lock owned
+ sc t2,PbDpcLock(s0) // set spin lock owned
+ beq zero,t2,20b // if eq, store conditional failed
+ lw a0,LsFlink(a1) // get address of next entry
+ beq a0,a1,50f // if eq, DPC list is empty
+
+#endif
+
+ lw t1,LsFlink(a0) // get address of next entry
+ subu a0,a0,DpDpcListEntry // compute address of DPC Object
+ sw t1,LsFlink(a1) // set address of next in header
+ sw a1,LsBlink(t1) // set address of previous in next
+ lw a1,DpDeferredContext(a0) // get deferred context argument
+ lw a2,DpSystemArgument1(a0) // get first system argument
+ lw a3,DpSystemArgument2(a0) // get second system argument
+ lw t1,DpDeferredRoutine(a0) // get deferred routine address
+ sw zero,DpLock(a0) // clear DPC inserted state
+ lw t2,PbDpcQueueDepth(s0) // decrement the DPC queue depth
+ subu t2,t2,1 //
+ sw t2,PbDpcQueueDepth(s0) //
+
+#if !defined(NT_UP)
+
+ sw zero,PbDpcLock(s0) // set spin lock not owned
+
+#endif
+
+ ENABLE_INTERRUPTS(v0) // enable interrupts
+
+#if DBG
+
+ sw t1,DpFunct(sp) // save DPC function address
+ lw t2,KeTickCount // save current tick count
+ sw t2,DpStart(sp) //
+ lw t3,PbInterruptCount(s0) // get current interrupt count
+ lw t4,PbInterruptTime(s0) // get current interrupt time
+ sw t3,DpCount(sp) // save interrupt count at start of DPC
+ sw t4,DpTime(sp) // save interrupt time at start of DPC
+
+#endif
+
+ jal t1 // call DPC routine
+
+#if DBG
+
+ lbu t0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ sltu t1,t0,DISPATCH_LEVEL // check if less than dispatch level
+ beq zero,t1,30f // if eq, not less than dispatch level
+ lw t1,DpFunct(sp) // get DPC function address
+ jal DbgBreakPoint // execute debug breakpoint
+30: lw t0,KeTickCount // get current tick count
+ lw t1,DpStart(sp) // get starting tick count
+ lw t2,DpFunct(sp) // get DPC function address
+ subu t3,t0,t1 // compute time in DPC function
+ sltu t3,t3,100 // check if less than one second
+ bne zero,t3,40f // if ne, less than one second
+ lw t3,PbInterruptCount(s0) // get current interrupt count
+ lw t4,PbInterruptTime(s0) // get current interrupt time
+ lw t5,DpCount(sp) // get starting interrupt count
+ lw t6,DpTime(sp) // get starting interrupt time
+ subu t3,t3,t5 // compute number of interrupts
+ subu t4,t4,t6 // compute time of interrupts
+ jal DbgBreakPoint // execute debug breakpoint
+
+#endif
+
+40: DISABLE_INTERRUPTS(v0) // disable interrupts
+
+ b 10b //
+
+//
+// Unlock DPC list and clear DPC active.
+//
+
+50:
+
+#if !defined(NT_UP)
+
+ sw zero,PbDpcLock(s0) // set spin lock not owned
+
+#endif
+
+60: sw zero,PbDpcRoutineActive(s0) // clear DPC routine active
+ sw zero,KiPcr + PcDpcRoutineActive(zero) //
+ sw zero,PbDpcInterruptRequested(s0) // clear DPC interrupt requested
+
+//
+// Check one last time that the DPC list is empty. This is required to
+// close a race condition with the DPC queuing code where it appears that
+// a DPC routine is active (and thus an interrupt is not requested), but
+// this code has decided the DPC list is empty and is clearing the DPC
+// active flag.
+//
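+// In C terms the exit path is (field names follow the code, the list helper
+// is illustrative):
+//
+//          Prcb->DpcRoutineActive = FALSE;         // appear inactive first
+//          Prcb->DpcInterruptRequested = FALSE;
+//
+//          if (!IsListEmpty(&Prcb->DpcListHead)) { // a DPC slipped in while
+//              goto ProcessDpcList;                // we looked active - retry
+//          }
+//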
+
+ addu a1,s0,PbDpcListHead // compute DPC listhead address
+ lw a0,LsFlink(a1) // get address of next entry
+ bne a0,a1,5b // if ne, DPC list is not empty
+ lw ra,DpRa(sp) // restore return address
+ addu sp,sp,DpcFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiRetireDpcList
diff --git a/private/ntos/ke/mips/x4trap.s b/private/ntos/ke/mips/x4trap.s
new file mode 100644
index 000000000..dce5e158d
--- /dev/null
+++ b/private/ntos/ke/mips/x4trap.s
@@ -0,0 +1,4622 @@
+// TITLE("Interrupt and Exception Processing")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// x4trap.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process MIPS
+// interrupt and exception conditions.
+//
+// N.B. This module executes in KSEG0 or KSEG1 and, in general, cannot
+// tolerate a TB Miss. Registers k0 and k1 are used for argument
+// passing during the initial stage of interrupt and exception
+// processing, and therefore, extreme care must be exercised when
+// modifying this module.
+//
+// Author:
+//
+// David N. Cutler (davec) 4-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Constant Value Definitions")
+//++
+//
+// The following are definitions of constants used in this module.
+//
+//--
+
+#define PSR_ENABLE_MASK ((0xff << PSR_INTMASK) | (0x3 << PSR_KSU) | (1 << PSR_EXL))
+
+#define PSR_MASK (~((0x3 << PSR_KSU) | (1 << PSR_EXL))) // PSR exception mask
+
+//
+// Define exception handler frame structure.
+//
+
+ .struct 0
+ .space 4 * 4 // argument save area
+HdRa: .space 4 // return address
+ .space 3 * 4 //
+HandlerFrameLength: // handler frame length
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KdpOweBreakpoint 1
+ .extern KeGdiFlushUserBatch 4
+ .extern KeNumberTbEntries 4
+ .extern PsWatchEnabled 1
+
+//
+// Define set of load/store instructions.
+//
+// This set has a one bit for each of the possible load/store instructions.
+//
+// These include: ldl, ldr, lb, lh, lwl, lw, lbu, lhu, lwr, lwu, sb, sh, swl,
+// sw, sdl, sdr, swr, ll, lwc1, lwc2, lld, ldc1, ldc2, ld, sc,
+// swc1, swc2, sdc, sdc1, sdc2, sd.
+//
+// N.B. The set is biased by a base of 0x20 which is the opcode for lb.
+//
+
+ .sdata
+ .align 3
+ .globl KiLoadInstructionSet
+KiLoadInstructionSet: // load instruction set
+ .word 0x0c000000 //
+ .word 0xf7f77fff //
+
+//
+// Define count of bad virtual address register cases.
+//
+
+#if DBG
+
+ .globl KiBadVaddrCount
+KiBadVaddrCount: // count of bad virtual address cases
+ .word 0 //
+
+ .globl KiMismatchCount
+KiMismatchCount: // count of read miss address mismatches
+ .word 0 //
+
+#endif
+
+
+ SBTTL("System Startup")
+//++
+//
+// Routine Description:
+//
+// Control is transferred to this routine when the system is booted. Its
+// function is to transfer control to the real system startup routine.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiSystemStartup)
+
+ j KiInitializeSystem // initialize system
+
+ .end KiSystemStartup
+
+ SBTTL("TB Miss Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a TB miss on a reference
+// to any part of the 32-bit address space from kernel mode. Interrupts
+// are disabled when this routine is entered.
+//
+// The function of this routine is to load a pair of second level PTEs
+// from the current page table into the TB. The context register is
+// loaded by hardware with the virtual address of the PTE * 2. In addition,
+// the entryhi register is loaded with the virtual tag, such that the PTEs
+// can be loaded directly into the TB. The badvaddr register is loaded by
+// hardware with the virtual address of the fault and is saved in case the
+// page table page is not currently mapped by the TB.
+//
+// If a fault occurs when attempting to load the specified PTEs from the
+// current page table, then it is vectored through the general exception
+// vector at KSEG0_BASE + 0x180.
+//
+// This routine is copied to address KSEG0_BASE at system startup.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+// N.B. This routine saves the contents of the badvaddr register in k1
+// so that it can be used by the general exception vector routine
+// if an exception occurs while trying to load the first PTE from
+// memory.
+//
+//--
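+
+//
+// N.B. Ignoring the chip-specific workarounds and the bookkeeping done in
+//      k1 and taglo, the handler reduces to the following C-like sketch
+//      (the register and TLB helper names are illustrative):
+//
+//          Pte = (PULONG)((LONG)ReadContextRegister() >> 1);
+//
+//          EntryLo0 = Pte[0];                      // may itself TB miss; the
+//          EntryLo1 = Pte[1];                      // general exception vector
+//                                                  // then resolves the PDE
+//          WriteRandomTbEntry(EntryLo0, EntryLo1);
+//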
+
+ LEAF_ENTRY(KiTbMiss)
+
+//
+// The following code is required on 2.x R4000 chips to work around a
+// chip bug. The workaround is not needed for 3.0 and later chips.
+//
+
+ START_REGION(KiTbMissStartAddress2.x)
+
+ .set noreorder
+ .set noat
+ nop // ****** r4000 errata ******
+ mfc0 k0,psr // ****** r4000 errata ******
+ mtc0 zero,psr // ****** r4000 errata ******
+ mtc0 k0,psr // ****** r4000 errata ******
+ nop // ****** r4000 errata ******
+ .set at
+ .set reorder
+
+ START_REGION(KiTbMissStartAddress3.x)
+
+ .set noreorder
+ .set noat
+
+//
+// The following code is required on all MP systems to work around a problem
+// where the hardware reports a TB miss even when the entry is really in the
+// TB.
+//
+
+#if defined(NT_UP)
+
+ mfc0 k0,context // get virtual address * 2 of PTE
+ mfc0 k1,badvaddr // get bad virtual address
+ sra k0,k0,1 // compute virtual address of PTE
+
+#else
+
+ tlbp // ****** r4400 errata ******
+ mfc0 k0,context // ****** r4400 errata ******
+ nop // ****** r4400 errata ******
+ mfc0 k1,index // ****** r4400 errata ******
+ sra k0,k0,1 // compute virtual address of PTE
+ bgez k1,20f // ****** r4400 errata ******
+ mfc0 k1,badvaddr // get bad virtual address
+
+#endif
+
+ mtc0 k0,taglo // set first level active flag
+ lw k1,0(k0) // get first PTE - may fault
+ lw k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+20: eret //
+ .set at
+ .set reorder
+
+ END_REGION(KiTbMissEndAddress3.x)
+
+//
+// The r10000 TB miss routine is different since the fine designers of the
+// chip didn't understand what the frame mask register was really for and
+// only masked the PFN bits. Unfortunately, they didn't mask the UC bits,
+// which therefore must be masked manually.
+//
+
+ START_REGION(KiTbMissStartAddress9.x)
+
+ .set noreorder
+ .set noat
+ mfc0 k0,context // get virtual address * 2 of PTE
+ mfc0 k1,badvaddr // get bad virtual address
+ sra k0,k0,1 // compute virtual address of PTE
+ mtc0 k0,taglo // set first level active flag
+ lwu k1,0(k0) // get first PTE - may fault
+ lwu k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+20: eret //
+ .set at
+ .set reorder
+
+ END_REGION(KiTbMissEndAddress9.x)
+
+ .end KiTbMiss
+
+ SBTTL("XTB Miss Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a TB miss on a reference
+// to any part of the 64-bit address space from user mode. Interrupts
+// are disabled when this routine is entered.
+//
+// The function of this routine is to load a pair of second level PTEs
+// from the current page table into the TB. The context register is
+// loaded by hardware with the virtual address of the PTE * 2. In addition,
+// the entryhi register is loaded with the virtual tag, such that the PTEs
+// can be loaded directly into the TB. The badvaddr register is loaded by
+// hardware with the virtual address of the fault and is saved in case the
+// page table page is not currently mapped by the TB.
+//
+// If a fault occurs when attempting to load the specified PTEs from the
+// current page table, then it is vectored through the general exception
+// vector at KSEG0_BASE + 0x180.
+//
+// This routine is copied to address KSEG0_BASE + 0x80 at system startup.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+// N.B. This routine saves the contents of the badvaddr register in k1
+// so that it can be used by the general exception vector routine
+// if an exception occurs while trying to load the first PTE from
+// memory.
+//
+//--
+
+ LEAF_ENTRY(KiXTbMiss)
+
+//
+// The following code is required on 2.x R4000 chips to work around a
+// chip bug. The workaround is not needed for 3.0 and later chips.
+//
+
+ START_REGION(KiXTbMissStartAddress2.x)
+
+ .set noreorder
+ .set noat
+ nop // ****** r4000 errata ******
+ mfc0 k0,psr // ****** r4000 errata ******
+ mtc0 zero,psr // ****** r4000 errata ******
+ mtc0 k0,psr // ****** r4000 errata ******
+ nop // ****** r4000 errata ******
+ .set at
+ .set reorder
+
+ START_REGION(KiXTbMissStartAddress3.x)
+
+ .set noreorder
+ .set noat
+
+//
+// The following code is required on all MP systems to work around a problem
+// where the hardware reports a TB miss even when the entry is really in the
+// TB.
+//
+
+#if defined(NT_UP)
+
+ mfc0 k0,context // get virtual address * 2 of PTE
+ dmfc0 k1,xcontext // get extended context register
+ sra k0,k0,1 // compute virtual address of PTE
+ dsrl k1,k1,22 // isolate bits 63:62 and 39:31 of address
+ and k1,k1,0x7ff // check if valid user address
+ beq zero,k1,5f // if eq, valid user address
+ xor k1,k1,0x7ff // check if valid kernel address
+ bne zero,k1,30f // if ne, invalid kernel address
+5: mfc0 k1,badvaddr // get bad virtual address
+
+#else
+
+//
+// ****** r4400 errata ******
+//
+
+ dmfc0 k1,xcontext // get extended context register
+ tlbp // probe TB for miss address
+ mfc0 k0,context // get virtual address * 2 of PTE
+ dsrl k1,k1,22 // isolate bits 63:62 and 39:31 of
+ and k1,k1,0x7ff // virtual address
+ beq zero,k1,5f // if eq, valid user address
+ xor k1,k1,0x7ff // check if valid kernel address
+ bne zero,k1,30f // if ne, invalid kernel address
+5: mfc0 k1,index // get index register
+ sra k0,k0,1 // compute virtual address of PTE
+ bgez k1,20f // if gez, address already in TB
+ mfc0 k1,badvaddr // get bad virtual address
+
+#endif
+
+ mtc0 k0,taglo // set first level active flag
+ lw k1,0(k0) // get first PTE - may fault
+ lw k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+20: eret //
+
+//
+// The user address is greater than 32-bits.
+//
+
+30: j KiInvalidUserAddress //
+ nop //
+ .set at
+ .set reorder
+
+ END_REGION(KiXTbMissEndAddress3.x)
+
+//
+// The r10000 TB miss routine is different since the fine designers of the
+// chip didn't understand what the frame mask register was really for and
+// only masked the PFN bits. Unfortunately, they didn't mask the UC bits,
+// which therefore must be masked manually.
+//
+
+ START_REGION(KiXTbMissStartAddress9.x)
+
+ .set noreorder
+ .set noat
+ mfc0 k0,context // get virtual address * 2 of PTE
+ dmfc0 k1,xcontext // get extended context register
+ sra k0,k0,1 // compute virtual address of PTE
+ dsrl k1,k1,22 // isolate bits 63:62 and 43:31 of address
+ and k1,k1,0x7ff // check if valid user address
+ beq zero,k1,5f // if eq, valid user address
+ xor k1,k1,0x7ff // check if valid kernel address
+ bne zero,k1,30f // if ne, invalid kernel address
+5: mfc0 k1,badvaddr // get bad virtual address
+ mtc0 k0,taglo // set first level active flag
+ lwu k1,0(k0) // get first PTE - may fault
+ lwu k0,4(k0) // get second PTE - no fault
+ mtc0 k1,entrylo0 // set first PTE value
+ mtc0 k0,entrylo1 // set second PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,10f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+
+#endif
+
+10: nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+ eret //
+
+//
+// The user address is greater than 32-bits.
+//
+
+30: j KiInvalidUserAddress //
+ nop //
+ .set at
+ .set reorder
+
+ END_REGION(KiXTbMissEndAddress9.x)
+
+ .end KiXTbMiss
+
+ SBTTL("Cache Parity Error Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a cache parity error and runs
+// uncached. Its function is to remap the PCR uncached and call the cache
+// parity routine to save all pertinent cache error information, establish
+// an error stack frame, and call the system cache parity error routine.
+//
+// N.B. The cache parity error routine runs uncached and must be
+// extremely careful not to access any cached addresses.
+//
+// N.B. If a second exception occurs while cache error handling is in
+// progress, then a soft reset is performed by the hardware.
+//
+// N.B. While ERL is set in the PSR, the user address space is replaced
+// by an uncached, unmapped address that corresponds to physical
+// memory.
+//
+// N.B. There is room for up to 32 instructions in the vectored cache
+// parity error routine.
+//
+// This routine is copied to address KSEG1_BASE + 0x100 at system startup.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiCacheError)
+
+ START_REGION(KiCacheErrorStartAddress)
+
+ .set noreorder
+ .set noat
+ nop // fill
+ nop // fill
+ la k0,CACHE_ERROR_VECTOR // get cache error vector address
+ lw k0,0(k0) // get cache error routine address
+ nop // fill
+ j k0 // dispatch to cache error routine
+ nop // fill
+ .set at
+ .set reorder
+
+ END_REGION(KiCacheErrorEndAddress)
+
+ .end KiCacheError
+
+ SBTTL("General Exception Vector Routine")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a general exception. The reason
+// for the exception is contained in the cause register. When this routine
+// is entered, interrupts are disabled.
+//
+// The primary function of this routine is to route the exception to the
+// appropriate exception handling routine. If the cause of the exception
+// is a read or write TB miss and the access can be resolved, then this
+// routine performs the necessary processing and returns from the exception.
+// If the exception cannot be resolved, then it is dispatched to the proper
+// routine.
+//
+// This routine is copied to address KSEG0_BASE + 0x180 at system startup.
+//
+// N.B. This routine is very carefully written to not destroy k1 until
+// it has been determined that the exception did not occur in the
+// user TB miss vector routine.
+//
+// Arguments:
+//
+// k1 - Supplies the bad virtual address if the exception occurred from
+// the TB miss vector routine while attempting to load a PTE into the
+// TB.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiGeneralException)
+
+ START_REGION(KiGeneralExceptionStartAddress)
+
+ .set noreorder
+ .set noat
+ mfc0 k0,cause // get cause of exception
+ mtc0 k1,lladdr // save possible bad virtual address
+ li k1,XCODE_READ_MISS // get exception code for read miss
+ and k0,k0,R4000_MISS_MASK // isolate exception code
+
+//
+// The read and write miss codes differ by exactly one bit such that they
+// can be tested for by a single mask operation followed by a test for the
+// read miss code.
+//
+
+ bne k0,k1,20f // if ne, not read or write miss
+ mfc0 k1,badvaddr // get the bad virtual address
+
+//
+// The exception is either a read or a write to an address that is not mapped
+// by the TB, or a reference to an invalid entry that is in the TB. Attempt to
+// resolve the reference by loading a pair of PDEs from the page directory
+// page.
+//
+// There are four cases to be considered:
+//
+// 1. The address specified by the badvaddr register is not in the TB.
+//
+// For this case, a pair of PDEs are loaded into the TB from the
+// page directory page and execution is resumed.
+//
+// 2. The address specified by the badvaddr register is in the TB and the
+// address is not the address of a page table page.
+//
+// For this case, an invalid translation has occurred, but since it is
+// not the address of a page table page, it could not have come
+// from the TB Miss handler. The badvaddr register contains the virtual
+// address of the exception and is passed to the appropriate exception
+// routine.
+//
+// 3. The address specified by the badvaddr register is in the TB, the
+// address is the address of a page table page, and the first level
+// TB miss routine was active when the current TB miss occurred.
+//
+// For this case, an invalid translation has occurred, but since it is
+// a page table page and the first level TB miss routine active flag
+// is set, the exception occurred in the TB Miss handler. The
+// integer register k1 contains the virtual address of the exception
+// as saved by the first level TB fill handler and is passed to the
+// appropriate exception routine.
+//
+// N.B. The virtual address that is passed to the exception routine is
+// the exact virtual address that caused the fault and is obtained
+// from integer register k1.
+//
+// 4. The address specified by the badvaddr register is in the TB, the
+// address is the address of a page table page, and the first level
+// TB miss routine was not active when the current TB miss occurred.
+//
+// For this case, an invalid translation has occurred, but since it is
+// a page table page and the first level TB miss routine active flag
+// is clear, the exception must have occurred as part of a probe
+// operation or is a page fault to an invalid page.
+//
+// N.B. The virtual address that is passed to the exception routine is
+// the exact virtual address that caused the fault and is obtained
+// from the badvaddr register.
+//
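+// The case analysis corresponds to the following decision sketch (all of
+// the helper names are illustrative):
+//
+//          if (!TbEntryPresent(BadVaddr)) {                // case 1
+//              LoadPdePairIntoTb(BadVaddr);                // fill and resume
+//
+//          } else if (!IsPageTablePage(BadVaddr)) {        // case 2
+//              DispatchException(BadVaddr);
+//
+//          } else if (FirstLevelMissActive) {              // case 3
+//              DispatchException(SavedBadVaddr);           // from register k1
+//
+//          } else {                                        // case 4
+//              DispatchException(BadVaddr);
+//          }
+//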
+
+ tlbp // probe TB for the faulting address
+ nop // 2 cycle hazard
+ nop //
+ mfc0 k1,index // read result of probe
+ mfc0 k0,context // get virtual address * 2 of PDE
+ bgez k1,10f // if gez, entry is in TB
+ sra k0,k0,1 // compute virtual address of PDE
+
+//
+// Case 1 - The entry is not in the TB.
+//
+// The TB miss is a reference to a page table page and a pair of PDEs are
+// loaded into the TB from the page directory page and execution is continued.
+//
+
+ lw k1,4(k0) // get second PDE value
+ lw k0,0(k0) // get first PDE value
+ mtc0 k1,entrylo1 // set second PTE value
+ mtc0 k0,entrylo0 // set first PTE value
+
+#if DBG
+
+ xor k1,k1,k0 // compare G-bits
+ and k1,k1,1 << ENTRYLO_G // isolate G-bit
+ beq zero,k1,5f // if eq, G-bits match
+ nop // fill
+ mtc0 zero,entrylo0 // reset first PTE value
+ mtc0 zero,entrylo1 // reset second PTE value
+5: //
+
+#endif
+
+ nop //
+ tlbwr // write entry randomly into TB
+ nop // 3 cycle hazard
+ nop //
+ mtc0 zero,taglo // 1 cycle hazard - clear active flag
+
+#if DBG
+
+ lw k0,KiPcr + PcPrcb(zero) // get processor block address
+ nop // fill
+ lw k1,PbSecondLevelTbFills(k0) // increment number of second level
+ nop // fill
+ addu k1,k1,1 // TB fills
+ sw k1,PbSecondLevelTbFills(k0) //
+
+#endif
+
+ eret //
+ nop // errata
+ nop //
+ nop //
+ eret //
+
+//
+// Case 2, 3, or 4 - The entry is in the TB.
+//
+// Check for one of the three remaining cases.
+//
+
+10: mfc0 k1,badvaddr // get bad virtual address
+ mfc0 k0,taglo // get first level flag
+ srl k1,k1,PDI_SHIFT // isolate page directory index
+ xor k1,k1,PDE_BASE >> PDI_SHIFT // check if page table reference
+ bne zero,k1,20f // if ne, not a page table page
+ mfc0 k1,badvaddr // get bad virtual address
+
+//
+// Case 2 or 3 - The bad virtual address is the address of a page table page.
+//
+// Check for one of the two remaining cases.
+//
+
+ beq zero,k0,20f // if eq, not first level miss
+ nop // fill
+ mfc0 k1,lladdr // get actual bad virtual address
+
+//
+// Save bad virtual address in case it is needed by the exception handling
+// routine.
+//
+
+20: mfc0 k0,epc // get exception PC
+ mtc0 zero,taglo // clear first level miss flag
+ sd t7,KiPcr + PcSavedT7(zero) // save integer registers t7 - t9
+ sd t8,KiPcr + PcSavedT8(zero) //
+ sd t9,KiPcr + PcSavedT9(zero) //
+ sw k0,KiPcr + PcSavedEpc(zero) // save exception PC
+ sw k1,KiPcr + PcBadVaddr(zero) // save bad virtual address
+
+//
+// The bad virtual address is saved in the PCR in case it is needed by the
+// respective dispatch routine.
+//
+// N.B. EXL must be cleared in the current PSR so switching the stack
+// can occur with TB Misses enabled.
+//
+
+ mfc0 t9,psr // get current processor status
+ li t8,1 << PSR_CU1 // set coprocessor 1 enable bit
+ mfc0 t7,cause // get cause of exception
+ mtc0 t8,psr // clear EXL and disable interrupts
+ lw k1,KiPcr + PcInitialStack(zero) // get initial kernel stack
+ and t8,t9,1 << PSR_PMODE // isolate previous processor mode
+ bnel zero,t8,30f // if ne, previous mode was user
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// If the kernel stack has overflowed, then a switch to the panic stack is
+// performed and the exception code is set to cause a bug check.
+//
+
+ lw k1,KiPcr + PcStackLimit(zero) // get current stack limit
+ subu t8,sp,TrapFrameLength // allocate trap frame
+ sltu k1,t8,k1 // check for stack overflow
+ beql zero,k1,30f // if eq, no stack overflow
+ nop // fill
+
+//
+// The kernel stack has overflowed. Switch to the panic stack and
+// cause a bug check to occur by setting the exception cause value to the
+// panic code.
+//
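+// In C terms the check above and the switch below amount to (field and
+// constant names follow the code; the structure is only illustrative):
+//
+//          if ((ULONG)(CurrentSp - TrapFrameLength) < Pcr->StackLimit) {
+//              Pcr->InitialStack = Pcr->PanicStack;
+//              Pcr->StackLimit = Pcr->PanicStack - KERNEL_STACK_SIZE;
+//              ExceptionCode = XCODE_PANIC;        // force a bug check
+//              NewTrapFrame = Pcr->PanicStack - TrapFrameLength;
+//          }
+//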
+
+ lw t7,KiPcr + PcInitialStack(zero) // ***** temp ****
+ lw t8,KiPcr + PcStackLimit(zero) // ***** temp ****
+ sw t7,KiPcr + PcSystemReserved(zero) // **** temp ****
+ sw t8,KiPcr + PcSystemReserved + 4(zero) // **** temp ****
+ lw k1,KiPcr + PcPanicStack(zero) // get address of panic stack
+ li t7,XCODE_PANIC // set cause of exception to panic
+ sw k1,KiPcr + PcInitialStack(zero) // reset initial stack pointer
+ subu t8,k1,KERNEL_STACK_SIZE // compute and set stack limit
+ sw t8,KiPcr + PcStackLimit(zero) //
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// Allocate a trap frame, save partial context, and dispatch to the appropriate
+// exception handling routine.
+//
+// N.B. At this point:
+//
+// t7 contains the cause of the exception,
+// t8 contains the new stack pointer, and
+// t9 contains the previous processor state.
+//
+// Since the kernel stack is not wired into the TB, a TB miss can occur
+// during the switch of the stack and the subsequent storing of context.
+//
+//
+
+30: sd sp,TrXIntSp(t8) // save integer register sp
+ move sp,t8 // set new stack pointer
+ cfc1 t8,fsr // get floating status register
+ sd gp,TrXIntGp(sp) // save integer register gp
+ sd s8,TrXIntS8(sp) // save integer register s8
+ sw t8,TrFsr(sp) // save current FSR
+ sw t9,TrPsr(sp) // save processor state
+ sd ra,TrXIntRa(sp) // save integer register ra
+ lw gp,KiPcr + PcSystemGp(zero) // set system general pointer
+ and t8,t7,R4000_XCODE_MASK // isolate exception code
+
+//
+// Check for system call exception.
+//
+// N.B. While k1 is being used a TB miss cannot be tolerated.
+//
+
+ xor k1,t8,XCODE_SYSTEM_CALL // check for system call exception
+ bne zero,k1,40f // if ne, not system call exception
+ move s8,sp // set address of trap frame
+
+//
+// Get the address of the current thread and form the next PSR value.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ li t8,PSR_MASK // get the PSR mask
+ and t8,t9,t8 // clear EXL and mode in PSR
+ sw ra,TrFir(s8) // set real continuation address
+ sb zero,TrSavedFlag(s8) // clear s-registers saved flag
+ j KiSystemServiceNormal // execute normal system service
+ mtc0 t8,psr // enable interrupts
+
+//
+// Save the volatile integer register state.
+//
+
+40: sd AT,TrXIntAt(s8) // save assembler temporary register
+ sd v0,TrXIntV0(s8) // save integer register v0
+ sd v1,TrXIntV1(s8) // save integer register v1
+ sd a0,TrXIntA0(s8) // save integer registers a0 - a3
+ sd a1,TrXIntA1(s8) //
+ sd a2,TrXIntA2(s8) //
+ sd a3,TrXIntA3(s8) //
+ sd t0,TrXIntT0(s8) // save integer registers t0 - t2
+ sd t1,TrXIntT1(s8) //
+ sd t2,TrXIntT2(s8) //
+ ld t0,KiPcr + PcSavedT7(zero) // get saved registers t7 - t9
+ ld t1,KiPcr + PcSavedT8(zero) //
+ ld t2,KiPcr + PcSavedT9(zero) //
+ sd t3,TrXIntT3(s8) // save integer register t3 - t7
+ sd t4,TrXIntT4(s8) //
+ sd t5,TrXIntT5(s8) //
+ sd t6,TrXIntT6(s8) //
+ sd t0,TrXIntT7(s8) //
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ sd t1,TrXIntT8(s8) // save integer registers t8 - t9
+ sd t2,TrXIntT9(s8) //
+ mflo t3 // get multiplier/quotient lo and hi
+ mfhi t4 //
+ lw t5,KiPcr + PcXcodeDispatch(t8) // get exception routine address
+ xor t6,t8,XCODE_INTERRUPT // check for interrupt exception
+ lw t8,KiPcr + PcSavedEpc(zero) // get exception PC
+ sd t3,TrXIntLo(s8) // save multiplier/quotient lo and hi
+ sd t4,TrXIntHi(s8) //
+ beq zero,t6,50f // if eq, interrupt exception
+ sw t8,TrFir(s8) // save exception PC
+
+//
+// Save the volatile floating register state.
+//
+
+ sdc1 f0,TrFltF0(s8) // save floating register f0 - f19
+ sdc1 f2,TrFltF2(s8) //
+ sdc1 f4,TrFltF4(s8) //
+ sdc1 f6,TrFltF6(s8) //
+ sdc1 f8,TrFltF8(s8) //
+ sdc1 f10,TrFltF10(s8) //
+ sdc1 f12,TrFltF12(s8) //
+ sdc1 f14,TrFltF14(s8) //
+ sdc1 f16,TrFltF16(s8) //
+ sdc1 f18,TrFltF18(s8) //
+ srl t6,t9,PSR_PMODE // isolate previous mode
+ and t6,t6,1 //
+ li t0,PSR_MASK // clear EXL and mode in PSR
+ and t9,t9,t0 //
+
+//
+// Dispatch to exception handling routine with:
+//
+// t5 - Address of the exception handling routine.
+// t6 - If not an interrupt, then the previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - If not an interrupt, then the new PSR with EXL and mode clear.
+// Otherwise the previous PSR with EXL and mode set.
+//
+
+50: li t4,TRUE // get saved s-registers flag
+ bltzl t7,60f // if ltz, exception in delay slot
+ addu t8,t8,4 // compute address of exception
+60: j t5 // dispatch to exception routine
+ sb t4,TrSavedFlag(s8) // set s-registers saved flag
+ .set at
+ .set reorder
+
+ END_REGION(KiGeneralExceptionEndAddress)
+
+ .end KiGeneralException
+
+ SBTTL("Invalid User Address")
+//++
+//
+// Routine Description:
+//
+// This routine is entered when an invalid user address is encountered
+// in the XTB Miss handler. When this routine is entered, interrupts
+// are disabled.
+//
+// The primary function of this routine is to route the exception to the
+// invalid user 64-bit address exception handling routine.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiInvalidUserAddress)
+
+ .set noreorder
+ .set noat
+ dmfc0 k1,badvaddr // get the bad virtual address
+ dmfc0 k0,epc // get exception PC
+ sd k1,KiPcr + PcSystemReserved(zero) // **** temp ****
+ dmfc0 k1,xcontext // **** temp ****
+ sd k0,KiPcr + PcSystemReserved + 8(zero) // **** temp ****
+ sd k1,KiPcr + PcSystemReserved + 16(zero) // **** temp ****
+ ld k1,KiPcr + PcSystemReserved(zero) // **** temp ****
+ sd t7,KiPcr + PcSavedT7(zero) // save integer registers t7 - t9
+ sd t8,KiPcr + PcSavedT8(zero) //
+ sd t9,KiPcr + PcSavedT9(zero) //
+ sw k0,KiPcr + PcSavedEpc(zero) // save exception PC
+ sw k1,KiPcr + PcBadVaddr(zero) // save bad virtual address
+
+//
+// The bad virtual address is saved in the PCR in case it is needed by the
+// respective dispatch routine.
+//
+// N.B. EXL must be cleared in the current PSR so switching the stack
+// can occur with TB Misses enabled.
+//
+
+ mfc0 t9,psr // get current processor status
+ li t8,1 << PSR_CU1 // set coprocessor 1 enable bit
+ mfc0 t7,cause // get cause of exception
+ mtc0 t8,psr // clear EXL and disable interrupts
+ lw k1,KiPcr + PcInitialStack(zero) // get initial kernel stack
+ and t8,t9,1 << PSR_PMODE // isolate previous processor mode
+ bnel zero,t8,10f // if ne, previous mode was user
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// If the kernel stack has overflowed, then a switch to the panic stack is
+// performed and the exception code is set to cause a bug check.
+//
+
+ lw k1,KiPcr + PcStackLimit(zero) // get current stack limit
+ subu t8,sp,TrapFrameLength // allocate trap frame
+ sltu k1,t8,k1 // check for stack overflow
+ beql zero,k1,10f // if eq, no stack overflow
+ nop // fill
+
+//
+// The kernel stack has overflowed. Switch to the panic stack and
+// cause a bug check to occur by setting the exception cause value to the
+// panic code.
+//
+
+ lw k1,KiPcr + PcPanicStack(zero) // get address of panic stack
+ li t7,XCODE_PANIC // set cause of exception to panic
+ sw k1,KiPcr + PcInitialStack(zero) // reset initial stack pointer
+ subu t8,k1,KERNEL_STACK_SIZE // compute and set stack limit
+ sw t8,KiPcr + PcStackLimit(zero) //
+ subu t8,k1,TrapFrameLength // allocate trap frame
+
+//
+// Allocate a trap frame, save partial context, and dispatch to the appropriate
+// exception handling routine.
+//
+// N.B. At this point:
+//
+// t7 contains the cause of the exception,
+// t8 contains the new stack pointer, and
+// t9 contains the previous processor state.
+//
+// Since the kernel stack is not wired into the TB, a TB miss can occur
+// during the switch of the stack and the subsequent storing of context.
+//
+//
+
+10: sd sp,TrXIntSp(t8) // save integer register sp
+ move sp,t8 // set new stack pointer
+ cfc1 t8,fsr // get floating status register
+ sd gp,TrXIntGp(sp) // save integer register gp
+ sd s8,TrXIntS8(sp) // save integer register s8
+ sw t8,TrFsr(sp) // save current FSR
+ sw t9,TrPsr(sp) // save processor state
+ sd ra,TrXIntRa(sp) // save integer register ra
+ lw gp,KiPcr + PcSystemGp(zero) // set system general pointer
+ and t8,t7,R4000_XCODE_MASK // isolate exception code
+
+//
+// Check for panic stack switch.
+//
+// N.B. While k1 is being used a TB miss cannot be tolerated.
+//
+
+ xor k1,t8,XCODE_PANIC // check for panic stack switch
+ bnel zero,k1,20f // if ne, invalid user address
+ li t8,XCODE_INVALID_USER_ADDRESS // set exception dispatch code
+
+//
+// Save the volatile integer register state.
+//
+
+20: move s8,sp // set address of trap frame
+ sd AT,TrXIntAt(s8) // save assembler temporary register
+ sd v0,TrXIntV0(s8) // save integer register v0
+ sd v1,TrXIntV1(s8) // save integer register v1
+ sd a0,TrXIntA0(s8) // save integer registers a0 - a3
+ sd a1,TrXIntA1(s8) //
+ sd a2,TrXIntA2(s8) //
+ sd a3,TrXIntA3(s8) //
+ sd t0,TrXIntT0(s8) // save integer registers t0 - t2
+ sd t1,TrXIntT1(s8) //
+ sd t2,TrXIntT2(s8) //
+ ld t0,KiPcr + PcSavedT7(zero) // get saved registers t7 - t9
+ ld t1,KiPcr + PcSavedT8(zero) //
+ ld t2,KiPcr + PcSavedT9(zero) //
+ sd t3,TrXIntT3(s8) // save integer register t3 - t7
+ sd t4,TrXIntT4(s8) //
+ sd t5,TrXIntT5(s8) //
+ sd t6,TrXIntT6(s8) //
+ sd t0,TrXIntT7(s8) //
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ sd t1,TrXIntT8(s8) // save integer registers t8 - t9
+ sd t2,TrXIntT9(s8) //
+ mflo t3 // get multiplier/quotient lo and hi
+ mfhi t4 //
+ lw t5,KiPcr + PcXcodeDispatch(t8) // get exception routine address
+ lw t8,KiPcr + PcSavedEpc(zero) // get exception PC
+ sd t3,TrXIntLo(s8) // save multiplier/quotient lo and hi
+ sd t4,TrXIntHi(s8) //
+ sw t8,TrFir(s8) // save exception PC
+
+//
+// Save the volatile floating register state.
+//
+
+ sdc1 f0,TrFltF0(s8) // save floating register f0 - f19
+ sdc1 f2,TrFltF2(s8) //
+ sdc1 f4,TrFltF4(s8) //
+ sdc1 f6,TrFltF6(s8) //
+ sdc1 f8,TrFltF8(s8) //
+ sdc1 f10,TrFltF10(s8) //
+ sdc1 f12,TrFltF12(s8) //
+ sdc1 f14,TrFltF14(s8) //
+ sdc1 f16,TrFltF16(s8) //
+ sdc1 f18,TrFltF18(s8) //
+ srl t6,t9,PSR_PMODE // isolate previous mode
+ and t6,t6,1 //
+ li t0,PSR_MASK // clear EXL and mode in PSR
+ and t9,t9,t0 //
+
+//
+// Dispatch to exception handling routine with:
+//
+// t5 - Address of the exception handling routine.
+// t6 - Previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+//
+
+ li t4,TRUE // get s-registers saved flag value
+ bltzl t7,30f // if ltz, exception in delay slot
+ addu t8,t8,4 // compute address of exception
+30: j t5 // dispatch to exception routine
+ sb t4,TrSavedFlag(s8) // set s-registers saved flag
+ .set at
+ .set reorder
+
+ .end KiInvalidUserAddress
+
+ SBTTL("Address Error Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiAddressErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a read or write address error exception
+// code is read from the cause register. When this routine is entered,
+// interrupts are disabled.
+//
+// The function of this routine is to raise a data misalignment exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiReadAddressErrorException)
+
+ li t0,0 // set read indicator
+ b 10f // join common code
+
+ ALTERNATE_ENTRY(KiWriteAddressErrorException)
+
+ li t0,1 // set write indicator
+
+//
+// Common code for read and write address error exceptions.
+//
+
+10: addu a0,s8,TrExceptionRecord // compute exception record address
+ lw t1,KiPcr + PcBadVaddr(zero) // get bad virtual address
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw t0,ErExceptionInformation(a0) // save load/store indicator
+ sw t1,ErExceptionInformation + 4(a0) // save bad virtual address
+ sw t8,ErExceptionAddress(a0) // set exception address
+
+//
+// If the faulting instruction address is the same as the faulting virtual
+// address, then the fault is an instruction misalignment exception. Otherwise,
+// the exception is a data misalignment.
+//
+
+ li t3,STATUS_INSTRUCTION_MISALIGNMENT // set exception code
+ beq t1,t8,20f // if eq, instruction misalignment
+ li t3,STATUS_DATATYPE_MISALIGNMENT // set exception code
+
+//
+// If the faulting address is a kernel address and the previous mode was
+// user, then the address error is really an access violation since an
+// attempt was made to access kernel memory from user mode.
+//
+
+20: bgez t1,30f // if gez, KUSEG address
+ beq zero,a3,30f // if eq, previous mode was kernel
+ li t3,STATUS_ACCESS_VIOLATION // set exception code
+30: sw t3,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ li t0,2 // set number of exception parameters
+ sw t0,ErNumberParameters(a0) //
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiAddressErrorDispatch
+
+ SBTTL("Breakpoint Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiBreakpointDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a breakpoint exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to raise a breakpoint exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiBreakpointException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+ lw t0,0(t8) // get breakpoint instruction
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw t0,ErExceptionInformation(a0) // save breakpoint instruction
+ li t1,STATUS_BREAKPOINT // set exception code
+ sw t1,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+
+ ALTERNATE_ENTRY(KiKernelBreakpoint)
+
+ break KERNEL_BREAKPOINT // kernel breakpoint instruction
+
+ .end KiBreakpointDispatch
+
+ SBTTL("Bug Check Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiBugCheckDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when the following codes are read from the cause
+// register:
+//
+// Data coherency,
+// Instruction coherency,
+// Invalid exception, and
+// Panic exception.
+//
+// The function of this routine is to cause a bug check with the appropriate
+// code.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiDataCoherencyException)
+
+ li a0,DATA_COHERENCY_EXCEPTION // set bug check code
+ b 10f // finish in common code
+
+ ALTERNATE_ENTRY(KiInstructionCoherencyException)
+
+ li a0,INSTRUCTION_COHERENCY_EXCEPTION // set bug check code
+ b 10f // finish in common code
+
+ ALTERNATE_ENTRY(KiInvalidException)
+
+ li a0,TRAP_CAUSE_UNKNOWN // set bug check code
+ b 10f // finish in common code
+
+ ALTERNATE_ENTRY(KiPanicException)
+
+ li a0,PANIC_STACK_SWITCH // set bug check code
+10: lw a1,KiPcr + PcBadVaddr(zero) // get bad virtual address
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a2,t8 // set address of faulting instruction
+ .set at
+ .set reorder
+
+ move a3,t6 // set previous mode
+ jal KeBugCheckEx // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiBugCheckDispatch
+
+ SBTTL("Coprocessor Unusable Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiCoprocessorUnusableDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a coprocessor unusable exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to raise an illegal instruction exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiCoprocessorUnusableException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_ILLEGAL_INSTRUCTION // set exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiCoprocessorUnusableDispatch
+
+ SBTTL("Data Bus Error Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiDataBusErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a data bus error exception code is read from
+// the cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to capture the current machine state and
+// call the exception dispatcher which will provide special case processing
+// of this exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiDataBusErrorException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,DATA_BUS_ERROR | 0xdfff0000 // set special exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiDataBusErrorDispatch
+
+ SBTTL("Floating Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiFloatDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a floating exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to raise a floating exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiFloatingException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ cfc1 t0,fsr // get current floating status
+ li t1,~(0x3f << FSR_XI) // get exception mask value
+ and t1,t0,t1 // clear exception bits
+ ctc1 t1,fsr // set new floating status
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_FLOAT_STACK_CHECK // set floating escape code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiFloatDispatch
+
+ SBTTL("Illegal Instruction Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiIllegalInstructionDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an illegal instruction exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to raise an illegal instruction exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiIllegalInstructionException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_ILLEGAL_INSTRUCTION // set exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiIllegalInstructionDispatch
+
+ SBTTL("Instruction Bus Error Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiInstructionBusErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an instruction bus error exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to capture the current machine state and
+// call the exception dispatcher which will provide special case processing
+// of this exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiInstructionBusErrorException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,INSTRUCTION_BUS_ERROR | 0xdfff0000 // set special exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiInstructionBusErrorDispatch
+
+ SBTTL("Integer Overflow Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiIntegerOverflowDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an integer overflow exception code is read
+// from the cause register. When this routine is entered, interrupts are
+// disabled.
+//
+// The function of this routine is to raise an integer overflow exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiIntegerOverflowException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t0,STATUS_INTEGER_OVERFLOW // set exception code
+ sw t0,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiIntegerOverflowDispatch
+
+ SBTTL("Interrupt Exception")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ EXCEPTION_HANDLER(KiInterruptHandler)
+
+ NESTED_ENTRY(KiInterruptDistribution, TrapFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when an interrupt exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to determine the highest priority pending
+// interrupt, raise the IRQL to the level of the highest interrupt, and then
+// dispatch the interrupt to the proper service routine.
+//
+// Arguments:
+//
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The old PSR with EXL and mode set.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiInterruptException)
+
+ .set noreorder
+ .set noat
+ lbu t1,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ srl t2,t7,CAUSE_INTPEND + 4 // isolate high interrupt pending bits
+ and t2,t2,0xf //
+ bne zero,t2,10f // if ne, use high bits as index
+ sb t1,TrOldIrql(s8) // save old IRQL
+ srl t2,t7,CAUSE_INTPEND // isolate low interrupt pending bits
+ and t2,t2,0xf //
+ addu t2,t2,16 // bias low bits index by 16
+10: lbu t0,KiPcr + PcIrqlMask(t2) // get new IRQL from mask table
+ li t2,PSR_ENABLE_MASK // get PSR enable mask
+ nor t2,t2,zero // complement interrupt enable mask
+ lbu t3,KiPcr + PcIrqlTable(t0) // get new mask from IRQL table
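+
+//
+// In effect, the new IRQL is obtained as PcIrqlMask[pending interrupt index],
+// where the high pending interrupt bits select the first sixteen table entries
+// and the low pending bits the next sixteen, and the new PSR interrupt enable
+// mask is obtained as PcIrqlTable[new IRQL].
+//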
+
+//
+// It is possible that the interrupt was asserted and then deasserted before
+// the interrupt dispatch code executed. Therefore, there may be an interrupt
+// pending at the current or a lower level. This interrupt is not yet valid
+// and cannot be processed until the IRQL is lowered.
+//
+
+ sltu t4,t1,t0 // check if old IRQL less than new
+ beq zero,t4,40f // if eq, no valid interrupt pending
+ subu t4,t0,DISPATCH_LEVEL + 1 // check if above dispatch level
+
+//
+// If the interrupt level is above dispatch level, then execute the service
+// routine on the interrupt stack. Otherwise, execute the service on the
+// current stack.
+//
+
+ bgezal t4,60f // if gez, above dispatch level
+ sll t3,t3,PSR_INTMASK // shift table entry into position
+
+//
+// N.B. The following code is duplicated on the control path where the stack
+// is switched to the interrupt stack. This is done to avoid branching
+// logic.
+//
+
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+ or t9,t9,t3 // merge new interrupt enable mask
+ or t9,t9,1 << PSR_IE // set interrupt enable
+ mtc0 t9,psr // enable interrupts
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ .set at
+ .set reorder
+
+ sll t0,t0,2 // compute offset in vector table
+ lw a0,KiPcr + PcInterruptRoutine(t0) // get service routine address
+
+#if DBG
+
+ sw a0,TrExceptionRecord(s8) // save service routine address
+
+#endif
+
+//
+// Increment interrupt count and call interrupt service routine.
+//
+// N.B. It is known that the interrupt is either an APC interrupt or
+// a dispatch interrupt, and therefore, the volatile floating
+// state is saved and restored to avoid saves and restores in
+// both interrupt dispatchers.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+ lw t2,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t3,PbInterruptCount(t2) // increment the count of interrupts
+ addu t3,t3,1 //
+ sw t3,PbInterruptCount(t2) // store result
+ jal a0 // call interrupt service routine
+
+ RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+//
+// Common exit point for special dispatch and APC interrupt bypass.
+//
+// Restore state and exit interrupt.
+//
+
+ ALTERNATE_ENTRY(KiInterruptExit)
+
+40: lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazard
+ ctc1 t1,fsr // restore floating status
+ lw t0,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+ lw t2,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu t3,TrOldIrql(s8) // get old IRQL
+ and t4,t0,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t4,50f // if eq, previous mode was kernel
+ sb t3,KiPcr + PcCurrentIrql(zero) // restore old IRQL
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+
+ lbu t3,ThApcState + AsUserApcPending(t2) // get user APC pending
+ sb zero,ThAlerted(t2) // clear kernel mode alerted
+ mfc0 t4,cause // get exception cause register
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t4,t4,t3 // merge possible APC interrupt request
+ mtc0 t4,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB miss
+// is not possible, then restore the volatile register state.
+//
+
+50: sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiTrapExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+//
+// Switch to interrupt stack.
+//
+
+60: j KiSwitchStacks //
+
+//
+// Increment number of bypassed dispatch interrupts and check if an APC
+// interrupt is pending and the old IRQL is zero.
+//
+
+ ALTERNATE_ENTRY(KiContinueInterrupt)
+
+ .set noreorder
+ .set noat
+ lw t7,KiPcr + PcPrcb(zero) // get current PRCB
+ li t1,1 << PSR_CU1 // get coprocessor 1 enable bit
+ mfc0 t9,psr // get current PSR
+ mtc0 t1,psr // disable interrupts - 3 cycle hazard
+ lw t1,PbDpcBypassCount(t7) // increment the DPC bypass count
+ li t2,PSR_ENABLE_MASK // get PSR enable mask
+ lbu t8,TrOldIrql(s8) // get old IRQL
+ mfc0 t6,cause // get exception cause register
+ addu t1,t1,1 //
+ sw t1,PbDpcBypassCount(t7) // store result
+ and t5,t6,APC_INTERRUPT // check for an APC interrupt
+ beq zero,t5,70f // if eq, no APC interrupt
+ li t0,APC_LEVEL // set new IRQL to APC_LEVEL
+ bne zero,t8,70f // if ne, APC interrupts blocked
+ move a0,zero // set previous mode to kernel
+
+//
+// An APC interrupt is pending.
+//
+
+ lbu t3,KiPcr + PcIrqlTable(t0) // get new mask from IRQL table
+ nor t2,t2,zero // complement interrupt enable mask
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+ sll t3,t3,PSR_INTMASK // shift table entry into position
+ or t9,t9,t3 // merge new interrupt enable mask
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ and t6,t6,DISPATCH_INTERRUPT // clear APC interrupt pending
+ mtc0 t6,cause //
+ mtc0 t9,psr // enable interrupts
+ .set at
+ .set reorder
+
+ lw t1,PbApcBypassCount(t7) // increment the APC bypass count
+ addu t1,t1,1 //
+ sw t1,PbApcBypassCount(t7) //
+ move a1,zero // set exception frame address
+ move a2,zero // set trap frame address
+ jal KiDeliverApc // deliver kernel mode APC
+
+70: RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+ j KiInterruptExit //
+
+ .end KiInterruptDistribution
+
+ SBTTL("Interrupt Stack Switch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument register area
+ .space 2 * 4 // fill
+SwSp: .space 4 // saved stack pointer
+SwRa: .space 4 // saved return address
+SwFrameLength: // length of stack frame
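+
+//
+// N.B. The switch frame provides only the four word argument save area
+// required by the calling standard, two words of fill, and the saved sp
+// and ra; all other state remains in the trap frame.
+//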
+
+ EXCEPTION_HANDLER(KiInterruptHandler)
+
+ NESTED_ENTRY(KiInterruptStackSwitch, SwFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sw sp,SwSp(sp) // save stack pointer
+ sw ra,SwRa(sp) // save return address
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//
+// The interrupt level is above dispatch level. Execute the interrupt
+// service routine on the interrupt stack.
+//
+// N.B. The following code is duplicated on the control path where the stack
+// is not switched to the interrupt stack. This is done to avoid branching
+// logic.
+//
+
+
+ ALTERNATE_ENTRY(KiSwitchStacks)
+
+ .set noreorder
+ .set noat
+ lw t4,KiPcr + PcOnInterruptStack(zero) // get stack indicator
+ sw sp,KiPcr + PcOnInterruptStack(zero) // set new stack indicator
+ sw t4,TrOnInterruptStack(s8) // save previous stack indicator
+ move t5,sp // save current stack pointer
+ bne zero,t4,10f // if ne, already on interrupt stack
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+
+//
+// Switch to the interrupt stack.
+//
+
+ lw t6,KiPcr + PcInitialStack(zero) // get old initial stack address
+ lw t7,KiPcr + PcStackLimit(zero) // and stack limit
+ lw sp,KiPcr + PcInterruptStack(zero) // set interrupt stack address
+ sw t6,KiPcr + PcSavedInitialStack(zero) // save old stack address
+ sw t7,KiPcr + PcSavedStackLimit(zero) // and stack limit
+ sw sp,KiPcr + PcInitialStack(zero) // set new initial stack address
+ subu t4,sp,KERNEL_STACK_SIZE // and stack limit
+ sw t4,KiPcr + PcStackLimit(zero) //
+10: subu sp,sp,SwFrameLength // allocate stack frame
+ sw t5,SwSp(sp) // save previous stack pointer
+ sw ra,SwRa(sp) // save return address
+ or t9,t9,t3 // merge new interrupt enable mask
+ or t9,t9,1 << PSR_IE // set interrupt enable
+ mtc0 t9,psr // enable interrupts
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ .set at
+ .set reorder
+
+ sll t0,t0,2 // compute offset in vector table
+ lw a0,KiPcr + PcInterruptRoutine(t0) // get service routine address
+
+#if DBG
+
+ sw a0,TrExceptionRecord(s8) // save service routine address
+
+#endif
+
+//
+// Increment interrupt count and call interrupt service routine.
+//
+
+ lw t2,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t3,PbInterruptCount(t2) // increment the count of interrupts
+ addu t3,t3,1 //
+ sw t3,PbInterruptCount(t2) // store result
+ jal a0 // call interrupt service routine
+
+//
+// Restore state, and exit interrupt.
+//
+
+ lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazard
+ ctc1 t1,fsr // restore floating status
+ lbu t8,TrOldIrql(s8) // get old IRQL
+ lw t9,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB miss
+// is not possible later, then restore the volatile register state.
+//
+
+ lw t2,TrOnInterruptStack(s8) // get saved stack indicator
+ sb t8,KiPcr + PcCurrentIrql(zero) // restore old IRQL
+ sw t9,KiPcr + PcSavedT7(zero) // save processor status
+ bne zero,t2,KiTrapExit // if ne, stay on interrupt stack
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ lw t3,KiPcr + PcSavedInitialStack(zero) // get old initial stack
+ lw t4,KiPcr + PcSavedStackLimit(zero) // get old stack limit
+ sltu t8,t8,DISPATCH_LEVEL // check if IRQL less than dispatch
+ sw t3,KiPcr + PcInitialStack(zero) // restore old initial stack
+ sw t4,KiPcr + PcStackLimit(zero) // restore old stack limit
+ mfc0 t6,cause // get exception cause register
+ beq zero,t8,KiTrapExit // if eq, old IRQL dispatch or above
+ sw t2,KiPcr + PcOnInterruptStack(zero) // restore stack indicator
+
+//
+// Check if a DPC interrupt is pending since the old IRQL is less than
+// DISPATCH_LEVEL and it is more efficient to directly dispatch than
+// let the interrupt logic request the interrupt.
+//
+
+ and t8,t6,DISPATCH_INTERRUPT // check for dispatch interrupt
+ beql zero,t8,40f // if eq, no dispatch interrupt
+ lw t7,KiPcr + PcCurrentThread(zero) // get current thread address
+
+//
+// A dispatch interrupt is pending.
+//
+
+ move sp,s8 // set correct stack pointer
+ li t0,DISPATCH_LEVEL // set new IRQL to DISPATCH_LEVEL
+ lbu t3,KiPcr + PcIrqlTable(t0) // get new mask from IRQL table
+ li t2,PSR_ENABLE_MASK // get PSR enable mask
+ nor t2,t2,zero // complement interrupt enable mask
+ sll t3,t3,PSR_INTMASK // shift table entry into position
+ and t9,t9,t2 // clear interrupt mask, EXL, and KSU
+ or t9,t9,t3 // merge new interrupt enable mask
+ or t9,t9,1 << PSR_IE // set interrupt enable
+ sb t0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+ mtc0 t9,psr // enable interrupts
+ .set at
+ .set reorder
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+//
+// N.B. The following code returns to the main interrupt dispatch so
+// get and set context APCs can virtually unwind the stack properly.
+//
+
+ la ra,KiContinueInterrupt // set return address
+ j KiDispatchInterrupt // process dispatch interrupt
+
+//
+// If the previous mode is user and a user mode APC is pending, then
+// request an APC interrupt.
+//
+
+ .set noreorder
+ .set noat
+40: and t4,t9,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t4,50f // if eq, previous mode was kernel
+ ld AT,TrXIntAt(s8) // restore integer register AT
+ lbu t3,ThApcState + AsUserApcPending(t7) // get user APC pending
+ sb zero,ThAlerted(t7) // clear kernel mode alerted
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t6,t6,t3 // merge possible APC interrupt request
+ mtc0 t6,cause // set exception cause register
+ .set at
+ .set reorder
+
+//
+// Common trap exit sequence for all traps.
+//
+
+ ALTERNATE_ENTRY(KiTrapExit)
+
+ .set noreorder
+ .set noat
+ ld AT,TrXIntAt(s8) // restore integer register AT
+50: ld v0,TrXIntV0(s8) // restore integer register v0
+ ld v1,TrXIntV1(s8) // restore integer register v1
+ ld a0,TrXIntA0(s8) // restore integer registers a0 - a3
+ ld a1,TrXIntA1(s8) //
+ ld a2,TrXIntA2(s8) //
+ ld t0,TrXIntLo(s8) // restore lo and hi integer registers
+ ld t1,TrXIntHi(s8) //
+ ld a3,TrXIntA3(s8) //
+ mtlo t0 //
+ mthi t1 //
+ ld t0,TrXIntT0(s8) // restore integer registers t0 - t7
+ ld t1,TrXIntT1(s8) //
+ ld t2,TrXIntT2(s8) //
+ ld t3,TrXIntT3(s8) //
+ ld t4,TrXIntT4(s8) //
+ ld t5,TrXIntT5(s8) //
+ ld t6,TrXIntT6(s8) //
+ ld t7,TrXIntT7(s8) //
+ ld s0,TrXIntS0(s8) // restore integer registers s0 - s7
+ ld s1,TrXIntS1(s8) //
+ ld s2,TrXIntS2(s8) //
+ ld s3,TrXIntS3(s8) //
+ ld s4,TrXIntS4(s8) //
+ ld s5,TrXIntS5(s8) //
+ ld s6,TrXIntS6(s8) //
+ ld s7,TrXIntS7(s8) //
+ ld t8,TrXIntT8(s8) // restore integer registers t8 - t9
+ ld t9,TrXIntT9(s8) //
+
+//
+// Common exit sequence for system services.
+//
+
+ ALTERNATE_ENTRY(KiServiceExit)
+
+ ld gp,TrXIntGp(s8) // restore integer register gp
+ ld sp,TrXIntSp(s8) // restore stack pointer
+ ld ra,TrXIntRa(s8) // restore return address
+ ld s8,TrXIntS8(s8) // restore integer register s8
+
+//
+// WARNING: From this point on no TB Misses can be tolerated.
+//
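+// The remaining state is carried through k0 and k1, which the TB miss vector
+// also uses, so a TB miss in this window would destroy the saved processor
+// status and continuation PC.
+//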
+
+ li k0,1 << PSR_EXL // set EXL bit in temporary PSR
+ mtc0 k0,psr // set new PSR value - 3 cycle hazard
+ lw k0,KiPcr + PcSavedT7(zero) // get previous processor status
+ lw k1,KiPcr + PcSavedEpc(zero) // get continuation address
+ nop //
+ mtc0 k0,psr // set new PSR value - 3 cycle hazard
+ mtc0 k1,epc // set continuation PC
+ nop //
+ nop //
+ eret //
+ nop // errata
+ nop //
+ nop //
+ eret //
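+
+// N.B. The first eret normally completes the return to the interrupted code;
+// the trailing nops and second eret exist only as the errata workaround
+// noted above and are not expected to execute.
+//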
+ .set at
+ .set reorder
+
+ .end KiInterruptStackSwitch
+
+ SBTTL("Interrupt Exception Handler")
+//++
+//
+// EXCEPTION_DISPOSITION
+// KiInterruptHandler (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN ULONG EstablisherFrame,
+// IN OUT PCONTEXT ContextRecord,
+// IN OUT PDISPATCHER_CONTEXT DispatcherContext
+// )
+//
+// Routine Description:
+//
+// Control reaches here when an exception is not handled by an interrupt
+// service routine or an unwind is initiated in an interrupt service
+// routine that would result in an unwind through the interrupt dispatcher.
+// This is considered to be a fatal system error and bug check is called.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// EstablisherFrame (a1) - Supplies the frame pointer of the establisher
+// of this exception handler.
+//
+// N.B. This is not actually the frame pointer of the establisher of
+// this handler. It is actually the stack pointer of the caller
+// of the system service. Therefore, the establisher frame pointer
+// is not used and the address of the trap frame is determined by
+// examining the saved s8 register in the context record.
+//
+// ContextRecord (a2) - Supplies a pointer to a context record.
+//
+// DispatcherContext (a3) - Supplies a pointer to the dispatcher context
+// record.
+//
+// Return Value:
+//
+// There is no return from this routine.
+//
+//--
+
+ NESTED_ENTRY(KiInterruptHandler, HandlerFrameLength, zero)
+
+ subu sp,sp,HandlerFrameLength // allocate stack frame
+ sw ra,HdRa(sp) // save return address
+
+ PROLOGUE_END
+
+ lw t0,ErExceptionFlags(a0) // get exception flags
+ li a0,INTERRUPT_UNWIND_ATTEMPTED // assume unwind in progress
+ and t1,t0,EXCEPTION_UNWIND // check if unwind in progress
+ bne zero,t1,10f // if ne, unwind in progress
+ li a0,INTERRUPT_EXCEPTION_NOT_HANDLED // set bug check code
+10: jal KeBugCheck // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiInterruptHandler
+
+ SBTTL("Memory Management Exceptions")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiVirtualMemoryDispatch, TrapFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a modify, read miss, or write miss exception
+// code is read from the cause register. When this routine is entered,
+// interrupts are disabled.
+//
+// The function of this routine is to call memory management in an attempt
+// to resolve the problem. If memory management can resolve the problem,
+// then execution is continued. Otherwise an exception record is constructed
+// and an exception is raised.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiReadMissException)
+
+ li a0,0 // set read indicator
+ lw a1,KiPcr + PcBadVaddr(zero) // get the bad virtual address
+
+//
+// N.B. The following code is a workaround for a chip bug where the bad
+// virtual address is not correct on an instruction stream TB miss.
+//
+// If the exception PC is equal to the bad virtual address, then the
+// bad virtual address is correct.
+//
+// If the instruction at the exception PC is not in the TB or the
+// TB entry is invalid, then the bad virtual address is incorrect
+// and the instruction is repeated.
+//
+// If the instruction at the exception PC is valid and is a load or
+// a store instruction, then the effective address is computed and
+// compared with the bad virtual address. If the comparison is equal,
+// then the bad virtual address is correct. Otherwise, the address is
+// incorrect and the instruction is repeated.
+//
+// If the instruction at the exception PC is valid, is not a load or
+// store instruction, and is not the last instruction in the page,
+// the bad virtual address is correct.
+//
+// If the instruction at the exception PC is valid, is not a load or
+// a store instruction, and is the last instruction in the page, then
+//
+// If the exception PC + 4 is equal to the bad virtual address,
+// then the bad virtual address is correct.
+//
+// If the instruction at the exception PC + 4 is not in the TB
+// or the TB entry is invalid, then the bad virtual address is
+// incorrect and the instruction is repeated.
+//
+// If the instruction at the exception PC + 4 is valid and is a
+// load or a store instruction, then the effective address is
+// computed and compared with the bad virtual address. If the
+// comparison is equal, then the bad virtual address is correct.
+// Otherwise, the address is incorrect and the instruction is
+// repeated.
+//
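+// In summary, the bad virtual address is accepted only when it can be
+// confirmed against the exception PC:
+//
+// if (EPC == BadVaddr) accept
+// else if (instruction at EPC is not valid in the TB) repeat the instruction
+// else if (instruction at EPC is a load/store) accept only if its effective
+// address equals BadVaddr, otherwise repeat
+// else if (EPC is not the last instruction in its page) accept
+// else apply the same tests to the instruction at EPC + 4
+//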
+
+#if !defined(NT_UP)
+
+ lw t7,TrFir(s8) // get exception PC
+
+ .set noreorder
+ .set noat
+ srl t0,t7,30 // isolate high bits of exception PC
+ beq a1,t7,30f // if eq, addresses match
+ xor a2,t0,0x2 // check for kseg0 or kseg1 address
+
+//
+// If the instruction at the exception PC is not in the TB or the TB entry
+// invalid, then the bad virtual address is not valid and the instruction is
+// repeated.
+//
+
+ beq zero,a2,4f // if eq, kseg0 or kseg1 address
+ srl t1,t7,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ mfc0 v0,entryhi // get current VPN2 and PID
+ sll t1,t1,ENTRYHI_VPN2 //
+ and v1,v0,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t1,t1,v1 // merge PID with VPN2 of address
+ mtc0 t1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t2,index // read result of probe
+ nop // 1 cycle hazard
+ bltzl t2,20f // if ltz, entry not in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ sll t3,t7,31 - 12 // shift page bit into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mfc0 t5,entrylo1 // read low part of TB entry
+ mfc0 t4,entrylo0 //
+ bltzl t3,3f // if ltz, check second PTE
+ and t5,t5,1 << ENTRYLO_V // check if second PTE valid
+ and t5,t4,1 << ENTRYLO_V // check if first PTE valid
+3: mtc0 zero,pagemask // restore page mask register
+ beq zero,t5,20f // if eq, PTE not valid but in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ nop // 2 cycle hazard
+ nop //
+
+//
+// If the instruction at the exception PC is a load or a store instruction,
+// then compute its effective virtual address. Otherwise, check to determine
+// if the instruction is at the end of the page.
+//
+
+4: lw t0,0(t7) // get instruction value
+ ld t1,KiLoadInstructionSet // get load/store instruction set
+ li t2,1 // compute opcode set member
+ srl t3,t0,32 - 6 // right justify opcode value
+ dsll t2,t2,t3 // shift opcode member into position
+ and t2,t2,t1 // check if load/store instruction
+ bne zero,t2,10f // if ne, load/store instruction
+ srl t1,t0,21 - 3 // extract base register number
+
+//
+// If the instruction at the exception PC + 4 is not the first instruction in
+// the next page, then the bad virtual address is correct.
+//
+
+5: addu t0,t7,4 // compute next instruction address
+ and t1,t0,0xfff // isolate offset in page
+ bne zero,t1,30f // if ne, not in next page
+ srl t1,t0,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+
+//
+// If the exception PC + 4 is equal to the bad virtual address, then the
+// bad virtual address is correct.
+//
+
+ beq a1,t0,30f // if eq, address match
+ sll t1,t1,ENTRYHI_VPN2 //
+
+//
+// If the instruction at the exception PC + 4 is not in the TB or the TB entry
+// invalid, then the bad virtual address is not valid and the instruction is
+// repeated. Otherwise, the bad virtual address is correct.
+//
+
+ beq zero,a2,8f // if eq, kseg0 or kseg1 address
+ or t1,t1,v1 // merge PID with VPN2 of address
+ mtc0 t1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t2,index // read result of probe
+ nop // 1 cycle hazard
+ bltzl t2,20f // if ltz, entry not in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ sll t3,t0,31 - 12 // shift page bit into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mfc0 t5,entrylo1 // read low part of TB entry
+ mfc0 t4,entrylo0 //
+ bltzl t3,7f // if ltz, check second PTE
+ and t5,t5,1 << ENTRYLO_V // check if second PTE valid
+ and t5,t4,1 << ENTRYLO_V // check if first PTE valid
+7: mtc0 zero,pagemask // restore page mask register
+ beq zero,t5,20f // if eq, PTE is invalid
+ mtc0 v0,entryhi // restore VPN2 and PID
+ nop // 2 cycle hazard
+ nop //
+
+//
+// If the first instruction in the next page is a load/store, then compute
+// its effective virtual address. Otherwise, the bad virtual address is not
+// valid and the instruction at the exception PC should be repeated.
+//
+
+8: lw t0,0(t0) // get instruction value
+ ld t1,KiLoadInstructionSet // get load/store instruction set
+ li t2,1 // compute opcode set member
+ srl t3,t0,32 - 6 // right justify opcode value
+ dsll t2,t2,t3 // shift opcode member into position
+ and t2,t2,t1 // check if load/store instruction
+ beq zero,t2,20f // if eq, not load/store instruction
+ srl t1,t0,21 - 3 // extract base register number
+
+//
+// The faulting instruction was a load/store instruction.
+//
+// Compute the effective virtual address and check to determine if it is equal
+// to the bad virtual address.
+//
+
+10: and t1,t1,0x1f << 3 // isolate base register number
+ la t2,12f // get base address of load table
+ addu t2,t2,t1 // compute address of register load
+ j t2 // dispatch to register load routine
+ sll t1,t0,16 // shift displacement into position
+
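+//
+// The table below contains one entry for each of the 32 base registers at
+// eight byte (two instruction) intervals. Each entry loads the base register
+// value from the trap frame or register file and branches to the common code
+// at 14f; the final entry (ra) falls through. The k0 and k1 entries supply
+// zero since those registers are not saved in the trap frame.
+//
+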
+12: b 14f // zero
+ move t2,zero //
+
+ b 14f // at
+ lw t2,TrXIntAt(s8) //
+
+ b 14f // v0
+ lw t2,TrXIntV0(s8) //
+
+ b 14f // v1
+ lw t2,TrXIntV1(s8) //
+
+ b 14f // a0
+ lw t2,TrXIntA0(s8) //
+
+ b 14f // a1
+ lw t2,TrXIntA1(s8) //
+
+ b 14f // a2
+ lw t2,TrXIntA2(s8) //
+
+ b 14f // a3
+ lw t2,TrXIntA3(s8) //
+
+ b 14f // t0
+ lw t2,TrXIntT0(s8) //
+
+ b 14f // t1
+ lw t2,TrXIntT1(s8) //
+
+ b 14f // t2
+ lw t2,TrXIntT2(s8) //
+
+ b 14f // t3
+ lw t2,TrXIntT3(s8) //
+
+ b 14f // t4
+ lw t2,TrXIntT4(s8) //
+
+ b 14f // t5
+ lw t2,TrXIntT5(s8) //
+
+ b 14f // t6
+ lw t2,TrXIntT6(s8) //
+
+ b 14f // t7
+ lw t2,TrXIntT7(s8) //
+
+ b 14f // s0
+ move t2,s0 //
+
+ b 14f // s1
+ move t2,s1 //
+
+ b 14f // s2
+ move t2,s2 //
+
+ b 14f // s3
+ move t2,s3 //
+
+ b 14f // s4
+ move t2,s4 //
+
+ b 14f // s5
+ move t2,s5 //
+
+ b 14f // s6
+ move t2,s6 //
+
+ b 14f // s7
+ move t2,s7 //
+
+ b 14f // t8
+ lw t2,TrXIntT8(s8) //
+
+ b 14f // t9
+ lw t2,TrXIntT9(s8) //
+
+ b 14f // k0
+ move t2,zero //
+
+ b 14f // k1
+ move t2,zero //
+
+ b 14f // gp
+ lw t2,TrXIntGp(s8) //
+
+ b 14f // sp
+ lw t2,TrXIntSp(s8) //
+
+ b 14f // s8
+ lw t2,TrXIntS8(s8) //
+
+ lw t2,TrXIntRa(s8) // ra
+
+//
+// If the effective virtual address matches the bad virtual address, then
+// the bad virtual address is correct. Otherwise, repeat the instruction.
+//
+
+14: sra t1,t1,16 // sign extend displacement value
+ addu t3,t2,t1 // compute effective load address
+ beq a1,t3,30f // if eq, bad virtual address is okay
+ nop // fill
+
+#if DBG
+
+ lw ra,KiMismatchCount // increment address mismatch count
+ nop // TB fills
+ addu ra,ra,1 //
+ sw ra,KiMismatchCount // store result
+
+#endif
+
+
+//
+// N.B. PSR and EPC may have changed because of a TB miss and need to be
+// reloaded.
+//
+
+20: nop // 2 cycle hazard
+ nop //
+ lw t0,TrPsr(s8) // get previous processor state
+ lw t1,TrFir(s8) // get continuation address
+
+#if DBG
+
+ lw ra,KiBadVaddrCount // increment number of second level
+ nop // TB fills
+ addu ra,ra,1 //
+ sw ra,KiBadVaddrCount // store result
+
+#endif
+
+ sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiTrapExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+#else
+
+ b 30f // join common code
+
+#endif
+
+ ALTERNATE_ENTRY(KiReadMissException9.x)
+
+ li a0,0 // set read indicator
+ lw a1,KiPcr + PcBadVaddr(zero) // get the bad virtual address
+ b 30f // join common code
+
+ ALTERNATE_ENTRY(KiModifyException)
+
+ ALTERNATE_ENTRY(KiWriteMissException)
+
+ li a0,1 // set write indicator
+ lw a1,KiPcr + PcBadVaddr(zero) // get bad virtual address
+
+//
+// Common code for modify, read miss, and write miss exceptions.
+//
+
+30: sw t8,TrExceptionRecord + ErExceptionAddress(s8) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a2,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw a0,TrExceptionRecord + ErExceptionInformation(s8) // save load/store indicator
+ sw a1,TrExceptionRecord + ErExceptionInformation + 4(s8) // save bad virtual address
+ sw a2,TrExceptionRecord + ErExceptionCode(s8) // save previous mode
+ jal MmAccessFault // call memory management fault routine
+
+//
+// Check if working set watch is enabled.
+//
+
+ lbu t0,PsWatchEnabled // get working set watch enable flag
+ lw t1,TrExceptionRecord + ErExceptionCode(s8) // get previous mode
+ move a0,v0 // set status of fault resolution
+ bltz v0,40f // if ltz, unsuccessful resolution
+ beq zero,t0,35f // if eq, watch not enabled
+ lw a1,TrExceptionRecord + ErExceptionAddress(s8) // get exception address
+ lw a2,TrExceptionRecord + ErExceptionInformation + 4(s8) // set bad address
+ jal PsWatchWorkingSet // record working set information
+
+//
+// Check if the debugger has any owed breakpoints.
+//
+
+35: lbu t0,KdpOweBreakpoint // get owed breakpoint flag
+ beq zero,t0,37f // if eq, no owed breakpoints
+ jal KdSetOwedBreakpoints // insert breakpoints if necessary
+37: j KiAlternateExit //
+
+//
+// The exception was not resolved. Fill in the remainder of the exception
+// record and attempt to dispatch the exception.
+//
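+// Access violation, guard page violation, and stack overflow status values
+// are raised directly with two parameters; any other failure status is raised
+// as STATUS_IN_PAGE_ERROR with the original status saved as a third parameter,
+// and the special in page error code indicates a page fault at an IRQL above
+// APC_LEVEL and results in a bug check.
+//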
+
+40: addu a0,s8,TrExceptionRecord // compute exception record address
+ lw a3,ErExceptionCode(a0) // restore previous mode
+ li t1,STATUS_IN_PAGE_ERROR | 0x10000000 // get special code
+ beq v0,t1,60f // if eq, special bug check code
+ li t0,2 // set number of parameters
+ li t1,STATUS_ACCESS_VIOLATION // get access violation code
+ beq v0,t1,50f // if eq, access violation
+ li t1,STATUS_GUARD_PAGE_VIOLATION // get guard page violation code
+ beq v0,t1,50f // if eq, guard page violation
+ li t1,STATUS_STACK_OVERFLOW // get stack overflow code
+ beq v0,t1,50f // if eq, stack overflow
+ li t0,3 // set number of parameters
+ sw v0,ErExceptionInformation + 8(a0) // save real status value
+ li v0,STATUS_IN_PAGE_ERROR // set in page error status
+50: sw v0,ErExceptionCode(a0) // save exception code
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw t0,ErNumberParameters(a0) //
+ jal KiExceptionDispatch // join common code
+
+//
+// Generate a bug check - A page fault has occurred at an IRQL that is greater
+// than APC_LEVEL.
+//
+
+60: li a0,IRQL_NOT_LESS_OR_EQUAL // set bug check code
+ lw a1,TrExceptionRecord + ErExceptionInformation + 4(s8) // set bad virtual address
+ lbu a2,KiPcr + PcCurrentIrql(zero) // set current IRQL
+ lw a3,TrExceptionRecord + ErExceptionInformation(s8) // set load/store indicator
+ lw t1,TrFir(s8) // set exception PC
+ sw t1,4 * 4(sp) //
+ jal KeBugCheckEx // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiVirtualMemoryDispatch
+
+ SBTTL("System Service Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ EXCEPTION_HANDLER(KiSystemServiceHandler)
+
+ NESTED_ENTRY(KiSystemServiceDispatch, TrapFrameLength, zero);
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp - TrapFrameLength(sp) // save stack pointer
+ subu sp,sp,TrapFrameLength // allocate trap frame
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a system call exception code is read from
+// the cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to call the specified system service.
+//
+// N.B. The exception dispatcher jumps to the correct entry point depending
+// on whether the system service is a fast path event pair servive or
+// a normal service. The new PSR has been loaded before the respective
+// routines are entered.
+//
+// Arguments:
+//
+// v0 - Supplies the system service code.
+// t0 - Supplies the address of the current thread object.
+// t9 - Supplies the previous PSR with the EXL and mode set.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiSystemServiceException)
+
+ START_REGION(KiSystemServiceDispatchStart)
+
+ ALTERNATE_ENTRY(KiSystemServiceNormal)
+
+ srl t9,t9,PSR_PMODE // isolate previous processor mode
+ lbu t3,ThPreviousMode(t0) // get old previous mode from thread object
+ lw t4,ThTrapFrame(t0) // get current trap frame address
+ and t9,t9,0x1 // isolate previous mode
+ sb t9,ThPreviousMode(t0) // set new previous mode in thread object
+ sb t3,TrPreviousMode(s8) // save old previous mode of thread object
+ sw t4,TrTrapFrame(s8) // save current trap frame address
+
+#if DBG
+
+ lbu t7,ThKernelApcDisable(t0) // get current APC disable count
+ lbu t8,ThApcStateIndex(t0) // get current APC state index
+ sb t7,TrExceptionRecord(s8) // save APC disable count
+ sb t8,TrExceptionRecord + 1(s8) // save APC state index
+
+#endif
+
+//
+// If the specified system service number is not within range, then
+// attempt to convert the thread to a GUI thread and retry the service
+// dispatch.
+//
+// N.B. The argument registers a0-a3, the system service number in v0,
+// and the thread address in t0 must be preserved while attempting
+// to convert the thread to a GUI thread.
+//
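+// In rough C terms, the lookup performed below is (an illustrative sketch
+// only; "Offset", "Descriptor", "Index", and "Routine" are hypothetical
+// locals, and the field names mirror the SdLimit/SdBase offsets used by the
+// code):
+//
+//     Offset = (ServiceNumber >> SERVICE_TABLE_SHIFT) & SERVICE_TABLE_MASK;
+//     Descriptor = Thread->ServiceTable + Offset;  // byte offset in the code
+//
+//     Index = ServiceNumber & SERVICE_NUMBER_MASK;
+//     if (Index >= Descriptor->Limit) {
+//         goto ConvertToGuiThreadAndRetry;         // see label 50 below
+//     }
+//
+//     Routine = Descriptor->Base[Index];
+//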
+
+ ALTERNATE_ENTRY(KiSystemServiceRepeat)
+
+ sw s8,ThTrapFrame(t0) // save address of trap frame
+ lw t6,ThServiceTable(t0) // get service descriptor table address
+ srl t1,v0,SERVICE_TABLE_SHIFT // isolate service descriptor offset
+ and t1,t1,SERVICE_TABLE_MASK //
+ add t6,t6,t1 // compute service descriptor address
+ lw t4,SdLimit(t6) // get service number limit
+ lw t5,SdBase(t6) // get service table address
+ and t7,v0,SERVICE_NUMBER_MASK // isolate service table offset
+ sll v1,t7,2 // compute system service offset value
+ sltu t4,t7,t4 // check if invalid service number
+ addu v1,v1,t5 // compute address of service entry
+ beq zero,t4,50f // if eq, invalid service number
+ lw v1,0(v1) // get address of service routine
+
+#if DBG
+
+ lw t6,SdCount(t6) // get service count table address
+ sll t5,t7,2 // compute system service offset value
+ beq zero,t6,12f // if eq, table not defined
+ addu t6,t6,t5 // compute address of service entry
+ lw t7,0(t6) // increment system service count
+ addu t7,t7,1 //
+ sw t7,0(t6) // store result
+12: //
+
+#endif
+
+//
+// If the system service is a GUI service and the GDI user batch queue is
+// not empty, then call the appropriate service to flush the user batch.
+//
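+// In rough C terms (an illustrative sketch; "IsGuiService" stands for the
+// SERVICE_TABLE_TEST comparison made below):
+//
+//     if (IsGuiService && (Teb->GdiBatchCount != 0)) {
+//         (*KeGdiFlushUserBatch)();
+//     }
+//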
+
+ xor t2,t1,SERVICE_TABLE_TEST // check if GUI system service
+ bne zero,t2,15f // if ne, not GUI system service
+ lw t3,KiPcr + PcTeb(zero) // get current thread TEB address
+ sw v1,TrXIntV1(s8) // save service routine address
+ sw a0,TrXIntA0(s8) // save possible arguments 1 and 2
+ lw t4,TeGdiBatchCount(t3) // get number of batched GDI calls
+ sw a1,TrXIntA1(s8) //
+ sw a2,TrXIntA2(s8) // save possible third argument
+ lw t5,KeGdiFlushUserBatch // get address of flush routine
+ beq zero,t4,15f // if eq, no batched calls
+ sw a3,TrXIntA3(s8) // save possible fourth argument
+ jal t5 // flush GDI user batch
+ lw v1,TrXIntV1(s8) // restore service routine address
+ lw a0,TrXIntA0(s8) // restore possible arguments
+ lw a1,TrXIntA1(s8) //
+ lw a2,TrXIntA2(s8) //
+ lw a3,TrXIntA3(s8) //
+15: addu a0,a0,zero // make sure of sign extension
+ addu a1,a1,zero // N.B. needed for 64-bit addressing
+ and t1,v1,1 // check if any in-memory arguments
+ beq zero,t1,30f // if eq, no in-memory arguments
+
+//
+// The following code captures arguments that were passed in memory on the
+// caller's stack. This is necessary to ensure that the caller does not modify
+// the arguments after they have been probed and is also necessary in kernel
+// mode because a trap frame has been allocated on the stack.
+//
+// If the previous mode is user, then the user stack is probed for readability.
+//
+// N.B. The maximum possible number of parameters is copied to avoid loop
+// and computational overhead.
+//
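+// In rough C terms the copy below is (an illustrative sketch; the constant 12
+// reflects the six doubleword loads, i.e. twelve 32-bit argument slots, and
+// "KernelArgs"/"PreviousSp" are hypothetical locals):
+//
+//     PreviousSp = TrapFrame->XIntSp;
+//     if ((PreviousMode == UserMode) && (PreviousSp >= MM_USER_PROBE_ADDRESS)) {
+//         PreviousSp = MM_USER_PROBE_ADDRESS;  // copy will raise an access violation
+//     }
+//
+//     RtlCopyMemory(&KernelArgs[4], &((PULONG)PreviousSp)[4], 12 * sizeof(ULONG));
+//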
+
+ START_REGION(KiSystemServiceStartAddress)
+
+ subu sp,sp,TrapFrameArguments // allocate argument list space
+ lw t0,TrXIntSp(s8) // get previous stack pointer
+ beq zero,t9,20f // if eq, previous mode was kernel
+ li t1,MM_USER_PROBE_ADDRESS // get user probe address
+ sltu t2,t0,t1 // check if stack in user region
+ bne zero,t2,20f // if ne, stack in user region
+ move t0,t1 // set invalid user stack address
+20: ld t1,16(t0) // get twelve argument values from
+ ld t2,24(t0) // caller's stack
+ ld t3,32(t0) //
+ ld t4,40(t0) //
+ ld t5,48(t0) //
+ ld t6,56(t0) //
+ sd t1,16(sp) // stores arguments on kernel stack
+ sd t2,24(sp) //
+ sd t3,32(sp) //
+ sd t4,40(sp) //
+ sd t5,48(sp) //
+ sd t6,56(sp) //
+
+ END_REGION(KiSystemServiceEndAddress)
+
+ subu v1,v1,1 // clear low bit of service address
+
+//
+// Call system service.
+//
+
+30: addu a2,a2,zero // make sure of sign extension
+ addu a3,a3,zero // needed for 64-bit addressing
+ jal v1 // call system service
+
+//
+// Restore old trap frame address from the current trap frame.
+//
+
+ ALTERNATE_ENTRY(KiSystemServiceExit)
+
+ lw a0,KiPcr + PcPrcb(zero) // get processor block address
+ lw t2,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t3,TrTrapFrame(s8) // get old trap frame address
+ lw t0,PbSystemCalls(a0) // increment number of system calls
+ addu t0,t0,1 //
+ sw t0,PbSystemCalls(a0) //
+ sw t3,ThTrapFrame(t2) // restore old trap frame address
+
+//
+// Restore state and exit system service.
+//
+
+ lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazard
+ ctc1 t1,fsr // restore floating status
+ lw t0,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+ lbu t3,TrPreviousMode(s8) // get old previous mode
+
+#if DBG
+
+ lbu a2,ThKernelApcDisable(t2) // get current APC disable count
+ lbu a3,ThApcStateIndex(t2) // get current APC state index
+ lbu t5,TrExceptionRecord(s8) // get previous APC disable count
+ lbu t6,TrExceptionRecord + 1(s8) // get previous APC state index
+ xor t7,t5,a2 // compare APC disable count
+ xor t8,t6,a3 // compare APC state index
+ or t9,t8,t7 // merge comparison value
+ bne zero,t9,60f // if ne, invalid state or count
+ nop // fill
+
+#endif
+
+ and t4,t0,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t4,40f // if eq, previous mode was kernel
+ sb t3,ThPreviousMode(t2) // restore old previous mode
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+
+ lbu t3,ThApcState + AsUserApcPending(t2) // get user APC pending
+ sb zero,ThAlerted(t2) // clear kernel mode alerted
+ mfc0 t4,cause // get exception cause register
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t4,t4,t3 // merge possible APC interrupt request
+ mtc0 t4,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB miss
+// is not possible, then restore the volatile register state.
+//
+
+40: sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiServiceExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+//
+// The specified system service number is not within range. Attempt to
+// convert the thread to a GUI thread if specified system service is
+// not a base service and the thread has not already been converted to
+// a GUI thread.
+//
+// N.B. The argument registers a0-a3, the system service number in v0,
+// and the thread address in t0 must be preserved if an attempt
+// is made to convert the thread to a GUI thread.
+//
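+// In rough C terms (an illustrative sketch only):
+//
+//     if (IsGuiService && (PsConvertToGuiThread() == 0)) {
+//         goto KiSystemServiceRepeat;          // retry with the converted thread
+//     }
+//
+//     return STATUS_INVALID_SYSTEM_SERVICE;
+//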
+
+50: xor t2,t1,SERVICE_TABLE_TEST // check if GUI system service
+ sw v0,TrXIntV0(s8) // save system service number
+ bne zero,t2,55f // if ne, not GUI system service
+ sw a0,TrXIntA0(s8) // save argument register a0
+ sw a1,TrXIntA1(s8) // save argument registers a1-a3
+ sw a2,TrXIntA2(s8) //
+ sw a3,TrXIntA3(s8) //
+ jal PsConvertToGuiThread // attempt to convert to GUI thread
+ move v1,v0 // save completion status
+ move s8,sp // reset trap frame address
+ lw v0,TrXIntV0(s8) // restore system service number
+ lw a0,TrXIntA0(s8) // restore argument registers a0-a3
+ lw a1,TrXIntA1(s8) //
+ lw a2,TrXIntA2(s8) //
+ lw a3,TrXIntA3(s8) //
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ beq zero,v1,KiSystemServiceRepeat // if eq, successful conversion
+
+//
+// Return invalid system service status for invalid service code.
+//
+
+55: li v0,STATUS_INVALID_SYSTEM_SERVICE // set completion status
+ b KiSystemServiceExit //
+
+//
+// An attempt is being made to exit a system service while kernel APCs are
+// disabled, or while attached to another process and the previous mode is
+// not kernel.
+//
+// a2 - Supplies the APC disable count.
+// a3 - Supplies the APC state index.
+//
+
+#if DBG
+
+60: li a0,APC_INDEX_MISMATCH // set bug check code
+ move a1,t5 // set previous APC disable
+ sw t6,4 * 4(sp) // set previous state index
+ jal KeBugCheckEx // call bug check routine
+ j KiExceptionExit // dummy jump for filler
+
+#endif
+
+ START_REGION(KiSystemServiceDispatchEnd)
+
+ .end KiSystemServiceDispatch
+
+ SBTTL("System Service Exception Handler")
+//++
+//
+// EXCEPTION_DISPOSITION
+// KiSystemServiceHandler (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN ULONG EstablisherFrame,
+// IN OUT PCONTEXT ContextRecord,
+// IN OUT PDISPATCHER_CONTEXT DispatcherContext
+// )
+//
+// Routine Description:
+//
+// Control reaches here when an exception is raised in a system service
+// or the system service dispatcher, and for an unwind during a kernel
+// exception.
+//
+// If an unwind is being performed and the system service dispatcher is
+// the target of the unwind, then an exception occurred while attempting
+// to copy the user's in-memory argument list. Control is transferred to
+// the system service exit by returning a continue execution disposition
+// value.
+//
+// If an unwind is being performed and the previous mode is user, then
+// bug check is called to crash the system. It is not valid to unwind
+// out of a system service into user mode.
+//
+// If an unwind is being performed, the previous mode is kernel, the
+// system service dispatcher is not the target of the unwind, and the
+// thread does not own any mutexes, then the previous mode field from
+// the trap frame is restored to the thread object. Otherwise, bug
+// check is called to crash the system. It is invalid to unwind out of
+// a system service while owning a mutex.
+//
+// If an exception is being raised and the exception PC is within the
+// range of the system service dispatcher in-memory argument copy code,
+// then an unwind to the system service exit code is initiated.
+//
+// If an exception is being raised and the exception PC is not within
+// the range of the system service dispatcher, and the previous mode is
+// not user, then a continue search disposition value is returned. Otherwise,
+// a system service has failed to handle an exception and bug check is
+// called. It is invalid for a system service not to handle all exceptions
+// that can be raised in the service.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// EstablisherFrame (a1) - Supplies the frame pointer of the establisher
+// of this exception handler.
+//
+// N.B. This is not actually the frame pointer of the establisher of
+// this handler. It is actually the stack pointer of the caller
+// of the system service. Therefore, the establisher frame pointer
+// is not used and the address of the trap frame is determined by
+// examining the saved s8 register in the context record.
+//
+// ContextRecord (a2) - Supplies a pointer to a context record.
+//
+// DispatcherContext (a3) - Supplies a pointer to the dispatcher context
+// record.
+//
+// Return Value:
+//
+// If bug check is called, there is no return from this routine and the
+// system is crashed. If an exception occurred while attempting to copy
+// the user in-memory argument list, then there is no return from this
+// routine, and unwind is called. Otherwise, ExceptionContinueSearch is
+// returned as the function value.
+//
+//--
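+
+//
+// The dispositions described above reduce to roughly the following C-style
+// sketch (illustrative only; "InArgumentCopyRange" stands for the address
+// range test against KiSystemServiceStartAddress/KiSystemServiceEndAddress,
+// and "PreviousMode" denotes the previous mode taken from the thread object):
+//
+//     if ((ExceptionRecord->ExceptionFlags & EXCEPTION_UNWIND) == 0) {
+//         if (InArgumentCopyRange) {
+//             RtlUnwind(EstablisherFrame, KiSystemServiceExit, NULL,
+//                       (PVOID)ExceptionRecord->ExceptionCode);
+//         } else if (PreviousMode != KernelMode) {
+//             KeBugCheck(SYSTEM_SERVICE_EXCEPTION);
+//         }
+//
+//     } else if ((ExceptionRecord->ExceptionFlags & EXCEPTION_TARGET_UNWIND) == 0) {
+//         if (PreviousMode != KernelMode) {
+//             KeBugCheck(SYSTEM_UNWIND_PREVIOUS_USER);
+//         }
+//         Thread->PreviousMode = TrapFrame->PreviousMode;
+//     }
+//
+//     return ExceptionContinueSearch;
+//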
+
+ LEAF_ENTRY(KiSystemServiceHandler)
+
+ subu sp,sp,HandlerFrameLength // allocate stack frame
+ sw ra,HdRa(sp) // save return address
+
+ PROLOGUE_END
+
+ lw t0,ErExceptionFlags(a0) // get exception flags
+ and t1,t0,EXCEPTION_UNWIND // check if unwind in progress
+ bne zero,t1,40f // if ne, unwind in progress
+
+//
+// An exception is in progress.
+//
+// If the exception PC is within the in-memory argument copy code of the
+// system service dispatcher, then call unwind to transfer control to the
+// system service exit code. Otherwise, check if the previous mode is user
+// or kernel mode.
+//
+//
+
+ lw t0,ErExceptionAddress(a0) // get address of exception
+ la t1,KiSystemServiceStartAddress // get start address of range
+ sltu t3,t0,t1 // check if before start of range
+ la t2,KiSystemServiceEndAddress // get end address of range
+ bne zero,t3,10f // if ne, before start of range
+ sltu t3,t0,t2 // check if before end of range
+ bne zero,t3,30f // if ne, before end of range
+
+//
+// If the previous mode was kernel mode, then a continue search disposition
+// value is returned. Otherwise, the exception was raised in a system service
+// and was not handled by that service. Call bug check to crash the system.
+//
+
+10: lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu t1,ThPreviousMode(t0) // get previous mode from thread object
+ bne zero,t1,20f // if ne, previous mode was user
+
+//
+// Previous mode is kernel mode.
+//
+
+ li v0,ExceptionContinueSearch // set disposition code
+ addu sp,sp,HandlerFrameLength // deallocate stack frame
+ j ra // return
+
+//
+// Previous mode is user mode. Call bug check to crash the system.
+//
+
+20: li a0,SYSTEM_SERVICE_EXCEPTION // set bug check code
+ jal KeBugCheck // call bug check routine
+
+//
+// The exception was raised in the system service dispatcher. Unwind to the
+// system service exit code.
+//
+
+30: lw a3,ErExceptionCode(a0) // set return value
+ move a2,zero // set exception record address
+ move a0,a1 // set target frame address
+ la a1,KiSystemServiceExit // set target PC address
+ jal RtlUnwind // unwind to system service exit
+
+//
+// An unwind is in progress.
+//
+// If a target unwind is being performed, then continue execution is returned
+// to transfer control to the system service exit code. Otherwise, restore the
+// previous mode if the previous mode is not user and there are no mutexes owned
+// by the current thread.
+//
+
+40: and t1,t0,EXCEPTION_TARGET_UNWIND // check if target unwind in progress
+ bne zero,t1,60f // if ne, target unwind in progress
+
+//
+// An unwind is being performed through the system service dispatcher. If the
+// previous mode is not kernel or the current thread owns one or more mutexes,
+// then call bug check and crash the system. Otherwise, restore the previous
+// mode in the current thread object.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,CxXIntS8(a2) // get address of trap frame
+ lbu t3,ThPreviousMode(t0) // get previous mode from thread object
+ lbu t4,TrPreviousMode(t1) // get previous mode from trap frame
+ bne zero,t3,50f // if ne, previous mode was user
+
+//
+// Restore previous mode from trap frame to thread object and continue the unwind
+// operation.
+//
+
+ sb t4,ThPreviousMode(t0) // restore previous mode from trap frame
+ li v0,ExceptionContinueSearch // set disposition value
+ addu sp,sp,HandlerFrameLength // deallocate stack frame
+ j ra // return
+
+//
+// An attempt is being made to unwind into user mode. Call bug check to crash
+// the system.
+//
+
+50: li a0,SYSTEM_UNWIND_PREVIOUS_USER // set bug check code
+ jal KeBugCheck // call bug check
+
+//
+// A target unwind is being performed. Return a continue execution disposition
+// value.
+//
+
+60: li v0,ExceptionContinueSearch // set disposition value
+ addu sp,sp,HandlerFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiSystemServiceHandler
+
+ SBTTL("Trap Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiTrapDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a trap exception code is read from the
+// cause register. When this routine is entered, interrupts are disabled.
+//
+// The function of this routine is to raise an array bounds exceeded
+// exception.
+//
+// N.B. Integer register v1 is not usable in the first instruction of the
+// routine.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiTrapException)
+
+ addu a0,s8,TrExceptionRecord // compute exception record address
+ sw t8,ErExceptionAddress(a0) // save address of exception
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ li t1,STATUS_ARRAY_BOUNDS_EXCEEDED // set exception code
+ sw t1,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ sw zero,ErNumberParameters(a0) // set number of parameters
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiTrapDispatch
+
+ SBTTL("User Address Error Dispatch")
+//++
+//
+// Routine Description:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through an exception,
+// to support unwinding through exceptions for system services, and to
+// support get/set user context.
+//
+//--
+
+ NESTED_ENTRY(KiUserAddressErrorDispatch, TrapFrameLength, zero)
+
+ .set noreorder
+ .set noat
+ sd sp,TrXIntSp(sp) // save stack pointer
+ sd ra,TrXIntRa(sp) // save return address
+ sw ra,TrFir(sp) // save return address
+ sd s8,TrXIntS8(sp) // save frame pointer
+ sd gp,TrXIntGp(sp) // save general pointer
+ sd s0,TrXIntS0(sp) // save integer registers s0 - s7
+ sd s1,TrXIntS1(sp) //
+ sd s2,TrXIntS2(sp) //
+ sd s3,TrXIntS3(sp) //
+ sd s4,TrXIntS4(sp) //
+ sd s5,TrXIntS5(sp) //
+ sd s6,TrXIntS6(sp) //
+ sd s7,TrXIntS7(sp) //
+ move s8,sp // set frame pointer
+ .set at
+ .set reorder
+
+ PROLOGUE_END
+
+//++
+//
+// Routine Description:
+//
+// Control reaches here when a read or write user address error exception
+// is generated from the XTB miss handler. A user address error exception
+// occurs when an invalid 64-bit user address is generated. Interrupts are
+// disabled when this routine is entered.
+//
+// The function of this routine is to raise an access violation exception.
+//
+// Arguments:
+//
+// t6 - The previous mode.
+// t7 - The cause register with the BD bit set.
+// t8 - The address of the faulting instruction.
+// t9 - The new PSR with EXL and mode clear.
+// gp - Supplies a pointer to the system short data area.
+// s8 - Supplies a pointer to the trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiUserAddressErrorException)
+
+ lw a1,KiPcr + PcBadVaddr(zero) // get the bad virtual address
+
+//
+// N.B. The following code is a workaround for a chip bug where the bad
+// virtual address is not correct on an instruction stream TB miss.
+//
+// If the exception PC is equal to the bad virtual address, then the
+// bad virtual address is correct.
+//
+// If the instruction at the exception PC is not in the TB or the
+// TB entry is invalid, then the bad virtual address is incorrect
+// and the instruction is repeated.
+//
+// Otherwise, the bad virtual address is correct.
+//
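+// In rough C terms the workaround amounts to (an illustrative sketch;
+// "IsKseg01" and "TlbEntryValidFor" are placeholders for the address check
+// and the tlbp/tlbr probe sequence below):
+//
+//     if ((BadVaddr != ExceptionPc) &&
+//         !IsKseg01(ExceptionPc) &&
+//         !TlbEntryValidFor(ExceptionPc)) {
+//
+//         return;                              // restart the faulting instruction
+//     }
+//
+//     // otherwise fall through and raise the exception at label 30
+//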
+
+#if !defined(NT_UP)
+
+ move t7,t8 // get address of faulting instruction
+
+ .set noreorder
+ .set noat
+ srl t0,t7,30 // isolate high bits of exception PC
+ beq a1,t7,30f // if eq, addresses match
+ xor a2,t0,0x2 // check for kseg0 or kseg1 address
+
+//
+// If the instruction at the exception PC is not in the TB or the TB entry is
+// invalid, then the bad virtual address is not valid and the instruction is
+// repeated.
+//
+
+ beq zero,a2,30f // if eq, kseg0 or kseg1 address
+ srl t1,t7,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ mfc0 v0,entryhi // get current VPN2 and PID
+ sll t1,t1,ENTRYHI_VPN2 //
+ and v1,v0,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t1,t1,v1 // merge PID with VPN2 of address
+ mtc0 t1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t2,index // read result of probe
+ nop // 1 cycle hazard
+ bltzl t2,20f // if ltz, entry not in TB
+ mtc0 v0,entryhi // restore VPN2 and PID
+ sll t3,t7,31 - 12 // shift page bit into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mfc0 t5,entrylo1 // read low part of TB entry
+ mfc0 t4,entrylo0 //
+ bltzl t3,10f // if ltz, check second PTE
+ and t5,t5,1 << ENTRYLO_V // check if second PTE valid
+ and t5,t4,1 << ENTRYLO_V // check if first PTE valid
+10: mtc0 zero,pagemask // restore page mask register
+ mtc0 v0,entryhi // restore VPN2 and PID
+ bne zero,t5,30f // if ne, PTE valid
+
+//
+// N.B. PSR and EPC may have changed because of TB miss and need to be
+// reloaded.
+//
+
+20: nop // 2 cycle hazard
+ nop //
+ lw t0,TrPsr(s8) // get previous processor state
+ lw t1,TrFir(s8) // get continuation address
+ sw t0,KiPcr + PcSavedT7(zero) // save processor status
+ j KiTrapExit // join common code
+ sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ .set at
+ .set reorder
+
+#endif
+
+30: addu a0,s8,TrExceptionRecord // compute exception record address
+
+ .set noreorder
+ .set noat
+ mtc0 t9,psr // set new PSR
+ move a3,t6 // set previous mode
+ .set at
+ .set reorder
+
+ sw zero,ErExceptionInformation(a0) // save load/store indicator
+ sw a1,ErExceptionInformation + 4(a0) // save bad virtual address
+ sw t8,ErExceptionAddress(a0) // set exception address
+
+//
+// If the address is a reference to the last 64k of user address space, then
+// treat the error as an address error. Otherwise, treat the error as an
+// access violation.
+//
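+// In rough C terms the check described above is (an illustrative sketch):
+//
+//     if ((BadVaddr & 0x7fff0000) == 0x7fff0000) {
+//         ExceptionCode = STATUS_DATATYPE_MISALIGNMENT;
+//     } else {
+//         ExceptionCode = STATUS_ACCESS_VIOLATION;
+//     }
+//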
+
+ li t3,STATUS_ACCESS_VIOLATION // set exception code
+ li t4,0x7fff0000 // get address mask value
+ and t5,t4,t1 // isolate high address bits
+ bne t4,t5,40f // if ne, invalid user address
+ li t3,STATUS_DATATYPE_MISALIGNMENT // set exception code
+40: sw t3,ErExceptionCode(a0) //
+ sw zero,ErExceptionFlags(a0) // set exception flags
+ sw zero,ErExceptionRecord(a0) // set associated record
+ li t0,2 // set number of exception parameters
+ sw t0,ErNumberParameters(a0) //
+ jal KiExceptionDispatch // join common code
+ j KiExceptionExit // dummy jump for filler
+
+ .end KiUserAddressErrorDispatch
+
+ SBTTL("Exception Dispatch")
+//++
+//
+// Routine Description:
+//
+// Control is transferred to this routine to call the exception
+// dispatcher to resolve an exception.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to an exception record.
+//
+// a3 - Supplies the previous processor mode.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// There is no return from this routine.
+//
+//--
+
+ NESTED_ENTRY(KiExceptionDispatch, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+ PROLOGUE_END
+
+ move a1,sp // set exception frame address
+ move a2,s8 // set trap frame address
+ li t0,TRUE // set first chance TRUE
+ sw t0,ExArgs + (4 * 4)(sp) //
+ jal KiDispatchException // call exception dispatcher
+
+ SBTTL("Exception Exit")
+//++
+//
+// Routine Description:
+//
+// Control is transferred to this routine to exit from an exception.
+//
+// N.B. This transfer of control occurs from:
+//
+// 1. a fall through from the above code.
+// 2. an exit from the continue system service.
+// 3. an exit from the raise exception system service.
+// 4. an exit into user mode from thread startup.
+//
+// N.B. The alternate exit point is used by memory management which does
+// not generate an exception frame.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+// sp - Supplies a pointer to an exception frame.
+//
+// Return Value:
+//
+// There is no return from this routine.
+//
+//--
+
+ ALTERNATE_ENTRY(KiExceptionExit)
+
+ ldc1 f20,ExFltF20(sp) // restore floating registers f20 - f31
+ ldc1 f22,ExFltF22(sp) //
+ ldc1 f24,ExFltF24(sp) //
+ ldc1 f26,ExFltF26(sp) //
+ ldc1 f28,ExFltF28(sp) //
+ ldc1 f30,ExFltF30(sp) //
+
+ ALTERNATE_ENTRY(KiAlternateExit)
+
+ lw t1,TrFsr(s8) // get previous floating status
+ li t0,1 << PSR_CU1 // set coprocessor 1 enable bit
+
+ .set noreorder
+ .set noat
+ mtc0 t0,psr // disable interrupts - 3 cycle hazard
+ ctc1 t1,fsr // restore floating status
+ lw t0,TrPsr(s8) // get previous processor status
+ lw t1,TrFir(s8) // get continuation address
+ lw t2,KiPcr + PcCurrentThread(zero) // get current thread address
+ and t3,t0,1 << PSR_PMODE // check if previous mode was user
+ beq zero,t3,10f // if eq, previous mode was kernel
+ sw t0,KiPcr + PcSavedT7(zero) // save processor status
+
+//
+// If a user mode APC is pending, then request an APC interrupt.
+//
+
+ lbu t3,ThApcState + AsUserApcPending(t2) // get user APC pending
+ sb zero,ThAlerted(t2) // clear kernel mode alerted
+ mfc0 t4,cause // get exception cause register
+ sll t3,t3,(APC_LEVEL + CAUSE_INTPEND - 1) // shift APC pending
+ or t4,t4,t3 // merge possible APC interrupt request
+ mtc0 t4,cause // set exception cause register
+
+//
+// Save the new processor status and continuation PC in the PCR so a TB miss
+// is not possible, then restore the volatile register state.
+//
+
+10: sw t1,KiPcr + PcSavedEpc(zero) // save continuation address
+ ldc1 f0,TrFltF0(s8) // restore floating register f0
+ ldc1 f2,TrFltF2(s8) // restore floating registers f2 - f19
+ ldc1 f4,TrFltF4(s8) //
+ ldc1 f6,TrFltF6(s8) //
+ ldc1 f8,TrFltF8(s8) //
+ ldc1 f10,TrFltF10(s8) //
+ ldc1 f12,TrFltF12(s8) //
+ ldc1 f14,TrFltF14(s8) //
+ ldc1 f16,TrFltF16(s8) //
+ j KiTrapExit //
+ ldc1 f18,TrFltF18(s8) //
+ .set at
+ .set reorder
+
+ .end KiExceptionDispatch
+
+ SBTTL("Disable Interrupts")
+//++
+//
+// BOOLEAN
+// KiDisableInterrupts (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function disables interrupts and returns whether interrupts
+// were previously enabled.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// A boolean value that determines whether interrupts were previously
+// enabled (TRUE) or disabled (FALSE).
+//
+//--
+
+ LEAF_ENTRY(KiDisableInterrupts)
+
+ .set noreorder
+ .set noat
+ mfc0 t0,psr // get current processor status
+ li t1,~(1 << PSR_IE) // set interrupt enable mask
+ and t2,t1,t0 // clear interrupt enable
+ mtc0 t2,psr // disable interrupts
+ and v0,t0,1 << PSR_IE // isolate current interrupt enable
+ srl v0,v0,PSR_IE //
+ .set at
+ .set reorder
+
+ j ra // return
+
+ .end KiDisableInterrupts
+
+ SBTTL("Restore Interrupts")
+//++
+//
+// VOID
+// KiRestoreInterrupts (
+// IN BOOLEAN Enable
+// )
+//
+// Routine Description:
+//
+// This function restores the interrupt enable that was returned by
+// the disable interrupts function.
+//
+// Arguments:
+//
+// Enable (a0) - Supplies the interrupt enable value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRestoreInterrupts)
+
+ .set noreorder
+ .set noat
+ mfc0 t0,psr // get current processor status
+ and a0,a0,0xff // isolate interrupt enable
+ sll t1,a0,PSR_IE // shift interrupt enable into position
+ or t1,t1,t0 // merge interrupt enable with PSR
+ mtc0 t1,psr // restore previous interrupt enable
+ nop //
+ .set at
+ .set reorder
+
+ j ra // return
+
+ .end KiRestoreInterrupts
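+
+//
+// These two routines are used as a bracketing pair; a typical caller looks
+// roughly like the following (illustrative only):
+//
+//     BOOLEAN Enable;
+//
+//     Enable = KiDisableInterrupts();
+//
+//     ... code that must run with interrupts disabled ...
+//
+//     KiRestoreInterrupts(Enable);
+//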
+
+ SBTTL("Fill Translation Buffer Entry")
+//++
+//
+// VOID
+// KeFillEntryTb (
+// IN HARDWARE_PTE Pte[],
+// IN PVOID Virtual,
+// IN BOOLEAN Invalid
+// )
+//
+// Routine Description:
+//
+// This function fills a translation buffer entry. If the entry is already
+// in the translation buffer, then the entry is overwritten. Otherwise, a
+// random entry is overwritten.
+//
+// Arguments:
+//
+// Pte (a0) - Supplies a pointer to the page table entries that are to be
+// written into the TB.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be filled in the translation buffer.
+//
+// Invalid (a2) - Supplies a boolean value that determines whether the
+// TB entry should be invalidated.
+//
+// Return Value:
+//
+// None.
+//
+//--
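+
+//
+// In rough C terms the fill is (an illustrative sketch; "TlbProbe",
+// "TlbWriteIndexed", and "TlbWriteRandom" are placeholders for the
+// tlbp/tlbwi/tlbwr sequences, and "VPN2_MASK" for the ENTRYHI_VPN2 shifts):
+//
+//     EntryHi = (Virtual & VPN2_MASK) | CurrentPid;
+//     Index = TlbProbe(EntryHi);
+//     if (Index >= 0) {
+//         TlbWriteIndexed(Index, EntryHi, Pte[0], Pte[1]);
+//     } else {
+//         TlbWriteRandom(EntryHi, Pte[0], Pte[1]);
+//     }
+//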
+
+ LEAF_ENTRY(KeFillEntryTb)
+
+ and a0,a0,~0x7 // clear low bits of PTE address
+ lw t0,0(a0) // get first PTE value
+ lw t1,4(a0) // get second PTE value
+
+#if DBG
+
+ xor t2,t1,t0 // compare G-bits
+ and t2,t2,1 << ENTRYLO_G // isolate comparison
+ beq zero,t2,5f // if eq, G-bits match
+ break KERNEL_BREAKPOINT // break into kernel debugger
+5: //
+
+#endif
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,entryhi // get current PID and VPN2
+ srl a1,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll a1,a1,ENTRYHI_VPN2 //
+ and t3,t3,PID_MASK << ENTRYHI_PID // isolate current PID
+ or a1,t3,a1 // merge PID with VPN2 of virtual address
+ mtc0 a1,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t3,index // read result of probe
+ mtc0 t0,entrylo0 // set first PTE value
+ mtc0 t1,entrylo1 // set second PTE value
+ bltz t3,20f // if ltz, entry is not in TB
+ nop // fill
+
+#if DBG
+
+ sltu t4,t3,FIXED_ENTRIES // check if fixed entry within range
+ beq zero,t4,10f // if eq, index not in fixed region
+ nop //
+ break KERNEL_BREAKPOINT // break into debugger
+
+#endif
+
+10: tlbwi // overwrite indexed entry
+ nop // 3 cycle hazard
+ nop //
+ b 30f //
+ nop //
+
+20: tlbwr // overwrite random TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+30: ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeFillEntryTb
+
+ SBTTL("Fill Large Translation Buffer Entry")
+//++
+//
+// VOID
+// KeFillLargeEntryTb (
+// IN HARDWARE_PTE Pte[],
+// IN PVOID Virtual,
+// IN ULONG PageSize
+// )
+//
+// Routine Description:
+//
+// This function fills a large translation buffer entry.
+//
+// N.B. It is assumed that the large entry is not in the TB and therefore
+// the TB is not probed.
+//
+// Arguments:
+//
+// Pte (a0) - Supplies a pointer to the page table entries that are to be
+// written into the TB.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be filled in the translation buffer.
+//
+// PageSize (a2) - Supplies the size of the large page table entry.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeFillLargeEntryTb)
+
+ and a0,a0,~0x7 // clear low bits of PTE address
+ lw t0,0(a0) // get first PTE value
+ lw t1,4(a0) // get second PTE value
+ subu a2,a2,1 // compute the page mask value
+ srl a2,a2,PAGE_SHIFT //
+ sll a2,a2,PAGE_SHIFT + 1 //
+ nor a3,a2,zero // compute virtual address mask
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,entryhi // get current PID and VPN2
+ srl a1,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll a1,a1,ENTRYHI_VPN2 //
+ and a1,a3,a1 // isolate large entry virtual address
+ and t3,t3,PID_MASK << ENTRYHI_PID // isolate current PID
+ or a1,t3,a1 // merge PID with VPN2 of virtual address
+ li a3,LARGE_ENTRY // set large entry index
+ mtc0 a1,entryhi // set entry high value for large entry
+ mtc0 a2,pagemask // set page mask value
+ mtc0 a3,index //
+ mtc0 t0,entrylo0 // set first PTE value
+ mtc0 t1,entrylo1 // set second PTE value
+ nop // 1 cycle hazard
+ tlbwi // overwrite large TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mtc0 zero,pagemask // clear page mask value
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeFillLargeEntryTb
+
+ SBTTL("Fill Fixed Translation Buffer Entry")
+//++
+//
+// VOID
+// KeFillFixedEntryTb (
+// IN HARDWARE_PTE Pte[],
+// IN PVOID Virtual,
+// IN ULONG Index
+// )
+//
+// Routine Description:
+//
+// This function fills a fixed translation buffer entry.
+//
+// Arguments:
+//
+// Pte (a0) - Supplies a pointer to the page table entries that are to be
+// written into the TB.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be filled in the translation buffer.
+//
+// Index (a2) - Supplies the index where the TB entry is to be written.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeFillFixedEntryTb)
+
+ lw t0,0(a0) // get first PTE value
+ lw t1,4(a0) // get second PTE value
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t3,entryhi // get current PID and VPN2
+ srl a1,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll a1,a1,ENTRYHI_VPN2 //
+ and t3,t3,PID_MASK << ENTRYHI_PID // isolate current PID
+ or a1,t3,a1 // merge PID with VPN2 of virtual address
+ mtc0 a1,entryhi // set VPN2 and PID for probe
+ mtc0 t0,entrylo0 // set first PTE value
+ mtc0 t1,entrylo1 // set second PTE value
+ mtc0 a2,index // set TB entry index
+ nop // 1 cycle hazard
+ tlbwi // overwrite indexed TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeFillFixedEntryTb
+
+ SBTTL("Flush Entire Translation Buffer")
+//++
+//
+// VOID
+// KeFlushCurrentTb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function flushes the random part of the translation buffer.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeFlushCurrentTb)
+
+ j KiFlushRandomTb // execute common code
+
+ .end KeFlushCurrentTb
+
+ SBTTL("Flush Fixed Translation Buffer Entries")
+//++
+//
+// VOID
+// KiFlushFixedTb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function is called to flush all the fixed entries from the
+// translation buffer.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushFixedTb)
+
+ .set noreorder
+ .set noat
+ move t0,zero // set base index of fixed TB entries
+ j KiFlushTb //
+ mfc0 t3,wired // set highest index number + 1
+ .set at
+ .set reorder
+
+ .end KiFlushFixedTb
+
+ SBTTL("Flush Random Translation Buffer Entries")
+//++
+//
+// VOID
+// KiFlushRandomTb (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function is called to flush all the random entries from the TB.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushRandomTb)
+
+ .set noreorder
+ .set noat
+ mfc0 t0,wired // set base index of random TB entries
+ lw t3,KeNumberTbEntries // set number of entries
+ .set at
+ .set reorder
+
+ ALTERNATE_ENTRY(KiFlushTb)
+
+ li t4,KSEG0_BASE // set high part of TB entry
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ sll t0,t0,INDEX_INDEX // shift starting index into position
+ sll t3,t3,INDEX_INDEX // shift ending index into position
+ and t1,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+ li t4,KSEG0_BASE // set invalidate address
+ or t4,t4,t1 // merge PID with VPN2 of virtual address
+ mtc0 zero,entrylo0 // set low part of TB entry
+ mtc0 zero,entrylo1 //
+ mtc0 t4,entryhi //
+ mtc0 t0,index // set TB entry index
+10: addu t0,t0,1 << INDEX_INDEX //
+ tlbwi // write TB entry
+ bne t0,t3,10b // if ne, more entries to flush
+ mtc0 t0,index // set TB entry index
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KiFlushRandomTb
+
+ SBTTL("Flush Multiple TB Entry")
+//++
+//
+// VOID
+// KiFlushMultipleTb (
+// IN BOOLEAN Invalid,
+// IN PVOID *Virtual,
+// IN ULONG Count
+// )
+//
+// Routine Description:
+//
+// This function flushes multiple entries from the translation buffer.
+//
+// Arguments:
+//
+// Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies a pointer to an array of virtual addresses of
+// the entries that are flushed from the translation buffer.
+//
+// Count (a2) - Supplies the number of TB entries to flush.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushMultipleTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ nop //
+ and a3,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+10: lw v0,0(a1) // get virtual address
+ addu a1,a1,4 // advance to next entry
+ subu a2,a2,1 // reduce number of entries
+ srl t2,v0,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll t2,t2,ENTRYHI_VPN2 //
+ or t2,t2,a3 // merge PID with VPN2 of virtual address
+ mtc0 t2,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe TB for entry
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t3,index // read result of probe
+ nop //
+ bltz t3,30f // if ltz, entry is not in TB
+ lui t2,KSEG0_BASE >> 16 // set invalidate address
+
+#if DBG
+
+ sltu t4,t3,FIXED_ENTRIES // check if fixed entry region
+ beq zero,t4,20f // if eq, index not in fixed region
+ nop //
+ break KERNEL_BREAKPOINT // break into debugger
+
+#endif
+
+20: mtc0 zero,entrylo0 // set low part of TB entry
+ mtc0 zero,entrylo1 //
+ or t2,t2,a3 // merge PID with VPN2 of invalid address
+ mtc0 t2,entryhi // set VPN2 and PID for TB write
+ nop // 1 cycle hazard
+ tlbwi // overwrite indexed TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+30: bgtz a2,10b // if gtz, more entries to flush
+ mtc0 zero,pagemask // restore page mask register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ j ra // return
+
+ .end KiFlushMultipleTb
+
+ SBTTL("Flush Single TB Entry")
+//++
+//
+// VOID
+// KiFlushSingleTb (
+// IN BOOLEAN Invalid,
+// IN PVOID Virtual
+// )
+//
+// Routine Description:
+//
+// This function flushes a single entry from the translation buffer.
+//
+// Arguments:
+//
+// Invalid (a0) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (a1) - Supplies the virtual address of the entry that is to
+// be flushed from the translation buffer.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiFlushSingleTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ srl t2,a1,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll t2,t2,ENTRYHI_VPN2 //
+ and a2,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t2,t2,a2 // merge PID with VPN2 of virtual address
+ mtc0 t2,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe TB for entry
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t3,index // read result of probe
+ nop //
+ bltz t3,20f // if ltz, entry is not in TB
+ lui t2,KSEG0_BASE >> 16 // set invalid address
+
+#if DBG
+
+ sltu t4,t3,FIXED_ENTRIES // check if fixed entry region
+ beq zero,t4,10f // if eq, index not in fixed region
+ nop //
+ break KERNEL_BREAKPOINT // break into debugger
+
+#endif
+
+10: mtc0 zero,entrylo0 // set low part of TB entry
+ mtc0 zero,entrylo1 //
+ or t2,t2,a2 // merge PID with VPN2 of invalid address
+ mtc0 t2,entryhi // set VPN2 and PID for TB write
+ nop // 1 cycle hazard
+ tlbwi // overwrite indexed TB entry
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mtc0 zero,pagemask // restore page mask register
+ .set at
+ .set reorder
+
+20: ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ j ra // return
+
+ .end KiFlushSingleTb
+
+ SBTTL("Probe Tb Entry")
+//++
+//
+// ULONG
+// KiProbeEntryTb (
+// IN PVOID VirtualAddress
+// )
+//
+// Routine Description:
+//
+// This function is called to determine if a specified entry is valid
+// and within the fixed portion of the TB.
+//
+// Arguments:
+//
+// VirtualAddress - Supplies the virtual address to probe.
+//
+// Return Value:
+//
+// A value of TRUE is returned if the specified entry is valid and within
+// the fixed part of the TB. Otherwise, a value of FALSE is returned.
+//
+//--
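+
+//
+// In rough C terms the probe returns (an illustrative sketch; the helper
+// names are placeholders for the tlbp/tlbr sequence below):
+//
+//     return TlbHit(VirtualAddress) &&
+//            PteValid(VirtualAddress) &&
+//            (TlbIndex(VirtualAddress) < WiredEntryCount);
+//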
+
+ LEAF_ENTRY(KiProbeEntryTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,entryhi // get current PID and VPN2
+ srl t2,a0,ENTRYHI_VPN2 // isolate VPN2 of virtual address
+ sll t2,t2,ENTRYHI_VPN2 //
+ and t1,t1,PID_MASK << ENTRYHI_PID // isolate current PID
+ or t2,t2,t1 // merge PID with VPN2 of virtual address
+ mtc0 t2,entryhi // set VPN2 and PID for probe
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ tlbp // probe for entry in TB
+ nop // 2 cycle hazard
+ nop //
+ mfc0 t3,index // read result of probe
+ li v0,FALSE // set to return failure
+ bltz t3,20f // if ltz, entry is not in TB
+ sll a0,a0,0x1f - (ENTRYHI_VPN2 - 1) // shift VPN<12> into sign
+ tlbr // read entry from TB
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ bltz a0,10f // if ltz, check second PTE
+ mfc0 t2,entrylo1 // get second PTE for probe
+ mfc0 t2,entrylo0 // get first PTE for probe
+10: mtc0 t1,entryhi // restore current PID
+ mtc0 zero,pagemask // restore page mask register
+ sll t2,t2,0x1f - ENTRYLO_V // shift valid bit into sign position
+ bgez t2,20f // if geq, entry is not valid
+ srl t3,INDEX_INDEX // isolate TB index
+ and t3,t3,0x3f //
+ mfc0 t4,wired // get number of wired entries
+ nop // fill
+ sltu v0,t3,t4 // check if entry in fixed part of TB
+ .set at
+ .set reorder
+
+20: ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ j ra // return
+
+ .end KiProbeEntryTb
+
+ SBTTL("Read Tb Entry")
+//++
+//
+// VOID
+// KiReadEntryTb (
+// IN ULONG Index,
+// OUT PTB_ENTRY TbEntry
+// )
+//
+// Routine Description:
+//
+// This function is called to read an entry from the TB.
+//
+// Arguments:
+//
+// Index - Supplies the index of the entry to read.
+//
+// TbEntry - Supplies a pointer to a TB entry structure that receives the
+// contents of the specified TB entry.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiReadEntryTb)
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ sll a0,INDEX_INDEX // shift index into position
+ mfc0 t1,entryhi // save entry high register
+ mtc0 a0,index // set TB entry index
+ nop //
+ tlbr // read entry from TB
+ nop // 3 cycle hazard
+ nop //
+ nop //
+ mfc0 t2,entrylo0 // save first PTE value
+ mfc0 t3,entrylo1 // save second PTE value
+ mfc0 t4,entryhi // save entry high value
+ mfc0 t5,pagemask // save page mask value
+ mtc0 t1,entryhi // restore entry high register
+ mtc0 zero,pagemask // restore page mask register
+ nop // 1 cycle hazard
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+ sw t2,TbEntrylo0(a1) // set first PTE value
+ sw t3,TbEntrylo1(a1) // set second PTE value
+ sw t4,TbEntryhi(a1) // set entry high value
+ sw t5,TbPagemask(a1) // set page mask value
+ j ra // return
+
+ .end KiReadEntryTb
+
+ SBTTL("Passive Release")
+//++
+//
+// VOID
+// KiPassiveRelease (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function is called when an interrupt has been passively released.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiPassiveRelease)
+
+ j ra // return
+
+ .end KiPassiveRelease
diff --git a/private/ntos/ke/mips/xxapcint.s b/private/ntos/ke/mips/xxapcint.s
new file mode 100644
index 000000000..0d7c5c8c8
--- /dev/null
+++ b/private/ntos/ke/mips/xxapcint.s
@@ -0,0 +1,123 @@
+// TITLE("Asynchronous Procedure Call (APC) Interrupt")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxapcint.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process the
+// Asynchronous Procedure Call (APC) interrupt.
+//
+// Author:
+//
+// David N. Cutler (davec) 3-Apr-1990
+//
+// Environment:
+//
+// Kernel mode only, IRQL APC_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Asynchronous Procedure Call Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a software interrupt generated
+// at APC_LEVEL. Its function is to allocate an exception frame and call
+// the kernel APC delivery routine to deliver kernel mode APCs and to check
+// if a user mode APC should be delivered. If a user mode APC should be
+// delivered, then the kernel APC delivery routine constructs a context
+// frame on the user stack and alters the exception and trap frames so that
+// control will be transferred to the user APC dispatcher on return from the
+// interrupt.
+//
+// N.B. On entry to this routine all integer registers and the volatile
+// floating registers have been saved. The remainder of the machine
+// state is saved if and only if the previous mode was user mode.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY(KiApcInterrupt, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Determine the previous mode.
+//
+
+ lw t0,TrPsr(s8) // get saved processor status
+ srl t0,t0,PSR_KSU + 1 // isolate previous mode
+ and a0,t0,0x1 //
+ beq zero,a0,20f // if eq, kernel mode
+
+//
+// The previous mode was user.
+//
+// Save the nonvolatile floating state so a context record can be
+// properly constructed to deliver an APC to user mode if required.
+// It is also necessary to save the volatile floating state for
+// suspend/resume operations.
+//
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Clear APC interrupt.
+//
+
+20: DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ li t2,~APC_INTERRUPT // clear APC interrupt pending
+ and t1,t1,t2 //
+ mtc0 t1,cause //
+ nop //
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+//
+// Attempt to deliver an APC.
+//
+
+ move a1,sp // set address of exception frame
+ move a2,s8 // set address of trap frame
+ jal KiDeliverApc // call APC delivery routine
+
+//
+// Deallocate stack frame and return.
+//
+
+ lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate exception frame
+ j ra // return
+
+ .end KiApcInterrupt
diff --git a/private/ntos/ke/mips/xxclock.s b/private/ntos/ke/mips/xxclock.s
new file mode 100644
index 000000000..cdd8a7818
--- /dev/null
+++ b/private/ntos/ke/mips/xxclock.s
@@ -0,0 +1,592 @@
+// TITLE("Interval and Profile Clock Interrupts")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxclock.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process the
+// interval and profile clock interrupts.
+//
+// Author:
+//
+// David N. Cutler (davec) 27-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeMaximumIncrement 4
+ .extern KeTickCount 3 * 4
+ .extern KeTimeAdjustment 4
+ .extern KiAdjustDpcThreshold 4
+ .extern KiIdealDpcRate 4
+ .extern KiMaximumDpcQueueDepth 4
+ .extern KiProfileListHead 2 * 4
+ .extern KiProfileLock 4
+ .extern KiTickOffset 4
+
+ SBTTL("Update System Time")
+//++
+//
+// VOID
+// KeUpdateSystemTime (
+// IN PKTRAP_FRAME TrapFrame,
+// IN ULONG TimeIncrement
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the system time and check to
+// determine if a timer has expired.
+//
+// N.B. This routine is executed on a single processor in a multiprocessor
+// system. The remainder of the processors only execute the quantum end
+// and runtime update code.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// TimeIncrement (a1) - Supplies the time increment in 100ns units.
+//
+// Return Value:
+//
+// None.
+//
+//--
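+
+//
+// In rough C terms the interval accounting below is (an illustrative sketch;
+// InterruptTime, SystemTime, and TickCount stand for the PCR and global
+// values updated by the code):
+//
+//     InterruptTime += TimeIncrement;
+//     KiTickOffset -= TimeIncrement;
+//     if (KiTickOffset <= 0) {
+//         SystemTime += KeTimeAdjustment;
+//         TickCount += 1;
+//         KiTickOffset += KeMaximumIncrement;
+//     }
+//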
+
+ LEAF_ENTRY(KeUpdateSystemTime)
+
+//
+// Update the interrupt time.
+//
+
+ ld t8,KiPcr2 + Pc2InterruptTime // get interrupt time
+ daddu t8,t8,a1 // add time increment value
+ sd t8,KiPcr2 + Pc2InterruptTime // store interrupt time
+
+//
+// Update tick offset and check for "system clock" tick.
+//
+
+ lw a2,KiTickOffset // get tick offset value
+ sub a2,a2,a1 // subtract time increment
+ ld v0,KeTickCount // get low and high 1 tick count
+ la t0,KiTimerTableListHead // get base address of timer table
+ sw a2,KiTickOffset // store tick offset value
+ bgtz a2,10f // if gtz, tick not completed
+ lw a3,KeMaximumIncrement // get maximum increment value
+
+//
+// Update system time.
+//
+
+ lw t1,KeTimeAdjustment // get time adjustment value
+ ld t2,KiPcr2 + Pc2SystemTime // get low and high 1 system time
+ daddu t2,t2,t1 // add time increment value
+ sd t2,KiPcr2 + Pc2SystemTime // store low and high 1 system time
+
+//
+// Update the tick count.
+//
+// N.B. The tick count is updated in a very strict manner so that an
+// interlock does not have to be used in an MP system. This is
+// required for backward compatibility with old drivers and file
+// systems.
+//
+
+ daddu t2,v0,1 // increment tick count
+ dsrl t3,t2,32 // get high half of tick count
+ sw t2,KiPcr2 + Pc2TickCountLow(zero) // store low tick count
+
+ .set noreorder
+ .set noat
+ sw t3,KeTickCount + 8 // store high 2 tick count
+ sd t2,KeTickCount // store low and high 1 tick count
+ .set at
+ .set reorder
+
+//
+// Compute next tick offset value.
+//
+
+ addu a3,a3,a2 // add maximum increment to residue
+ sw a3,KiTickOffset // store tick offset value
+
+//
+// Check to determine if a timer has expired at the current hand value.
+//
+
+ and t1,v0,TIMER_TABLE_SIZE - 1 // reduce to timer table index
+ sll t2,t1,3 // compute timer table listhead address
+ addu t2,t2,t0 //
+ lw t3,LsFlink(t2) // get address of first timer in list
+ beq t2,t3,5f // if eq, no timer active
+
+//
+// Get the expiration time from the timer object.
+//
+// N.B. The offset to the timer list entry must be subtracted out of the
+// displacement calculation.
+//
+
+ ld t4,TiDueTime - TiTimerListEntry(t3) // get timer due time
+ sltu t9,t8,t4 // check if timer is due
+ beq zero,t9,20f // if eq, timer has expired
+
+//
+// Check to determine if a timer has expired at the next hand value.
+//
+
+5: addu v0,v0,1 // advance hand value to next entry
+10: and t1,v0,TIMER_TABLE_SIZE - 1 // reduce to timer table index
+ sll t2,t1,3 // compute timer table listhead address
+ addu t2,t2,t0 //
+ lw t3,LsFlink(t2) // get address of first timer in list
+ beq t2,t3,40f // if eq, no timer active
+
+//
+// Get the expiration time from the timer object.
+//
+// N.B. The offset to the timer list entry must be subtracted out of the
+// displacement calculation.
+//
+
+ ld t4,TiDueTime - TiTimerListEntry(t3) // get timer due time
+ sltu t9,t8,t4 // check if timer is due
+ bne zero,t9,40f // if ne, timer has not expired
+
+//
+// Put timer expiration DPC in the system DPC list and initiate a dispatch
+// interrupt on the current processor.
+//
+
+20: la t0,KiTimerExpireDpc // get expiration DPC address
+ lw a1,KiPcr + PcPrcb(zero) // get address of PRCB
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ addu t3,a1,PbDpcListHead // compute DPC listhead address
+ addu v1,a1,PbDpcLock // compute DPC lock address
+
+#if !defined(NT_UP)
+
+30: ll t4,0(v1) // get current lock value
+ move t5,t3 // set lock ownership value
+ bne zero,t4,30b // if ne, spin lock owned
+ sc t5,0(v1) // set spin lock owned
+ beq zero,t5,30b // if eq, store conditional failed
+
+#endif
+
+ lw t4,DpLock(t0) // get DPC inserted state
+ bne zero,t4,35f // if ne, DPC entry already inserted
+ lw t4,LsBlink(t3) // get address of last entry in list
+ sw v1,DpLock(t0) // set DPC inserted state
+ sw v0,DpSystemArgument1(t0) // set timer table hand value
+ addu t0,t0,DpDpcListEntry // compute address of DPC list entry
+ sw t0,LsBlink(t3) // set address of new last entry
+ sw t0,LsFlink(t4) // set next link in old last entry
+ sw t3,LsFlink(t0) // set address of next entry
+ sw t4,LsBlink(t0) // set address of previous entry
+ lw t5,PbDpcQueueDepth(a1) // increment DPC queue depth
+ addu t5,t5,1 //
+ sw t5,PbDpcQueueDepth(a1) //
+
+ .set noreorder
+ .set noat
+ mfc0 t3,cause // get exception cause register
+ or t3,t3,DISPATCH_INTERRUPT // merge dispatch interrupt request
+ mtc0 t3,cause // set exception cause register
+ .set at
+ .set reorder
+
+35: //
+
+#if !defined(NT_UP)
+
+ sw zero,0(v1) // set spin lock not owned
+
+#endif
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+40: blez a2,50f // if lez, full tick
+ j ra // return
+50: j KeUpdateRunTime
+
+ .end KeUpdateSystemTime
+
+ SBTTL("Update Thread and Process Runtime")
+//++
+//
+// VOID
+// KeUpdateRunTime (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the runtime of the current
+// thread, update the runtime of the current thread's process, and decrement
+// the current thread's quantum.
+//
+// N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeUpdateRunTime)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t2,ThApcState + AsProcess(t0) // get address of current process
+ lw t3,TrPsr(a0) // get saved processor status
+ lw t5,KiPcr + PcPrcb(zero) // get current processor block address
+ lw t7,PbDpcRoutineActive(t5) // get DPC active flag
+ and t4,t3,0x1 << PSR_PMODE // isolate previous processor mode
+ bne zero,t4,30f // if ne, previous mode was user
+
+//
+// If a DPC is active, then increment the time spent executing DPC routines.
+// Otherwise, if the old IRQL is greater than DPC level, then increment the
+// time spent executing interrupt service routines. Otherwise, increment
+// the time spent in kernel mode for the current thread.
+//
+
+ lbu t6,TrOldIrql(a0) // get previous IRQL
+ subu t6,t6,DISPATCH_LEVEL // compare IRQL with DPC level
+ bltz t6,20f // if ltz, increment thread kernel time
+ addu t8,t5,PbInterruptTime // compute interrupt time address
+ bgtz t6,10f // if gtz, increment interrupt time
+ addu t8,t5,PbDpcTime // compute DPC time address
+ beq zero,t7,20f // if eq, increment thread kernel time
+
+//
+// Update the time spent in DPC/interrupt processing.
+//
+
+10: lw t6,0(t8) // get processor time
+ addu t6,t6,1 // increment processor time
+ sw t6,0(t8) // store processor time
+ addu t9,t5,PbKernelTime // compute processor kernel time address
+ b 50f //
+
+//
+// Update the time spent in kernel mode for the current thread.
+//
+
+20: lw t6,ThKernelTime(t0) // get kernel time
+ addu t6,t6,1 // increment kernel time
+ sw t6,ThKernelTime(t0) // store kernel time
+ addu t2,t2,PrKernelTime // compute process kernel time address
+ addu t9,t5,PbKernelTime // compute processor kernel time address
+ b 40f //
+
+//
+// Update the time spent in user mode for the current thread.
+//
+
+30: lw t6,ThUserTime(t0) // get user time
+ addu t6,t6,1 // increment user time
+ sw t6,ThUserTime(t0) // store user time
+ addu t2,t2,PrUserTime // compute process user time address
+ addu t9,t5,PbUserTime // compute processor user time address
+
+//
+// Update the time spent in kernel/user mode for the current thread's process.
+//
+// N.B. The update of the process time must be synchronized across processors.
+//
+
+40: ll t6,0(t2) // get process time
+ addu t6,t6,1 // increment process time
+ sc t6,0(t2) // store process time
+ beq zero,t6,40b // if eq, store conditional failed
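+
+//
+// N.B. The load linked/store conditional sequence above is logically an
+//      interlocked increment; if another processor updates the same word
+//      between the ll and the sc, the store conditional fails and the
+//      increment is retried. A C sketch of the retry loop (the ll/sc
+//      primitives are shown as placeholders, not real functions):
+//
+//          do {
+//              Old = __load_linked(ProcessTime);
+//          } while (__store_conditional(ProcessTime, Old + 1) == 0);
+//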
+
+//
+// Update the time spent in kernel/user mode for the current processor.
+//
+
+50: lw t6,0(t9) // get processor time
+ addu t6,t6,1 // increment processor time
+ sw t6,0(t9) // store processor time
+
+//
+// Update the DPC request rate which is computed as the average between
+// the previous rate and the current rate.
+//
+
+ lw a0,PbDpcCount(t5) // get current DPC count
+ lw a1,PbDpcLastCount(t5) // get last DPC count
+ lw a2,PbDpcRequestRate(t5) // get last DPC request rate
+ lw a3,PbDpcQueueDepth(t5) // get current DPC queue depth
+ sw a0,PbDpcLastCount(t5) // set last DPC count
+ subu a0,a0,a1 // compute count during interval
+ addu a0,a0,a2 // compute sum of current and last
+ srl a0,a0,1 // average current and last
+ sw a0,PbDpcRequestRate(t5) // set new DPC request rate
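+
+//
+// N.B. The rate computation above is equivalent to the following C
+//      sketch (processor block field names inferred from the Pb offsets
+//      used here; illustrative only):
+//
+//          Delta = Prcb->DpcCount - Prcb->DpcLastCount;
+//          Prcb->DpcLastCount = Prcb->DpcCount;
+//          Prcb->DpcRequestRate = (Prcb->DpcRequestRate + Delta) / 2;
+//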
+
+//
+// If the current DPC queue depth is not zero, a DPC routine is not active,
+// and a DPC interrupt has not been requested, then request a dispatch
+// interrupt, decrement the maximum DPC queue depth, and reset the threshold
+// counter if appropriate.
+//
+
+ lw v0,PbDpcInterruptRequested(t5) // get DPC interrupt requested
+ beq zero,a3,60f // if eq, DPC queue is empty
+ or v0,v0,t7 // merge DPC interrupt requested and active
+ bne zero,v0,60f // if ne, DPC active or interrupt requested
+
+        DISABLE_INTERRUPTS(a1)          // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 a2,cause // get exception cause register
+ lw v0,PbMaximumDpcQueueDepth(t5) // get maximum queue depth
+ lw v1,KiIdealDpcRate // get ideal DPC rate
+        or      a2,a2,DISPATCH_INTERRUPT // merge dispatch interrupt request
+ mtc0 a2,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(a1) // enable interrupts
+
+ sltu a0,a0,v1 // test if current rate less than ideal
+ lw a1,KiAdjustDpcThreshold // reset initial threshold counter
+ sw a1,PbAdjustDpcThreshold(t5) //
+ beq zero,a0,KiDecrementQuantum // if eq, rate greater or equal ideal
+ subu v0,v0,1 // decrement maximum DPC queue depth
+ beq zero,v0,KiDecrementQuantum // if eq, current value is one
+ sw v0,PbMaximumDpcQueueDepth(t5) // set new maximum DPC queue depth
+ b KiDecrementQuantum //
+
+//
+// The DPC queue is empty or a DPC routine is active or a DPC interrupt
+// has been requested. Count down the adjustment threshold and if the
+// count reaches zero, then increment the maximum DPC queue depth, but
+// not above the initial value, and reset the adjustment threshold value.
+//
+
+60: lw a0,PbAdjustDpcThreshold(t5) // get adjustment threshold counter
+ lw a1,PbMaximumDpcQueueDepth(t5) // get current maximum queue depth
+ lw a2,KiMaximumDpcQueueDepth // get initial maximum queue depth
+ subu a0,a0,1 // decrement adjustment threshold counter
+ sw a0,PbAdjustDpcThreshold(t5) //
+ bne zero,a0,KiDecrementQuantum // if ne, adjustment counter not zero
+        lw      a0,KiAdjustDpcThreshold // set new DPC threshold counter
+ sw a0,PbAdjustDpcThreshold(t5) //
+ beq a1,a2,KiDecrementQuantum // if eq, currently at maximum depth
+ addu a1,a1,1 // increment current maximum queue depth
+ sw a1,PbMaximumDpcQueueDepth(t5) // set new maximum DPC queue depth
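+
+//
+// N.B. The two adjustment paths above amount to the following C sketch
+//      (field names inferred from the Pb offsets used here; the dispatch
+//      interrupt request is shown as a placeholder; illustrative only):
+//
+//          if ((Prcb->DpcQueueDepth != 0) &&
+//              (Prcb->DpcRoutineActive == FALSE) &&
+//              (Prcb->DpcInterruptRequested == FALSE)) {
+//              <request dispatch interrupt>;
+//              Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+//              if ((Prcb->DpcRequestRate < KiIdealDpcRate) &&
+//                  (Prcb->MaximumDpcQueueDepth > 1)) {
+//                  Prcb->MaximumDpcQueueDepth -= 1;
+//              }
+//
+//          } else if (--Prcb->AdjustDpcThreshold == 0) {
+//              Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+//              if (Prcb->MaximumDpcQueueDepth < KiMaximumDpcQueueDepth) {
+//                  Prcb->MaximumDpcQueueDepth += 1;
+//              }
+//          }
+//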
+
+//
+// Decrement current thread quantum and check to determine if a quantum end
+// has occurred.
+//
+
+ ALTERNATE_ENTRY(KiDecrementQuantum)
+
+ lb t6,ThQuantum(t0) // get current thread quantum
+ sub t6,t6,CLOCK_QUANTUM_DECREMENT // decrement current quantum
+ sb t6,ThQuantum(t0) // store thread quantum
+ bgtz t6,60f // if gtz, quantum remaining
+
+//
+// Set quantum end flag and initiate a dispatch interrupt on the current
+// processor.
+//
+
+        lw      t1,PbIdleThread(t5)     // get address of idle thread
+ beq t0,t1,60f // if eq, idle thread
+ sw sp,KiPcr + PcQuantumEnd(zero) // set quantum end indicator
+
+ DISABLE_INTERRUPTS(t0) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t1,cause // get exception cause register
+ or t1,t1,DISPATCH_INTERRUPT // merge dispatch interrupt request
+ mtc0 t1,cause // set exception cause register
+        nop                             // 1 cycle hazard
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t0) // enable interrupts
+
+60: j ra // return
+
+ .end KeUpdateRunTime
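+
+//
+// N.B. The quantum update performed by KiDecrementQuantum corresponds to
+//      the following C sketch (field names inferred from the Th/Pb/Pc
+//      offsets used here; the quantum end indicator is simply set to a
+//      nonzero value; illustrative only):
+//
+//          Thread->Quantum -= CLOCK_QUANTUM_DECREMENT;
+//          if ((Thread->Quantum <= 0) && (Thread != Prcb->IdleThread)) {
+//              PCR->QuantumEnd = TRUE;
+//              <request dispatch interrupt>;
+//          }
+//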
+
+
+ SBTTL("Process Profile Interrupt")
+//++
+//
+// VOID
+// KeProfileInterruptWithSource (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE ProfileSource
+// )
+//
+// VOID
+// KeProfileInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// profile timer. Its function is to update the profile information for
+// the currently active profile objects.
+//
+//    N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+//    ProfileSource (a1) - Supplies the source of the profile interrupt.
+//        KeProfileInterrupt is an alternate entry for backwards
+//        compatibility that sets the source to zero (ProfileTime).
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space 4 * 4 // argument save area
+ .space 3 * 4 //
+PfRa: .space 4 // return address
+ProfileFrameLength: // profile frame length
+
+ NESTED_ENTRY(KeProfileInterrupt, ProfileFrameLength, zero)
+
+ move a1, zero // set profile source to ProfileTime
+
+ ALTERNATE_ENTRY(KeProfileInterruptWithSource)
+
+ subu sp,sp,ProfileFrameLength // allocate stack frame
+ sw ra,PfRa(sp) // save return address
+
+ PROLOGUE_END
+
+#if !defined(NT_UP)
+
+10: ll t0,KiProfileLock // get current lock value
+ move t1,s0 // set ownership value
+ bne zero,t0,10b // if ne, spin lock owned
+ sc t1,KiProfileLock // set spin lock owned
+ beq zero,t1,10b // if eq, store conditional failed
+
+#endif
+
+ lw a2,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw a2,ThApcState + AsProcess(a2) // get address of current process
+ addu a2,a2,PrProfileListHead // compute profile listhead address
+ jal KiProcessProfileList // process the process profile list
+ la a2,KiProfileListHead // get profile listhead address
+ jal KiProcessProfileList // process the system profile list
+
+#if !defined(NT_UP)
+
+ sw zero,KiProfileLock // set spin lock not owned
+
+#endif
+
+ lw ra,PfRa(sp) // restore return address
+ addu sp,sp,ProfileFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KeProfileInterrupt
+
+ SBTTL("Process Profile List")
+//++
+//
+// VOID
+// KiProcessProfileList (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE Source,
+// IN PLIST_ENTRY ListHead
+// )
+//
+// Routine Description:
+//
+// This routine is called to process a profile list.
+//
+// Arguments:
+//
+// TrapFrame (a0) - Supplies a pointer to a trap frame.
+//
+// Source (a1) - Supplies profile source to match
+//
+// ListHead (a2) - Supplies a pointer to a profile list.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiProcessProfileList)
+
+ lw t8,LsFlink(a2) // get address of next entry
+ li a3,0xfffffffc // set bucket mask value
+ beq a2,t8,30f // if eq, end of list
+ lw t0,TrFir(a0) // get interrupt PC address
+ lw t6,KiPcr + PcSetMember(zero) // get current processor member
+
+//
+// Scan profile list and increment profile buckets as appropriate.
+//
+
+10: lw t1,PfRangeBase - PfProfileListEntry(t8) // get base of range
+ lw t2,PfRangeLimit - PfProfileListEntry(t8) // get limit of range
+ lhu t3,PfSource - PfProfileListEntry(t8) // get source
+ lhu t4,PfAffinity - PfProfileListEntry(t8) // get affinity
+ bne t3,a1,20f // if ne, source mismatch
+ sltu v0,t0,t1 // check against range base
+ sltu v1,t0,t2 // check against range limit
+ and t5,t6,t4 // check against processor
+        bne     zero,v0,20f             // if ne, less than range base
+        beq     zero,v1,20f             // if eq, not less than range limit
+ beq zero,t5,20f // if eq, affinity mismatch
+ subu t1,t0,t1 // compute offset in range
+ lw t2,PfBucketShift - PfProfileListEntry(t8) // get shift count
+ lw v0,PfBuffer - PfProfileListEntry(t8) // get profile buffer address
+ srl v1,t1,t2 // compute bucket offset
+ and v1,v1,a3 // clear low order offset bits
+ addu v1,v1,v0 // compute bucket address
+ lw v0,0(v1) // increment profile bucket
+ addu v0,v0,1 //
+ sw v0,0(v1) //
+20: lw t8,LsFlink(t8) // get address of next entry
+ bne a2,t8,10b // if ne, more entries in profile list
+30: j ra // return
+
+ .end KiProcessProfileList
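+
+//
+// N.B. For each profile object whose source, address range, and affinity
+//      match, the bucket increment above corresponds to the following C
+//      sketch (field names inferred from the Pf offsets used here;
+//      illustrative only):
+//
+//          Offset = (Pc - Profile->RangeBase) >> Profile->BucketShift;
+//          Bucket = (PULONG)((PCHAR)Profile->Buffer + (Offset & ~0x3));
+//          *Bucket += 1;
+//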
diff --git a/private/ntos/ke/mips/xxflshtb.c b/private/ntos/ke/mips/xxflshtb.c
new file mode 100644
index 000000000..77b9a44be
--- /dev/null
+++ b/private/ntos/ke/mips/xxflshtb.c
@@ -0,0 +1,593 @@
+/*++
+
+Copyright (c) 1992-1994 Microsoft Corporation
+
+Module Name:
+
+ xxflshtb.c
+
+Abstract:
+
+ This module implements machine dependent functions to flush the
+ translation buffer.
+
+Author:
+
+ David N. Cutler (davec) 13-May-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiFlushEntireTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushMultipleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Number,
+ IN PVOID Virtual,
+ IN PVOID Pid
+ );
+
+VOID
+KiFlushSingleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Virtual,
+ IN PVOID Pid,
+ IN PVOID Parameter3
+ );
+
+VOID
+KeFlushEntireTb (
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the entire translation buffer (TB) on all
+ processors that are currently running threads which are children
+ of the current process or flushes the entire translation buffer
+ on all processors in the host configuration.
+
+Arguments:
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Compute the target set of processors, disable context switching,
+ // and send the flush entire parameters to the target processors,
+ // if any, for execution.
+ //
+
+#if defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#else
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ KiLockContextSwap(&OldIrql);
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushEntireTbTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush TB on current processor.
+ //
+
+ KeFlushCurrentTb();
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+#if defined(NT_UP)
+
+ KeLowerIrql(OldIrql);
+
+#else
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+
+ return;
+}
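+
+//
+// A minimal usage sketch (hypothetical caller, not part of this module):
+// after a change that affects translations on every processor, the entire
+// TB can be flushed system wide with the caller at or below DISPATCH_LEVEL:
+//
+//      KeFlushEntireTb(TRUE, TRUE);
+//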
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushEntireTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing the entire TB.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the entire TB on the current processor.
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+ KeFlushCurrentTb();
+ return;
+}
+
+#endif
+
+
+VOID
+KeFlushMultipleTb (
+ IN ULONG Number,
+ IN PVOID *Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE *PtePointer OPTIONAL,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes multiple entries from the translation buffer
+ on all processors that are currently running threads which are
+    children of the current process or flushes multiple entries from
+ the translation buffer on all processors in the host configuration.
+
+Arguments:
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies an optional pointer to an array of pointers to
+ page table entries that receive the specified page table entry
+ value.
+
+    PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+    None.
+
+--*/
+
+{
+
+ ULONG Index;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+ ASSERT(Number <= FLUSH_MULTIPLE_MAXIMUM);
+
+ //
+ // Compute the target set of processors.
+ //
+
+#if defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#else
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ KiLockContextSwap(&OldIrql);
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= PCR->NotMember;
+
+#endif
+
+ //
+ // If a page table entry address array is specified, then set the
+    // specified page table entries to the specified value.
+ //
+
+ if (ARGUMENT_PRESENT(PtePointer)) {
+ for (Index = 0; Index < Number; Index += 1) {
+ *PtePointer[Index] = PteValue;
+ }
+ }
+
+ //
+ // If any target processors are specified, then send a flush multiple
+    // packet to the target set of processors.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushMultipleTbTarget,
+ (PVOID)Number,
+ (PVOID)Virtual,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush the specified entries from the TB on the current processor.
+ //
+
+ KiFlushMultipleTb(Invalid, &Virtual[0], Number);
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+#if defined(NT_UP)
+
+ KeLowerIrql(OldIrql);
+
+#else
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+
+ return;
+}
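+
+//
+// A minimal usage sketch (hypothetical names, not part of this module):
+// with the caller already at DISPATCH_LEVEL as the ASSERT above requires,
+// flush the TB entries for a small set of pages whose PTEs are being set
+// to a new value, limited to the processors of the current process:
+//
+//      PVOID VirtualAddresses[2];
+//      PHARDWARE_PTE PtePointers[2];
+//
+//      ... initialize VirtualAddresses[] and PtePointers[] ...
+//
+//      KeFlushMultipleTb(2,
+//                        &VirtualAddresses[0],
+//                        TRUE,
+//                        FALSE,
+//                        &PtePointers[0],
+//                        NewPteValue);
+//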
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushMultipleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Number,
+ IN PVOID Virtual,
+ IN PVOID Pid
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing multiple TB entries.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Pid - Supplies the PID of the TB entries to flush.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+ PVOID Array[FLUSH_MULTIPLE_MAXIMUM];
+
+ ASSERT((ULONG)Number <= FLUSH_MULTIPLE_MAXIMUM);
+
+ //
+ // Capture the virtual addresses that are to be flushed from the TB
+ // on the current processor and clear the packet address.
+ //
+
+ for (Index = 0; Index < (ULONG)Number; Index += 1) {
+ Array[Index] = ((PVOID *)(Virtual))[Index];
+ }
+
+ KiIpiSignalPacketDone(SignalDone);
+
+ //
+ // Flush the specified virtual addresses from the TB on the current
+ // processor.
+ //
+
+ KiFlushMultipleTb(TRUE, &Array[0], (ULONG)Number);
+ return;
+}
+
+#endif
+
+
+HARDWARE_PTE
+KeFlushSingleTb (
+ IN PVOID Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE PtePointer,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes a single entry from the translation buffer
+ on all processors that are currently running threads which are
+ children of the current process or flushes a single entry from
+ the translation buffer on all processors in the host configuration.
+
+Arguments:
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies a pointer to the page table entry which
+ receives the specified value.
+
+    PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+ The previous contents of the specified page table entry is returned
+ as the function value.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ HARDWARE_PTE OldPte;
+ PKPROCESS Process;
+ KAFFINITY TargetProcessors;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Compute the target set of processors.
+ //
+
+#if defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#else
+
+ if (AllProcessors != FALSE) {
+ OldIrql = KeRaiseIrqlToSynchLevel();
+ TargetProcessors = KeActiveProcessors;
+
+ } else {
+ Thread = KeGetCurrentThread();
+ Process = Thread->ApcState.Process;
+ KiLockContextSwap(&OldIrql);
+ TargetProcessors = Process->ActiveProcessors;
+ }
+
+ TargetProcessors &= PCR->NotMember;
+
+#endif
+
+ //
+ // Capture the previous contents of the page table entry and set the
+ // page table entry to the new value.
+ //
+
+ OldPte = *PtePointer;
+ *PtePointer = PteValue;
+
+ //
+ // If any target processors are specified, then send a flush single
+ // packet to the target set of processors.
+    //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushSingleTbTarget,
+ (PVOID)Virtual,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush the specified entry from the TB on the current processor.
+ //
+
+ KiFlushSingleTb(Invalid, Virtual);
+
+ //
+ // Wait until all target processors have finished.
+ //
+
+#if defined(NT_UP)
+
+ KeLowerIrql(OldIrql);
+
+#else
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ if (AllProcessors != FALSE) {
+ KeLowerIrql(OldIrql);
+
+ } else {
+ KiUnlockContextSwap(OldIrql);
+ }
+
+#endif
+
+ return OldPte;
+}
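+
+//
+// A minimal usage sketch (hypothetical names, not part of this module):
+// replace a single PTE and invalidate its TB entry on the processors
+// running threads of the current process, capturing the old contents:
+//
+//      OldPte = KeFlushSingleTb(VirtualAddress,
+//                               TRUE,
+//                               FALSE,
+//                               PointerPte,
+//                               NewPteValue);
+//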
+
+#if !defined(NT_UP)
+
+
+VOID
+KiFlushSingleTbTarget (
+ IN PULONG SignalDone,
+ IN PVOID Virtual,
+ IN PVOID Pid,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing a single TB entry.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Pid - Not used.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+    // Flush a single entry from the TB on the current processor.
+ //
+
+ KiIpiSignalPacketDone(SignalDone);
+ KiFlushSingleTb(TRUE, Virtual);
+ return;
+}
+
+#endif
diff --git a/private/ntos/ke/mips/xxintsup.s b/private/ntos/ke/mips/xxintsup.s
new file mode 100644
index 000000000..0370ec766
--- /dev/null
+++ b/private/ntos/ke/mips/xxintsup.s
@@ -0,0 +1,713 @@
+// TITLE("Interrupt Object Support Routines")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxintsup.s
+//
+// Abstract:
+//
+// This module implements the code necessary to support interrupt objects.
+// It contains the interrupt dispatch code and the code template that gets
+// copied into an interrupt object.
+//
+// Author:
+//
+// David N. Cutler (davec) 2-Apr-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Synchronize Execution")
+//++
+//
+// BOOLEAN
+// KeSynchronizeExecution (
+// IN PKINTERRUPT Interrupt,
+// IN PKSYNCHRONIZE_ROUTINE SynchronizeRoutine,
+// IN PVOID SynchronizeContext
+// )
+//
+// Routine Description:
+//
+// This function synchronizes the execution of the specified routine with the
+// execution of the service routine associated with the specified interrupt
+// object.
+//
+// Arguments:
+//
+// Interrupt (a0) - Supplies a pointer to a control object of type interrupt.
+//
+// SynchronizeRoutine (a1) - Supplies a pointer to a function whose execution
+// is to be synchronized with the execution of the service routine associated
+// with the specified interrupt object.
+//
+// SynchronizeContext (a2) - Supplies a pointer to an arbitrary data structure
+// which is to be passed to the function specified by the SynchronizeRoutine
+// parameter.
+//
+// Return Value:
+//
+// The value returned by the SynchronizeRoutine function is returned as the
+// function value.
+//
+//--
+
+ .struct 0
+SyArg: .space 4 * 4 // argument register save area
+SyS0: .space 4 // saved integer register s0
+SyIrql: .space 4 // saved IRQL value
+ .space 4 // fill for alignment
+SyRa: .space 4 // saved return address
+SyFrameLength: // length of stack frame
+SyA0: .space 4 // saved argument registers a0 - a2
+SyA1: .space 4 //
+SyA2: .space 4 //
+
+ NESTED_ENTRY(KeSynchronizeExecution, SyFrameLength, zero)
+
+ subu sp,sp,SyFrameLength // allocate stack frame
+ sw ra,SyRa(sp) // save return address
+ sw s0,SyS0(sp) // save integer register s0
+
+ PROLOGUE_END
+
+ sw a1,SyA1(sp) // save synchronization routine address
+ sw a2,SyA2(sp) // save synchronization routine context
+
+//
+// Raise IRQL to the synchronization level and acquire the associated
+// spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s0,InActualLock(a0) // get address of spin lock
+
+#endif
+
+ lbu a0,InSynchronizeIrql(a0) // get synchronization IRQL
+ addu a1,sp,SyIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise IRQL to synchronization IRQL
+
+#if defined(R4000) && !defined(NT_UP)
+
+10: ll t0,0(s0) // get current lock value
+ move t1,s0 // set lock ownership value
+ bne zero,t0,10b // if ne, spin lock owned
+ sc t1,0(s0) // set spin lock owned
+ beq zero,t1,10b // if eq, store conditional failed
+
+#endif
+
+//
+// Call specified routine passing the specified context parameter.
+//
+
+ lw t0,SyA1(sp) // get synchronize routine address
+ lw a0,SyA2(sp) // get synchronize routine context
+ jal t0 // call specified routine
+
+//
+// Release spin lock, lower IRQL to its previous level, and return the value
+// returned by the specified routine.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s0) // set spin lock not owned
+
+#endif
+
+ lbu a0,SyIrql(sp) // get saved IRQL
+ move s0,v0 // save return value
+ jal KeLowerIrql // lower IRQL to previous level
+ move v0,s0 // set return value
+ lw s0,SyS0(sp) // restore integer register s0
+ lw ra,SyRa(sp) // restore return address
+ addu sp,sp,SyFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KeSynchronizeExecution
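+
+//
+// N.B. A minimal usage sketch in C (hypothetical driver names, not part
+//      of this module). A driver routine that must not run concurrently
+//      with the interrupt service routine of the interrupt object is
+//      executed as follows:
+//
+//          BOOLEAN
+//          SampleSynchronizeRoutine (
+//              IN PVOID SynchronizeContext
+//              )
+//          {
+//              ... access state shared with the service routine ...
+//              return TRUE;
+//          }
+//
+//          Handled = KeSynchronizeExecution(Interrupt,
+//                                           SampleSynchronizeRoutine,
+//                                           Context);
+//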
+
+ SBTTL("Chained Dispatch")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to more than one interrupt object. Its
+// function is to walk the list of connected interrupt objects and call
+// each interrupt service routine. If the mode of the interrupt is latched,
+// then a complete traversal of the chain must be performed. If any of the
+// routines require saving the volatile floating point machine state, then
+// it is only saved once.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ChArg: .space 4 * 4 // argument register save area
+ChS0: .space 4 // saved integer registers s0 - s6
+ChS1: .space 4 //
+ChS2: .space 4 //
+ChS3: .space 4 //
+ChS4: .space 4 //
+ChS5: .space 4 //
+ChS6: .space 4 //
+ChRa: .space 4 // saved return address
+ChFrameLength: // length of stack frame
+ChIrql: .space 4 // saved IRQL value
+
+ NESTED_ENTRY(KiChainedDispatch, ChFrameLength, zero)
+
+ subu sp,sp,ChFrameLength // allocate stack frame
+ sw ra,ChRa(sp) // save return address
+ sw s0,ChS0(sp) // save integer registers s0 - s6
+ sw s1,ChS1(sp) //
+ sw s2,ChS2(sp) //
+ sw s3,ChS3(sp) //
+ sw s4,ChS4(sp) //
+ sw s5,ChS5(sp) //
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw s6,ChS6(sp) //
+
+#endif
+
+ PROLOGUE_END
+
+//
+// Initialize loop variables.
+//
+
+ addu s0,a0,InInterruptListEntry // set address of listhead
+ move s1,s0 // set address of first entry
+ move s2,zero // clear floating state saved flag
+ lbu s3,InMode(a0) // get mode of interrupt
+ lbu s4,InIrql(a0) // get interrupt source IRQL
+
+//
+// Walk the list of connected interrupt objects and call the respective
+// interrupt service routines.
+//
+
+10: subu a0,s1,InInterruptListEntry // compute interrupt object address
+ lbu t0,InFloatingSave(a0) // get floating save flag
+ bne zero,s2,20f // if ne, floating state already saved
+ beq zero,t0,20f // if eq, don't save floating state
+
+//
+// Save volatile floating registers f0 - f19 in trap frame.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+ li s2,1 // set floating state saved flag
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+20: lbu s5,InSynchronizeIrql(a0) // get synchronization IRQL
+ beq s4,s5,25f // if eq, IRQL levels are the same
+ move a0,s5 // set synchronization IRQL
+ addu a1,sp,ChIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise to synchronization IRQL
+ subu a0,s1,InInterruptListEntry // recompute interrupt object address
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+25: //
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s6,InActualLock(a0) // get address of spin lock
+30: ll t1,0(s6) // get current lock value
+ move t2,s6 // set lock ownership value
+ bne zero,t1,30b // if ne, spin lock owned
+ sc t2,0(s6) // set spin lock owned
+ beq zero,t2,30b // if eq, store conditional failed
+
+#endif
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s6) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the interrupt source level if synchronization level is not
+// the same as the interrupt source level.
+//
+
+ beq s4,s5,35f // if eq, IRQL levels are the same
+ move a0,s4 // set interrupt source IRQL
+ jal KeLowerIrql // lower to interrupt source IRQL
+
+//
+// Get next list entry and check for end of loop.
+//
+
+35: lw s1,LsFlink(s1) // get next interrupt object address
+ beq zero,v0,40f // if eq, interrupt not handled
+ beq zero,s3,50f // if eq, level sensitive interrupt
+40: bne s0,s1,10b // if ne, not end of list
+
+//
+// Either the interrupt is level sensitive and has been handled or the end of
+// the interrupt object chain has been reached. Check to determine if floating
+// machine state needs to be restored.
+//
+
+50: beq zero,s2,60f // if eq, floating state not saved
+
+//
+// Restore volatile floating registers f0 - f19 from trap frame.
+//
+
+ RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+//
+// Restore integer registers s0 - s6, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+60: lw s0,ChS0(sp) // restore integer registers s0 - s6
+ lw s1,ChS1(sp) //
+ lw s2,ChS2(sp) //
+ lw s3,ChS3(sp) //
+ lw s4,ChS4(sp) //
+ lw s5,ChS5(sp) //
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s6,ChS6(sp) //
+
+#endif
+
+ lw ra,ChRa(sp) // restore return address
+ addu sp,sp,ChFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiChainedDispatch
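+
+//
+// N.B. The dispatch loop above is summarized by the following C sketch
+//      (field names inferred from the In offsets used here; the steps in
+//      angle brackets stand for the code sequences above):
+//
+//          Saved = FALSE;
+//          Entry = &Interrupt->InterruptListEntry;
+//          do {
+//              Current = CONTAINING_RECORD(Entry, KINTERRUPT,
+//                                          InterruptListEntry);
+//              if ((Current->FloatingSave != FALSE) && (Saved == FALSE)) {
+//                  <save volatile floating state>;
+//                  Saved = TRUE;
+//              }
+//              <raise IRQL to Current->SynchronizeIrql if it differs from
+//                  the interrupt source IRQL and acquire the lock>;
+//              Handled = Current->ServiceRoutine(Current,
+//                                                Current->ServiceContext);
+//              <release the lock and lower IRQL if it was raised>;
+//              Entry = Entry->Flink;
+//          } while (((Handled == FALSE) || (Interrupt->Mode == Latched)) &&
+//                   (Entry != &Interrupt->InterruptListEntry));
+//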
+
+ SBTTL("Floating Dispatch")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to save the volatile floating machine state and then call the specified
+// interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+FlArg: .space 4 * 4 // argument register save area
+FlS0: .space 4 // saved integer registers s0 - s1
+FlS1: .space 4 //
+FlIrql: .space 4 // saved IRQL value
+FlRa: .space 4 // saved return address
+FlFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiFloatingDispatch, FlFrameLength, zero)
+
+ subu sp,sp,FlFrameLength // allocate stack frame
+ sw ra,FlRa(sp) // save return address
+ sw s0,FlS0(sp) // save integer registers s0 - s1
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw s1,FlS1(sp) //
+
+#endif
+
+ PROLOGUE_END
+
+//
+// Save volatile floating registers f0 - f19 in trap frame.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE // save volatile floating state
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+ move s0,a0 // save address of interrupt object
+ lbu a0,InSynchronizeIrql(s0) // get synchronization IRQL
+ lbu t0,InIrql(s0) // get interrupt source IRQL
+ beq a0,t0,10f // if eq, IRQL levels are the same
+ addu a1,sp,FlIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise to synchronization IRQL
+10: move a0,s0 // restore address of interrupt object
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s1,InActualLock(a0) // get address of spin lock
+20: ll t1,0(s1) // get current lock value
+ move t2,s1 // set lock ownership value
+ bne zero,t1,20b // if ne, spin lock owned
+ sc t2,0(s1) // set spin lock owned
+ beq zero,t2,20b // if eq, store conditional failed
+
+#endif
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s1) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the interrupt source level if synchronization level is not
+// the same as the interrupt source level.
+//
+
+ lbu a0,InIrql(s0) // get interrupt source IRQL
+ lbu t0,InSynchronizeIrql(s0) // get synchronization IRQL
+ beq a0,t0,30f // if eq, IRQL levels are the same
+ jal KeLowerIrql // lower to interrupt source IRQL
+
+//
+// Restore volatile floating registers f0 - f19 from trap frame.
+//
+
+30: RESTORE_VOLATILE_FLOAT_STATE // restore volatile floating state
+
+//
+// Restore integer registers s0 - s1, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ lw s0,FlS0(sp) // restore integer registers s0 - s1
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s1,FlS1(sp) //
+
+#endif
+
+ lw ra,FlRa(sp) // restore return address
+ addu sp,sp,FlFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiFloatingDispatch
+
+ SBTTL("Interrupt Dispatch - Raise IRQL")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// N.B. This routine raises the interrupt level to the synchronization
+// level specified in the interrupt object.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+RdArg: .space 4 * 4 // argument register save area
+RdS0: .space 4 // saved integer register s0
+ .space 4 // fill
+RdIrql: .space 4 // saved IRQL value
+RdRa: .space 4 // saved return address
+RdFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiInterruptDispatchRaise, RdFrameLength, zero)
+
+ subu sp,sp,RdFrameLength // allocate stack frame
+ sw ra,RdRa(sp) // save return address
+ sw s0,RdS0(sp) // save integer register s0
+
+ PROLOGUE_END
+
+//
+// Raise IRQL to synchronization level.
+//
+
+ move s0,a0 // save address of interrupt object
+ lbu a0,InSynchronizeIrql(s0) // get synchronization IRQL
+ addu a1,sp,RdIrql // compute address to save IRQL
+ jal KeRaiseIrql // raise to synchronization IRQL
+ move a0,s0 // restore address of interrupt object
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ lw s0,InActualLock(a0) // get address of spin lock
+10: ll t1,0(s0) // get current lock value
+ move t2,s0 // set lock ownership value
+ bne zero,t1,10b // if ne, spin lock owned
+ sc t2,0(s0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failed
+
+#endif
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+#if defined(R4000) && !defined(NT_UP)
+
+ sw zero,0(s0) // set spin lock not owned
+
+#endif
+
+//
+// Lower IRQL to the previous level.
+//
+
+ lbu a0,RdIrql(sp) // get previous IRQL
+ jal KeLowerIrql // lower to interrupt source IRQL
+
+//
+// Restore integer register s0, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+        lw      s0,RdS0(sp)             // restore integer register s0
+ lw ra,RdRa(sp) // restore return address
+ addu sp,sp,RdFrameLength // deallocate stack frame
+ j ra // return
+
+ .end KiInterruptDispatchRaise
+
+ SBTTL("Interrupt Dispatch - Same IRQL")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+#if defined(NT_UP)
+
+ LEAF_ENTRY(KiInterruptDispatchSame)
+
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ j t0 // jump to service routine
+
+#else
+
+ .struct 0
+SdArg: .space 4 * 4 // argument register save area
+SdS0: .space 4 // saved integer register s0
+ .space 4 * 2 // fill
+SdRa: .space 4 // saved return address
+SdFrameLength: // length of stack frame
+
+ NESTED_ENTRY(KiInterruptDispatchSame, SdFrameLength, zero)
+
+ subu sp,sp,SdFrameLength // allocate stack frame
+ sw ra,SdRa(sp) // save return address
+ sw s0,SdS0(sp) // save integer register s0
+
+ PROLOGUE_END
+
+//
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+ lw s0,InActualLock(a0) // get address of spin lock
+10: ll t1,0(s0) // get current lock value
+ move t2,s0 // set lock ownership value
+ bne zero,t1,10b // if ne, spin lock owned
+ sc t2,0(s0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failed
+ lw t0,InServiceRoutine(a0) // get address of service routine
+ lw a1,InServiceContext(a0) // get service context
+ jal t0 // call service routine
+
+//
+// Release the service routine spin lock.
+//
+
+ sw zero,0(s0) // set spin lock not owned
+
+//
+// Restore integer register s0, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+        lw      s0,SdS0(sp)             // restore integer register s0
+ lw ra,SdRa(sp) // restore return address
+ addu sp,sp,SdFrameLength // deallocate stack frame
+ j ra // return
+
+#endif
+
+ .end KiInterruptDispatchSame
+
+ SBTTL("Interrupt Template")
+//++
+//
+// Routine Description:
+//
+// This routine is a template that is copied into each interrupt object. Its
+// function is to determine the address of the respective interrupt object
+// and then transfer control to the appropriate interrupt dispatcher.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt template within an interrupt
+// object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiInterruptTemplate)
+
+ .set noreorder
+ .set noat
+ lw t0,InDispatchAddress - InDispatchCode(a0) // get dispatcher address
+ subu a0,a0,InDispatchCode // compute address of interrupt object
+ j t0 // transfer control to dispatch routine
+ nop //
+ .set at
+ .set reorder
+
+ .end KiInterruptTemplate
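+
+//
+// N.B. Because this template is copied into the interrupt object, the
+//      object address can be recovered from the template address. A C
+//      sketch (illustrative only; the dispatch routine is entered with
+//      the interrupt object address in a0 rather than called):
+//
+//          Interrupt = CONTAINING_RECORD(TemplateAddress,
+//                                        KINTERRUPT,
+//                                        DispatchCode);
+//          (Interrupt->DispatchAddress)(Interrupt);
+//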
+
+ SBTTL("Unexpected Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is not connected to an interrupt object. Its function
+// is to report the error and dismiss the interrupt.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// a0 - Supplies a pointer to the interrupt object.
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiUnexpectedInterrupt)
+
+ j ra // ****** temp ******
+
+ .end KiUnexpectedInterrupt
diff --git a/private/ntos/ke/mips/xxirql.s b/private/ntos/ke/mips/xxirql.s
new file mode 100644
index 000000000..9ededf117
--- /dev/null
+++ b/private/ntos/ke/mips/xxirql.s
@@ -0,0 +1,218 @@
+// TITLE("Manipulate Interrupt Request Level")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+//    xxirql.s
+//
+// Abstract:
+//
+// This module implements the code necessary to lower and raise the current
+// Interrupt Request Level (IRQL).
+//
+// Author:
+//
+// David N. Cutler (davec) 12-Aug-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KiSynchIrql 4
+
+ SBTTL("Lower Interrupt Request Level")
+//++
+//
+// VOID
+// KeLowerIrql (
+// KIRQL NewIrql
+// )
+//
+// Routine Description:
+//
+// This function lowers the current IRQL to the specified value.
+//
+// Arguments:
+//
+// NewIrql (a0) - Supplies the new IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeLowerIrql)
+
+ and a0,a0,0xff // isolate new IRQL
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeLowerIrql
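+
+//
+// N.B. KeLowerIrql, KeRaiseIrql, and KeSwapIrql all follow the pattern
+//      shown by the following C sketch (PCR field names inferred from the
+//      Pc offsets used here; illustrative only):
+//
+//          Enables = PCR->IrqlTable[NewIrql];
+//          <disable interrupts, capturing the PSR>;
+//          Psr = (Psr & ~(0xff << PSR_INTMASK)) | (Enables << PSR_INTMASK);
+//          PCR->CurrentIrql = NewIrql;
+//          <write the PSR back, enabling interrupts per the new mask>;
+//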
+
+ SBTTL("Raise Interrupt Request Level")
+//++
+//
+// VOID
+// KeRaiseIrql (
+// KIRQL NewIrql,
+// PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to the specified value and returns
+// the old IRQL value.
+//
+// Arguments:
+//
+// NewIrql (a0) - Supplies the new IRQL value.
+//
+//    OldIrql (a1) - Supplies a pointer to a variable that receives the old
+// IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeRaiseIrql)
+
+ and a0,a0,0xff // isolate new IRQL
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu t2,KiPcr + PcCurrentIrql(zero) // get current IRQL
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ sb t2,0(a1) // store old IRQL
+ j ra // return
+
+ .end KeRaiseIrql
+
+ SBTTL("Raise Interrupt Request Level to DPC Level")
+//++
+//
+// KIRQL
+// KeRaiseIrqlToDpcLevel (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function swaps the current IRQL with dispatch level.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiRaiseIrqlToXxxLevel)
+
+ ALTERNATE_ENTRY(KeRaiseIrqlToDpcLevel)
+
+ li a0,DISPATCH_LEVEL // set new IRQL value
+ b KeSwapIrql // finish in common code
+
+ SBTTL("Swap Interrupt Request Level")
+//++
+//
+// KIRQL
+// KeRaiseIrqlToSynchLevel (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function swaps the current IRQL with synchronization level.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ ALTERNATE_ENTRY(KeRaiseIrqlToSynchLevel)
+
+ lbu a0,KiSynchIrql // set new IRQL level
+
+//++
+//
+// KIRQL
+// KeSwapIrql (
+// IN KIRQL NewIrql
+// )
+//
+// Routine Description:
+//
+// This function swaps the current IRQL with the specified IRQL.
+//
+// Arguments:
+//
+// NewIrql (a0) - supplies the new IRQL value.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ ALTERNATE_ENTRY(KeSwapIrql)
+
+ lbu t0,KiPcr + PcIrqlTable(a0) // get translation table entry
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu v0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a0,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KiRaiseIrqlToXxxLevel
diff --git a/private/ntos/ke/mips/xxmiscs.s b/private/ntos/ke/mips/xxmiscs.s
new file mode 100644
index 000000000..ccdf0ead7
--- /dev/null
+++ b/private/ntos/ke/mips/xxmiscs.s
@@ -0,0 +1,289 @@
+// TITLE("Miscellaneous Kernel Functions")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+//    xxmiscs.s
+//
+// Abstract:
+//
+// This module implements machine dependent miscellaneous kernel functions.
+// Functions are provided to request a software interrupt, continue thread
+// execution, flush the write buffer, and perform last chance exception
+// processing.
+//
+// Author:
+//
+// David N. Cutler (davec) 31-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Request Software Interrupt")
+//++
+//
+// VOID
+// KiRequestSoftwareInterrupt (
+// ULONG RequestIrql
+// )
+//
+// Routine Description:
+//
+// This function requests a software interrupt at the specified IRQL
+// level.
+//
+// Arguments:
+//
+// RequestIrql (a0) - Supplies the request IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRequestSoftwareInterrupt)
+
+ li t0,1 << (CAUSE_INTPEND - 1) // get partial request mask value
+
+ DISABLE_INTERRUPTS(t1) // disable interrupts
+
+ .set noreorder
+ .set noat
+ mfc0 t2,cause // get exception cause register
+ sll t0,t0,a0 // shift request mask into position
+ or t2,t2,t0 // merge interrupt request mask
+ mtc0 t2,cause // set exception cause register
+ .set at
+ .set reorder
+
+ ENABLE_INTERRUPTS(t1) // enable interrupts
+
+ j ra // return
+
+ .end KiRequestSoftwareInterrupt
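+
+//
+// N.B. The request is posted by setting the software interrupt pending
+//      bit that corresponds to the request IRQL in the cause register.
+//      A C sketch (illustrative only):
+//
+//          Cause |= (1 << (CAUSE_INTPEND - 1)) << RequestIrql;
+//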
+
+ SBTTL("Continue Execution System Service")
+//++
+//
+// NTSTATUS
+// NtContinue (
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN TestAlert
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to continue execution after
+//    an exception has occurred. Its function is to transfer information from
+// the specified context record into the trap frame that was built when the
+// system service was executed, and then exit the system as if an exception
+// had occurred.
+//
+// Arguments:
+//
+// ContextRecord (a0) - Supplies a pointer to a context record.
+//
+// TestAlert (a1) - Supplies a boolean value that specifies whether alert
+// should be tested for the previous processor mode.
+//
+// N.B. Register s8 is assumed to contain the address of a trap frame.
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record is misaligned or is not accessible, then the appropriate
+// status code is returned.
+//
+//--
+
+ NESTED_ENTRY(NtContinue, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the nonvolatile machine state so that it can be restored by exception
+// exit if it is not overwritten by the specified context record.
+//
+
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ li t0,TRUE // set saved s-registers flag
+ sb t0,TrSavedFlag(s8) //
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Transfer information from the context frame to the exception and trap
+// frames.
+//
+
+ sb a1,ExceptionFrameLength + 4(sp) // save test alert argument
+ move a1,sp // set address of exception frame
+ move a2,s8 // set address of trap frame
+ jal KiContinue // transfer context to kernel frames
+
+//
+// If the kernel continuation routine returns success, then exit via the
+// exception exit code. Otherwise return to the system service dispatcher.
+//
+
+ bne zero,v0,20f // if ne, transfer failed
+
+//
+// Check to determine if alert should be tested for the previous processor
+// mode and restore the previous mode in the thread object.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lbu t1,ExceptionFrameLength + 4(sp) // get test alert argument
+ lw t2,TrTrapFrame(s8) // get old trap frame address
+ lbu t3,TrPreviousMode(s8) // get old previous mode
+ lbu a0,ThPreviousMode(t0) // get current previous mode
+ sw t2,ThTrapFrame(t0) // restore old trap frame address
+ sb t3,ThPreviousMode(t0) // restore old previous mode
+ beq zero,t1,10f // if eq, don't test for alert
+ jal KeTestAlertThread // test alert for current thread
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+10: j KiExceptionExit // finish in exception exit
+
+//
+// Context record is misaligned or not accessible.
+//
+
+20: lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate stack frame
+ j ra // return
+
+ .end NtContinue
+
+ SBTTL("Raise Exception System Service")
+//++
+//
+// NTSTATUS
+// NtRaiseException (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN FirstChance
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to raise an exception.
+// The exception can be raised as a first or second chance exception.
+//
+// Arguments:
+//
+// ExceptionRecord (a0) - Supplies a pointer to an exception record.
+//
+// ContextRecord (a1) - Supplies a pointer to a context record.
+//
+// FirstChance (a2) - Supplies a boolean value that determines whether
+// this is the first (TRUE) or second (FALSE) chance for dispatching
+// the exception.
+//
+// N.B. Register s8 is assumed to contain the address of a trap frame.
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record or exception record is misaligned or is not accessible,
+// then the appropriate status code is returned.
+//
+//--
+
+ NESTED_ENTRY(NtRaiseException, ExceptionFrameLength, zero)
+
+ subu sp,sp,ExceptionFrameLength // allocate exception frame
+ sw ra,ExIntRa(sp) // save return address
+
+ PROLOGUE_END
+
+//
+// Save the nonvolatile machine state so that it can be restored by exception
+// exit if it is not overwritten by the specified context record.
+//
+
+ sd s0,TrXIntS0(s8) // save integer registers s0 - s7
+ sd s1,TrXIntS1(s8) //
+ sd s2,TrXIntS2(s8) //
+ sd s3,TrXIntS3(s8) //
+ sd s4,TrXIntS4(s8) //
+ sd s5,TrXIntS5(s8) //
+ sd s6,TrXIntS6(s8) //
+ sd s7,TrXIntS7(s8) //
+ li t0,TRUE // set saved s-registers flag
+ sb t0,TrSavedFlag(s8) //
+
+ sdc1 f20,ExFltF20(sp) // save floating registers f20 - f31
+ sdc1 f22,ExFltF22(sp) //
+ sdc1 f24,ExFltF24(sp) //
+ sdc1 f26,ExFltF26(sp) //
+ sdc1 f28,ExFltF28(sp) //
+ sdc1 f30,ExFltF30(sp) //
+
+//
+// Call the raise exception kernel routine which will marshall the arguments
+// and then call the exception dispatcher.
+//
+
+ sw a2,ExArgs + 16(sp) // set first chance argument
+ move a2,sp // set address of exception frame
+ move a3,s8 // set address of trap frame
+ jal KiRaiseException // call raise exception routine
+
+//
+// If the raise exception routine returns success, then exit via the exception
+// exit code. Otherwise return to the system service dispatcher.
+//
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get current thread address
+ lw t1,TrTrapFrame(s8) // get old trap frame address
+ bne zero,v0,10f // if ne, dispatch not successful
+ sw t1,ThTrapFrame(t0) // restore old trap frame address
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+ j KiExceptionExit // finish in exception exit
+
+//
+// The context or exception record is misaligned or not accessible, or the
+// exception was not handled.
+//
+
+10: lw ra,ExIntRa(sp) // restore return address
+ addu sp,sp,ExceptionFrameLength // deallocate stack frame
+ j ra // return
+
+ .end NtRaiseException
diff --git a/private/ntos/ke/mips/xxmpipi.c b/private/ntos/ke/mips/xxmpipi.c
new file mode 100644
index 000000000..3d32c41c2
--- /dev/null
+++ b/private/ntos/ke/mips/xxmpipi.c
@@ -0,0 +1,209 @@
+/*++
+
+Copyright (c) 1993 Microsoft Corporation
+
+Module Name:
+
+ xxmpipi.c
+
+Abstract:
+
+    This module implements MIPS specific MP routines.
+
+Author:
+
+ David N. Cutler 24-Apr-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiRestoreProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the current
+ processor context structure in the processor block to the
+ specified trap and exception frames.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from the processor context structure
+ // to the specified trap and exception frames
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ KeContextToKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame,
+ CONTEXT_FULL,
+ KernelMode);
+
+ return;
+}
+
+VOID
+KiSaveProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the specified trap
+ and exception frames to the processor context structure in the current
+ processor block.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from specified trap and exception
+ // frames to the current processor context structure.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ Prcb->ProcessorState.ContextFrame.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame);
+
+ //
+ // Save the current processor control state.
+ //
+
+ KiSaveProcessorControlState(&Prcb->ProcessorState);
+ return;
+}
+
+VOID
+KiSaveProcessorControlState (
+ IN PKPROCESSOR_STATE ProcessorState
+ )
+
+/*++
+
+Routine Description:
+
+ This routine saves the processor's control state for debugger.
+
+Arguments:
+
+ ProcessorState (a0) - Supplies a pointer to the processor state.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+
+ //
+ // Read Tb entries and store in the processor state structure.
+ //
+
+ for (Index = 0; Index < KeNumberTbEntries; Index += 1) {
+ KiReadEntryTb(Index, &ProcessorState->TbEntry[Index]);
+ }
+
+ return;
+}
+
+BOOLEAN
+KiIpiServiceRoutine (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+    This function is called at IPI_LEVEL to process any outstanding
+    interprocessor requests for the current processor.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame
+
+Return Value:
+
+    A value of TRUE is returned if one or more requests were serviced.
+ Otherwise, FALSE is returned.
+
+--*/
+
+{
+
+ ULONG RequestSummary;
+
+ //
+ // Process any outstanding interprocessor requests.
+ //
+
+ RequestSummary = KiIpiProcessRequests();
+
+ //
+ // If freeze is requested, then freeze target execution.
+ //
+
+ if ((RequestSummary & IPI_FREEZE) != 0) {
+ KiFreezeTargetExecution(TrapFrame, ExceptionFrame);
+ }
+
+ //
+ // Return whether any requests were processed.
+ //
+
+ return (RequestSummary & ~IPI_FREEZE) != 0;
+}
diff --git a/private/ntos/ke/mips/xxregsv.s b/private/ntos/ke/mips/xxregsv.s
new file mode 100644
index 000000000..8b593bfd5
--- /dev/null
+++ b/private/ntos/ke/mips/xxregsv.s
@@ -0,0 +1,151 @@
+// TITLE("Register Save and Restore")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// xxregsv.s
+//
+// Abstract:
+//
+// This module implements the code necessary to save and restore processor
+// registers during exception and interrupt processing.
+//
+// Author:
+//
+// David N. Cutler (davec) 12-Aug-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Save Volatile Floating Registers")
+//++
+//
+// Routine Description:
+//
+// This routine is called to save the volatile floating registers.
+//
+// N.B. This routine uses a special argument passing mechanism and destroys
+// no registers. It is assumed that floating register f0 is saved by the
+// caller.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiSaveVolatileFloatState)
+
+ .set noreorder
+ .set noat
+
+#if defined(_EXTENDED_FLOAT)
+
+ sdc1 f1,TrDblF1(s8) // save odd floating registers
+ sdc1 f3,TrDblF3(s8) //
+ sdc1 f5,TrDblF5(s8) //
+ sdc1 f7,TrDblF7(s8) //
+ sdc1 f9,TrDblF9(s8) //
+ sdc1 f11,TrDblF11(s8) //
+ sdc1 f13,TrDblF13(s8) //
+ sdc1 f15,TrDblF15(s8) //
+ sdc1 f17,TrDblF17(s8) //
+ sdc1 f19,TrDblF19(s8) //
+ sdc1 f21,TrDblF21(s8) //
+ sdc1 f23,TrDblF23(s8) //
+ sdc1 f25,TrDblF25(s8) //
+ sdc1 f27,TrDblF27(s8) //
+ sdc1 f29,TrDblF29(s8) //
+ sdc1 f31,TrDblF31(s8) //
+
+#endif
+
+ sdc1 f2,TrFltF2(s8) // save even floating registers
+ sdc1 f4,TrFltF4(s8) //
+ sdc1 f6,TrFltF6(s8) //
+ sdc1 f8,TrFltF8(s8) //
+ sdc1 f10,TrFltF10(s8) //
+ sdc1 f12,TrFltF12(s8) //
+ sdc1 f14,TrFltF14(s8) //
+ sdc1 f16,TrFltF16(s8) //
+ j ra // return
+ sdc1 f18,TrFltF18(s8) //
+ .set at
+ .set reorder
+
+        .end    KiSaveVolatileFloatState
+
+ SBTTL("Restore Volatile Floating Registers")
+//++
+//
+// Routine Description:
+//
+// This routine is called to restore the volatile floating registers.
+//
+// N.B. This routine uses a special argument passing mechanism and destroys
+// no registers. It is assumed that floating register f0 is restored by
+// the caller.
+//
+// Arguments:
+//
+// s8 - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRestoreVolatileFloatState)
+
+ .set noreorder
+ .set noat
+
+#if defined(_EXTENDED_FLOAT)
+
+ ldc1 f1,TrDblF1(s8) // restore odd floating registers
+ ldc1 f3,TrDblF3(s8) //
+ ldc1 f5,TrDblF5(s8) //
+ ldc1 f7,TrDblF7(s8) //
+ ldc1 f9,TrDblF9(s8) //
+ ldc1 f11,TrDblF11(s8) //
+ ldc1 f13,TrDblF13(s8) //
+ ldc1 f15,TrDblF15(s8) //
+ ldc1 f17,TrDblF17(s8) //
+ ldc1 f19,TrDblF19(s8) //
+ ldc1 f21,TrDblF21(s8) //
+ ldc1 f23,TrDblF23(s8) //
+ ldc1 f25,TrDblF25(s8) //
+ ldc1 f27,TrDblF27(s8) //
+ ldc1 f29,TrDblF29(s8) //
+ ldc1 f31,TrDblF31(s8) //
+
+#endif
+
+ ldc1 f2,TrFltF2(s8) // restore even floating registers
+ ldc1 f4,TrFltF4(s8) //
+ ldc1 f6,TrFltF6(s8) //
+ ldc1 f8,TrFltF8(s8) //
+ ldc1 f10,TrFltF10(s8) //
+ ldc1 f12,TrFltF12(s8) //
+ ldc1 f14,TrFltF14(s8) //
+ ldc1 f16,TrFltF16(s8) //
+ j ra // return
+ ldc1 f18,TrFltF18(s8) //
+ .set at
+ .set reorder
+
+ .end KiRestoreVolatileFloatState
diff --git a/private/ntos/ke/mips/xxspinlk.s b/private/ntos/ke/mips/xxspinlk.s
new file mode 100644
index 000000000..fee420f28
--- /dev/null
+++ b/private/ntos/ke/mips/xxspinlk.s
@@ -0,0 +1,540 @@
+// TITLE("Spin Locks")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// spinlock.s
+//
+// Abstract:
+//
+// This module implements the routines for acquiring and releasing
+// spin locks.
+//
+// Author:
+//
+// David N. Cutler (davec) 23-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Initialize Executive Spin Lock")
+//++
+//
+// VOID
+// KeInitializeSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function initializes an executive spin lock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeInitializeSpinLock)
+
+ sw zero,0(a0) // clear spin lock value
+ j ra // return
+
+ .end KeInitializeSpinLock
+
+ SBTTL("Acquire Executive Spin Lock")
+//++
+//
+// VOID
+// KeAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and acquires
+// the specified executive spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the
+// previous IRQL value.
+//
+// N.B. The Old IRQL MUST be stored after the lock is acquired.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeAcquireSpinLock)
+
+//
+// Disable interrupts and attempt to acquire the specified spinlock.
+//
+
+ li a2,DISPATCH_LEVEL // set new IRQL level
+
+10: DISABLE_INTERRUPTS(t2) // disable interrupts
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+20: ll t1,0(a0) // get current lock value
+ move t3,t0 // set ownership value
+ bne zero,t1,30f // if ne, spin lock owned
+ sc t3,0(a0) // set spin lock owned
+ beq zero,t3,20b // if eq, store conditional failure
+
+#endif
+
+//
+// Raise IRQL to DISPATCH_LEVEL and acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ lbu t0,KiPcr + PcIrqlTable(a2) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu v0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a2,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ sb v0,0(a1) // store old IRQL
+ j ra // return
+
+#if !defined(NT_UP)
+
+30: ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ b 10b // try again
+#endif
+
+ .end KeAcquireSpinLock
+
+ SBTTL("Acquire SpinLock and Raise to Dpc")
+//++
+//
+// KIRQL
+// KeAcquireSpinLockRaiseToDpc (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to dispatcher level and acquires
+// the specified spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to the spinlock that is to be
+// acquired.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KiAcquireSpinLockRaiseIrql)
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockRaiseToDpc)
+
+ li a1,DISPATCH_LEVEL // set new IRQL level
+ b 10f // finish in common code
+
+
+ SBTTL("Acquire SpinLock and Raise to Synch")
+//++
+//
+// KIRQL
+// KeAcquireSpinLockRaiseToSynch (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to synchronization level and
+// acquires the specified spinlock.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to the spinlock that is to be
+// acquired.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockRaiseToSynch)
+
+//
+// Disable interrupts and attempt to acquire the specified spinlock.
+//
+
+ lbu a1,KiSynchIrql // set new IRQL level
+
+10: DISABLE_INTERRUPTS(t2) // disable interrupts
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+20: ll t1,0(a0) // get current lock value
+ move t3,t0 // set ownership value
+ bne zero,t1,30f // if ne, spin lock owned
+ sc t3,0(a0) // set spin lock owned
+ beq zero,t3,20b // if eq, store conditional failure
+
+#endif
+
+//
+// Raise IRQL to synchronization level and acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ lbu t0,KiPcr + PcIrqlTable(a1) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu v0,KiPcr + PcCurrentIrql(zero) // get current IRQL
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a1,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+#if !defined(NT_UP)
+
+30: ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ b 10b // try again
+
+#endif
+
+ .end KiAcquireSpinLockRaiseIrql
+
+ SBTTL("Release Executive Spin Lock")
+//++
+//
+// VOID
+// KeReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function releases an executive spin lock and lowers the IRQL
+// to its previous value.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spin lock.
+//
+// OldIrql (a1) - Supplies the previous IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ LEAF_ENTRY(KeReleaseSpinLock)
+
+//
+// Release the specified spinlock.
+//
+
+#if !defined(NT_UP)
+
+ sw zero,0(a0) // set spin lock not owned
+
+#endif
+
+//
+// Lower the IRQL to the specified level.
+//
+// N.B. The lower IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ and a1,a1,0xff // isolate old IRQL
+ lbu t0,KiPcr + PcIrqlTable(a1) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t2) // disable interrupts
+
+ and t2,t2,t1 // clear current interrupt enables
+ or t2,t2,t0 // set new interrupt enables
+ sb a1,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t2) // enable interrupts
+
+ j ra // return
+
+ .end KeReleaseSpinLock
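+
+//
+// Illustrative usage sketch (not part of the original source): a typical C
+// caller pairs KeAcquireSpinLock with KeReleaseSpinLock around the protected
+// region. The names DeviceLock and DeviceCount below are hypothetical.
+//
+//     KIRQL OldIrql;
+//
+//     KeAcquireSpinLock(&DeviceLock, &OldIrql);   // raises to DISPATCH_LEVEL
+//     DeviceCount += 1;                           // protected access
+//     KeReleaseSpinLock(&DeviceLock, OldIrql);    // restores previous IRQL
+//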
+
+ SBTTL("Try To Acquire Executive Spin Lock")
+//++
+//
+// BOOLEAN
+// KeTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and attempts
+// to acquire the specified executive spinlock. If the spinlock can be
+// acquired, then TRUE is returned. Otherwise, the IRQL is restored to
+// its previous value and FALSE is returned.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (a1) - Supplies a pointer to a variable that receives the
+// previous IRQL value.
+//
+// N.B. The Old IRQL MUST be stored after the lock is acquired.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(KeTryToAcquireSpinLock)
+
+//
+// Raise IRQL to DISPATCH_LEVEL and try to acquire the specified spinlock.
+//
+// N.B. The raise IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+ li a2,DISPATCH_LEVEL // set new IRQL level
+ lbu t0,KiPcr + PcIrqlTable(a2) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+ lbu t2,KiPcr + PcCurrentIrql(zero) // get current IRQL
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb a2,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+//
+// Try to acquire the specified spinlock.
+//
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+10: ll t1,0(a0) // get current lock value
+ move v0,t0 // set ownership value
+ bne zero,t1,20f // if ne, spin lock owned
+ sc v0,0(a0) // set spin lock owned
+ beq zero,v0,10b // if eq, store conditional failure
+
+#else
+
+ li v0,TRUE // set return value
+
+#endif
+
+//
+// The attempt to acquire the specified spin lock succeeded.
+//
+
+ sb t2,0(a1) // store old IRQL
+ j ra // return
+
+//
+// The attempt to acquire the specified spin lock failed. Lower IRQL to its
+// previous value and return FALSE.
+//
+// N.B. The lower IRQL code is duplicated here to avoid any extra overhead
+// since this is such a common operation.
+//
+
+#if !defined(NT_UP)
+
+20: lbu t0,KiPcr + PcIrqlTable(t2) // get translation table entry value
+ li t1,~(0xff << PSR_INTMASK) // get interrupt enable mask
+ sll t0,t0,PSR_INTMASK // shift table entry into position
+
+ DISABLE_INTERRUPTS(t3) // disable interrupts
+
+ and t3,t3,t1 // clear current interrupt enables
+ or t3,t3,t0 // set new interrupt enables
+ sb t2,KiPcr + PcCurrentIrql(zero) // set new IRQL
+
+ ENABLE_INTERRUPTS(t3) // enable interrupts
+
+ li v0,FALSE // set return value
+ j ra // return
+
+#endif
+
+ .end KeTryToAcquireSpinLock
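+
+//
+// Illustrative usage sketch (not part of the original source): a caller that
+// cannot afford to spin may try the lock and fall back to other work. The
+// names QueueLock, DrainQueue, and ProcessQueueLater are hypothetical.
+//
+//     KIRQL OldIrql;
+//
+//     if (KeTryToAcquireSpinLock(&QueueLock, &OldIrql)) {
+//         DrainQueue();                           // lock held at DISPATCH_LEVEL
+//         KeReleaseSpinLock(&QueueLock, OldIrql);
+//
+//     } else {
+//         ProcessQueueLater();                    // IRQL already restored
+//     }
+//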
+
+ SBTTL("Acquire Kernel Spin Lock")
+//++
+//
+// KIRQL
+// KiAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function acquires a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiAcquireSpinLock)
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockAtDpcLevel)
+
+#if !defined(NT_UP)
+
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+10: ll t1,0(a0) // get current lock value
+ move t2,t0 // set ownership value
+ bne zero,t1,10b // if ne, spin lock owned
+ sc t2,0(a0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failure
+
+#endif
+
+ j ra // return
+
+ .end KiAcquireSpinLock
+
+ SBTTL("Release Kernel Spin Lock")
+//++
+//
+// VOID
+// KiReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function releases a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to an executive spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiReleaseSpinLock)
+
+ ALTERNATE_ENTRY(KeReleaseSpinLockFromDpcLevel)
+
+#if !defined(NT_UP)
+
+
+ sw zero,0(a0) // set spin lock not owned
+
+#endif
+
+ j ra // return
+
+ .end KiReleaseSpinLock
+
+ SBTTL("Try To Acquire Kernel Spin Lock")
+//++
+//
+// BOOLEAN
+// KiTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function attempts to acquire the specified kernel spinlock. If
+// the spinlock can be acquired, then TRUE is returned. Otherwise, FALSE
+// is returned.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (a0) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY(KiTryToAcquireSpinLock)
+
+#if !defined(NT_UP)
+
+ li v0,FALSE // assume attempt to acquire will fail
+ lw t0,KiPcr + PcCurrentThread(zero) // get address of current thread
+10: ll t1,0(a0) // get current lock value
+ move t2,t0 // set ownership value
+ bne zero,t1,20f // if ne, spin lock owned
+ sc t2,0(a0) // set spin lock owned
+ beq zero,t2,10b // if eq, store conditional failure
+
+#endif
+
+ li v0,TRUE // set return value
+20: j ra // return
+
+ .end KiTryToAcquireSpinLock
diff --git a/private/ntos/ke/miscc.c b/private/ntos/ke/miscc.c
new file mode 100644
index 000000000..289d49870
--- /dev/null
+++ b/private/ntos/ke/miscc.c
@@ -0,0 +1,679 @@
+/*++
+
+Copyright (c) 1989-1992 Microsoft Corporation
+
+Module Name:
+
+ miscc.c
+
+Abstract:
+
+ This module implements machine independent miscellaneous kernel functions.
+
+Author:
+
+ David N. Cutler (davec) 13-May-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE, KeAddSystemServiceTable)
+#pragma alloc_text(PAGE, KeSetSwapContextNotifyRoutine)
+#pragma alloc_text(PAGE, KeSetTimeUpdateNotifyRoutine)
+#pragma alloc_text(PAGE, KeSetThreadSelectNotifyRoutine)
+#endif
+
+
+#undef KeEnterCriticalRegion
+VOID
+KeEnterCriticalRegion (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function disables kernel APC's.
+
+ N.B. The following code does not require any interlocks. There are
+ two cases of interest: 1) On an MP system, the thread cannot
+ be running on two processors at once, and 2) if the thread
+ is interrupted to deliver a kernel mode APC which also calls
+ this routine, the values read and stored will stack and unstack
+ properly.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ //
+ // Simply directly disable kernel APCs.
+ //
+
+ KeGetCurrentThread()->KernelApcDisable -= 1;
+ return;
+}
+
+
+#undef KeLeaveCriticalRegion
+VOID
+KeLeaveCriticalRegion (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function enables kernel APC's and requests an APC interrupt if
+ appropriate.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Increment the kernel APC disable count. If the resultant count is
+ // zero and the thread's kernel APC List is not empty, then request an
+ // APC interrupt.
+ //
+ // For multiprocessor performance, the following code utilizes the fact
+ // that queuing an APC is done by first queuing the APC, then checking
+ // the AST disable count. The following code increments the disable
+ // count first, checks to determine if it is zero, and then checks the
+ // kernel AST queue.
+ //
+ // See also KiInsertQueueApc().
+ //
+
+ KiLeaveCriticalRegion();
+ return;
+}
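+
+//
+// Illustrative usage sketch (not part of the original source): a caller
+// brackets a non-reentrant code path with the critical region calls so that
+// normal kernel APCs cannot be delivered in between. The function and
+// parameter names below are hypothetical.
+//
+
+#if 0
+
+VOID
+ExampleUpdatePerThreadState (
+    IN PLONG SharedValue
+    )
+{
+    KeEnterCriticalRegion();
+    *SharedValue += 1;                  // kernel APCs are disabled here
+    KeLeaveCriticalRegion();            // may request an APC interrupt
+    return;
+}
+
+#endif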
+
+VOID
+KeQuerySystemTime (
+ OUT PLARGE_INTEGER CurrentTime
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the current system time by determining when the
+ time is stable and then returning its value.
+
+Arguments:
+
+ CurrentTime - Supplies a pointer to a variable that will receive the
+ current system time.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KiQuerySystemTime(CurrentTime);
+ return;
+}
+
+VOID
+KeQueryTickCount (
+ OUT PLARGE_INTEGER CurrentCount
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the current tick count by determining when the
+ count is stable and then returning its value.
+
+Arguments:
+
+ CurrentCount - Supplies a pointer to a variable that will receive the
+ current tick count.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KiQueryTickCount(CurrentCount);
+ return;
+}
+
+ULONG
+KeQueryTimeIncrement (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the time increment value in 100ns units. This
+ is the value that is added to the system time at each interval clock
+ interrupt.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ The time increment value is returned as the function value.
+
+--*/
+
+{
+
+ return KeMaximumIncrement;
+}
+
+VOID
+KeSetDmaIoCoherency (
+ IN ULONG Attributes
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets (enables/disables) DMA I/O coherency with data
+ caches.
+
+Arguments:
+
+ Attributes - Supplies the set of DMA I/O coherency attributes for
+ the host system.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KiDmaIoCoherency = Attributes;
+}
+
+#if defined(i386)
+VOID
+KeSetProfileIrql (
+ IN KIRQL ProfileIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the profile IRQL.
+
+ N.B. There are only two valid values for the profile IRQL:
+ PROFILE_LEVEL and HIGH_LEVEL.
+
+Arguments:
+
+ ProfileIrql - Supplies the profile IRQL value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ASSERT((ProfileIrql == PROFILE_LEVEL) || (ProfileIrql == HIGH_LEVEL));
+ KiProfileIrql = ProfileIrql;
+}
+
+#endif
+
+#if defined(_MIPS_) || defined(_ALPHA_)
+VOID
+KeSetSynchIrql (
+ IN KIRQL SynchIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the synchronization IRQL.
+
+ N.B. Synchronization IRQL may be any value between DISPATCH_LEVEL
+ and SYNCH_LEVEL.
+
+Arguments:
+
+ SynchIrql - Supplies the synchronization IRQL value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ASSERT((SynchIrql >= DISPATCH_LEVEL) && (SynchIrql <= SYNCH_LEVEL));
+
+ KiSynchIrql = SynchIrql;
+}
+
+#endif
+
+
+VOID
+KeSetSystemTime (
+ IN PLARGE_INTEGER NewTime,
+ OUT PLARGE_INTEGER OldTime,
+ IN PLARGE_INTEGER HalTimeToSet OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the system time to the specified value and updates
+ timer queue entries to reflect the difference between the old system
+ time and the new system time.
+
+Arguments:
+
+ NewTime - Supplies a pointer to a variable that specifies the new system
+ time.
+
+ OldTime - Supplies a pointer to a variable that will receive the previous
+ system time.
+
+ HalTimeToSet - Supplies an optional time that if specified is to be used
+ to set the time in the realtime clock.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LIST_ENTRY AbsoluteListHead;
+ LIST_ENTRY ExpiredListHead;
+ ULONG Index;
+ PLIST_ENTRY ListHead;
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql1;
+ KIRQL OldIrql2;
+ LARGE_INTEGER TimeDelta;
+ TIME_FIELDS TimeFields;
+ PKTIMER Timer;
+
+ ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+
+ //
+ // If a realtime clock value is specified, then convert the time value
+ // to time fields.
+ //
+
+ if (ARGUMENT_PRESENT(HalTimeToSet)) {
+ RtlTimeToTimeFields(HalTimeToSet, &TimeFields);
+ }
+
+ //
+ // Set affinity to the processor that keeps the system time, raise IRQL
+ // to dispatcher level and lock the dispatcher database, then raise IRQL
+ // to HIGH_LEVEL to synchronize with the clock interrupt routine.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)1);
+ KiLockDispatcherDatabase(&OldIrql1);
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql2);
+
+ //
+ // Save the previous system time, set the new system time, and set
+ // the realtime clock, if a time value is specified.
+ //
+
+ KiQuerySystemTime(OldTime);
+
+#ifdef ALPHA
+
+ SharedUserData->SystemTime = *(PULONGLONG)NewTime;
+
+#elif defined(_MIPS_)
+
+ *((DOUBLE *)(&SharedUserData->SystemTime)) = *((DOUBLE *)(NewTime));
+
+#else
+
+ SharedUserData->SystemTime.High2Time = NewTime->HighPart;
+ SharedUserData->SystemTime.LowPart = NewTime->LowPart;
+ SharedUserData->SystemTime.High1Time = NewTime->HighPart;
+
+#endif
+
+ if (ARGUMENT_PRESENT(HalTimeToSet)) {
+ HalSetRealTimeClock(&TimeFields);
+ }
+
+ //
+ // Compute the difference between the previous system time and the new
+ // system time.
+ //
+
+ TimeDelta.QuadPart = NewTime->QuadPart - OldTime->QuadPart;
+
+ //
+ // Update the boot time to reflect the delta. This keeps time based
+ // on boot time constant.
+ //
+
+ KeBootTime.QuadPart = KeBootTime.QuadPart + TimeDelta.QuadPart;
+
+ //
+ // Lower IRQL to dispatch level and remove all absolute timers from the
+ // timer queue so their due time can be recomputed.
+ //
+
+ KeLowerIrql(OldIrql2);
+ InitializeListHead(&AbsoluteListHead);
+ for (Index = 0; Index < TIMER_TABLE_SIZE; Index += 1) {
+ ListHead = &KiTimerTableListHead[Index];
+ NextEntry = ListHead->Flink;
+ while (NextEntry != ListHead) {
+ Timer = CONTAINING_RECORD(NextEntry, KTIMER, TimerListEntry);
+ NextEntry = NextEntry->Flink;
+ if (Timer->Header.Absolute != FALSE) {
+ RemoveEntryList(&Timer->TimerListEntry);
+ InsertTailList(&AbsoluteListHead, &Timer->TimerListEntry);
+ }
+ }
+ }
+
+ //
+ // Recompute the due time and reinsert all absolute timers in the timer
+ // tree. If a timer has already expired, then insert the timer in the
+ // expired timer list.
+ //
+
+ InitializeListHead(&ExpiredListHead);
+ while (AbsoluteListHead.Flink != &AbsoluteListHead) {
+ Timer = CONTAINING_RECORD(AbsoluteListHead.Flink, KTIMER, TimerListEntry);
+ KiRemoveTreeTimer(Timer);
+ Timer->DueTime.QuadPart -= TimeDelta.QuadPart;
+ if (KiReinsertTreeTimer(Timer, Timer->DueTime) == FALSE) {
+ Timer->Header.Inserted = TRUE;
+ InsertTailList(&ExpiredListHead, &Timer->TimerListEntry);
+ }
+ }
+
+ //
+ // If any of the attempts to reinsert a timer failed, then timers have
+ // already expired and must be processed.
+ //
+ // N.B. The following function returns with the dispatcher database
+ // unlocked.
+ //
+
+ KiTimerListExpire(&ExpiredListHead, OldIrql1);
+
+ //
+ // Notify registered components of the time change.
+ //
+
+#ifdef _PNP_POWER_
+
+ ExNotifyCallback(ExCbSetSystemTime, (PVOID)OldTime, (PVOID)NewTime);
+
+#endif
+
+ //
+ // Set affinity back to its original value.
+ //
+
+ KeRevertToUserAffinityThread();
+ return;
+}
+
+VOID
+KeSetTimeIncrement (
+ IN ULONG MaximumIncrement,
+ IN ULONG MinimumIncrement
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the time increment value in 100ns units. This
+ value is added to the system time at each interval clock interrupt.
+
+Arguments:
+
+ MaximumIncrement - Supplies the maximum time between clock interrupts
+ in 100ns units supported by the host HAL.
+
+ MinimumIncrement - Supplies the minimum time between clock interrupts
+ in 100ns units supported by the host HAL.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KeMaximumIncrement = MaximumIncrement;
+ KeMinimumIncrement = max(MinimumIncrement, 10 * 1000);
+ KeTimeAdjustment = MaximumIncrement;
+ KeTimeIncrement = MaximumIncrement;
+ KiTickOffset = MaximumIncrement;
+}
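+
+//
+// For example (illustrative, not part of the original source), a HAL whose
+// interval clock interrupts every 10 milliseconds would pass a maximum
+// increment of 100000, since 10ms is 100,000 units of 100ns.
+//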
+
+BOOLEAN
+KeAddSystemServiceTable(
+ IN PULONG Base,
+ IN PULONG Count OPTIONAL,
+ IN ULONG Limit,
+ IN PUCHAR Number,
+ IN ULONG Index
+ )
+
+/*++
+
+Routine Description:
+
+ This function allows the caller to add a system service table
+ to the system.
+
+Arguments:
+
+ Base - Supplies the address of the system service table dispatch
+ table.
+
+ Count - Supplies an optional pointer to a table of per system service
+ counters.
+
+ Limit - Supplies the limit of the service table. Services greater
+ than or equal to this limit will fail.
+
+ Number - Supplies the address of the argument count table.
+
+ Index - Supplies index of the service table.
+
+Return Value:
+
+ TRUE - The operation was successful.
+
+ FALSE - the operation failed. A service table is already bound to
+ the specified location, or the specified index is larger than
+ the maximum allowed index.
+
+--*/
+
+{
+
+ PAGED_CODE();
+
+ //
+ // If a system service table is already defined for the specified
+ // index, then return FALSE. Otherwise, establish the new system
+ // service table.
+ //
+
+ if ((Index > NUMBER_SERVICE_TABLES - 1) ||
+ (KeServiceDescriptorTable[Index].Base != NULL) ||
+ (KeServiceDescriptorTableShadow[Index].Base != NULL)) {
+ return FALSE;
+
+ } else {
+
+ //
+ // If the service table index is equal to the Win32 table, then
+ // only update the shadow system service table. Otherwise, both
+ // the shadow and static system service tables are updated.
+ //
+
+ KeServiceDescriptorTableShadow[Index].Base = Base;
+ KeServiceDescriptorTableShadow[Index].Count = Count;
+ KeServiceDescriptorTableShadow[Index].Limit = Limit;
+ KeServiceDescriptorTableShadow[Index].Number = Number;
+ if (Index != 1) {
+ KeServiceDescriptorTable[Index].Base = Base;
+ KeServiceDescriptorTable[Index].Count = Count;
+ KeServiceDescriptorTable[Index].Limit = Limit;
+ KeServiceDescriptorTable[Index].Number = Number;
+ }
+
+ return TRUE;
+ }
+}
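+
+//
+// Illustrative usage sketch (not part of the original source): a subsystem
+// might register its own service table in an otherwise unused slot. The
+// table names, limit, and slot index below are hypothetical.
+//
+
+#if 0
+
+    if (KeAddSystemServiceTable(&MySubsystemDispatchTable[0],
+                                NULL,                   // no per-service counters
+                                MY_SERVICE_LIMIT,       // number of services
+                                &MySubsystemArgumentTable[0],
+                                2) == FALSE) {          // hypothetical table index
+        return STATUS_UNSUCCESSFUL;
+    }
+
+#endif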
+
+VOID
+FASTCALL
+KeSetSwapContextNotifyRoutine(
+ IN PSWAP_CONTEXT_NOTIFY_ROUTINE NotifyRoutine
+ )
+/*++
+
+Routine Description:
+
+ This function sets the address of a callout routine which will be called
+ at each context switch.
+
+Arguments:
+
+ NotifyRoutine - Supplies the address of the swap context notify callout
+ routine.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PAGED_CODE();
+
+ KiSwapContextNotifyRoutine = NotifyRoutine;
+ return;
+}
+
+VOID
+FASTCALL
+KeSetThreadSelectNotifyRoutine(
+ IN PTHREAD_SELECT_NOTIFY_ROUTINE NotifyRoutine
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the address of a callout routine which will be called
+ when a thread is being selected for execution.
+
+Arguments:
+
+ NotifyRoutine - Supplies the address of the thread select notify callout
+ routine.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PAGED_CODE();
+
+ KiThreadSelectNotifyRoutine = NotifyRoutine;
+ return;
+}
+
+VOID
+FASTCALL
+KeSetTimeUpdateNotifyRoutine(
+ IN PTIME_UPDATE_NOTIFY_ROUTINE NotifyRoutine
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the address of a callout routine which will be called
+ each time the runtime for a thread is updated.
+
+Arguments:
+
+ NotifyRoutine - Supplies the address of the time update notify callout
+ routine.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PAGED_CODE();
+
+ KiTimeUpdateNotifyRoutine = NotifyRoutine;
+ return;
+}
diff --git a/private/ntos/ke/mp/makefile b/private/ntos/ke/mp/makefile
new file mode 100644
index 000000000..6ee4f43fa
--- /dev/null
+++ b/private/ntos/ke/mp/makefile
@@ -0,0 +1,6 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the components of NT OS/2
+#
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/private/ntos/ke/mp/makefile.inc b/private/ntos/ke/mp/makefile.inc
new file mode 100644
index 000000000..4cb325478
--- /dev/null
+++ b/private/ntos/ke/mp/makefile.inc
@@ -0,0 +1,4 @@
+#
+# Currently the MP and UP files are the same.
+#
+!INCLUDE ..\up\makefile.inc
diff --git a/private/ntos/ke/mp/sources b/private/ntos/ke/mp/sources
new file mode 100644
index 000000000..dbeb18d62
--- /dev/null
+++ b/private/ntos/ke/mp/sources
@@ -0,0 +1,29 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+NT_UP=0
+
+TARGETPATH=..\..\mpobj
+
+!include ..\sources.inc
diff --git a/private/ntos/ke/mutntobj.c b/private/ntos/ke/mutntobj.c
new file mode 100644
index 000000000..140e9d966
--- /dev/null
+++ b/private/ntos/ke/mutntobj.c
@@ -0,0 +1,344 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ mutntobj.c
+
+Abstract:
+
+ This module implements the kernel mutant object. Functions are
+ provided to initialize, read, and release mutant objects.
+
+ N.B. Kernel mutex objects have been subsumed by mutant objects.
+
+Author:
+
+ David N. Cutler (davec) 16-Oct-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input mutant is
+// really a kmutant and not something else, like deallocated pool.
+//
+
+#define ASSERT_MUTANT(E) { \
+ ASSERT((E)->Header.Type == MutantObject); \
+}
+
+VOID
+KeInitializeMutant (
+ IN PRKMUTANT Mutant,
+ IN BOOLEAN InitialOwner
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel mutant object.
+
+Arguments:
+
+ Mutant - Supplies a pointer to a dispatcher object of type mutant.
+
+ InitialOwner - Supplies a boolean value that determines whether the
+ current thread is to be the initial owner of the mutant object.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PLIST_ENTRY ListEntry;
+ PRKTHREAD Thread;
+
+ //
+ // Initialize standard dispatcher object header, set the owner thread to
+ // NULL, set the abandoned state to FALSE, and set the APC disable count
+ // to zero (this is the only thing that distinguishes a mutex from a mutant).
+ //
+
+ Mutant->Header.Type = MutantObject;
+ Mutant->Header.Size = sizeof(KMUTANT) / sizeof(LONG);
+ if (InitialOwner == TRUE) {
+ Thread = KeGetCurrentThread();
+ Mutant->Header.SignalState = 0;
+ Mutant->OwnerThread = Thread;
+ ListEntry = Thread->MutantListHead.Blink;
+ InsertHeadList(ListEntry, &Mutant->MutantListEntry);
+
+ } else {
+ Mutant->Header.SignalState = 1;
+ Mutant->OwnerThread = (PKTHREAD)NULL;
+ }
+
+ InitializeListHead(&Mutant->Header.WaitListHead);
+ Mutant->Abandoned = FALSE;
+ Mutant->ApcDisable = 0;
+ return;
+}
+
+VOID
+KeInitializeMutex (
+ IN PRKMUTANT Mutant,
+ IN ULONG Level
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel mutex object. The level number
+ is ignored.
+
+ N.B. Kernel mutex objects have been subsumed by mutant objects.
+
+Arguments:
+
+ Mutex - Supplies a pointer to a dispatcher object of type mutex.
+
+ Level - Ignored.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PLIST_ENTRY ListEntry;
+
+ //
+ // Initialize standard dispatcher object header, set the owner thread to
+ // NULL, set the abandoned state to FALSE, and set the APC disable count
+ // to one (this is the only thing that distinguishes a mutex from a mutant).
+ //
+
+ Mutant->Header.Type = MutantObject;
+ Mutant->Header.Size = sizeof(KMUTANT) / sizeof(LONG);
+ Mutant->Header.SignalState = 1;
+ InitializeListHead(&Mutant->Header.WaitListHead);
+ Mutant->OwnerThread = (PKTHREAD)NULL;
+ Mutant->Abandoned = FALSE;
+ Mutant->ApcDisable = 1;
+ return;
+}
+
+LONG
+KeReadStateMutant (
+ IN PRKMUTANT Mutant
+ )
+
+/*++
+
+Routine Description:
+
+ This function reads the current signal state of a mutant object.
+
+Arguments:
+
+ Mutant - Supplies a pointer to a dispatcher object of type mutant.
+
+Return Value:
+
+ The current signal state of the mutant object.
+
+--*/
+
+{
+
+ ASSERT_MUTANT(Mutant);
+
+ //
+ // Return current signal state of mutant object.
+ //
+
+ return Mutant->Header.SignalState;
+}
+
+LONG
+KeReleaseMutant (
+ IN PRKMUTANT Mutant,
+ IN KPRIORITY Increment,
+ IN BOOLEAN Abandoned,
+ IN BOOLEAN Wait
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases a mutant object by incrementing the mutant
+ count. If the resultant value is one, then an attempt is made to
+ satisfy as many Waits as possible. The previous signal state of
+ the mutant is returned as the function value. If the Abandoned
+ parameter is TRUE, then the mutant object is released by setting
+ the signal state to one.
+
+Arguments:
+
+ Mutant - Supplies a pointer to a dispatcher object of type mutant.
+
+ Increment - Supplies the priority increment that is to be applied
+ if setting the event causes a Wait to be satisfied.
+
+ Abandoned - Supplies a boolean value that signifies whether the
+ mutant object is being abandoned.
+
+ Wait - Supplies a boolean value that signifies whether the call to
+ KeReleaseMutant will be immediately followed by a call to one
+ of the kernel Wait functions.
+
+Return Value:
+
+ The previous signal state of the mutant object.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG OldState;
+ PRKTHREAD Thread;
+
+
+ ASSERT_MUTANT(Mutant);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current signal state of the mutant object.
+ //
+
+ OldState = Mutant->Header.SignalState;
+
+ //
+ // If the Abandoned parameter is TRUE, then force the release of the
+ // mutant object by setting its ownership count to one and setting its
+ // abandoned state to TRUE. Otherwise increment mutant ownership count.
+ // If the result count is one, then remove the mutant object from the
+ // thread's owned mutant list, set the owner thread to NULL, and attempt
+ // to satisfy a Wait for the mutant object if the mutant object wait
+ // list is not empty.
+ //
+
+ Thread = KeGetCurrentThread();
+ if (Abandoned != FALSE) {
+ Mutant->Header.SignalState = 1;
+ Mutant->Abandoned = TRUE;
+
+ } else {
+
+ //
+ // If the Mutant object is not owned by the current thread, then
+ // unlock the dispatcher data base and raise an exception. Otherwise
+ // increment the ownership count.
+ //
+
+ if (Mutant->OwnerThread != Thread) {
+ KiUnlockDispatcherDatabase(OldIrql);
+ ExRaiseStatus(Mutant->Abandoned ?
+ STATUS_ABANDONED : STATUS_MUTANT_NOT_OWNED);
+ }
+
+ Mutant->Header.SignalState += 1;
+ }
+
+ if (Mutant->Header.SignalState == 1) {
+ if (OldState <= 0) {
+ RemoveEntryList(&Mutant->MutantListEntry);
+ Thread->KernelApcDisable += Mutant->ApcDisable;
+ if ((Thread->KernelApcDisable == 0) &&
+ (IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]) == FALSE)) {
+ Thread->ApcState.KernelApcPending = TRUE;
+ KiRequestSoftwareInterrupt(APC_LEVEL);
+ }
+ }
+
+ Mutant->OwnerThread = (PKTHREAD)NULL;
+ if (IsListEmpty(&Mutant->Header.WaitListHead) == FALSE) {
+ KiWaitTest(Mutant, Increment);
+ }
+ }
+
+ //
+ // If the value of the Wait argument is TRUE, then return to
+ // caller with IRQL raised and the dispatcher database locked.
+ // Else release the dispatcher database lock and lower IRQL to
+ // its previous value.
+ //
+
+ if (Wait != FALSE) {
+ Thread->WaitNext = Wait;
+ Thread->WaitIrql = OldIrql;
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+
+ //
+ // Return previous signal state of mutant object.
+ //
+
+ return OldState;
+}
+
+LONG
+KeReleaseMutex (
+ IN PRKMUTANT Mutex,
+ IN BOOLEAN Wait
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases a mutex object.
+
+ N.B. Kernel mutex objects have been subsumed by mutant objects.
+
+Arguments:
+
+ Mutex - Supplies a pointer to a dispatcher object of type mutex.
+
+ Wait - Supplies a boolean value that signifies whether the call to
+ KeReleaseMutex will be immediately followed by a call to one
+ of the kernel Wait functions.
+
+Return Value:
+
+ The previous signal state of the mutex object.
+
+--*/
+
+{
+
+ ASSERT_MUTANT(Mutex);
+
+ //
+ // Release the specified mutex object with defaults for increment
+ // and abandoned parameters.
+ //
+
+ return KeReleaseMutant(Mutex, 1, FALSE, Wait);
+}
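+
+//
+// Illustrative usage sketch (not part of the original source): a typical
+// kernel-mode caller initializes a mutex once, then brackets access to the
+// guarded data with a wait and a release. The names below are hypothetical.
+//
+
+#if 0
+
+    KMUTEX ExampleMutex;
+
+    KeInitializeMutex(&ExampleMutex, 0);                // level is ignored
+
+    KeWaitForSingleObject(&ExampleMutex,                // acquire ownership
+                          Executive,
+                          KernelMode,
+                          FALSE,
+                          NULL);
+
+    // ... access the data guarded by ExampleMutex ...
+
+    KeReleaseMutex(&ExampleMutex, FALSE);               // release ownership
+
+#endif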
diff --git a/private/ntos/ke/ppc/alignem.c b/private/ntos/ke/ppc/alignem.c
new file mode 100644
index 000000000..6a117e60f
--- /dev/null
+++ b/private/ntos/ke/ppc/alignem.c
@@ -0,0 +1,888 @@
+/*++
+
+Copyright (c) 1993 IBM Corporation and Microsoft Corporation
+
+Module Name:
+
+ alignem.c
+
+Abstract:
+
+ This module implements the code necessary to emulate unaligned data
+ references.
+
+Author:
+
+ Rick Simpson 4-Aug-1993
+
+ Based on MIPS version by David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+VOID
+KiSetFloatRegisterValue (
+ IN ULONG,
+ IN DOUBLE,
+ OUT PKEXCEPTION_FRAME,
+ OUT PKTRAP_FRAME
+ );
+
+DOUBLE
+KiGetFloatRegisterValue (
+ IN ULONG,
+ IN PKEXCEPTION_FRAME,
+ IN PKTRAP_FRAME
+ );
+
+/*++
+ When PowerPC takes an Alignment Interrupt, the hardware loads the following:
+ SRR 0 <- Address of instruction causing the interrupt
+ SRR 1 <- MSR
+ DAR <- Effective address of the misaligned reference as computed
+ by the instruction that caused the interrupt
+ DSISR <- Several fields relevant to the failing instruction:
+ Bits 12..13 <- Extended op-code (XO) if instr is DS-form
+ Bits 15..21 <- Index into the table below, identifying
+ (for the most part) the failing instr
+ Bits 22..26 <- RT/RS/FRT/FRS field (reg no.) of instr
+ Bits 27..31 <- RA (reg no.) field for update-form instrs
+
+ For the most part, it is not necessary to retrieve the actual instruction
+ in order to emulate the effects of an unaligned load or store. Enough
+ information is in the DSISR to distinguish most cases. Special processing
+ is required for certain instructions -- the DSISR does not have enough
+ information for them.
+
+ It is unnecessary to compute the failing effective address by emulating
+ the instruction's addressing arithmetic, because the value required is
+ contained in the DAR.
+
+ The table here is indexed by bits 15..21 of the DSISR.
+
+ The "escape" flag indicates that some sort of special handling is needed,
+ for one of the following reasons:
+ 1) More than one instruction maps to the same DSISR value
+ (ld/ldu/lwa, std/stdu)
+ 2) The instruction is load-and-reserve or store-conditional,
+ and misalignment should not be "fixed up"
+ 3) The instruction is a byte-reversed load or store
+ 4) The instruction is "ecowx" or "eciwx"
+ 5) The instruction is "dcbz"
+ 6) The instruction is "stfiwx"
+
+ NOTE: Even though lwz and lwarx share the same DSISR value (0), the
+ table entry for position 0 is used only for lwz. This is so that the
+ most likely case (load word from unaligned address) can take the
+ mainline path. The less likely case (load word and reserve from
+ unaligned address) is ignored and treated as if it were simply load
+ word. Unaligned addresses are not supported for lwarx/stwcx. in the
+ PowerPC architecture. The implementation here (allowing lwarx to
+ proceed as if it were lwx, without establishing a reservation) is
+ allowable according to the architecture; a matching store conditional
+ (stwcx.) to the same unaligned address will fail (return FALSE from
+ this routine), so the incorrect reservation address will be caught
+ then.
+--*/
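+
+//
+// Illustrative note (not part of the original source): the DSISR type used
+// below is defined in the architecture headers. In terms of raw shifts and
+// masks, and remembering that PowerPC numbers bit 0 as the most significant
+// bit of the 32-bit DSISR, the fields described above could be extracted
+// roughly as follows:
+//
+//     Index     = (DsisrValue >> 10) & 0x7F;    // bits 15..21
+//     DataReg   = (DsisrValue >>  5) & 0x1F;    // bits 22..26
+//     UpdateReg =  DsisrValue        & 0x1F;    // bits 27..31
+//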
+
+typedef struct _ALFAULT {
+ ULONG Valid : 1; // Valid DSISR value (1) vs. Should not occur (0)
+ ULONG Load : 1; // Load (1) vs. Store (0)
+ ULONG Length : 2; // Length: 2 bytes (1), 4 bytes (2), 8 bytes (3)
+ ULONG Signed : 1; // Sign-extended (1) vs. Zero-extended (0)
+ ULONG Fixed : 1; // Fixed point (1) vs. Floating point (0)
+ ULONG Update : 1; // Update-form (1) vs. Non-Update-form (0)
+ ULONG Escape : 1; // Needs special processing (1) vs. Regular (0)
+} ALFAULT, *PALFAULT;
+
+// Table indices for instructions needing special handling
+
+#define LDARX_INDEX_VALUE 1
+#define LD_INDEX_VALUE 13
+#define STD_INDEX_VALUE 15
+#define STWCX_INDEX_VALUE 66
+#define STDCX_INDEX_VALUE 67
+#define LWBRX_INDEX_VALUE 72
+#define STWBRX_INDEX_VALUE 74
+#define LHBRX_INDEX_VALUE 76
+#define STHBRX_INDEX_VALUE 78
+#define ECIWX_INDEX_VALUE 84
+#define ECOWX_INDEX_VALUE 86
+#define DCBZ_INDEX_VALUE 95
+#define STFIWX_INDEX_VALUE 111
+
+static ALFAULT AlFault[128] = {
+
+// Valid Load Length Signed Fixed Update Escape
+ { 1, 1, 2, 0, 1, 0, 0 }, // 0 lwz, lwarx
+ { 1, 1, 3, 0, 1, 0, 1 }, // 1 ldarx
+ { 1, 0, 2, 0, 1, 0, 0 }, // 2 stw
+ { 0, 0, 0, 0, 0, 0, 0 }, // 3
+ { 1, 1, 1, 0, 1, 0, 0 }, // 4 lhz
+ { 1, 1, 1, 1, 1, 0, 0 }, // 5 lha
+ { 1, 0, 1, 0, 1, 0, 0 }, // 6 sth
+ { 0, 0, 0, 0, 0, 0, 0 }, // 7
+ { 1, 1, 2, 0, 0, 0, 0 }, // 8 lfs
+ { 1, 1, 3, 0, 0, 0, 0 }, // 9 lfd
+ { 1, 0, 2, 0, 0, 0, 0 }, // 10 stfs
+ { 1, 0, 3, 0, 0, 0, 0 }, // 11 stfd
+ { 0, 0, 0, 0, 0, 0, 0 }, // 12
+ { 1, 1, 0, 0, 0, 0, 1 }, // 13 ld, ldu, lwa
+ { 0, 0, 0, 0, 0, 0, 0 }, // 14
+ { 1, 0, 0, 0, 0, 0, 1 }, // 15 std, stdu
+ { 1, 1, 2, 0, 1, 1, 0 }, // 16 lwzu
+ { 0, 0, 0, 0, 0, 0, 0 }, // 17
+ { 1, 0, 2, 0, 1, 1, 0 }, // 18 stwu
+ { 0, 0, 0, 0, 0, 0, 0 }, // 19
+ { 1, 1, 1, 0, 1, 1, 0 }, // 20 lhzu
+ { 1, 1, 1, 1, 1, 1, 0 }, // 21 lhau
+ { 1, 0, 1, 0, 1, 1, 0 }, // 22 sthu
+ { 0, 0, 0, 0, 0, 0, 0 }, // 23
+ { 1, 1, 2, 0, 0, 1, 0 }, // 24 lfsu
+ { 1, 1, 3, 0, 0, 1, 0 }, // 25 lfdu
+ { 1, 0, 2, 0, 0, 1, 0 }, // 26 stfsu
+ { 1, 0, 3, 0, 0, 1, 0 }, // 27 stfdu
+ { 0, 0, 0, 0, 0, 0, 0 }, // 28
+ { 0, 0, 0, 0, 0, 0, 0 }, // 29
+ { 0, 0, 0, 0, 0, 0, 0 }, // 30
+ { 0, 0, 0, 0, 0, 0, 0 }, // 31
+ { 1, 1, 3, 0, 1, 0, 0 }, // 32 ldx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 33
+ { 1, 0, 3, 0, 1, 0, 0 }, // 34 stdx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 35
+ { 0, 0, 0, 0, 0, 0, 0 }, // 36
+ { 1, 1, 2, 1, 1, 0, 0 }, // 37 lwax
+ { 0, 0, 0, 0, 0, 0, 0 }, // 38
+ { 0, 0, 0, 0, 0, 0, 0 }, // 39
+ { 0, 0, 0, 0, 0, 0, 0 }, // 40
+ { 0, 0, 0, 0, 0, 0, 0 }, // 41
+ { 0, 0, 0, 0, 0, 0, 0 }, // 42
+ { 0, 0, 0, 0, 0, 0, 0 }, // 43
+ { 0, 0, 0, 0, 0, 0, 0 }, // 44
+ { 0, 0, 0, 0, 0, 0, 0 }, // 45
+ { 0, 0, 0, 0, 0, 0, 0 }, // 46
+ { 0, 0, 0, 0, 0, 0, 0 }, // 47
+ { 1, 1, 3, 0, 1, 1, 0 }, // 48 ldux
+ { 0, 0, 0, 0, 0, 0, 0 }, // 49
+ { 1, 0, 3, 0, 1, 1, 0 }, // 50 stdux
+ { 0, 0, 0, 0, 0, 0, 0 }, // 51
+ { 0, 0, 0, 0, 0, 0, 0 }, // 52
+ { 1, 1, 2, 1, 1, 1, 0 }, // 53 lwaux
+ { 0, 0, 0, 0, 0, 0, 0 }, // 54
+ { 0, 0, 0, 0, 0, 0, 0 }, // 55
+ { 0, 0, 0, 0, 0, 0, 0 }, // 56
+ { 0, 0, 0, 0, 0, 0, 0 }, // 57
+ { 0, 0, 0, 0, 0, 0, 0 }, // 58
+ { 0, 0, 0, 0, 0, 0, 0 }, // 59
+ { 0, 0, 0, 0, 0, 0, 0 }, // 60
+ { 0, 0, 0, 0, 0, 0, 0 }, // 61
+ { 0, 0, 0, 0, 0, 0, 0 }, // 62
+ { 0, 0, 0, 0, 0, 0, 0 }, // 63
+ { 0, 0, 0, 0, 0, 0, 0 }, // 64
+ { 0, 0, 0, 0, 0, 0, 0 }, // 65
+ { 1, 0, 2, 0, 1, 0, 1 }, // 66 stwcx.
+ { 1, 0, 3, 0, 1, 0, 1 }, // 67 stdcx.
+ { 0, 0, 0, 0, 0, 0, 0 }, // 68
+ { 0, 0, 0, 0, 0, 0, 0 }, // 69
+ { 0, 0, 0, 0, 0, 0, 0 }, // 70
+ { 0, 0, 0, 0, 0, 0, 0 }, // 71
+ { 1, 1, 2, 0, 1, 0, 1 }, // 72 lwbrx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 73
+ { 1, 0, 2, 0, 1, 0, 1 }, // 74 stwbrx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 75
+ { 1, 1, 1, 0, 1, 0, 1 }, // 76 lhbrx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 77
+ { 1, 0, 1, 0, 1, 0, 1 }, // 78 sthbrx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 79
+ { 0, 0, 0, 0, 0, 0, 0 }, // 80
+ { 0, 0, 0, 0, 0, 0, 0 }, // 81
+ { 0, 0, 0, 0, 0, 0, 0 }, // 82
+ { 0, 0, 0, 0, 0, 0, 0 }, // 83
+ { 1, 1, 2, 0, 1, 0, 1 }, // 84 eciwx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 85
+ { 1, 0, 2, 0, 1, 0, 1 }, // 86 ecowx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 87
+ { 0, 0, 0, 0, 0, 0, 0 }, // 88
+ { 0, 0, 0, 0, 0, 0, 0 }, // 89
+ { 0, 0, 0, 0, 0, 0, 0 }, // 90
+ { 0, 0, 0, 0, 0, 0, 0 }, // 91
+ { 0, 0, 0, 0, 0, 0, 0 }, // 92
+ { 0, 0, 0, 0, 0, 0, 0 }, // 93
+ { 0, 0, 0, 0, 0, 0, 0 }, // 94
+ { 1, 0, 0, 0, 0, 0, 1 }, // 95 dcbz
+ { 1, 1, 2, 0, 1, 0, 0 }, // 96 lwzx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 97
+ { 1, 0, 2, 0, 1, 0, 0 }, // 98 stwx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 99
+ { 1, 1, 1, 0, 1, 0, 0 }, // 100 lhzx
+ { 1, 1, 1, 1, 1, 0, 0 }, // 101 lhax
+ { 1, 0, 1, 0, 1, 0, 0 }, // 102 sthx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 103
+ { 1, 1, 2, 0, 0, 0, 0 }, // 104 lfsx
+ { 1, 1, 3, 0, 0, 0, 0 }, // 105 lfdx
+ { 1, 0, 2, 0, 0, 0, 0 }, // 106 stfsx
+ { 1, 0, 3, 0, 0, 0, 0 }, // 107 stfdx
+ { 0, 0, 0, 0, 0, 0, 0 }, // 108
+ { 0, 0, 0, 0, 0, 0, 0 }, // 109
+ { 0, 0, 0, 0, 0, 0, 0 }, // 110
+ { 1, 0, 2, 0, 1, 0, 1 }, // 111 stfiwx
+ { 1, 1, 2, 0, 1, 1, 0 }, // 112 lwzux
+ { 0, 0, 0, 0, 0, 0, 0 }, // 113
+ { 1, 0, 2, 0, 1, 1, 0 }, // 114 stwux
+ { 0, 0, 0, 0, 0, 0, 0 }, // 115
+ { 1, 1, 1, 0, 1, 1, 0 }, // 116 lhzux
+ { 1, 1, 1, 1, 1, 1, 0 }, // 117 lhaux
+ { 1, 0, 1, 0, 1, 1, 0 }, // 118 sthux
+ { 0, 0, 0, 0, 0, 0, 0 }, // 119
+ { 1, 1, 2, 0, 0, 1, 0 }, // 120 lfsux
+ { 1, 1, 3, 0, 0, 1, 0 }, // 121 lfdux
+ { 1, 0, 2, 0, 0, 1, 0 }, // 122 stfsux
+ { 1, 0, 3, 0, 0, 1, 0 }, // 123 stfdux
+ { 0, 0, 0, 0, 0, 0, 0 }, // 124
+ { 0, 0, 0, 0, 0, 0, 0 }, // 125
+ { 0, 0, 0, 0, 0, 0, 0 }, // 126
+ { 0, 0, 0, 0, 0, 0, 0 } // 127
+};
+
+BOOLEAN
+KiEmulateReference (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate an unaligned data reference to an
+ address in the user part of the address space.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ A value of TRUE is returned if the data reference is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG BranchAddress;
+ PUCHAR DataAddress;
+
+ union {
+ DOUBLE Double;
+ float Float;
+ ULONG Long;
+ SHORT Short;
+ } DataReference;
+ PUCHAR DataValue = (PUCHAR) &DataReference;
+
+ PVOID ExceptionAddress;
+ DSISR DsisrValue;
+ ULONG TableIndex;
+ ULONG DataRegNum;
+ ALFAULT Info;
+ KIRQL OldIrql;
+
+ //
+ // Call out to profile interrupt if alignment profiling is active
+ //
+ if (KiProfileAlignmentFixup) {
+
+ if (++KiProfileAlignmentFixupCount >= KiProfileAlignmentFixupInterval) {
+
+ KeRaiseIrql(PROFILE_LEVEL, &OldIrql);
+ KiProfileAlignmentFixupCount = 0;
+ KeProfileInterruptWithSource(TrapFrame, ProfileAlignmentFixup);
+ KeLowerIrql(OldIrql);
+
+ }
+ }
+
+ //
+ // Save the original exception address in case another exception
+ // occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation of the
+ // unaligned reference causes the emulation to be aborted. The new
+ // exception code and information is copied to the original exception
+ // record and a value of FALSE is returned.
+ //
+
+ try {
+
+ //
+ // PowerPC has no branch-delay-slot complexities like MIPS
+ //
+
+ BranchAddress = TrapFrame->Iar + 4;
+
+ //
+ // The effective address of the reference from the DAR was saved
+ // in the exception record. Check to make sure it is within the
+ // user part of the address space. Alignment exceptions take
+ // precedence over memory management exceptions (this is true
+ // for PowerPC as well as MIPS) and the address could be a
+ // system address.
+ //
+
+ DataAddress = (PUCHAR) (ExceptionRecord->ExceptionInformation[1]);
+
+ if ((ULONG)DataAddress < MM_USER_PROBE_ADDRESS) {
+
+ //
+ // Get information about the failing instruction from saved DSISR.
+ //
+
+ DsisrValue = *(DSISR*) &(ExceptionRecord->ExceptionInformation[2]);
+ TableIndex = DsisrValue.Index;
+ DataRegNum = DsisrValue.DataReg;
+ Info = AlFault[TableIndex];
+
+ //
+ // If table entry is marked invalid, we have some sort of logic error.
+ //
+
+ if (!Info.Valid)
+ return FALSE;
+
+ //
+ // If table entry does not indicate special processing needed,
+ // emulate the execution of the instruction
+ //
+
+ if (!Info.Escape) {
+
+ //
+ // Integer or float load or store
+ //
+
+ if (Info.Fixed) {
+
+ //
+ // Integer register
+ //
+
+ if (Info.Load) {
+
+ //
+ // Integer load
+ //
+
+ switch (Info.Length) {
+
+ //
+ // Halfword integer load
+ //
+
+ case 1:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ KiSetRegisterValue
+ (DataRegNum,
+ Info.Signed ? // sign extension ...
+ (ULONG) ((LONG) DataReference.Short) :
+ (ULONG) ((USHORT) DataReference.Short),
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // Fullword integer load
+ //
+
+ case 2:
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetRegisterValue
+ (DataRegNum,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // Doubleword integer load
+ //
+
+ case 3:
+ return FALSE; // Have no 8-byte integer regs yet
+
+ }
+ } else {
+
+ //
+ // Integer store
+ //
+
+ switch (Info.Length) {
+
+ //
+ // Halfword integer store
+ //
+
+ case 1:
+ DataReference.Short = (SHORT)
+ KiGetRegisterValue
+ (DataRegNum,
+ ExceptionFrame,
+ TrapFrame);
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ break;
+
+ //
+ // Fullword integer store
+ //
+
+ case 2: // Word
+ DataReference.Long =
+ KiGetRegisterValue
+ (DataRegNum,
+ ExceptionFrame,
+ TrapFrame);
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ break;
+
+ //
+ // Doubleword integer store
+ //
+
+ case 3:
+
+ return FALSE; // Have no 8-byte integer regs yet
+ }
+ }
+ } else { // Floating point
+
+ //
+ // Floating-point register
+ //
+
+ if (Info.Load) { // Floating point load
+
+ //
+ // Floating-point load
+ //
+
+ if (Info.Length == 2) {
+
+ //
+ // Floating-point single precision load
+ //
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ KiSetFloatRegisterValue
+ (DataRegNum,
+ (DOUBLE) DataReference.Float,
+ ExceptionFrame,
+ TrapFrame);
+
+ } else {
+
+ //
+ // Floating-point double precision load
+ //
+ DataValue[0] = DataAddress[0];
+ DataValue[1] = DataAddress[1];
+ DataValue[2] = DataAddress[2];
+ DataValue[3] = DataAddress[3];
+ DataValue[4] = DataAddress[4];
+ DataValue[5] = DataAddress[5];
+ DataValue[6] = DataAddress[6];
+ DataValue[7] = DataAddress[7];
+ KiSetFloatRegisterValue
+ (DataRegNum,
+ DataReference.Double,
+ ExceptionFrame,
+ TrapFrame);
+ }
+ } else {
+
+ //
+ // Floating-point store
+ //
+
+ if (Info.Length == 2) {
+
+ //
+ // Floating-point single precision store
+ //
+
+ DataReference.Float = (float)
+ KiGetFloatRegisterValue
+ (DataRegNum,
+ ExceptionFrame,
+ TrapFrame);
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+
+ } else {
+
+ //
+ // Floating-point double precision store
+ //
+ DataReference.Double =
+ KiGetFloatRegisterValue
+ (DataRegNum,
+ ExceptionFrame,
+ TrapFrame);
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ DataAddress[4] = DataValue[4];
+ DataAddress[5] = DataValue[5];
+ DataAddress[6] = DataValue[6];
+ DataAddress[7] = DataValue[7];
+ }
+ }
+ }
+
+ //
+ // See if "update" (post-increment) form of addressing
+ //
+
+ if (Info.Update)
+ KiSetRegisterValue // Store effective addr back into base reg
+ (DsisrValue.UpdateReg,
+ (ULONG) DataAddress,
+ ExceptionFrame,
+ TrapFrame);
+
+ }
+
+ //
+ // Table indicates that special processing is needed, either because
+ // the DSISR does not contain enough information to disambiguate the
+ // failing instruction, or the instruction is not a load or store,
+ // or the instruction has some other unusual requirement.
+ //
+
+ else { // Info.Escape == 1
+ switch (TableIndex) {
+
+ //
+ // Doubleword integers not yet supported
+ //
+
+ case LD_INDEX_VALUE:
+ case STD_INDEX_VALUE:
+ return FALSE;
+
+ //
+ // Load-and-reserve, store-conditional not supported
+ // for misaligned addresses
+ //
+
+ case LDARX_INDEX_VALUE:
+ case STWCX_INDEX_VALUE:
+ case STDCX_INDEX_VALUE:
+ return FALSE;
+
+ //
+ // Integer byte-reversed fullword load
+ //
+
+ case LWBRX_INDEX_VALUE:
+ DataValue[0] = DataAddress[3];
+ DataValue[1] = DataAddress[2];
+ DataValue[2] = DataAddress[1];
+ DataValue[3] = DataAddress[0];
+ KiSetRegisterValue
+ (DataRegNum,
+ DataReference.Long,
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // Integer byte-reversed fullword store
+ //
+
+ case STWBRX_INDEX_VALUE:
+ DataReference.Long =
+ KiGetRegisterValue
+ (DataRegNum,
+ ExceptionFrame,
+ TrapFrame);
+ DataAddress[0] = DataValue[3];
+ DataAddress[1] = DataValue[2];
+ DataAddress[2] = DataValue[1];
+ DataAddress[3] = DataValue[0];
+ break;
+
+ //
+ // Integer byte-reversed halfword load
+ //
+
+ case LHBRX_INDEX_VALUE:
+ DataValue[0] = DataAddress[1];
+ DataValue[1] = DataAddress[0];
+ KiSetRegisterValue
+ (DataRegNum,
+ Info.Signed ? // sign extension ...
+ (ULONG) ((LONG) DataReference.Short) :
+ (ULONG) ((USHORT) DataReference.Short),
+ ExceptionFrame,
+ TrapFrame);
+ break;
+
+ //
+ // Integer byte-reversed halfword store
+ //
+
+ case STHBRX_INDEX_VALUE:
+ DataReference.Short = (SHORT)
+ KiGetRegisterValue
+ (DataRegNum,
+ ExceptionFrame,
+ TrapFrame);
+ DataAddress[0] = DataValue[1];
+ DataAddress[1] = DataValue[0];
+ break;
+
+ //
+ // Special I/O instructions not supported yet
+ //
+
+ case ECIWX_INDEX_VALUE:
+ case ECOWX_INDEX_VALUE:
+ return FALSE;
+
+ //
+ // Data Cache Block Zero
+ //
+ // dcbz causes an alignment fault if cache is disabled
+ // for the address range covered by the block.
+ //
+ // A data cache block is 32 bytes long; we emulate this
+ // instruction by storing 8 zero integers at the address
+ // specified.
+ //
+ // Note, dcbz zeros the block "containing" the address
+ // so we round down first.
+ //
+
+ case DCBZ_INDEX_VALUE: {
+ PULONG DcbAddress = (PULONG)((ULONG)DataAddress & ~0x1f);
+
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ break;
+ }
+
+ //
+ // Store Floating as Integer
+ //
+
+ case STFIWX_INDEX_VALUE:
+ DataReference.Double =
+ KiGetFloatRegisterValue
+ (DataRegNum,
+ ExceptionFrame,
+ TrapFrame);
+ DataAddress[0] = DataValue[0];
+ DataAddress[1] = DataValue[1];
+ DataAddress[2] = DataValue[2];
+ DataAddress[3] = DataValue[3];
+ }
+ }
+
+ TrapFrame->Iar = BranchAddress;
+ return TRUE;
+ }
+
+ //
+ // If an exception occurs, then copy the new exception information to the
+ // original exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+ }
+
+ //
+ // Return a value of FALSE.
+ //
+
+ return FALSE;
+}
+
+BOOLEAN
+KiEmulateDcbz (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to emulate a Data Cache Block Zero instruction.
+ The PowerPC hardware will raise an alignment exception if a DCBZ is
+ attempted on non-cached memory. We need to emulate this even in kernel
+ mode so we can debug h/w problems by disabling the data cache.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ A value of TRUE is returned if the data reference is successfully
+ emulated. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ PUCHAR DataAddress;
+ PVOID ExceptionAddress;
+ DSISR DsisrValue;
+ ULONG TableIndex;
+ ULONG DataRegNum;
+ ALFAULT Info;
+
+ //
+ // Save the original exception address in case another exception
+ // occurs.
+ //
+
+ ExceptionAddress = ExceptionRecord->ExceptionAddress;
+
+ //
+ // Any exception that occurs during the attempted emulation of the
+ // unaligned reference causes the emulation to be aborted. The new
+ // exception code and information is copied to the original exception
+ // record and a value of FALSE is returned.
+ //
+
+ try {
+
+ //
+ // The effective address of the reference from the DAR was saved
+ // in the exception record. Check to make sure it is within the
+ // user part of the address space. Alignment exceptions take
+ // precedence over memory management exceptions (this is true
+ // for PowerPC as well as MIPS) and the address could be a
+ // system address.
+ //
+
+ DataAddress = (PUCHAR) (ExceptionRecord->ExceptionInformation[1]);
+
+ //
+ // Get information about the failing instruction from saved DSISR.
+ //
+
+ DsisrValue = *(DSISR*) &(ExceptionRecord->ExceptionInformation[2]);
+ TableIndex = DsisrValue.Index;
+ DataRegNum = DsisrValue.DataReg;
+ Info = AlFault[TableIndex];
+
+ //
+ // If table entry is valid and does not indicate special processing
+ // needed, and is a DCBZ instruction, emulate the execution of the
+ // instruction
+ //
+
+ if (Info.Valid && Info.Escape && (TableIndex == DCBZ_INDEX_VALUE)) {
+
+ //
+ // Data Cache Block Zero
+ //
+ // A data cache block is 32 bytes long; we emulate this
+ // instruction by storing 8 zero integers at the address
+ // specified.
+ //
+ // Note, dcbz zeros the block "containing" the address
+ // so we round down first.
+ //
+
+ PULONG DcbAddress = (PULONG)((ULONG)DataAddress & ~0x1f);
+
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+ *DcbAddress++ = 0;
+
+ //
+ // Bump instruction address to next instruction.
+ //
+
+ TrapFrame->Iar += 4;
+
+ return TRUE;
+ }
+
+ //
+ // If an exception occurs, then copy the new exception information to the
+ // original exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Preserve the original exception address.
+ //
+
+ ExceptionRecord->ExceptionAddress = ExceptionAddress;
+ }
+
+ //
+ // Return a value of FALSE.
+ //
+
+ return FALSE;
+}
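KiEmulateDcbz above rounds the faulting address down to a 32-byte boundary and stores eight zero words. The following is a minimal sketch of that same arithmetic in plain C; the function name is hypothetical and this is not the kernel's implementation.

    #include <stdint.h>

    /* Zero the 32-byte "cache block" containing addr, mirroring the
       round-down-then-store-eight-words emulation above. */
    static void EmulateDcbzBlock(void *addr)
    {
        uint32_t *block = (uint32_t *)((uintptr_t)addr & ~(uintptr_t)0x1f);
        int i;

        for (i = 0; i < 8; i++) {       /* 8 words * 4 bytes == 32 bytes */
            block[i] = 0;
        }
    }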
diff --git a/private/ntos/ke/ppc/allproc.c b/private/ntos/ke/ppc/allproc.c
new file mode 100644
index 000000000..f34eee338
--- /dev/null
+++ b/private/ntos/ke/ppc/allproc.c
@@ -0,0 +1,423 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+Copyright (c) 1994 Motorola, IBM Corp.
+
+Module Name:
+
+ allproc.c
+
+Abstract:
+
+ This module allocates and initializes kernel resources required
+ to start a new processor, and passes a complete processor state
+ structure to the HAL to obtain a new processor.
+
+Author:
+
+ David N. Cutler 29-Apr-1993
+ Joe Notarangelo 30-Nov-1993
+ Pat Carr 16-Aug-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+
+#pragma alloc_text(INIT, KeStartAllProcessors)
+
+#endif
+
+//
+// Define macro to round up to 64-byte boundary and define block sizes.
+//
+
+#define ROUND_UP(x) ((sizeof(x) + 63) & (~63))
+#define BLOCK1_SIZE (3 * KERNEL_STACK_SIZE)
+#define BLOCK2_SIZE (ROUND_UP(KPRCB) + ROUND_UP(ETHREAD) + 64)
+
+//
+// Define barrier wait static data.
+//
+
+#if !defined(NT_UP)
+
+ULONG KiBarrierWait = 0;
+
+#endif
+
+#if !defined(NT_UP)
+MEMORY_ALLOCATION_DESCRIPTOR KiFreePcrPagesDescriptor;
+#endif
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ );
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiStartProcessor (
+ IN PLOADER_PARAMETER_BLOCK Loaderblock
+ );
+
+
+VOID
+KeStartAllProcessors(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called during phase 1 initialization on the master boot
+ processor to start all of the other registered processors.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+#if !defined(NT_UP)
+
+ ULONG MemoryBlock1;
+ ULONG MemoryBlock2;
+ ULONG Number;
+ ULONG PcrAddress;
+ ULONG PcrPage;
+ PKPRCB Prcb;
+ KPROCESSOR_STATE ProcessorState;
+ volatile PRESTART_BLOCK RestartBlock;
+ BOOLEAN Started;
+ PHYSICAL_ADDRESS PcrPhysicalAddress;
+ PMEMORY_ALLOCATION_DESCRIPTOR KiPcrPagesDescriptor = KeLoaderBlock->u.Ppc.PcrPagesDescriptor;
+
+ //
+ // If the registered number of processors is greater than the maximum
+ // number of processors supported, then only allow the maximum number
+ // of supported processors.
+ //
+
+ if (KeRegisteredProcessors > MAXIMUM_PROCESSORS) {
+ KeRegisteredProcessors = MAXIMUM_PROCESSORS;
+ }
+
+ //
+ // Set barrier that will prevent any other processor from entering the
+ // idle loop until all processors have been started.
+ //
+
+ KiBarrierWait = 1;
+
+ //
+ // Initialize the processor state that will be used to start each of
+ // processors. Each processor starts in the system initialization code
+ // with address of the loader parameter block as an argument.
+ //
+
+ RtlZeroMemory(&ProcessorState, sizeof(KPROCESSOR_STATE));
+ ProcessorState.ContextFrame.Gpr3 = (ULONG)KeLoaderBlock;
+ ProcessorState.ContextFrame.Iar = *(PULONG)KiStartProcessor;
+
+ Number = 0;
+
+ while ((Number+1) < KeRegisteredProcessors) {
+
+ //
+ // Allocate a DPC stack, an idle thread kernel stack, a panic
+ // stack, a PCR page, a processor block, and an executive thread
+ // object. If the allocation fails or the allocation cannot be
+ // made from nonpaged pool, then stop starting processors.
+ //
+
+ if (Number >= KiPcrPagesDescriptor->PageCount) {
+ break;
+ }
+
+ MemoryBlock1 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK1_SIZE);
+ if ((PVOID)MemoryBlock1 == NULL) {
+ break;
+ }
+
+ MemoryBlock2 = (ULONG)ExAllocatePool(NonPagedPool, BLOCK2_SIZE);
+ if ((PVOID)MemoryBlock2 == NULL) {
+ ExFreePool((PVOID)MemoryBlock1);
+ break;
+ }
+
+ //
+ // Zero both blocks of allocated memory.
+ //
+
+ RtlZeroMemory((PVOID)MemoryBlock1, BLOCK1_SIZE);
+ RtlZeroMemory((PVOID)MemoryBlock2, BLOCK2_SIZE);
+
+ //
+ // Set address of interrupt stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Ppc.InterruptStack = MemoryBlock1 + (1 * KERNEL_STACK_SIZE);
+
+ //
+ // Set address of idle thread kernel stack in loader parameter block.
+ //
+
+ KeLoaderBlock->KernelStack = MemoryBlock1 + (2 * KERNEL_STACK_SIZE);
+
+ ProcessorState.ContextFrame.Gpr1 = (ULONG)KeLoaderBlock->KernelStack;
+
+ //
+ // Set address of panic stack in loader parameter block.
+ //
+
+ KeLoaderBlock->u.Ppc.PanicStack = MemoryBlock1 + (3 * KERNEL_STACK_SIZE);
+
+ //
+ // Set the page frame of the PCR page in the loader parameter block.
+ //
+
+ PcrPage = KiPcrPagesDescriptor->BasePage + Number;
+ PcrAddress = KSEG0_BASE | (PcrPage << PAGE_SHIFT);
+ RtlZeroMemory((PVOID)PcrAddress, PAGE_SIZE);
+ ProcessorState.ContextFrame.Gpr4 = PcrAddress;
+ KeLoaderBlock->u.Ppc.PcrPage = PcrPage;
+
+ //
+ // Copy the physical address of the PCR2 page from the current
+ // processor's PCR into the loader parameter block for the new
+ // processor.
+ //
+ // Note that in the PCR this is an address rather than a page
+ // number.
+ //
+
+ KeLoaderBlock->u.Ppc.PcrPage2 = PCR->PcrPage2 >> PAGE_SHIFT;
+
+ //
+ // Set the address of the processor block and executive thread in the
+ // loader parameter block.
+ //
+
+ KeLoaderBlock->Prcb = (MemoryBlock2 + 63) & ~63;
+ KeLoaderBlock->Thread = KeLoaderBlock->Prcb + ROUND_UP(KPRCB);
+
+ //
+ // Attempt to start the next processor. If attempt is successful,
+ // then wait for the processor to get initialized. Otherwise,
+ // deallocate the processor resources and terminate the loop.
+ //
+
+ Started = HalStartNextProcessor(KeLoaderBlock, &ProcessorState);
+
+ if (Started == FALSE) {
+
+ ExFreePool((PVOID)MemoryBlock1);
+ ExFreePool((PVOID)MemoryBlock2);
+ break;
+
+ } else {
+
+ //
+ // Wait until boot is finished on the target processor before
+ // starting the next processor. Booting is considered to be
+ // finished when a processor completes its initialization and
+ // drops into the idle loop.
+ //
+
+ Prcb = (PKPRCB)(KeLoaderBlock->Prcb);
+ RestartBlock = Prcb->RestartBlock;
+ while (RestartBlock->BootStatus.BootFinished == 0) {
+ }
+ }
+
+ Number += 1;
+
+ }
+
+ //
+ // Allow all processors that were started to enter the idle loop and
+ // begin execution.
+ //
+
+ KiBarrierWait = 0;
+
+ if ( Number < KiPcrPagesDescriptor->PageCount ) {
+ if ( Number == 0 ) {
+ KiPcrPagesDescriptor->MemoryType = LoaderOsloaderHeap;
+ } else {
+ KiFreePcrPagesDescriptor.BasePage = KiPcrPagesDescriptor->BasePage + Number;
+ KiFreePcrPagesDescriptor.PageCount = KiPcrPagesDescriptor->PageCount - Number;
+ KiFreePcrPagesDescriptor.MemoryType = LoaderOsloaderHeap;
+ InsertTailList(&KeLoaderBlock->MemoryDescriptorListHead,
+ &KiFreePcrPagesDescriptor.ListEntry);
+ }
+ }
+
+#endif
+
+ //
+ // Reset and synchronize the performance counters of all processors.
+ //
+
+ KiCalibratePerformanceCounter();
+ return;
+}
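The per-processor allocation in KeStartAllProcessors above carves one pool block into three kernel stacks (interrupt, idle-thread, and panic) and a second block into a 64-byte-aligned PRCB followed by an ETHREAD. The sketch below shows only that sizing and alignment arithmetic; the stack size, structure sizes, and pool address are assumed placeholders, not the real NT values.

    #include <stdio.h>

    #define ROUND_UP_64(n)  (((n) + 63) & ~63u)

    int main(void)
    {
        unsigned kernelStackSize = 0x4000;        /* assumed for illustration */
        unsigned prcbSize        = 0x800;         /* placeholder for sizeof(KPRCB) */
        unsigned threadSize      = 0x500;         /* placeholder for sizeof(ETHREAD) */

        unsigned block1 = 3 * kernelStackSize;    /* interrupt, idle, panic stacks */
        unsigned block2 = ROUND_UP_64(prcbSize) + ROUND_UP_64(threadSize) + 64;

        unsigned long base   = 0x80100020ul;      /* hypothetical pool address */
        unsigned long prcb   = (base + 63) & ~63ul;          /* 64-byte aligned PRCB */
        unsigned long thread = prcb + ROUND_UP_64(prcbSize); /* ETHREAD follows PRCB */

        printf("block1=%#x block2=%#x prcb=%#lx thread=%#lx\n",
               block1, block2, prcb, thread);
        return 0;
    }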
+
+VOID
+KiCalibratePerformanceCounter(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function resets and synchronizes the performance counter on all
+ processors in the configuration.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Count = 1;
+
+#if !defined(NT_UP)
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= SYNCH_LEVEL);
+
+ //
+ // Raise IRQL to synchronization level to avoid a possible context switch.
+ //
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Initialize the reset performance counter packet, compute the target
+ // set of processors, and send the packet to the target processors, if
+ // any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ Count = (LONG)KeNumberProcessors;
+ KiIpiSendPacket(TargetProcessors,
+ KiCalibratePerformanceCounterTarget,
+ (PVOID)&Count,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Reset the performance counter on current processor.
+ //
+
+ HalCalibratePerformanceCounter((volatile PLONG)&Count);
+
+ //
+ // Wait until all target processors have reset and synchronized their
+ // performance counters.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to previous level.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiCalibratePerformanceCounterTarget (
+ IN PULONG SignalDone,
+ IN PVOID Count,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for resetting the performance counter.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Count - Supplies a pointer to the number of processors in the host
+ configuration.
+
+ Parameter2, Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ //
+ // Reset and synchronize the performance counter on the current processor
+ // and clear the reset performance counter address to signal the source to
+ // continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalCalibratePerformanceCounter((volatile PLONG)Count);
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
diff --git a/private/ntos/ke/ppc/apcuser.c b/private/ntos/ke/ppc/apcuser.c
new file mode 100644
index 000000000..e3b451ca5
--- /dev/null
+++ b/private/ntos/ke/ppc/apcuser.c
@@ -0,0 +1,194 @@
+/*++
+
+Copyright (c) 1993 IBM Corporation and Microsoft Corporation
+
+Module Name:
+
+ apcuser.c
+
+Abstract:
+
+ This module implements the machine dependent code necessary to initialize
+ a user mode APC.
+
+Author:
+
+ Rick Simpson 25-Oct-1993
+
+ based on MIPS version by David N. Cutler (davec) 23-Apr-1990
+
+Environment:
+
+ Kernel mode only, IRQL APC_LEVEL.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#define _KXPPC_C_HEADER_
+#include "kxppc.h"
+
+VOID
+KiInitializeUserApc (
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKNORMAL_ROUTINE NormalRoutine,
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to initialize the context for a user mode APC.
+
+Arguments:
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ NormalRoutine - Supplies a pointer to the user mode APC routine.
+
+ NormalContext - Supplies a pointer to the user context for the APC
+ routine.
+
+ SystemArgument1 - Supplies the first system supplied value.
+
+ SystemArgument2 - Supplies the second system supplied value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextRecord;
+ EXCEPTION_RECORD ExceptionRecord;
+ LONG Length;
+ ULONG UserStack;
+ PULONG PUserStack;
+
+ //
+ // Move the user mode state from the trap and exception frames to the
+ // context frame.
+ //
+
+ ContextRecord.ContextFlags = CONTEXT_FULL;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextRecord);
+
+ //
+ // Transfer the context information to the user stack, initialize the
+ // APC routine parameters, and modify the trap frame so execution will
+ // continue in user mode at the user mode APC dispatch routine.
+ //
+ // We build the following structure on the user stack:
+ //
+ // | |
+ // |-------------------------------|
+ // | Stack frame header |
+ // | Back chain points to |
+ // | user's stack frame |
+ // | - - - - - - - - - - - - - - - |
+ // | Context Frame |
+ // | Filled in with state |
+ // | of interrupted user |
+ // | program |
+ // | - - - - - - - - - - - - - - - |
+ // | Trap Frame |
+ // | Empty; for use by |
+ // | NtContinue eventually |
+ // | - - - - - - - - - - - - - - - |
+ // | Save word for TOC ptr |
+ // | - - - - - - - - - - - - - - - |
+ // | Canonical slack space |
+ // |-------------------------------|
+ // | |
+ // | Interrupted user's |
+ // | stack frame |
+ // | |
+ // | |
+ // |-------------------------------|
+ // | |
+
+ try {
+
+ //
+ // Set pointer to KeUserApcDispatcher's function descriptor
+ // First word = address of entry point
+ // Second word = address of TOC
+ //
+
+ PULONG FnDesc = (PULONG) KeUserApcDispatcher;
+
+ //
+ // Compute length of context record and new aligned user stack pointer.
+ //
+
+ Length = (STK_MIN_FRAME + CONTEXT_LENGTH + KTRAP_FRAME_LENGTH +
+ sizeof(ULONG) + STK_SLACK_SPACE + 7) & (-8);
+ UserStack = (ContextRecord.Gpr1 & (~7)) - Length;
+
+ //
+ // Probe user stack area for writeability and then transfer the
+ // context record to the user stack.
+ //
+
+ ProbeForWrite((PCHAR)UserStack, Length, sizeof(QUAD));
+ RtlCopyMemory((PULONG)(UserStack + STK_MIN_FRAME), &ContextRecord, sizeof(CONTEXT));
+
+ //
+ // Set the back chain in the new stack frame, store the resume
+ // address as if it were the LR value (for stack trace/unwind),
+ // and fill in TOC value as if it had been saved by prologue.
+ //
+
+ PUserStack = (PULONG) UserStack;
+ PUserStack[0] = ContextRecord.Gpr1;
+ PUserStack[(STK_MIN_FRAME + CONTEXT_LENGTH +
+ KTRAP_FRAME_LENGTH) / sizeof(ULONG)] = FnDesc[1];
+
+ //
+ // Set the address of the user APC routine, the APC parameters, the
+ // new frame pointer, and the new stack pointer in the current trap
+ // frame. Set the continuation address so control will be transfered
+ // to the user APC dispatcher.
+ //
+
+ TrapFrame->Gpr1 = UserStack; // stack pointer
+ TrapFrame->Gpr2 = FnDesc[1]; // TOC address from descriptor
+ TrapFrame->Gpr3 = (ULONG)NormalContext; // 1st parameter
+ TrapFrame->Gpr4 = (ULONG)SystemArgument1; // 2nd parameter
+ TrapFrame->Gpr5 = (ULONG)SystemArgument2; // 3rd parameter
+ TrapFrame->Gpr6 = (ULONG)NormalRoutine; // 4th parameter
+ TrapFrame->Iar = FnDesc[0]; // entry point from descriptor
+
+ //
+ // If an exception occurs, then copy the exception information to an
+ // exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // Set the address of the exception to the current program address
+ // and raise the exception by calling the exception dispatcher.
+ //
+
+ ExceptionRecord.ExceptionAddress = (PVOID)(TrapFrame->Iar);
+ KiDispatchException(&ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ UserMode,
+ TRUE);
+ }
+
+ return;
+}
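KiInitializeUserApc above builds the frame pictured in its comment block below the interrupted thread's stack pointer: a minimal frame header, the CONTEXT, room for a trap frame, a TOC save word, and slack space, all rounded to an 8-byte boundary. The sketch below captures just that pointer arithmetic; the length arguments stand in for the ksppc.h constants and are assumptions.

    #include <stdint.h>

    /* Return the new, 8-byte aligned user stack pointer for the APC frame.
       The length arguments are placeholders for STK_MIN_FRAME, CONTEXT_LENGTH,
       KTRAP_FRAME_LENGTH and STK_SLACK_SPACE. */
    static uint32_t ComputeApcStack(uint32_t userGpr1, uint32_t stkMinFrame,
                                    uint32_t contextLength, uint32_t trapFrameLength,
                                    uint32_t slackSpace)
    {
        uint32_t length = (stkMinFrame + contextLength + trapFrameLength +
                           (uint32_t)sizeof(uint32_t) + slackSpace + 7) & ~7u;

        return (userGpr1 & ~7u) - length;   /* frame sits below the old stack */
    }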
diff --git a/private/ntos/ke/ppc/callback.c b/private/ntos/ke/ppc/callback.c
new file mode 100644
index 000000000..be73c4a95
--- /dev/null
+++ b/private/ntos/ke/ppc/callback.c
@@ -0,0 +1,241 @@
+/*++
+
+Copyright (c) 1994 Microsoft Corporation
+
+Module Name:
+
+ callback.c
+
+Abstract:
+
+ This module implements user mode call back services.
+
+Author:
+
+ David N. Cutler (davec) 29-Oct-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+KeUserModeCallback (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ IN PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls out from kernel mode to a user mode function.
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied
+ to the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ OutputBuffer - Supplies a pointer to a variable that receives
+ the address of the output buffer.
+
+ OutputLength - Supplies a pointer to a variable that receives
+ the length of the output buffer.
+
+Return Value:
+
+ If the callout cannot be executed, then an error status is
+ returned. Otherwise, the status returned by the callback function
+ is returned.
+
+--*/
+
+{
+ PUCALLOUT_FRAME CalloutFrame;
+ ULONG Length;
+ ULONG OldStack;
+ NTSTATUS Status;
+ PKTRAP_FRAME TrapFrame;
+ PULONG UserStack;
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // Get the user mode stack pointer and attempt to copy input buffer
+ // to the user stack.
+ //
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+ OldStack = (ULONG)TrapFrame->Gpr1;
+ try {
+
+ //
+ // Compute new user mode stack address, probe for writability,
+ // and copy the input buffer to the user stack.
+ //
+
+ Length = (InputLength +
+ sizeof(QUAD) - 1 + sizeof(UCALLOUT_FRAME)) & ~(sizeof(QUAD) - 1);
+
+ CalloutFrame = (PUCALLOUT_FRAME)(OldStack - Length);
+ ProbeForWrite(CalloutFrame, Length, sizeof(QUAD));
+ RtlMoveMemory(CalloutFrame + 1, InputBuffer, InputLength);
+
+ //
+ // Allocate the stack frame and fill in the callout arguments.
+ //
+
+ CalloutFrame->Buffer = (PVOID)(CalloutFrame + 1);
+ CalloutFrame->Length = InputLength;
+ CalloutFrame->ApiNumber = ApiNumber;
+ CalloutFrame->Frame.BackChain = TrapFrame->Gpr1;
+ CalloutFrame->Lr = TrapFrame->Lr;
+
+ //
+ // If an exception occurs during the probe of the user stack, then
+ // always handle the exception and return the exception code as the
+ // status value.
+ //
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call user mode.
+ //
+
+ TrapFrame->Gpr1 = (ULONG)CalloutFrame;
+ Status = KiCallUserMode(OutputBuffer, OutputLength);
+ TrapFrame->Gpr1 = OldStack;
+
+ //
+ // When returning from user mode, any drawing done to the GDI TEB
+ // batch must be flushed.
+ //
+
+ if (((PTEB)KeGetCurrentThread()->Teb)->GdiBatchCount > 0) {
+
+ //
+ // call GDI batch flush routine
+ //
+
+ KeGdiFlushUserBatch();
+ }
+
+ return Status;
+}
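KeUserModeCallback above places a UCALLOUT_FRAME directly below the caller's user stack pointer and copies the input buffer just above it, rounding the total to an 8-byte (QUAD) multiple. The following is a sketch of that sizing only; the frame size parameter stands in for sizeof(UCALLOUT_FRAME) and is an assumption.

    #include <stdint.h>

    /* Compute where the callout frame starts on the user stack.  QUAD is
       8 bytes on PowerPC; calloutFrameSize is a placeholder for
       sizeof(UCALLOUT_FRAME). */
    static uint32_t CalloutFrameBase(uint32_t oldUserSp, uint32_t inputLength,
                                     uint32_t calloutFrameSize)
    {
        const uint32_t quad = 8;
        uint32_t length = (inputLength + quad - 1 + calloutFrameSize) & ~(quad - 1);

        return oldUserSp - length;      /* input buffer lands just above the frame */
    }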
+
+NTSTATUS
+NtW32Call (
+ IN ULONG ApiNumber,
+ IN PVOID InputBuffer,
+ IN ULONG InputLength,
+ OUT PVOID *OutputBuffer,
+ OUT PULONG OutputLength
+ )
+
+/*++
+
+Routine Description:
+
+ This function calls a W32 function.
+
+ N.B. ************** This is a temporary service *****************
+
+Arguments:
+
+ ApiNumber - Supplies the API number.
+
+ InputBuffer - Supplies a pointer to a structure that is copied to
+ the user stack.
+
+ InputLength - Supplies the length of the input structure.
+
+ OutputBuffer - Supplies a pointer to a variable that receives the
+ output buffer address.
+
+ OutputLength - Supplies a pointer to a variable that receives the
+ output buffer length.
+
+Return Value:
+
+ TBS.
+
+--*/
+
+{
+
+ PVOID ValueBuffer;
+ ULONG ValueLength;
+ NTSTATUS Status;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ //
+ // If the current thread is not a GUI thread, then fail the service
+ // since the thread does not have a large stack.
+ //
+
+ if (KeGetCurrentThread()->Win32Thread == (PVOID)&KeServiceDescriptorTable[0]) {
+ return STATUS_NOT_IMPLEMENTED;
+ }
+
+ //
+ // Probe the output buffer address and length for writeability.
+ //
+
+ try {
+ ProbeForWriteUlong((PULONG)OutputBuffer);
+ ProbeForWriteUlong(OutputLength);
+
+ //
+ // If an exception occurs during the probe of the output buffer or
+ // length, then always handle the exception and return the exception
+ // code as the status value.
+ //
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Call out to user mode specifying the input buffer and API number.
+ //
+
+ Status = KeUserModeCallback(ApiNumber,
+ InputBuffer,
+ InputLength,
+ &ValueBuffer,
+ &ValueLength);
+
+ //
+ // If the callout is successful, then return the output buffer address
+ // and length to the caller.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ try {
+ *OutputBuffer = ValueBuffer;
+ *OutputLength = ValueLength;
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ }
+ }
+
+ return Status;
+}
diff --git a/private/ntos/ke/ppc/callout.s b/private/ntos/ke/ppc/callout.s
new file mode 100644
index 000000000..bda1fd956
--- /dev/null
+++ b/private/ntos/ke/ppc/callout.s
@@ -0,0 +1,352 @@
+// TITLE("Call Out to User Mode")
+//++
+//
+// Copyright (c) 1994 Microsoft Corporation
+//
+// Module Name:
+//
+// callout.s
+//
+// Abstract:
+//
+// This module implements the code necessary to call out from kernel
+// mode to user mode.
+//
+// Author:
+//
+// Chuck Lenzmeier (chuckl) 11-Nov-1994
+// modified from MIPS version by David N. Cutler (davec)
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+//list(off)
+#include "ksppc.h"
+//list(on)
+
+//
+// Define external variables that can be addressed using GP.
+//
+
+ .extern KeUserCallbackDispatcher
+
+ .extern ..MmGrowKernelStack
+ .extern ..KiServiceExit
+ .extern ..RtlCopyMemory
+ .extern .._savegpr_14
+ .extern .._restgpr_14
+ .extern .._savefpr_14
+ .extern .._restfpr_14
+
+
+ SBTTL("Call User Mode Function")
+//++
+//
+// NTSTATUS
+// KiCallUserMode (
+// IN PVOID *OutputBuffer,
+// IN PULONG OutputLength
+// )
+//
+// Routine Description:
+//
+// This function calls a user mode function.
+//
+// N.B. This function calls out to user mode and the NtCallbackReturn
+// function returns back to the caller of this function. Therefore,
+// the stack layout must be consistent between the two routines.
+//
+// Arguments:
+//
+// OutputBuffer (r.3) - Supplies a pointer to the variable that receives
+// the address of the output buffer.
+//
+// OutputLength (r.4) - Supplies a pointer to a variable that receives
+// the length of the output buffer.
+//
+// Return Value:
+//
+// The final status of the call out function is returned as the status
+// of the function.
+//
+// N.B. This function does not return to its caller. A return to the
+// caller is executed when a NtCallbackReturn system service is
+// executed.
+//
+// N.B. This function does return to its caller if a kernel stack
+// expansion is required and the attempted expansion fails.
+//
+//--
+
+ NESTED_ENTRY_S(KiCallUserMode, CuFrameLength, 18, 18, _TEXT$01)
+
+ PROLOGUE_END(KiCallUserMode)
+
+//
+// Save argument registers. Check if sufficient room is available on
+// the kernel stack for another system call.
+//
+
+ lwz r.6,KiPcr+PcCurrentThread(r.0) // get current thread address
+ stw r.4,CuR4(r.sp) // save output length address
+ stw r.3,CuR3(r.sp) // save output buffer address
+
+ lwz r.9,ThStackLimit(r.6) // get current stack limit
+ subi r.7,r.sp,KERNEL_LARGE_STACK_COMMIT // compute bottom address
+ cmplw r.7,r.9 // check if limit exceeded
+ bge Cu.10 // if ge, limit not exceeded
+
+ ori r.3,r.sp,0 // set current kernel stack address
+ bl ..MmGrowKernelStack // attempt to grow the kernel stack
+ cmpwi r.3,0 // did it work?
+ lwz r.6,KiPcr+PcCurrentThread(r.0) // get current thread address
+ bne Cu.20 // jump if it failed
+ lwz r.9,ThStackLimit(r.6) // get new stack limit
+ stw r.9,KiPcr+PcStackLimit(r.0) // set new stack limit
+
+Cu.10:
+
+//
+// Get the user-mode continuation address.
+//
+// Get the address of the current thread and save the previous trap frame
+// and callback stack addresses in the current frame. Also save the new
+// callback stack address in the thread object.
+//
+
+ lwz r.9,[toc]KeUserCallbackDispatcher(r.toc) // get address of KeUserCallbackDispatcher
+ lwz r.5,KiPcr+PcInitialStack(r.0) // get initial stack address
+ lwz r.12,ThTrapFrame(r.6) // get trap frame address
+ lwz r.7,ThCallbackStack(r.6) // get callback stack address
+ lwz r.9,0(r.9) // get address of descriptor
+ stw r.5,CuInStk(r.sp) // save initial stack address
+ stw r.12,CuTrFr(r.sp) // save trap frame address
+ stw r.7,CuCbStk(r.sp) // save callback stack address
+ stw r.sp,ThCallbackStack(r.6) // set callback stack address
+ lwz r.10,0(r.9) // get continuation IAR
+ lwz r.11,4(r.9) // get continuation TOC
+
+//
+// Restore state and callback to user mode.
+//
+
+ DISABLE_INTERRUPTS(r.7, r.8) // disable interrupts
+
+ lwz r.7,TrIar(r.12) // get trap IAR
+ lwz r.8,TrGpr2(r.12) // get trap TOC
+ stw r.sp,ThInitialStack(r.6) // reset initial stack address
+ stw r.sp,KiPcr+PcInitialStack(r.0) //
+ stw r.10,TrIar(r.12) // set trap IAR
+ stw r.11,TrGpr2(r.12) // set trap TOC
+ stw r.7,CuTrIar(r.sp) // save trap IAR
+ stw r.8,CuTrToc(r.sp) // save trap TOC
+
+ lwz r.10,TrMsr(r.12) // get caller's MSR value
+ lbz r.8,KiPcr+PcCurrentIrql(r.0) // get current IRQL
+
+ b ..KiServiceExit
+
+//
+// An attempt to grow the kernel stack failed.
+//
+// Note: We don't need to restore the nonvolatile registers here,
+// since we didn't modify them. So we don't use NESTED_EXIT,
+// and pop the stack manually instead.
+
+Cu.20:
+ lwz r.0,CuLr(r.sp)
+ addi r.sp,r.sp,CuFrameLength
+ mtlr r.0
+ SPECIAL_EXIT(KiCallUserMode)
+
+ SBTTL("Switch Kernel Stack")
+//++
+//
+// PVOID
+// KeSwitchKernelStack (
+// IN PVOID StackBase,
+// IN PVOID StackLimit
+// )
+//
+// Routine Description:
+//
+// This function switches to the specified large kernel stack.
+//
+// N.B. This function can ONLY be called when there are no variables
+// in the stack that refer to other variables in the stack, i.e.,
+// there are no pointers into the stack.
+//
+// Arguments:
+//
+// StackBase (r.3) - Supplies a pointer to the base of the new kernel
+// stack.
+//
+// StackLimit (r.4) - supplies a pointer to the limit of the new kernel
+// stack.
+//
+// Return Value:
+//
+// The old kernel stack is returned as the function value.
+//
+//--
+
+ .struct 0
+SsFrame: .space StackFrameHeaderLength
+SsSp: .space 4 // saved new stack pointer
+SsR3: .space 4 // saved R3 (StackBase)
+SsR4: .space 4 // saved R4 (StackLimit)
+SsLr: .space 4 // saved LR
+SsGpr: .space 4 * 2 // saved GPRs
+ .align 3
+SsFrameLength: // length of stack frame
+
+ NESTED_ENTRY_S(KeSwitchKernelStack, SsFrameLength, 2, 0, _TEXT$01)
+
+ PROLOGUE_END(KeSwitchKernelStack)
+
+//
+// Save the address of the new stack and copy the old stack to the new
+// stack.
+//
+
+ stw r.3,SsR3(r.sp) // save new kernel stack base address
+ lwz r.31,KiPcr+PcCurrentThread(r.0) // get current thread address
+ stw r.4,SsR4(r.sp) // save new kernel stack limit address
+ lwz r.6,ThTrapFrame(r.31) // get current trap frame address
+ lwz r.5,ThStackBase(r.31) // get current stack base address
+ sub r.30,r.3,r.5 // calculate offset from old stack to new
+ ori r.4,r.sp,0 // set source address of copy
+ sub r.5,r.5,r.sp // compute length of copy
+ add r.6,r.6,r.30 // relocate current trap frame address
+ sub r.3,r.3,r.5 // set destination address of copy
+ stw r.6,ThTrapFrame(r.31) // store relocated trap frame address
+ stw r.3,SsSp(r.sp) // save new stack pointer address
+ bl ..RtlCopyMemory // copy old stack to new stack
+
+//
+// Switch to new kernel stack and return the address of the old kernel
+// stack.
+//
+
+ DISABLE_INTERRUPTS(r.4, r.5) // disable interrupts
+
+ lwz r.3,ThStackBase(r.31) // get old kernel stack base address
+ lwz r.5,SsR3(r.sp) // get new kernel stack base address
+ lwz r.6,SsR4(r.sp) // get new kernel stack limit address
+ li r.7,TRUE
+ lwz r.sp,SsSp(r.sp) // switch to new kernel stack
+ stw r.5,KiPcr+PcInitialStack(r.0) // set new initial stack address
+ stw r.5,ThInitialStack(r.31) // set new initial stack address
+ lwz r.8,SsFrameLength(r.sp) // get caller's stack frame link
+ stw r.5,ThStackBase(r.31) // set new stack base address
+ stw r.6,KiPcr+PcStackLimit(r.0) // set new stack limit address
+ add r.8,r.8,r.30 // relocate caller's stack frame link
+ stw r.6,ThStackLimit(r.31) // set new stack limit address
+ stb r.7,ThLargeStack(r.31) // set large kernel stack TRUE
+ stw r.8,SsFrameLength(r.sp) // set caller's new stack frame link
+
+ ENABLE_INTERRUPTS(r.4) // enable interrupts
+
+ NESTED_EXIT(KeSwitchKernelStack, SsFrameLength, 2, 0)
+
+ SBTTL("Return from User Mode Callback")
+//++
+//
+// NTSTATUS
+// NtCallbackReturn (
+// IN PVOID OutputBuffer OPTIONAL,
+// IN ULONG OutputLength,
+// IN NTSTATUS Status
+// )
+//
+// Routine Description:
+//
+// This function returns from a user mode callout to the kernel
+// mode caller of the user mode callback function.
+//
+// N.B. This function returns to the function that called out to user
+// mode and the KiCallUserMode function calls out to user mode.
+// Therefore, the stack layout must be consistent between the
+// two routines.
+//
+// Arguments:
+//
+// OutputBuffer (r.3) - Supplies an optional pointer to an output buffer.
+//
+// OutputLength (r.4) - Supplies the length of the output buffer.
+//
+// Status (r.5) - Supplies the status value returned to the caller of the
+// callback function.
+//
+// Return Value:
+//
+// If the callback return cannot be executed, then an error status is
+// returned. Otherwise, the specified callback status is returned to
+// the caller of the callback function.
+//
+// N.B. This function returns to the function that called out to user
+// mode if a callout is currently active.
+//
+//--
+
+ LEAF_ENTRY_S(NtCallbackReturn, _TEXT$01)
+
+ lwz r.7,KiPcr+PcCurrentThread(r.0) // get current thread address
+ lwz r.8,ThCallbackStack(r.7) // get callback stack address
+ cmpwi r.8,0 // if eq, no callback stack present
+ beq- cr.10
+
+//
+// Restore the trap frame and callback stacks addresses, store the output
+// buffer address and length, restore the floating status, and set the
+// service status.
+//
+
+ lwz r.9,CuR3(r.8) // get address to store output address
+ lwz r.10,CuR4(r.8) // get address to store output length
+ lwz r.11,CuTrFr(r.8) // get previous trap frame address
+ lwz r.12,CuCbStk(r.8) // get previous callback stack address
+ stw r.3,0(r.9) // store output buffer address
+ lwz r.3,CuTrIar(r.8) // get saved trap IAR
+ stw r.4,0(r.10) // store output buffer length
+ lwz r.4,CuTrToc(r.8) // get saved trap TOC
+ stw r.11,ThTrapFrame(r.7) // restore trap frame address
+ stw r.12,ThCallbackStack(r.7) // restore callback stack address
+ stw r.3,TrIar(r.11) // restore trap IAR
+ stw r.4,TrGpr2(r.11) // restore trap TOC
+
+ ori r.3,r.5,0 // set callback service status
+
+//
+// **** this is the place where the current stack would be trimmed back.
+//
+
+//
+// Restore initial stack pointer, trim stack back to callback frame,
+// deallocate callback stack frame, and return to callback caller.
+//
+
+ lwz r.4,CuInStk(r.8) // get previous initial stack
+
+ DISABLE_INTERRUPTS(r.5, r.9) // disable interrupts
+
+ stw r.4,ThInitialStack(r.7) // restore initial stack address
+ stw r.4,KiPcr+PcInitialStack(r.0) //
+ ori r.sp,r.8,0 // trim stack back to callback frame
+
+ ENABLE_INTERRUPTS(r.5) // enable interrupts
+
+ NESTED_EXIT(NtCallbackReturn, CuFrameLength, 18, 18)
+//
+// No callback is currently active.
+//
+
+cr.10:
+ LWI (r.3,STATUS_NO_CALLBACK_ACTIVE) // set service status
+ ALTERNATE_EXIT(NtCallbackReturn)
+
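KiCallUserMode and NtCallbackReturn above cooperate through a small frame saved on the kernel stack: the callout path saves the previous trap frame, callback stack, and initial stack addresses and links its own frame through the thread's callback stack field, and the return path unlinks that frame and restores the saved state. The C-flavored model below is illustrative only; the type and field names are hypothetical, not the NT structures.

    /* Illustrative model of the callback stack linkage in callout.s above. */
    typedef struct _CALLOUT_FRAME_MODEL {
        void *SavedTrapFrame;          /* CuTrFr  */
        void *SavedCallbackStack;      /* CuCbStk */
        void *SavedInitialStack;       /* CuInStk */
    } CALLOUT_FRAME_MODEL;

    typedef struct _THREAD_MODEL {
        void *TrapFrame;               /* ThTrapFrame     */
        void *CallbackStack;           /* ThCallbackStack */
        void *InitialStack;            /* ThInitialStack  */
    } THREAD_MODEL;

    /* Callout path (KiCallUserMode): remember current state, link the frame. */
    static void PushCallout(THREAD_MODEL *t, CALLOUT_FRAME_MODEL *f)
    {
        f->SavedTrapFrame     = t->TrapFrame;
        f->SavedCallbackStack = t->CallbackStack;
        f->SavedInitialStack  = t->InitialStack;
        t->CallbackStack      = f;
    }

    /* Return path (NtCallbackReturn): unlink the frame, restore saved state. */
    static void PopCallout(THREAD_MODEL *t)
    {
        CALLOUT_FRAME_MODEL *f = (CALLOUT_FRAME_MODEL *)t->CallbackStack;

        t->TrapFrame     = f->SavedTrapFrame;
        t->CallbackStack = f->SavedCallbackStack;
        t->InitialStack  = f->SavedInitialStack;
    }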
diff --git a/private/ntos/ke/ppc/clock.s b/private/ntos/ke/ppc/clock.s
new file mode 100644
index 000000000..93642ac56
--- /dev/null
+++ b/private/ntos/ke/ppc/clock.s
@@ -0,0 +1,639 @@
+// TITLE("Interval and Profile Clock Interrupts")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// clock.s
+//
+// Abstract:
+//
+// This module implements the code necessary to field and process the
+// interval and profile clock interrupts.
+//
+// Author:
+//
+// Chris P. Karamatas 27-Sep-1993
+//
+// Based on MIPS version by David N. Cutler (davec) 27-Mar-1990
+//
+// Modified by:
+//
+// Pat Carr 14-Apr-1994 to follow 3.5 model
+// Peter Johnston 30-May-1994 extensive mods for level 612
+// Peter Johnston 10-Jun-1994 updated to level 683 (Beta 2)
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+ .file "clock.s"
+
+#include "ksppc.h"
+
+
+//
+// Define external variables used by this module.
+//
+
+ .extern KeTickCount // 3 * 4
+ .extern KeTimeAdjustment // 4
+ .extern KiAdjustDpcThreshold // 4
+ .extern KiIdealDpcRate // 4
+ .extern KiMaximumDpcQueueDepth // 4
+ .extern KiProfileListHead // 2 * 4
+ .extern KiProfileLock // 4
+ .extern KiTimerTableListHead
+ .extern KiTimerExpireDpc
+ .extern KiTickOffset
+ .extern KeMaximumIncrement
+
+#if !defined(NT_UP) && SPINDBG
+ .extern ..KiAcquireSpinLockDbg
+#endif
+
+
+//++
+//
+// VOID
+// KeUpdateSystemTime (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the system time and check to
+// determine if a timer has expired.
+//
+// N.B. This routine is executed on a single processor in a multiprocessor
+// system. The remainder of the processors only execute the quantum end
+// and runtime update code.
+//
+// Arguments:
+//
+// TrapFrame (r.3) - Supplies a pointer to a trap frame.
+// TimeIncrement (r.4) - Supplies the time increment in 100ns units.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+// N.B. The HAL calls this routine with interrupts disabled.
+
+ .set cr.5.gt, 21
+ .set cr.0.gt, 1
+
+ LEAF_ENTRY(KeUpdateSystemTime)
+
+//
+// Update the interrupt time.
+//
+// N.B. The interrupt time is updated in a very strict manner so that an
+// interlock does not have to be used in an MP system.
+//
+
+ lwz r.5, [toc]KiTickOffset(r.toc) // get addr of globals used
+ // from "Compute next tick
+ lwz r.12, [toc]KeMaximumIncrement(r.toc) // offset value."
+ lwz r.7, KiPcr2 + Pc2InterruptTime + 0(r.0) // get low interrupt time
+ lwz r.8, KiPcr2 + Pc2InterruptTime + 4(r.0) // get high interrupt time
+ lwz r.10, [toc]KeTickCount(r.toc)
+ lwz r.11, [toc]KiTimerTableListHead(r.toc)
+
+ lwz r.9, 0(r.5) // get tick offset value
+ lwz r.12, 0(r.12) // get maximum increment value
+ addc r.7, r.7, r.4 // add time increment value
+ addze r.8, r.8 // increment high interrupt time
+
+ stw r.8, KiPcr2 + Pc2InterruptTime + 8(r.0) // store high 2 interrupt time
+ stw r.7, KiPcr2 + Pc2InterruptTime + 0(r.0) // store low interrupt time
+ stw r.8, KiPcr2 + Pc2InterruptTime + 4(r.0) // store high 1 interrupt time
+
+ sub. r.9, r.9, r.4 // subtract time increment
+ // from "Compute next tick
+ lwz r.4, [toc]KeTimeAdjustment(r.toc) // offset value."
+ stw r.9, 0(r.5) // store tick offset value
+ add r.9, r.12, r.9 // add maximum inc to residue
+ crmove cr.5.gt, cr.0.gt // save cr.0 gt for later
+ lwz r.6, 0(r.10) // get low tick count
+ bgt check_timer // jif tick not completed
+
+//
+// Compute next tick offset value.
+//
+ stw r.9, 0(r.5) // store tick offset value
+ lwz r.4, 0(r.4) // get time adjustment value
+
+//
+// Update system time.
+//
+// N.B. The system time is updated in a very strict manner so that an
+// interlock does not have to be used in an MP system.
+//
+
+ lwz r.0, KiPcr2 + Pc2SystemTime + 0(r.0) // get low system time
+ lwz r.9, KiPcr2 + Pc2SystemTime + 4(r.0) // get high system time
+ addc r.0, r.4, r.0 // add time increment
+ // From "Update the tick count"
+ lwz r.4, 4(r.10) // get high tick count
+ addze r.9, r.9 // increment high system time
+
+ stw r.9, KiPcr2 + Pc2SystemTime + 8(r.0) // store high 2 system time
+ stw r.0, KiPcr2 + Pc2SystemTime + 0(r.0) // store low system time
+ stw r.9, KiPcr2 + Pc2SystemTime + 4(r.0) // store high 1 system time
+ // From "Update the tick count"
+ addic r.9, r.6, 1 // increment tick count
+
+//
+// Update the tick count.
+//
+// N.B. The tick count is updated in a very strict manner so that an
+// interlock does not have to be used in an MP system.
+//
+
+ addze r.4, r.4 // increment high word
+ stw r.9, KiPcr2 + Pc2TickCountLow(r.0) // store low tick count
+ stw r.4, 8(r.10) // store high 2 tick count
+ stw r.9, 0(r.10) // store low tick count
+ stw r.4, 4(r.10) // store high 1 tick count
+
+//
+// Check to determine if a timer has expired at the current (i.e., old) hand
+// value.
+//
+ rlwinm r.10, r.6, 3, (TIMER_TABLE_SIZE - 1) << 3 // get table offset
+ add r.10, r.11, r.10 // get addr of table entry
+ lwz r.9, LsFlink(r.10) // get addr of 1st timer in list
+ cmplw cr7, r.9, r.10 // list empty?
+ // From "Get the expiration..."
+ lwz r.4, TiDueTime + TmHighTime - TiTimerListEntry(r.9)
+ // From "Get the expiration..."
+ lwz r.5, TiDueTime + TmLowTime - TiTimerListEntry(r.9)
+ beq cr7, check_next_hand // jif yes
+
+//
+// Get the expiration time from the timer object.
+//
+// N.B. The offset to the timer list entry must be subtracted out of the
+// displacement calculation.
+//
+ cmplw cr.6, r.4, r.8 // check high time
+ cmplw cr.7, r.5, r.7 // check low time
+ bgt cr.6, check_next_hand // this timer has not expired
+ blt cr.6, expire // this timer has expired
+ ble cr.7, expire // this timer has expired
+
+//
+// Check to determine if a timer has expired at the next hand value.
+//
+check_next_hand:
+ addi r.6, r.6, 1 // advance hand entry to next
+check_timer:
+ rlwinm r.10, r.6, 3, (TIMER_TABLE_SIZE - 1) << 3 // get table offset
+ add r.10, r.11, r.10 // get addr of table entry
+ lwz r.9, LsFlink(r.10) // get addr of 1st timer in list
+ cmplw cr.7, r.9, r.10 // list empty?
+ lwz r.5, TiDueTime + TmLowTime - TiTimerListEntry(r.9)
+ beq cr.7, kust_exit // jif yes
+
+// Get the expiration time from the timer object.
+//
+// N.B. The offset to the timer list entry must be subtracted out of the
+// displacement calculation.
+//
+ // Note we can't move this guy
+ // above beq kust_exit.
+ lwz r.4, TiDueTime + TmHighTime - TiTimerListEntry(r.9)
+ cmplw cr.6, r.4, r.8 // check high time
+ cmplw cr.7, r.5, r.7 // check low time
+ bgt cr.6, kust_exit // this timer has not expired
+ blt cr.6, expire // this timer has expired
+ bgt cr.7, kust_exit // this timer has not expired
+
+//
+// Put timer expiration DPC in the system DPC list and initiate a dispatch
+// interrupt on the current processor.
+//
+
+expire:
+ lwz r.9, KiPcr+PcPrcb(r.0) // get address of PRCB
+ lwz r.10, [toc]KiTimerExpireDpc(r.toc)// get expiration DPC address
+ addi r.11, r.9, PbDpcListHead // compute DPC listhead address
+ addi r.7, r.9, PbDpcLock // compute DPC lock address
+
+#if !defined(NT_UP)
+ ACQUIRE_SPIN_LOCK(r.7, r.11, r.0, expire_lock, expire_lock_spin)
+#endif
+
+ lwz r.8, DpLock(r.10) // get DPC inserted state
+ addi r.5, r.10, DpDpcListEntry // compute addr DPC list entry
+ lwz r.12, LsBlink(r.11) // get addr last entry in list
+ cmplwi r.8, 0 // DPC inserted?
+ bne queued // jif DPC already inserted
+ lwz r.8, PbDpcQueueDepth(r.9) // get DPC queue depth
+ stw r.7, DpLock(r.10) // set DPC inserted state
+ stw r.6, DpSystemArgument1(r.10) // set timer table hand value
+
+ stw r.5, LsBlink(r.11) // set addr of new last entry
+ stw r.5, LsFlink(r.12) // set next in old last entry
+ stw r.11, LsFlink(r.5) // set address of next entry
+ stw r.12, LsBlink(r.5) // set address of previous entry
+
+ addi r.8, r.8, 1 // increment DPC queue depth
+ stw r.8, PbDpcQueueDepth(r.9) //
+
+ SOFTWARE_INTERRUPT(DISPATCH_LEVEL, r.4)
+
+queued:
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.7, r.0)
+#endif
+
+kust_exit:
+
+ ble cr.5, ..KeUpdateRunTime // if lez, full tick
+
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r.7, r.0, expire_lock, expire_lock_spin)
+#endif
+
+ DUMMY_EXIT(KeUpdateSystemTime)
+
+
+//++
+//
+// VOID
+// KeUpdateRunTime (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// interval timer. Its function is to update the runtime of the current
+// thread, update the runtime of the current thread's process, and decrement
+// the current thread's quantum.
+//
+// N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// Arguments:
+//
+// TrapFrame (r.3) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeUpdateRunTime)
+
+ lwz r.9, KiPcr+PcPrcb(r.0) // get current processor block
+ lwz r.7, TrMsr(r.3) // get saved machine status
+ // From "If a DPC is activ..."
+ lbz r.10, TrOldIrql(r.3) // get previous IRQL
+ lwz r.4, KiPcr+PcCurrentThread(r.0) // get current thread address
+ lwz r.8, ThUserTime(r.4) // get user time
+ lwz r.11, PbDpcRoutineActive(r.9) // get DPC active flag
+ extrwi. r.7, r.7, 1, MSR_PR // test previous mode
+ lwz r.6, ThApcState + AsProcess(r.4)// get addr current process
+ cmpwi cr.1, r.11, 0 // DPC active ?
+ bne user // jif previous mode was user
+
+//
+// If a DPC is active, then increment the time spent executing DPC routines.
+// Otherwise, if the old IRQL is greater than DPC level, then increment the
+// time spent executing interrupt services routines. Otherwise, increment
+// the time spent in kernel mode for the current thread.
+//
+
+ lwz r.11, ThKernelTime(r.4) // get kernel time
+ cmpwi r.10, DISPATCH_LEVEL // compare IRQL with DPC level
+ addi r.7, r.9, PbInterruptTime // compute interrupt time addr
+ blt kernel // if ltz, inc. thread kernel time
+ bgt intrpt // if >DPC level bump interrupt
+ addi r.7, r.9, PbDpcTime // compute DPC time address
+ beq cr.1, kernel // jif not dpc active
+
+//
+// Update the time spent in DPC/interrupt processing.
+//
+
+intrpt:
+ lwz r.8, 0(r.7) // get processor time
+ addi r.5, r.9, PbKernelTime // compute processor kernel time addr.
+ addi r.8, r.8, 1 // increment processor time
+ stw r.8, 0(r.7) // store processor time
+ b processor
+
+//
+// Update the time spent in kernel mode for the current thread.
+//
+
+kernel:
+ addi r.6, r.6, PrKernelTime // compute process kernel time addr.
+ addi r.5, r.9, PbKernelTime // compute processor kernel time addr.
+ addi r.11, r.11, 1 // increment kernel time
+ stw r.11, ThKernelTime(r.4) // store kernel time
+ b continue
+
+//
+// Update the time spent in user mode for the current thread.
+//
+
+user:
+ addi r.6, r.6, PrUserTime // compute process user time addr.
+ addi r.5, r.9, PbUserTime // compute processor user time addr.
+ addi r.8, r.8, 1 // increment user time
+ stw r.8, ThUserTime(r.4) // store user time
+
+//
+// Update the time spent in kernel/user mode for the current thread's process.
+//
+// N.B. The update of the process time must be synchronized across processors.
+//
+
+ //DBGSTORE_I(r10,r8,0x1112)
+continue:
+ lwarx r.10, 0, r.6 // get process time
+ addi r.10, r.10, 1 // increment process time
+ stwcx. r.10, 0, r.6 // store process time
+ bne- continue // if store conditional failed
+
+//
+// Update the time spent in kernel/user mode for the current processor.
+//
+
+processor:
+ lwz r.10, 0(r.5) // get low processor time
+
+//
+// Update the DPC request rate which is computed as the average between
+// the previous rate and the current rate.
+//
+
+ lwz r.8, PbDpcQueueDepth(r.9) // get current DPC queue depth
+ lwz r.6, PbDpcLastCount(r.9) // get last DPC count
+ lwz r.7, PbDpcRequestRate(r.9)// get last DPC request rate
+ lwz r.0, PbDpcInterruptRequested(r.9)// interrupt requested?
+ // NOTE: we can't move these below
+ // the next lwz to r.5
+ addi r.10, r.10, 1 // increment processor time
+ stw r.10, 0(r.5) // store low processor time
+ lwz r.5, PbDpcCount(r.9) // get current DPC count
+ cmpwi cr.0, r.8, 0
+ lwz r.8, [toc]KiAdjustDpcThreshold(r.toc)
+ stw r.5, PbDpcLastCount(r.9) // set last DPC count
+ sub r.5, r.5, r.6 // compute count during interval
+ cmpwi cr.7, r.0, 0 // interrupt requested ?
+ add r.5, r.5, r.7 // compute sum of current and last
+ srwi r.5, r.5, 1 // average current and last
+ stw r.5, PbDpcRequestRate(r.9)// set new DPC request rate
+ lwz r.0, 0(r.8) // get DPC threshold counter
+ lwz r.7, PbMaximumDpcQueueDepth(r.9)// get current max queue depth
+
+//
+// If the current DPC queue depth is not zero, a DPC routine is not active,
+// and a DPC interrupt has not been requested, then request a dispatch
+// interrupt, decrement the maximum DPC queue depth, and reset the threshold
+// counter if appropriate.
+//
+
+ bne cr.1, nodpc // jif DPC routine active
+ beq cr.0, nodpc // jif DPC queue is empty
+ bne cr.7, nodpc // jif DPC already requested
+
+ lwz r.6, [toc]KiIdealDpcRate(r.toc) // get &ideal DPC rate
+ stw r.0, PbAdjustDpcThreshold(r.9) // set new DPC threshold counter
+ lwz r.6, 0(r.6) // get ideal DPC rate
+ li r.0, 1
+ stb r.0, KiPcr+PcDispatchInterrupt(r.0) // request DPC interrupt
+ cmpw cr.6, r.5, r.6 // current rate < ideal ?
+ subic. r.7, r.7, 1 // decrement max DPC queue depth
+ bge cr.6, ..KiDecrementQuantum // jif rate >= ideal
+ beq ..KiDecrementQuantum // if cur val == 1
+ stw r.7, PbMaximumDpcQueueDepth(r.9)// set new maximum queue depth
+ b ..KiDecrementQuantum
+
+//
+// The DPC queue is empty or a DPC routine is active or a DPC interrupt
+// has been requested. Count down the adjustment threshold and if the
+// count reaches zero, then increment the maximum DPC queue depth, but
+// not above the initial value, and reset the adjustment threshold value.
+//
+
+nodpc:
+ lwz r.5, [toc]KiMaximumDpcQueueDepth(r.toc)
+ lwz r.6, PbAdjustDpcThreshold(r.9) // get adjustment threshold
+ lwz r.5, 0(r.5) // get init max queue depth
+ subic. r.6, r.6, 1 // decrement adjustment
+ stw r.6, PbAdjustDpcThreshold(r.9) // threshold counter
+ bne ..KiDecrementQuantum
+ cmpw cr.6, r.5, r.7
+ stw r.0, PbAdjustDpcThreshold(r.9) // reset threshold
+ beq cr.6, ..KiDecrementQuantum // jif at max depth
+ addi r.7, r.7, 1 // increment current max depth
+ stw r.7, PbMaximumDpcQueueDepth(r.9)// set new maximum DPC queue
+ // depth.
+
+
+
+//
+// Decrement current thread quantum and check to determine if a quantum end
+// has occurred.
+//
+
+ ALTERNATE_ENTRY(KiDecrementQuantum)
+
+ lbz r.5, ThQuantum(r.4) // get current thread quantum
+ extsb r.5, r.5 // sign-extend thread quantum
+ subic. r.5, r.5, CLOCK_QUANTUM_DECREMENT // decrement current quantum
+ stb r.5, ThQuantum(r.4) // store thread quantum
+ bgtlr+ // return if quantum remaining
+
+//
+// Set quantum end flag and initiate a dispatch interrupt on the current
+// processor.
+//
+
+ lwz r.5, PbIdleThread(r.9) // get address of idle thread
+ cmpw r.5, r.4 // is this the idle thread?
+ beqlr- // return if in idle thread
+
+ stw r.sp, KiPcr+PcQuantumEnd(r.0) // set quantum end indicator
+
+ SOFTWARE_INTERRUPT(DISPATCH_LEVEL, r.8)
+
+ LEAF_EXIT(KeUpdateRunTime)
+
+//++
+//
+// VOID
+// KeProfileInterruptWithSource (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE ProfileSource
+// )
+//
+// VOID
+// KeProfileInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt generated by the
+// profile timer. Its function is to update the profile information for
+// the currently active profile objects.
+//
+// N.B. This routine is executed on all processors in a multiprocessor system.
+//
+// N.B. KeProfileInterruptWithSource is not currently implemented.
+//
+// Arguments:
+//
+// TrapFrame (r.3) - Supplies a pointer to a trap frame.
+//
+// ProfileSource (r.4) - Supplies the source of the profile interrupt
+// KeProfileInterrupt is an alternate entry for backwards
+// compatibility that sets the source to zero (ProfileTime)
+//
+// Return Value:
+//
+// None.
+//
+//--
+ .struct 0
+ .space StackFrameHeaderLength
+piLR: .space 4 // Link Register
+ .align 3 // 8 byte align
+piFrameLength:
+
+ SPECIAL_ENTRY(KeProfileInterrupt)
+
+ li r.4, 0 // set profile source to
+ // ProfileTime
+
+ ALTERNATE_ENTRY(KeProfileInterruptWithSource)
+
+ mflr r.0
+ stwu r.sp, -piFrameLength(r.sp)
+ stw r.0, piLR(r.sp) // save return address
+
+ PROLOGUE_END(KeProfileInterrupt)
+
+#if !defined(NT_UP)
+
+ lwz r.11, [toc]KiProfileLock(r.toc)
+
+#endif
+
+#if !defined(NT_UP)
+ ACQUIRE_SPIN_LOCK(r.11, r.3, r.10, profile_lock, profile_lock_spin)
+#endif
+
+ lwz r.5, KiPcr+PcCurrentThread(r.0) // get current thread address
+ lwz r.5, ThApcState + AsProcess(r.5)// get current process address
+ addi r.5, r.5, PrProfileListHead // compute profile listhead addr
+ bl ..KiProcessProfileList // process process profile list
+ lwz r.5, [toc]KiProfileListHead(r.toc)// get profile listhead addr
+ bl ..KiProcessProfileList // process system profile list
+
+ lwz r.0, piLR(r.sp) // get return address
+
+#if !defined(NT_UP)
+ lwz r.11, [toc]KiProfileLock(r.toc)
+ li r.10, 0
+ RELEASE_SPIN_LOCK(r.11, r.10)
+#endif
+
+ mtlr r.0 // set return address
+ addi r.sp, r.sp, piFrameLength // deallocate stack frame
+
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r.11, r.10, profile_lock, profile_lock_spin)
+#endif
+
+ DUMMY_EXIT(KeProfileInterrupt)
+
+//++
+//
+// VOID
+// KiProcessProfileList (
+// IN PKTRAP_FRAME TrapFrame,
+// IN KPROFILE_SOURCE Source,
+// IN PLIST_ENTRY ListHead
+// )
+//
+// Routine Description:
+//
+// This routine is called to process a profile list.
+//
+// Arguments:
+//
+// TrapFrame (r.3) - Supplies a pointer to a trap frame.
+//
+// Source (r.4) - Supplies profile source to match
+//
+// ListHead (r.5) - Supplies a pointer to a profile list.
+//
+// Return Value:
+//
+// None.
+//
+// Note:
+//
+// Registers r.3 and r.4 are returned unaltered.
+//
+//--
+
+ LEAF_ENTRY(KiProcessProfileList)
+
+ lwz r.6, LsFlink(r.5) // get address of next entry
+ cmplw r.5, r.6 // cmp process profile list head
+ beqlr // if eq, end of list
+ lwz r.12, KiPcr+PcPrcb(r.0) // get address of PRCB
+ lwz r.7, TrIar(r.3) // get interrupt PC address
+ lwz r.12, PbSetMember(r.12) // get current processor num
+
+//
+// Scan profile list and increment profile buckets as appropriate.
+//
+
+l.10: lhz r.0, PfSource - PfProfileListEntry(r.6) // get source
+ lwz r.8, PfRangeBase - PfProfileListEntry(r.6) // get range base
+ cmplw cr.6, r.0, r.4 // compare source
+ lwz r.9, PfRangeLimit - PfProfileListEntry(r.6)// get range limit
+ lwz r.11, PfAffinity - PfProfileListEntry(r.6) // get affinity
+ bne cr.6, l.20 // if ne, source mismatch
+ cmplw cr.7, r.7, r.8 // check against range base
+ cmplw cr.1, r.7, r.9 // check against range limit
+ and. r.11, r.11, r.12 // check affinity
+ blt cr.7, l.20 // jif less than range base
+ bgt cr.1, l.20 // jif greater than range limit
+ beq cr.0, l.20 // jif affinity mismatch
+ sub r.8, r.7, r.8 // compute offset in range
+ lwz r.9, PfBucketShift - PfProfileListEntry(r.6)// get shift count
+ lwz r.10, PfBuffer - PfProfileListEntry(r.6) // get &profile buffer
+ srw r.8, r.8, r.9 // compute bucket offset
+ rlwinm r.8, r.8, 0, 0xfffffffc // clear low order offset bits
+ lwzx r.7, r.8, r.10 // increment profile bucket
+ addi r.7, r.7, 1 //
+ stwx r.7, r.8, r.10 //
+l.20: lwz r.6, LsFlink(r.6) // get address of next entry
+ cmplw r.5, r.6 // more entries in list ?
+ bne l.10 // jif yes
+
+ LEAF_EXIT(KiProcessProfileList)
+
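Two calculations in clock.s above are easy to miss amid the instruction scheduling: KeUpdateSystemTime selects a timer table listhead by masking the low tick count with TIMER_TABLE_SIZE - 1 (the rlwinm shift by 3 turns that index into an 8-byte listhead offset), and KeUpdateRunTime recomputes the DPC request rate as the average of the count observed during the last tick and the previous rate. The plain-C sketch below is illustrative only; the TIMER_TABLE_SIZE value is assumed, not the kernel's constant.

    #include <stdint.h>

    #define TIMER_TABLE_SIZE 128u               /* assumed for illustration */

    /* Timer table index examined for a given tick; the byte offset used by the
       assembly is this index * sizeof(LIST_ENTRY), i.e. index * 8 on 32-bit PPC. */
    static uint32_t TimerHand(uint32_t lowTickCount)
    {
        return lowTickCount & (TIMER_TABLE_SIZE - 1);
    }

    /* New DPC request rate: average of the DPCs queued since the last tick and
       the previous rate, as computed in KeUpdateRunTime. */
    static uint32_t UpdateDpcRequestRate(uint32_t dpcCount, uint32_t dpcLastCount,
                                         uint32_t lastRate)
    {
        return ((dpcCount - dpcLastCount) + lastRate) >> 1;
    }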
diff --git a/private/ntos/ke/ppc/ctxswap.s b/private/ntos/ke/ppc/ctxswap.s
new file mode 100644
index 000000000..2d4c2c41b
--- /dev/null
+++ b/private/ntos/ke/ppc/ctxswap.s
@@ -0,0 +1,2942 @@
+// TITLE("Context Swap")
+//++
+//
+// Copyright (c) 1994 IBM Corporation
+//
+// Module Name:
+//
+// ctxswap.s
+//
+// Abstract:
+//
+// This module implements the PowerPC machine dependent code necessary to
+// field the dispatch interrupt and to perform kernel initiated context
+// switching.
+//
+// Author:
+//
+// Peter L. Johnston (plj@vnet.ibm.com) August 1993
+// Adapted from code by David N. Cutler (davec) 1-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+// plj Apr-94 Upgraded to NT 3.5.
+//
+//--
+
+#include "ksppc.h"
+
+// Module Constants
+
+#define rPrcb r.29
+#define OTH r.30
+#define NTH r.31
+
+// Global externals
+
+ .extern ..KdPollBreakIn
+ .extern ..KeFlushCurrentTb
+ .extern ..KiActivateWaiterQueue
+ .extern ..KiContinueClientWait
+ .extern ..KiDeliverApc
+ .extern ..KiQuantumEnd
+ .extern ..KiReadyThread
+ .extern ..KiWaitTest
+
+ .extern KdDebuggerEnabled
+ .extern KeTickCount
+ .extern KiDispatcherReadyListHead
+ .extern KiIdleSummary
+ .extern KiReadySummary
+ .extern KiWaitInListHead
+ .extern KiWaitOutListHead
+ .extern __imp_HalProcessorIdle
+ .extern __imp_KeLowerIrql
+#if DBG
+ .extern ..DbgBreakPoint
+ .extern ..DbgBreakPointWithStatus
+#endif
+#if !defined(NT_UP)
+
+ .extern KeNumberProcessors
+ .extern KiBarrierWait
+ .extern KiContextSwapLock
+ .extern KiDispatcherLock
+ .extern KiProcessorBlock
+
+#if SPINDBG
+ .extern ..KiAcquireSpinLockDbg
+ .extern ..KiTryToAcquireSpinLockDbg
+#endif
+
+#endif
+ .extern KiMasterSequence
+ .extern KiMasterPid
+
+ .globl KiScheduleCount
+ .data
+KiScheduleCount:
+ .long 0
+
+#if COLLECT_PAGING_DATA
+ .globl KiFlushOnProcessSwap
+ .data
+KiFlushOnProcessSwap:
+ .long 0
+#endif
+
+
+//
+// ThreadSwitchFrame
+//
+// This is the layout of the beginning of the stack frame that must be
+// established by any routine that calls SwapContext.
+//
+// The caller of SwapContext must have saved r.14 and 26 thru 31.
+// SwapContext will take care of r.15 thru r.25, f.14 thru f.31 and
+// the condition register.
+//
+// Note: this is not a complete stack frame, the caller must allocate
+// additional space for any additional registers it needs to
+// save (eg Link Register). (Also, the following has not been
+// padded to 8 bytes).
+//
+// WARNING: KiInitializeContextThread() is aware of the layout of
+// a ThreadSwitchFrame.
+//
+
+ .struct 0
+ .space StackFrameHeaderLength
+swFrame:.space SwapFrameLength
+swFrameLength:
+
+
+// SBTTL("Switch To Thread")
+//++
+//
+// NTSTATUS
+// KiSwitchToThread (
+// IN PKTHREAD NextThread,
+// IN ULONG WaitReason,
+// IN ULONG WaitMode,
+// IN PKEVENT WaitObject
+// )
+//
+// Routine Description:
+//
+// This function performs an optimal switch to the specified target thread
+// if possible. No timeout is associated with the wait, thus the issuing
+// thread will wait until the wait event is signaled or an APC is delivered.
+//
+// N.B. This routine is called with the dispatcher database locked.
+//
+// N.B. The wait IRQL is assumed to be set for the current thread and the
+// wait status is assumed to be set for the target thread.
+//
+// N.B. It is assumed that if a queue is associated with the target thread,
+// then the concurrency count has been incremented.
+//
+// N.B. Control is returned from this function with the dispatcher database
+// unlocked.
+//
+// Arguments:
+//
+// NextThread - Supplies a pointer to a dispatcher object of type thread.
+//
+// WaitReason - supplies the reason for the wait operation.
+//
+// WaitMode - Supplies the processor wait mode.
+//
+// WaitObject - Supplies a pointer to a dispatcher object of type event
+// or semaphore.
+//
+// Return Value:
+//
+// The wait completion status. A value of STATUS_SUCCESS is returned if
+// the specified object satisfied the wait. A value of STATUS_USER_APC is
+// returned if the wait was aborted to deliver a user APC to the current
+// thread.
+//--
+
+ .struct 0
+ .space swFrameLength
+sttLR: .space 4
+sttReason: .space 4
+sttMode: .space 4
+sttObject: .space 4
+ .align 3 // ensure 8 byte alignment
+sttFrameLength:
+
+ SPECIAL_ENTRY_S(KiSwitchToThread,_TEXT$00)
+
+ mflr r.0 // get return address
+ stwu r.sp, -sttFrameLength(r.sp) // buy stack frame
+ stw r.14, swFrame + ExGpr14(r.sp) // save gpr 14
+ stw r.26, swFrame + ExGpr26(r.sp) // save gprs 26 through 31
+ stw r.27, swFrame + ExGpr27(r.sp)
+ stw r.28, swFrame + ExGpr28(r.sp)
+ stw r.29, swFrame + ExGpr29(r.sp)
+ stw r.30, swFrame + ExGpr30(r.sp)
+ stw r.31, swFrame + ExGpr31(r.sp)
+ ori NTH, r.3, 0
+ stw r.0, sttLR(r.sp) // save return address
+ li r.0, 0
+
+ PROLOGUE_END(KiSwitchToThread)
+
+//
+// Save the wait reason, the wait mode, and the wait object address.
+//
+
+ stw r.4, sttReason(r.sp) // save wait reason
+ stw r.5, sttMode(r.sp) // save wait mode
+ stw r.6, sttObject(r.sp) // save wait object address
+
+//
+// If the target thread's kernel stack is resident, the target thread's
+// process is in the balance set, the target thread can run on the
+// current processor, and another thread has not already been selected
+// to run on the current processor, then do a direct dispatch to the
+// target thread bypassing all the general wait logic, thread priorities
+// permitting.
+//
+
+ lwz r.7, ThApcState + AsProcess(NTH) // get target process address
+ lbz r.8, ThKernelStackResident(NTH) // get kernel stack resident
+ lwz rPrcb, KiPcr + PcPrcb(r.0) // get address of PRCB
+ lbz r.10, PrState(r.7) // get target process state
+ lwz OTH, KiPcr + PcCurrentThread(r.0) // get current thread address
+ cmpwi r.8, 0 // kernel stack resident?
+ beq LongWay // if eq, kernel stack not resident
+ cmpwi r.10, ProcessInMemory // process in memory?
+ bne LongWay // if ne, process not in memory
+
+#if !defined(NT_UP)
+
+ lwz r.8, PbNextThread(rPrcb) // get address of next thread
+ lbz r.10, ThNextProcessor(OTH) // get current processor number
+ lwz r.14, ThAffinity(NTH) // get target thread affinity
+ lwz r.26, KiPcr + PcSetMember(r.0) // get processor set member
+ cmpwi r.8, 0 // next thread selected?
+ bne LongWay // if ne, next thread selected
+ and. r.26, r.26, r.14 // check for compatible affinity
+ beq LongWay // if eq, affinity not compatible
+
+#endif
+
+//
+// Compute the new thread priority.
+//
+
+ lbz r.14, ThPriority(OTH) // get client thread priority
+ lbz r.26, ThPriority(NTH) // get server thread priority
+ cmpwi r.14, LOW_REALTIME_PRIORITY // check if realtime client
+ cmpwi cr.7, r.26, LOW_REALTIME_PRIORITY // check if realtime server
+ bge stt60 // if ge, realtime client
+ lbz r.27, ThPriorityDecrement(NTH) // get priority decrement value
+ lbz r.28, ThBasePriority(NTH) // get server base priority
+ bge cr.7, stt50 // if ge, realtime server
+ addi r.9, r.28, 1 // compute boosted priority
+ cmpwi r.27, 0 // server boost active?
+ bne stt30 // if ne, server boost active
+
+//
+// Both the client and the server are not realtime and a priority boost
+// is not currently active for the server. Under these conditions an
+// optimal switch to the server can be performed if the base priority
+// of the server is above a minimum threshold or the boosted priority
+// of the server is not less than the client priority.
+//
+
+ cmpw r.9, r.14 // check if high enough boost
+ cmpwi cr.7, r.9, LOW_REALTIME_PRIORITY // check if less than realtime
+ blt stt20 // if lt, boosted priority less
+ stb r.9, ThPriority(NTH) // assume boosted priority is okay
+ blt cr.7, stt70 // if lt, less than realtime
+ li r.9, LOW_REALTIME_PRIORITY - 1 // set high server priority
+ stb r.9, ThPriority(NTH) //
+ b stt70
+
+stt20:
+
+//
+// The boosted priority of the server is less than the current priority of
+// the client. If the server base priority is above the required threshold,
+// then an optimal switch to the server can be performed by temporarily
+// raising the priority of the server to that of the client.
+//
+
+ cmpwi r.28, BASE_PRIORITY_THRESHOLD // check if above threshold
+ sub r.9, r.14, r.28 // compute priority decrement value
+ blt LongWay // if lt, priority below threshold
+ li r.28, ROUND_TRIP_DECREMENT_COUNT // get system decrement count value
+ stb r.9, ThPriorityDecrement(NTH) // set priority decrement value
+ stb r.14, ThPriority(NTH) // set current server priority
+ stb r.28, ThDecrementCount(NTH) // set server decrement count
+ b stt70
+
+stt30:
+
+//
+// A server boost has previously been applied to the server thread. Count
+// down the decrement count to determine if another optimal server switch
+// is allowed.
+//
+
+
+ lbz r.9, ThDecrementCount(NTH) // decrement server count value
+ subic. r.9, r.9, 1 //
+ stb r.9, ThDecrementCount(NTH) // store updated decrement count
+ beq stt40 // if eq, no more switches allowed
+
+//
+// Another optimal switch to the server is allowed provided that the
+// server priority is not less than the client priority.
+//
+
+ cmpw r.26, r.14 // check if server lower priority
+ bge stt70 // if ge, server not lower priority
+ b LongWay
+
+stt40:
+
+//
+// The server has exhausted the number of times an optimal switch may
+// be performed without reducing its priority. Reduce the priority of
+// the server to its original unboosted (base) value.
+//
+
+ stb r.0, ThPriorityDecrement(NTH) // clear server priority decrement
+ stb r.28, ThPriority(NTH) // set server priority to base
+ b LongWay
+
+stt50:
+
+//
+// The client is not realtime and the server is realtime. An optimal switch
+// to the server can be performed.
+//
+
+ lbz r.9, PrThreadQuantum(r.7) // get process quantum value
+ b stt65
+
+stt60:
+
+//
+// The client is realtime. In order for an optimal switch to occur, the
+// server must also be realtime and run at a high or equal priority.
+//
+
+ cmpw r.26, r.14 // check if server is lower priority
+ lbz r.9, PrThreadQuantum(r.7) // get process quantum value
+ blt LongWay // if lt, server is lower priority
+
+stt65:
+
+ stb r.9, ThQuantum(NTH) // set server thread quantum
+
+stt70:
+
+//
+// Set the next processor for the server thread.
+//
+
+#if !defined(NT_UP)
+
+ stb r.10, ThNextProcessor(NTH) // set server next processor number
+
+#endif
+
+//
+// Set the address of the wait block list in the client thread, initialize
+// the event wait block, and insert the wait block in client event wait list.
+//
+
+ addi r.8, OTH, EVENT_WAIT_BLOCK_OFFSET // compute wait block address
+ stw r.8, ThWaitBlockList(OTH) // set address of wait block list
+ stw r.0, ThWaitStatus(OTH) // set initial wait status
+ stw r.6, WbObject(r.8) // set address of wait object
+ stw r.8, WbNextWaitBlock(r.8) // set next wait block address
+ lis r.10, WaitAny // get wait type and wait key
+ stw r.10, WbWaitKey(r.8) // set wait key and wait type
+ addi r.10, r.6, EvWaitListHead // compute wait object listhead address
+ lwz r.14, LsBlink(r.10) // get backward link of listhead
+ addi r.26, r.8, WbWaitListEntry // compute wait block list entry address
+ stw r.26, LsBlink(r.10) // set backward link of listhead
+ stw r.26, LsFlink(r.14) // set forward link in last entry
+ stw r.10, LsFlink(r.26) // set forward link in wait entry
+ stw r.14, LsBlink(r.26) // set backward link in wait entry
+
+//
+// Set the client thread wait parameters, set the thread state to Waiting,
+// and insert the thread in the proper wait list.
+//
+
+ stb r.0, ThAlertable(OTH) // set alertable FALSE.
+ stb r.4, ThWaitReason(OTH) // set wait reason
+ stb r.5, ThWaitMode(OTH) // set the wait mode
+ lbz r.6, ThEnableStackSwap(OTH) // get kernel stack swap enable
+ lwz r.10, [toc]KeTickCount(r.toc) // get &KeTickCount
+ lwz r.10, 0(r.10) // get low part of tick count
+ stw r.10, ThWaitTime(OTH) // set thread wait time
+ li r.8, Waiting // set thread state
+ stb r.8, ThState(OTH) //
+ lwz r.8,[toc]KiWaitInListHead(r.toc) // get address of wait in listhead
+ cmpwi r.5, 0 // is wait mode kernel?
+ beq stt75 // if eq, wait mode is kernel
+ cmpwi r.6, 0 // is kernel stack swap disabled?
+ beq stt75 // if eq, kernel stack swap disabled
+ lbz r.14, ThPriority(OTH) // reload client priority (r.14 clobbered above)
+ cmpwi r.14, LOW_REALTIME_PRIORITY + 9 // check if priority in range
+ blt stt76 // if lt, thread priority in range
+stt75:
+ lwz r.8,[toc]KiWaitOutListHead(r.toc) // get address of wait out listhead
+stt76:
+ lwz r.14, LsBlink(r.8) // get backlink of wait listhead
+ addi r.26, OTH, ThWaitListEntry // compute wait list entry address
+ stw r.26, LsBlink(r.8) // set backward link of listhead
+ stw r.26, LsFlink(r.14) // set forward link in last entry
+ stw r.8, LsFlink(r.26) // set forward link in wait entry
+ stw r.14, LsBlink(r.26) // set backward link in wait entry
+
+stt77:
+
+//
+// If the current thread is processing a queue entry, then attempt to
+// activate another thread that is blocked on the queue object.
+//
+// N.B. The next thread address can change if the routine to activate
+// a queue waiter is called.
+//
+
+ lwz r.3, ThQueue(OTH) // get queue object address
+ cmpwi r.3, 0 // queue object attached?
+ beq stt78 // if eq, no queue object attached
+ stw NTH, PbNextThread(rPrcb) // set next thread address
+ bl ..KiActivateWaiterQueue // attempt to activate a blocked thread
+ lwz NTH, PbNextThread(rPrcb) // get next thread address
+ li r.0, 0
+ stw r.0, PbNextThread(rPrcb) // set next thread address to NULL
+stt78:
+
+#if !defined(NT_UP)
+
+ lwz r.27, [toc]KiContextSwapLock(r.2)// get &KiContextSwapLock
+ lwz r.28, [toc]KiDispatcherLock(r.2) // get &KiDispatcherLock
+
+#endif
+
+ stw NTH, PbCurrentThread(rPrcb) // set address of current thread object
+ bl ..SwapContext // swap context
+
+//
+// Lower IRQL to its previous level.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+// N.B. Register NTH (r.31) contains the address of the new thread on return.
+//
+// In the following, we could lower IRQL, isync, and then check for
+// pending interrupts. I believe it is faster to disable interrupts
+// and get both loads going. We need to avoid the situation where a
+// DPC or APC could be queued between the time we load
+// PcSoftwareInterrupt and the time we actually lower IRQL. (plj)
+//
+// We load the thread's WaitStatus in this block for scheduling
+// reasons, on the expectation that in the normal case there is NOT
+// a DPC or APC pending.
+//
+
+ lbz r.27, ThWaitIrql(NTH) // get original IRQL
+
+ DISABLE_INTERRUPTS(r.5, r.6)
+
+ lhz r.4, KiPcr+PcSoftwareInterrupt(r.0)
+ lwz r.26, ThWaitStatus(NTH) // get wait completion status
+ stb r.27, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+ ENABLE_INTERRUPTS(r.5)
+
+ cmpw r.4, r.27 // compare pending interrupt level with new IRQL
+ ble+ stt79 // if le, no deliverable APC/DPC pending
+
+ bl ..KiDispatchSoftwareInterrupt // process software interrupt
+
+stt79:
+
+//
+// If the wait was not interrupted to deliver a kernel APC, then return the
+// completion status.
+//
+
+ cmpwi r.26, STATUS_KERNEL_APC // check if awakened for kernel APC
+ ori r.3, r.26, 0 // set return status
+ bne stt90 // if ne, normal wait completion
+
+//
+// Disable interrupts and acquire the dispatcher database lock.
+//
+
+#if !defined(NT_UP)
+
+ DISABLE_INTERRUPTS(r.5, r.6)
+
+//
+// WARNING: The address of KiDispatcherLock was intentionally left in
+// r.28 by SwapContext for use here.
+//
+
+ ACQUIRE_SPIN_LOCK(r.28, NTH, r.7, stt80, stt82)
+
+#endif
+
+//
+// Raise IRQL to synchronization level and save wait IRQL.
+//
+
+ li r.7, SYNCH_LEVEL
+ stb r.7, KiPcr+PcCurrentIrql(r.0)
+
+#if !defined(NT_UP)
+ ENABLE_INTERRUPTS(r.5)
+#endif
+
+ stb r.27, ThWaitIrql(NTH) // set client wait IRQL
+
+ b ContinueWait
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.28, r.7, stt80, stt82, stt85, r.5, r.6)
+#endif
+
+LongWay:
+
+//
+// Ready the target thread for execution and wait on the specified wait
+// object.
+//
+
+ bl ..KiReadyThread // ready thread for execution
+
+//
+// Continue the wait and return the wait completion status.
+//
+// N.B. The wait continuation routine is called with the dispatcher
+// database locked.
+//
+
+ContinueWait:
+
+ lwz r.3, sttObject(r.sp) // get wait object address
+ lwz r.4, sttReason(r.sp) // get wait reason
+ lwz r.5, sttMode(r.sp) // get wait mode
+ bl ..KiContinueClientWait // continue client wait
+
+stt90:
+ lwz r.0, sttLR(r.sp) // restore return address
+ lwz r.26, swFrame + ExGpr26(r.sp) // restore gprs 26 thru 31
+ lwz r.27, swFrame + ExGpr27(r.sp) //
+ lwz r.28, swFrame + ExGpr28(r.sp) //
+ lwz r.29, swFrame + ExGpr29(r.sp) //
+ lwz r.30, swFrame + ExGpr30(r.sp) //
+ mtlr r.0 // set return address
+ lwz r.31, swFrame + ExGpr31(r.sp) //
+ lwz r.14, swFrame + ExGpr14(r.sp) // restore gpr 14
+ addi r.sp, r.sp, sttFrameLength // return stack frame
+
+ blr
+
+ DUMMY_EXIT(KiSwitchToThread)
+
+ SBTTL("Dispatch Software Interrupt")
+//++
+//
+// VOID
+// KiDispatchSoftwareInterrupt (VOID)
+//
+// Routine Description:
+//
+// This routine is called when the current IRQL drops below
+// DISPATCH_LEVEL and a software interrupt may be pending. A
+// software interrupt is either a pending DPC or a pending APC.
+// If a DPC is pending, IRQL is raised to DISPATCH_LEVEL and
+// KiDispatchInterrupt is called. When KiDispatchInterrupt
+// returns, the IRQL in effect before it was raised is compared
+// to APC_LEVEL; if it is less and an APC interrupt is pending,
+// the APC is processed.
+//
+// Note: this routine manipulates PCR->CurrentIrql directly,
+// rather than using Ke[Raise|Lower]Irql, to avoid recursion.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
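+//
+// The dispatch decision can be summarized by the following C-style
+// sketch; this is an illustrative outline only, not code from this
+// module.
+//
+// OldIrql = PCR->CurrentIrql;                     // zero or APC_LEVEL
+// if (PCR->SoftwareInterrupt > OldIrql) {
+//     if (PCR->DispatchInterrupt != 0) {
+//         PCR->CurrentIrql = DISPATCH_LEVEL;
+//         KiDispatchInterrupt();                  // drain the DPC list
+//     }
+//     if ((OldIrql < APC_LEVEL) && (PCR->ApcInterrupt != 0)) {
+//         PCR->CurrentIrql = APC_LEVEL;
+//         KiDeliverApc(KernelMode, NULL, NULL);   // deliver kernel APCs
+//     }
+//     PCR->CurrentIrql = OldIrql;                 // (done piecemeal by the
+// }                                               // dispatch routines below)
+//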
+
+ .text
+ .align 5 // cache block align
+
+ LEAF_ENTRY_S(KiDispatchSoftwareInterrupt, _TEXT$00)
+
+ li r.3, 1 // Flag to dispatch routines
+ // to enable interrupts when
+ // returning to IRQL 0
+ DISABLE_INTERRUPTS(r.7, r.4)
+
+ ALTERNATE_ENTRY(KiDispatchSoftwareIntDisabled)
+ stw r.3, -8(r.sp) // Flag to dispatch routines
+ // indicating whether or not
+ // to enable interrupts
+ // when returning to IRQL 0
+
+ lhz r.9, KiPcr+PcSoftwareInterrupt(r.0) // pending s/w interrupt?
+ lbz r.3, KiPcr+PcCurrentIrql(r.0) // get current irql
+ srwi. r.4, r.9, 8 // isolate DPC pending
+ cmpw cr.6, r.9, r.3 // compare pending interrupts with IRQL
+ cmpwi cr.7, r.3, APC_LEVEL // compare IRQL to APC LEVEL
+
+//
+// Possible values for SoftwareInterrupt (r.9) are
+//
+// 0x0101 DPC and APC interrupt pending
+// 0x0100 DPC interrupt pending
+// 0x0001 APC interrupt pending
+// 0x0000 No software interrupt pending (unlikely but possible)
+//
+// Possible values for current IRQL are zero or one. By comparing
+// SoftwareInterrupt against current IRQL (above) we can quickly see
+// if any software interrupts are valid at this time.
+//
+// Calculate the correct IRQL for the interrupt we are processing. If a
+// DPC is pending we need to be at DISPATCH_LEVEL, which is one greater
+// than APC_LEVEL. r.4 contains one if we are going to run a DPC, so we
+// add APC_LEVEL to r.4 to get the desired IRQL.
+//
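+//
+// For example, with SoftwareInterrupt == 0x0100 (DPC pending only) and a
+// current IRQL of zero: r.4 = 0x0100 >> 8 = 1, so the new IRQL computed
+// below is 1 + APC_LEVEL = DISPATCH_LEVEL. With SoftwareInterrupt ==
+// 0x0001, the shift yields zero and the new IRQL is APC_LEVEL.
+//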
+
+ addi r.4, r.4, APC_LEVEL // calculate new IRQL
+
+ ble cr.6, ExitEnabled // return if no valid interrupt
+
+
+#if DBG
+ cmplwi cr.6, r.3, DISPATCH_LEVEL // sanity check, should only be
+ blt+ cr.6, $+12 // called below DISPATCH_LEVEL
+ twi 31, 0, 0x16 // BREAKPOINT
+ blr // return if wrong IRQL
+#endif
+
+//
+// ..DispatchSoftwareInterrupt is an alternate entry used indirectly
+// by KeReleaseSpinLock (via KxReleaseSpinLock). KeReleaseSpinLock has
+// been carefully written to construct the same conditions as apply if
+// execution came from above.
+//
+
+..DispatchSoftwareInterrupt:
+
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set IRQL
+ ENABLE_INTERRUPTS(r.7)
+
+ beq cr.0, ..KiDispatchApc // jif not DPC interrupt
+ beq cr.7, ..KiDispatchDpcOnly // jif DPC and old IRQL APC LEV.
+ b ..KiDispatchDpc // DPC int, old IRQL < APC LEV.
+
+ExitEnabled:
+ ENABLE_INTERRUPTS(r.7)
+ blr
+
+ DUMMY_EXIT(KiDispatchSoftwareInterrupt)
+
+ SBTTL("Unlock Dispatcher Database")
+//++
+//
+// VOID
+// KiUnlockDispatcherDatabase (
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This routine is entered at synchronization level with the dispatcher
+// database locked. Its function is to either unlock the dispatcher
+// database and return or initiate a context switch if another thread
+// has been selected for execution.
+//
+// N.B. This code merges with the following swap context code.
+//
+// N.B. A context switch CANNOT be initiated if the previous IRQL
+// is greater than or equal to DISPATCH_LEVEL.
+//
+// N.B. This routine is carefully written to be a leaf function. If,
+// however, a context swap should be performed, control drops into
+// the nested function KxUnlockDispatcherDatabase below.
+//
+// Arguments:
+//
+// OldIrql (r.3) - Supplies the IRQL when the dispatcher database
+// lock was acquired.
+//
+// Return Value:
+//
+// None.
+//
+//--
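+//
+// The cases handled below can be summarized by the following C-style
+// sketch; this is an illustrative outline only, not code from this
+// module.
+//
+// if (Prcb->NextThread != NULL) {
+//     if (OldIrql <= APC_LEVEL) {
+//         // drop into KxUnlockDispatcherDatabase below and switch
+//     } else {
+//         release the dispatcher lock;
+//         if (Prcb->DpcRoutineActive == FALSE) {
+//             PCR->DispatchInterrupt = 1;         // request a dispatch
+//             KiScheduleCount += 1;               // interrupt instead
+//         }
+//     }
+// } else {
+//     release the dispatcher lock;
+//     if (OldIrql > APC_LEVEL) {
+//         return;                                 // already at the right IRQL
+//     }
+//     if (PCR->SoftwareInterrupt > OldIrql) {
+//         // deliver the pending DPC and/or APC interrupt
+//     } else {
+//         PCR->CurrentIrql = OldIrql;
+//     }
+// }
+//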
+
+ LEAF_ENTRY_S(KiUnlockDispatcherDatabase, _TEXT$00)
+
+//
+// Check if a thread has been scheduled to execute on the current processor.
+//
+
+ cmpwi cr.1, r.3, APC_LEVEL // check if IRQL below dispatch level
+ lwz r.7, KiPcr+PcPrcb(r.0) // get address of PRCB
+ lhz r.9, KiPcr+PcSoftwareInterrupt(r.0) // pending s/w interrupt?
+ lwz r.8, PbNextThread(r.7) // get next thread address
+ cmpw cr.7, r.9, r.3 // compare pending against irql
+ cmpwi r.8, 0 // check if next thread selected
+ bne uddSwitch // jif new thread selected
+
+//
+// Not switching, release dispatcher database lock.
+//
+
+#if !defined(NT_UP)
+ lwz r.5, [toc]KiDispatcherLock(r.toc)
+ li r.6, 0
+ RELEASE_SPIN_LOCK(r.5, r.6)
+#endif
+
+//
+// If already at dispatch level, we're done.
+//
+
+ bgtlr cr.1 // return
+
+//
+// Dropping below DISPATCH_LEVEL, we may need to deliver a software
+// interrupt if one is pending.
+//
+//
+// Above we compared r.9 (SoftwareInterrupt) with r.3 (OldIrql) (result in
+// cr.7). See KiDispatchSoftwareInterrupt (above) for possible values.
+// If we did not take the above branch then OldIrql is either zero or one.
+// If SoftwareInterrupt is greater than OldIrql then a pending software
+// interrupt can be taken at this time.
+//
+
+ bgt cr.7, uddIntPending // jif pending interrupt
+
+ stb r.3, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+ blr // return
+
+
+//
+// A new thread has been selected to run on the current processor, but
+// the new IRQL is not below dispatch level. Release the dispatcher lock.
+// If the current processor is not executing a DPC, then request a dispatch
+// interrupt on the current processor. IRQL is already at the right level.
+//
+
+uddCantSwitch:
+
+#if !defined(NT_UP)
+ lwz r.5, [toc]KiDispatcherLock(r.toc)
+ li r.6, 0
+ RELEASE_SPIN_LOCK(r.5, r.6)
+#endif
+
+ lwz r.6, PbDpcRoutineActive(r.7)
+ li r.9, 1
+ cmplwi r.6, 0
+ bnelr // return if DPC already active
+
+ lwz r.5, [toc]KiScheduleCount(r.toc)
+ stb r.9, KiPcr+PcDispatchInterrupt(r.0) // request dispatch interrupt
+ lwz r.6, 0(r.5)
+ addi r.6, r.6, 1 // bump schedule count
+ stw r.6, 0(r.5)
+
+ blr // return
+
+//
+// A software interrupt is pending with higher priority than OldIrql.
+//
+// cr.1 is the result of comparing OldIrql with APC_LEVEL.
+//
+
+uddIntPending:
+
+//
+// Set flag to enable interrupts after dispatching software interrupts.
+// r.9 must be non-zero because PcSoftwareInterrupt (r.9) > OldIrql. The
+// flag only needs to be set once no matter which dispatch routine is called.
+//
+ stw r.9, -8(r.sp)
+
+ beq cr.1, ..KiDispatchDpcOnly // new IRQL doesn't allow APCs
+
+ srwi. r.3, r.9, 8 // isolate DPC pending
+ addi r.3, r.3, APC_LEVEL // calculate correct IRQL
+ stb r.3, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+ bne ..KiDispatchDpc // jif DPC at APC_LEVEL
+
+//
+// IRQL dropped from DISPATCH_LEVEL to APC_LEVEL, make sure no DPCs
+// were queued while we were checking. We are now at APC level so
+// any new DPCs will happen without our having to check again.
+//
+
+ lbz r.4, KiPcr+PcDispatchInterrupt(r.0)
+ cmpwi r.4, 0 // new DPCs?
+ beq ..KiDispatchApc // jif not
+
+ li r.3, DISPATCH_LEVEL // re-raise to DISPATCH_LEVEL
+ stb r.3, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+ b ..KiDispatchDpc
+
+ DUMMY_EXIT(KiUnlockDispatcherDatabase)
+
+//
+// A new thread has been selected to run on the current processor.
+//
+// If the new IRQL is less than dispatch level, then switch to the new
+// thread.
+//
+
+uddSwitch:
+ bgt cr.1, uddCantSwitch // jif new IRQL > apc level
+
+//
+// N.B. This routine is carefully written as a nested function. Control
+// drops into this function from above.
+//
+
+ SPECIAL_ENTRY_S(KxUnlockDispatcherDatabase, _TEXT$00)
+
+ mflr r.0 // get return address
+ stwu r.sp, -kscFrameLength(r.sp) // buy stack frame
+ stw r.29, swFrame + ExGpr29(r.sp) // save gpr 29
+ stw r.30, swFrame + ExGpr30(r.sp) // save gpr 30
+ ori rPrcb, r.7, 0 // copy PRCB address
+ stw r.31, swFrame + ExGpr31(r.sp) // save gpr 31
+ lwz OTH, KiPcr+PcCurrentThread(r.0) // get current thread address
+ stw r.27, swFrame + ExGpr27(r.sp) // save gpr 27
+ ori NTH, r.8, 0 // thread to switch to
+ stw r.14, swFrame + ExGpr14(r.sp) // save gpr 14
+ li r.11, 0 // zero to clear next thread address
+ stw r.28, swFrame + ExGpr28(r.sp) // save gpr 28
+ stw r.26, swFrame + ExGpr26(r.sp) // save gpr 26
+ stw r.0, kscLR(r.sp) // save return address
+
+ PROLOGUE_END(KxUnlockDispatcherDatabase)
+
+ stw r.11, PbNextThread(rPrcb) // clear next thread address
+ stb r.3, ThWaitIrql(OTH) // save previous IRQL
+
+//
+// Reready current thread for execution and swap context to the selected thread.
+//
+
+ ori r.3, OTH, 0 // set address of previous thread object
+ stw NTH, PbCurrentThread(rPrcb) // set address of current thread object
+
+#if !defined(NT_UP)
+
+ lwz r.27,[toc]KiContextSwapLock(r.2)// get &KiContextSwapLock
+ lwz r.28,[toc]KiDispatcherLock(r.2) // get &KiDispatcherLock
+
+#endif
+
+ bl ..KiReadyThread // reready thread for execution
+ b ksc130 // join common code
+
+ DUMMY_EXIT(KxUnlockDispatcherDatabase)
+
+//++
+//
+// VOID
+// KxReleaseSpinLock(VOID)
+//
+// Routine Description:
+//
+// This routine is entered when a call to KeReleaseSpinLock lowers
+// IRQL to a level sufficiently low for a pending software interrupt
+// to be deliverable.
+//
+// Although this routine has no arguments, the following entry conditions
+// apply.
+//
+// Interrupts are disabled.
+//
+// r.7 MSR prior to disabling interrupts.
+// r.4 IRQL to be raised to (PCR->CurrentIrql has been lowered
+// even though interrupts are currently disabled).
+//
+// cr.0 ne if DPC pending
+// cr.7 eq if ONLY DPCs can run (ie PCR->CurrentIrql == APC_LEVEL)
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ .struct 0
+ .space StackFrameHeaderLength
+sp31: .space 4 // r.31 save
+ .align 3 // ensure 8 byte alignment
+spLength:
+
+ SPECIAL_ENTRY_S(KxReleaseSpinLock, _TEXT$01)
+
+ stw r.31, sp31-spLength(r.sp) // save r.31
+ mflr r.31 // save return address (in r.31)
+ stwu r.sp, -spLength(r.sp) // buy stack frame
+
+ PROLOGUE_END(KxReleaseSpinLock)
+
+ li r.3, 1
+ stw r.3, -8(r.sp) // flag to dispatch routines
+ // to enable interrupts when
+ // returning to irql 0.
+ bl ..DispatchSoftwareInterrupt
+
+ mtlr r.31 // set return address
+ lwz r.31, sp31(r.sp) // restore r.31
+ addi r.sp, r.sp, spLength // release stack frame
+
+ SPECIAL_EXIT(KxReleaseSpinLock)
+
+
+//++
+//
+// VOID
+// KiDispatchDpcOnly (VOID)
+//
+// Routine Description:
+//
+// This routine is entered as a result of lowering IRQL to
+// APC_LEVEL with a DPC interrupt pending. IRQL is currently
+// at DISPATCH_LEVEL and the dispatcher database is unlocked.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+ .set kdsiFrameLength, STK_MIN_FRAME+8
+
+ NESTED_ENTRY_S(KiDispatchDpcOnly, kdsiFrameLength, 0, 0, _TEXT$00)
+ PROLOGUE_END(KiDispatchDpcOnly)
+
+kddo10: bl ..KiDispatchInterrupt
+
+ li r.3, APC_LEVEL // get new IRQL
+
+ DISABLE_INTERRUPTS(r.8, r.4)
+
+ lbz r.4, KiPcr+PcDispatchInterrupt(r.0) // more DPCs pending?
+ lwz r.5, KiPcr+PcPrcb(r.0) // get address of PRCB
+ cmpwi r.4, 0
+ lwz r.6, PbInterruptCount(r.5) // bump interrupt count
+ addi r.4, r.4, APC_LEVEL // calc new IRQL
+ addi r.6, r.6, 1
+ stw r.6, PbInterruptCount(r.5)
+ lwz r.6, STK_MIN_FRAME(r.sp) // parameter to enable
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+ beq+ kddo20
+
+ ENABLE_INTERRUPTS(r.8)
+
+ b kddo10 // jif more DPCs to run
+
+kddo20: cmpwi r.6, 0 // OK to enable interrupts?
+ beq kddo25 // return if not
+ ENABLE_INTERRUPTS(r.8) // reenable interrupts and exit
+
+kddo25:
+ NESTED_EXIT(KiDispatchDpcOnly, kdsiFrameLength, 0, 0)
+
+//++
+//
+// VOID
+// KiDispatchDpc (VOID)
+//
+// Routine Description:
+//
+// This routine is entered as a result of lowering IRQL below
+// APC_LEVEL with a DPC interrupt pending. IRQL is currently
+// at DISPATCH_LEVEL and the dispatcher database is unlocked.
+//
+// Once DPC processing is complete, APC processing may be required.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
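+//
+// The loop below can be summarized by the following C-style sketch;
+// this is an illustrative outline only, not code from this module.
+//
+// do {
+//     KiDispatchInterrupt();                      // drain the DPC list
+//     disable interrupts;
+//     Prcb->InterruptCount += 1;
+// } while (PCR->SoftwareInterrupt > APC_LEVEL);   // another DPC was queued
+// PCR->CurrentIrql = PCR->SoftwareInterrupt;      // APC_LEVEL or zero
+// if (PCR->SoftwareInterrupt == APC_LEVEL) {
+//     enable interrupts and fall into KiDispatchApc (kda10);
+// } else {
+//     enable interrupts only if the caller's stack flag permits;
+// }
+//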
+
+ NESTED_ENTRY_S(KiDispatchDpc, kdsiFrameLength, 0, 0, _TEXT$00)
+ PROLOGUE_END(KiDispatchDpc)
+
+kdd10: bl ..KiDispatchInterrupt
+
+ DISABLE_INTERRUPTS(r.8, r.4)
+
+ lwz r.5, KiPcr+PcPrcb(r.0) // get address of PRCB
+ lhz r.4, KiPcr+PcSoftwareInterrupt(r.0) // more DPCs or APCs pending?
+ lwz r.6, PbInterruptCount(r.5) // bump interrupt count
+ cmpwi r.4, APC_LEVEL
+ addi r.6, r.6, 1
+ stw r.6, PbInterruptCount(r.5)
+ bgt- kdd20 // jif more DPCs
+ lwz r.6, STK_MIN_FRAME(r.sp) // parameter to enable
+ // interrupts
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+ blt kdd30 // honor parameter to exit
+ // enabled or disabled when
+ // returning to IRQL 0
+
+ ENABLE_INTERRUPTS(r.8) // equal
+
+ b kda10 // jif APCs to run
+
+kdd20: ENABLE_INTERRUPTS(r.8) // greater than
+
+ b kdd10 // jif more DPCs to run
+
+kdd30: cmpwi r.6, 0 // OK to enable interrupts?
+ beq- kdd35 // return if not
+ ENABLE_INTERRUPTS(r.8) // reenable interrupts and exit
+
+kdd35:
+ NESTED_EXIT(KiDispatchDpc, kdsiFrameLength, 0, 0)
+
+//++
+//
+// VOID
+// KiDispatchApc (VOID)
+//
+// Routine Description:
+//
+// This routine is entered as a result of lowering IRQL below
+// APC_LEVEL with an APC interrupt pending. IRQL is currently
+// at APC_LEVEL.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ NESTED_ENTRY_S(KiDispatchApc, kdsiFrameLength, 0, 0, _TEXT$00)
+ PROLOGUE_END(KiDispatchApc)
+
+kda10:
+ lwz r.6, KiPcr+PcPrcb(r.0) // get address of PRCB
+ li r.3, 0 // PreviousMode = Kernel
+ lwz r.7, PbApcBypassCount(r.6) // get APC bypass count
+ li r.4, 0 // TrapFrame = 0
+ li r.5, 0 // ExceptionFrame = 0
+ addi r.7, r.7, 1 // increment APC bypass count
+ stb r.3, KiPcr+PcApcInterrupt(r.0) // clear APC pending
+ stw r.7, PbApcBypassCount(r.6) // store new APC bypass count
+ bl ..KiDeliverApc
+
+ li r.3, 0 // get new IRQL
+
+#if !defined(NT_UP)
+
+ DISABLE_INTERRUPTS(r.8, r.4)
+
+#else
+
+ stb r.3, KiPcr+PcCurrentIrql(r.0) // lower IRQL
+ sync
+
+#endif
+
+ lwz r.5, KiPcr+PcPrcb(r.0) // get address of PRCB
+ lbz r.4, KiPcr+PcApcInterrupt(r.0) // more APCs pending?
+ lwz r.6, PbInterruptCount(r.5) // bump interrupt count
+ cmpwi r.4, 0
+ addi r.6, r.6, 1
+ stw r.6, PbInterruptCount(r.5)
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // Raise IRQL if more APCs
+
+#if !defined(NT_UP)
+
+ ENABLE_INTERRUPTS(r.8)
+
+#endif
+
+ bne- kda10 // jif more APCs to run
+
+ NESTED_EXIT(KiDispatchApc, kdsiFrameLength, 0, 0)
+
+ SBTTL("Swap Thread")
+//++
+//
+// VOID
+// KiSwapThread (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This routine is called to select the next thread to run on the
+// current processor and to perform a context switch to the thread.
+//
+// Arguments:
+//
+// None.
+//
+// Outputs (for call to SwapContext):
+//
+// r.31 NTH pointer to new thread
+// r.30 OTH pointer to old thread
+// r.29 rPrcb pointer to processor control block
+// r.28 pointer to dispatcher database lock
+// r.27 pointer to context swap lock
+//
+// Return Value:
+//
+// Wait completion status (r.3).
+//
+//--
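+//
+// The selection logic below can be summarized by the following C-style
+// sketch; this is an illustrative outline only, not code from this
+// module.
+//
+// NewThread = Prcb->NextThread;
+// Prcb->NextThread = NULL;
+// if (NewThread == NULL) {
+//     // Scan the ready queues from the highest priority downward; the
+//     // highest candidate priority is 31 - cntlzw(KiReadySummary).
+//     // A thread is skipped if its affinity excludes this processor;
+//     // below LOW_REALTIME_PRIORITY + 9 a thread that last ran here,
+//     // whose ideal processor is this one, or that has waited at least
+//     // READY_SKIP_QUANTUM ticks is preferred over earlier entries.
+//     // If no candidate is found, NewThread = Prcb->IdleThread and
+//     // this processor's bit is set in KiIdleSummary.
+// }
+// Prcb->CurrentThread = NewThread;
+// SwapContext();
+// // On return (in the context of the resumed thread) lower IRQL to
+// // the thread's WaitIrql, deliver any software interrupt that the
+// // lower IRQL now permits, and return the thread's WaitStatus.
+//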
+
+ .struct 0
+ .space swFrameLength
+kscLR: .space 4
+ .align 3 // ensure 8 byte alignment
+kscFrameLength:
+
+ .align 6 // cache line align
+
+ SPECIAL_ENTRY_S(KiSwapThread,_TEXT$00)
+
+ mflr r.0 // get return address
+ stwu r.sp, -kscFrameLength(r.sp) // buy stack frame
+ stw r.29, swFrame + ExGpr29(r.sp) // save gpr 29
+ stw r.30, swFrame + ExGpr30(r.sp) // save gpr 30
+ stw r.31, swFrame + ExGpr31(r.sp) // save gpr 31
+ lwz OTH, KiPcr+PcCurrentThread(r.0) // get current thread addr
+ lwz rPrcb, KiPcr+PcPrcb(r.0) // get Processor Control Block
+ stw r.27, swFrame + ExGpr27(r.sp) // save gpr 27
+ stw r.28, swFrame + ExGpr28(r.sp) // save gpr 28
+ stw r.14, swFrame + ExGpr14(r.sp) // save gpr 14
+ lwz r.27, [toc]KiReadySummary(r.toc) // get &KiReadySummary
+ lwz NTH, PbNextThread(rPrcb) // get address of next thread
+ stw r.26, swFrame + ExGpr26(r.sp) // save gpr 26
+ li r.28, 0 // load a 0
+ lwz r.26, 0(r.27) // get ready summary
+ cmpwi NTH, 0 // next thread selected?
+ stw r.0, kscLR(r.sp) // save return address
+
+ PROLOGUE_END(KiSwapThread)
+
+ stw r.28, PbNextThread(rPrcb) // zero address of next thread
+ bne ksc120 // if ne, next thread selected
+
+#if !defined(NT_UP)
+
+ lwz r.5, [toc]KeTickCount(r.toc) // get &KeTickCount
+ lwz r.3, KiPcr+PcSetMember(r.0) // get processor affinity mask
+ lbz r.4, PbNumber(rPrcb) // get current processor number
+ lwz r.5, 0(r.5) // get low part of tick count
+
+#endif
+
+//
+// Find the highest priority ready thread.
+//
+
+ cntlzw r.6, r.26 // count zero bits from left
+ lwz r.8, [toc]KiDispatcherReadyListHead(r.toc) // get ready listhead base address
+ slw. r.7, r.26, r.6 // shift first set bit into sign bit
+ subfic r.6, r.6, 31 // convert shift count to priority
+
+ beq kscIdle // if mask is zero, no ready threads
+
+kscReadyScan:
+
+//
+// If the thread can execute on the current processor, then remove it from
+// the dispatcher ready queue.
+//
+
+ slwi r.9, r.6, 3 // compute ready listhead offset
+ slwi r.7, r.7, 1 // position next ready summary bit
+ add r.9, r.9, r.8 // compute ready queue address
+ lwz r.10, LsFlink(r.9) // get address of first entry
+ subi NTH, r.10, ThWaitListEntry // compute address of thread object
+
+#if !defined(NT_UP)
+
+kscAffinityScan:
+
+ lwz r.11, ThAffinity(NTH) // get thread affinity
+ lbz r.0, ThNextProcessor(NTH) // get last processor number
+ and. r.11, r.11, r.3 // check for compatible thread affinity
+ cmpw cr.6, r.0, r.4 // compare last processor with current
+ bne kscAffinityOk // if ne, thread affinity compatible
+
+ lwz r.10, LsFlink(r.10) // get address of next entry
+ cmpw r.10, r.9 // compare with queue address
+ subi NTH, r.10, ThWaitListEntry // compute address of thread object
+ bne kscAffinityScan // if ne, not end of list
+ksc70:
+ cmpwi r.7, 0 // more ready queues to scan?
+ subi r.6, r.6, 1 // decrement ready queue priority
+ blt kscReadyScan // if lt, queue contains an entry
+ beq kscIdle // if eq, no ready threads
+
+ slwi r.7, r.7, 1 // position next ready summary bit
+ b ksc70 // check next bit
+
+kscAffinityOk:
+
+//
+// If the thread last ran on the current processor, has been waiting for
+// longer than a quantum, or its priority is greater than low realtime
+// plus 9, then select the thread. Otherwise, an attempt is made to find
+// a more appropriate candidate.
+//
+
+ beq cr.6, kscReadyFound // if eq, processor number match
+ lbz r.0, ThIdealProcessor(NTH) // get ideal processor number
+ cmpwi r.6, LOW_REALTIME_PRIORITY + 9 // check if priority in range
+ cmpw cr.6, r.0, r.4 // compare ideal processor with current
+ beq cr.6, ksc100 // if eq, processor number match
+ bge ksc100 // if ge, priority high enough
+ lwz r.12, ThWaitTime(NTH) // get time of thread ready
+ sub r.12, r.5, r.12 // compute length of wait
+ cmpwi cr.7, r.12, READY_SKIP_QUANTUM + 1 // check if wait time exceeded
+ bge cr.7, ksc100 // if ge, waited long enough
+
+//
+// Search forward in the ready queue until the end of the list is reached
+// or a more appropriate thread is found.
+//
+
+ lwz r.14, LsFlink(r.10) // get address of next entry
+ksc80:
+ cmpw r.14, r.9 // compare with queue address
+ subi r.28, r.14, ThWaitListEntry // compute address of thread object
+ beq ksc100 // if eq, end of list
+
+ lwz r.11, ThAffinity(r.28) // get thread affinity
+ lbz r.0, ThNextProcessor(r.28) // get last processor number
+ and. r.11, r.11, r.3 // check for compatible thread affinity
+ cmpw cr.6, r.0, r.4 // compare last processor with current
+ beq ksc85 // if eq, thread affinity not compatible
+ beq cr.6, ksc90 // if eq, processor number match
+ lbz r.0, ThIdealProcessor(NTH) // get ideal processor number
+ cmpw cr.6, r.0, r.4 // compare ideal processor with current
+ beq cr.6, ksc90 // if eq, processor number match
+ksc85:
+ lwz r.12, ThWaitTime(r.28) // get time of thread ready
+ lwz r.14, LsFlink(r.14) // get address of next entry
+ sub r.12, r.5, r.12 // compute length of wait
+ cmpwi cr.7, r.12, READY_SKIP_QUANTUM + 1 // check if wait time exceeded
+ blt cr.7, ksc80 // if lt, wait time not exceeded
+ b ksc100 // wait time exceeded -- switch to
+ // first matching thread in ready queue
+ksc90:
+ ori NTH, r.28, 0 // set thread address
+ ori r.10, r.14, 0 // set list entry address
+ksc100:
+ stb r.4, ThNextProcessor(NTH) // set next processor number
+
+kscReadyFound:
+
+#endif
+
+//
+// Remove the selected thread from the ready queue.
+//
+
+ lwz r.11, LsFlink(r.10) // get list entry forward link
+ lwz r.12, LsBlink(r.10) // get list entry backward link
+ li r.0, 1 // set bit for mask generation
+ slw r.0, r.0, r.6 // compute ready summary set member
+ cmpw r.11, r.12 // check for list empty
+ stw r.11, LsFlink(r.12) // set forward link in previous entry
+ stw r.12, LsBlink(r.11) // set backward link in next entry
+ bne ksc120 // if ne, list is not empty
+ xor r.26, r.26, r.0 // clear ready summary bit
+ stw r.26, 0(r.27) // update ready summary
+ksc120:
+
+//
+// Swap context to the next thread.
+//
+
+
+#if !defined(NT_UP)
+
+ lwz r.27,[toc]KiContextSwapLock(r.2)// get &KiContextSwapLock
+ lwz r.28,[toc]KiDispatcherLock(r.2) // get &KiDispatcherLock
+
+#endif
+
+ stw NTH, PbCurrentThread(rPrcb) // set new thread current
+ksc130:
+ bl ..SwapContext // swap context
+
+
+//
+// Lower IRQL and return wait completion status.
+//
+// N.B. SwapContext releases the dispatcher database lock.
+//
+ lbz r.4, ThWaitIrql(NTH) // get original wait IRQL
+
+ DISABLE_INTERRUPTS(r.6, r.7)
+
+ lhz r.5, KiPcr+PcSoftwareInterrupt(r.0) // check for pending s/w ints
+ lwz r.14, kscLR(r.sp) // get return address
+ lwz r.31, ThWaitStatus(NTH) // get wait completion status
+ lwz r.26, swFrame + ExGpr26(r.sp) // restore gprs 26 thru 30
+ lwz r.27, swFrame + ExGpr27(r.sp) //
+ lwz r.28, swFrame + ExGpr28(r.sp) //
+ lwz r.29, swFrame + ExGpr29(r.sp) //
+ lwz r.30, swFrame + ExGpr30(r.sp) //
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+ ENABLE_INTERRUPTS(r.6)
+
+ cmpw r.5, r.4 // see if s/w int could now run
+ bgtl- ..KiDispatchSoftwareInterrupt // jif pending int can run
+ mtlr r.14 // set return address
+ ori r.3, r.31, 0 // set return status
+ lwz r.31, swFrame + ExGpr31(r.sp) // restore r.31
+ lwz r.14, swFrame + ExGpr14(r.sp) // restore r.14
+ addi r.sp, r.sp, kscFrameLength // return stack frame
+
+ SPECIAL_EXIT(KiSwapThread)
+
+kscIdle:
+
+//
+// All ready queues were scanned without finding a runnable thread so
+// default to the idle thread and set the appropriate bit in idle summary.
+//
+
+ lwz r.5, [toc]KiIdleSummary(r.toc) // get &KiIdleSummary
+
+#if defined(NT_UP)
+
+ li r.4, 1 // set current idle summary
+#else
+
+ lwz r.4, 0(r.5) // get current idle summary
+ or r.4, r.4, r.3 // set member bit in idle summary
+
+#endif
+
+ stw r.4, 0(r.5) // set new idle summary
+
+ lwz NTH,PbIdleThread(rPrcb) // set address of idle thread
+ b ksc120 // switch to idle thread
+
+
+ SBTTL("Swap Context to Next Thread")
+//++
+//
+// Routine Description:
+//
+// This routine is called to swap context from one thread to the next.
+//
+// Arguments:
+//
+// r.sp Pointer to { StackFrameHeader, ExceptionFrame }
+// r.31 NTH Address of next thread object
+// r.30 OTH Address of previous thread object
+// r.29 rPrcb Address of processor control block
+// r.28 Address of KiDispatcherLock
+// r.27 Address of KiContextSwapLock
+//
+// Return value:
+//
+// r.31 NTH Address of current thread object.
+// r.29 rPrcb Address of processor control block
+//
+// Note that the TOC register is neither saved nor restored across a
+// thread switch. This is because we are in NTOSKRNL (actually in the
+// routine SwapContext) in both threads (ie the TOC does not change).
+//
+// GPR 13 is set to the address of the TEB where it will remain.
+//
+//--
+
+ SPECIAL_ENTRY_S(SwapContext,_TEXT$00)
+
+ mfcr r.3 // get condition register
+
+//
+// Acquire the context swap lock so the address space of the old process
+// cannot be deleted and then release the dispatcher database lock.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
+
+#if !defined(NT_UP)
+ b LkCtxSw // skip spin code
+
+ SPIN_ON_SPIN_LOCK(r.27,r.4,LkCtxSw,LkCtxSwSpin) // spin on context swap lock
+
+ ACQUIRE_SPIN_LOCK(r.27,r.31,r.4,LkCtxSw,LkCtxSwSpin) // acquire context swap lock
+#endif
+
+//
+// Set the new thread's state to Running before releasing the dispatcher lock.
+//
+
+ li r.8, Running // set state of new thread
+ stb r.8, ThState(NTH) // to running.
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.28,r.4) // release dispatcher lock
+#endif
+
+//
+// Save old thread non-volatile context.
+//
+
+ mflr r.0 // get return address
+
+ stw r.15, swFrame + ExGpr15(r.sp) // save gprs 15 thru 25
+ stw r.16, swFrame + ExGpr16(r.sp) //
+ stw r.17, swFrame + ExGpr17(r.sp) //
+ stw r.18, swFrame + ExGpr18(r.sp) //
+ stw r.19, swFrame + ExGpr19(r.sp) //
+ stw r.20, swFrame + ExGpr20(r.sp) //
+ stw r.21, swFrame + ExGpr21(r.sp) //
+ stw r.22, swFrame + ExGpr22(r.sp) //
+ stw r.23, swFrame + ExGpr23(r.sp) //
+ stw r.24, swFrame + ExGpr24(r.sp) //
+ stw r.25, swFrame + ExGpr25(r.sp) //
+
+ stfd f.14, swFrame + ExFpr14(r.sp) // save non-volatile
+ stfd f.15, swFrame + ExFpr15(r.sp) // floating point regs
+ stfd f.16, swFrame + ExFpr16(r.sp) //
+ stfd f.17, swFrame + ExFpr17(r.sp) //
+ stfd f.18, swFrame + ExFpr18(r.sp) //
+ stfd f.19, swFrame + ExFpr19(r.sp) //
+ stfd f.20, swFrame + ExFpr20(r.sp) //
+ stfd f.21, swFrame + ExFpr21(r.sp) //
+ stfd f.22, swFrame + ExFpr22(r.sp) //
+ stfd f.23, swFrame + ExFpr23(r.sp) //
+ stfd f.24, swFrame + ExFpr24(r.sp) //
+ stfd f.25, swFrame + ExFpr25(r.sp) //
+ stfd f.26, swFrame + ExFpr26(r.sp) //
+ stfd f.27, swFrame + ExFpr27(r.sp) //
+ stfd f.28, swFrame + ExFpr28(r.sp) //
+ stfd f.29, swFrame + ExFpr29(r.sp) //
+ stfd f.30, swFrame + ExFpr30(r.sp) //
+ stfd f.31, swFrame + ExFpr31(r.sp) //
+
+ stw r.3, swFrame + SwConditionRegister(r.sp)// save CR
+ stw r.0, swFrame + SwSwapReturn(r.sp) // save return address
+
+ PROLOGUE_END(SwapContext)
+
+//
+// The following entry point is used to switch from the idle thread to
+// another thread.
+//
+
+..SwapFromIdle:
+
+#if DBG
+ cmpw NTH, OTH
+ bne th_ok
+ twi 31, 0, 0x16
+th_ok:
+#endif
+
+ stw NTH, KiPcr+PcCurrentThread(r.0) // set new thread current
+
+//
+// Get the old and new process object addresses.
+//
+
+#define NPROC r.3
+#define OPROC r.4
+
+ lwz NPROC, ThApcState + AsProcess(NTH) // get new process object
+ lwz OPROC, ThApcState + AsProcess(OTH) // get old process object
+
+ DISABLE_INTERRUPTS(r.15, r.6)
+
+ lwz r.13, ThTeb(NTH) // get addr of user TEB
+ lwz r.24, ThStackLimit(NTH) // get stack limit
+ lwz r.23, ThInitialStack(NTH) // get initial kernel stk ptr
+ lwz r.22, ThKernelStack(NTH) // get new thread stk ptr
+ cmpw cr.0, NPROC, OPROC // same process ?
+
+ stw r.sp, ThKernelStack(OTH) // save current kernel stk ptr
+ stw r.13, KiPcr+PcTeb(r.0) // set addr of user TEB
+//
+// Although interrupts are disabled, someone may be attempting to
+// single step through the following. I can't see any way to perform
+// the two operations atomically, so I am inserting a label that is
+// known externally and can be checked against the exception address
+// if we fail stack validation in common_exception_entry (in real0.s),
+// in which case it's really ok. This has no performance impact.
+//
+// *** WARNING ****** WARNING ****** WARNING ****** WARNING ***
+//
+// (1) these two instructions MUST stay together,
+// (2) the stack validation code in common_exception_entry
+// KNOWS that the second instruction is a 'ori r.sp, r.22, 0'
+// and will perform such an instruction in line to correct
+// the problem. If you change this sequence you will need
+// to make an equivalent change in real0.s, and the correctability
+// depends on the second instruction overwriting the stack
+// pointer.
+// (plj).
+//
+
+ stw r.24, KiPcr+PcStackLimit(r.0) // set stack limit
+ stw r.23, KiPcr+PcInitialStack(r.0) // set initial kernel stack ptr
+ .globl KepSwappingContext
+KepSwappingContext:
+ ori r.sp, r.22, 0 // switch stacks
+
+#if !defined(NT_UP)
+
+//
+// Old process address space is no longer required. Ensure all
+// stores are done prior to releasing the ContextSwap lock.
+// N.B. KiContextSwapLock is still needed to ensure KiMasterPid
+// integrity.
+//
+
+ li r.16, 0
+
+ eieio
+ bne cr.0, ksc10
+ stw r.16, 0(r.27) // release Context Swap lock
+ b ksc20
+
+ksc10:
+
+#else
+
+ beq cr.0, ksc20
+
+#endif
+
+//
+// If the process sequence number matches the system sequence number, then
+// use the process PID. Otherwise, allocate a new process PID.
+//
+// N.B. The following code is duplicated from KiSwapProcess and will
+// join KiSwapProcess at SwapProcessSlow if sequence numbers
+// don't match. Register usage from here to the branch should
+// match KiSwapProcess.
+//
+ lwz r.10,[toc]KiMasterSequence(r.toc) // get &KiMasterSequence
+ lwz r.9,PrProcessSequence(NPROC) // get process sequence number
+ lwz r.11,0(r.10) // get master sequence number
+ lwz r.7,PrProcessPid(NPROC) // get process PID
+ cmpw r.11,r.9 // master sequence == process sequence?
+ bne ksc15 // jif not equal, go the slow path
+
+#if !defined(NT_UP)
+
+ stw r.16, 0(r.27) // release Context Swap lock
+
+#endif
+
+//
+// Swap address space to the specified process.
+//
+
+ lwz r.5,PrDirectoryTableBase(r.3) // get page dir page real addr
+
+ mtsr 0,r.7 // set sreg 0
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 1,r.7 // set sreg 1
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 2,r.7 // set sreg 2
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 3,r.7 // set sreg 3
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 4,r.7 // set sreg 4
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 5,r.7 // set sreg 5
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 6,r.7 // set sreg 6
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 7,r.7 // set sreg 7
+ addi r.7,r.7,12-7 // add 5 to VSID
+ mtsr 12,r.7 // set sreg 12
+ isync // context synchronize
+ stw r.5,KiPcr+PcPgDirRa(r.0) // store page dir page ra in PCR
+
+#if COLLECT_PAGING_DATA
+ lwz r.10,[toc]KiFlushOnProcessSwap(r.toc)
+ lwz r.10,0(r.10)
+ cmpwi r.10,0
+ bnel ..KeFlushCurrentTb
+#endif
+
+ b ksc20
+
+ksc15:
+ bl SwapProcessSlow
+
+ksc20:
+ lbz r.5, ThApcState + AsKernelApcPending(NTH)
+ lbz r.16, ThDebugActive(NTH) // get the active debug register
+ // mask
+ stb r.5, KiPcr+PcApcInterrupt(r.0) // set APC pending appropriately
+ stb r.16, KiPcr+PcDebugActive(r.0) // set the active debug register
+ // mask for the new thread
+ lwz r.5, PbContextSwitches(rPrcb) // get context switch count
+ lwz r.7, ThContextSwitches(NTH)
+ addi r.5, r.5, 1 // bump context switch count
+ stw r.5, PbContextSwitches(rPrcb) // for processor.
+ addi r.7, r.7, 1 // bump context switch count
+ stw r.7, ThContextSwitches(NTH) // for this thread
+
+ ENABLE_INTERRUPTS(r.15)
+
+ lwz r.0, swFrame + SwSwapReturn(r.sp) // get return address
+ lwz r.5, swFrame + SwConditionRegister(r.sp)// get CR
+
+ lwz r.15, swFrame + ExGpr15(r.sp) // restore gprs 15 thru 25
+ lwz r.16, swFrame + ExGpr16(r.sp) //
+ lwz r.17, swFrame + ExGpr17(r.sp) //
+ lwz r.18, swFrame + ExGpr18(r.sp) //
+ lwz r.19, swFrame + ExGpr19(r.sp) //
+ lwz r.20, swFrame + ExGpr20(r.sp) //
+ lwz r.21, swFrame + ExGpr21(r.sp) //
+ lwz r.22, swFrame + ExGpr22(r.sp) //
+ lwz r.23, swFrame + ExGpr23(r.sp) //
+ lwz r.24, swFrame + ExGpr24(r.sp) //
+ lwz r.25, swFrame + ExGpr25(r.sp) //
+
+ lfd f.14, swFrame + ExFpr14(r.sp) // restore non-volatile
+ lfd f.15, swFrame + ExFpr15(r.sp) // floating point regs
+ lfd f.16, swFrame + ExFpr16(r.sp) //
+ lfd f.17, swFrame + ExFpr17(r.sp) //
+ lfd f.18, swFrame + ExFpr18(r.sp) //
+ lfd f.19, swFrame + ExFpr19(r.sp) //
+ lfd f.20, swFrame + ExFpr20(r.sp) //
+ lfd f.21, swFrame + ExFpr21(r.sp) //
+ lfd f.22, swFrame + ExFpr22(r.sp) //
+ lfd f.23, swFrame + ExFpr23(r.sp) //
+ lfd f.24, swFrame + ExFpr24(r.sp) //
+ lfd f.25, swFrame + ExFpr25(r.sp) //
+ lfd f.26, swFrame + ExFpr26(r.sp) //
+ lfd f.27, swFrame + ExFpr27(r.sp) //
+ mtlr r.0 // set return address
+ mtcrf 0xff, r.5 // set condition register
+ lfd f.28, swFrame + ExFpr28(r.sp) //
+ lfd f.29, swFrame + ExFpr29(r.sp) //
+ lfd f.30, swFrame + ExFpr30(r.sp) //
+ lfd f.31, swFrame + ExFpr31(r.sp) //
+
+ SPECIAL_EXIT(SwapContext)
+
+#undef NTH
+#undef OTH
+
+//++
+//
+// VOID
+// KiSwapProcess (
+// IN PKPROCESS NewProcess,
+// IN PKPROCESS OldProcess
+// )
+//
+// Routine Description:
+//
+// This function swaps the address space from one process to another by
+// moving to the PCR the real address of the process page directory page
+// and loading segment registers 0-7 and 12 with VSIDs derived therefrom.
+//
+// The fast path below is duplicated inline in SwapContext for speed.
+// SwapContext joins this code at SwapProcessSlow if sequence numbers
+// differ.
+//
+// Arguments:
+//
+// NewProcess (r3) - Supplies a pointer to a control object of type process
+// which represents the new process that is switched to.
+//
+// OldProcess (r4) - Supplies a pointer to a control object of type process
+// which represents the old process that is switched from.
+//
+// Return Value:
+//
+// None.
+//
+//--
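+//
+// The fast path can be summarized by the following C-style sketch; this
+// is an illustrative outline only, and KiSetSegmentRegister is simply a
+// hypothetical name for the mtsr instruction.
+//
+// Pid = NewProcess->ProcessPid;                   // VSID base
+// for (i = 0; i <= 7; i += 1) {
+//     KiSetSegmentRegister(i, Pid + i);           // user segments 0 - 7
+// }
+// KiSetSegmentRegister(12, Pid + 12);             // segment 12
+// PCR->PgDirRa = NewProcess->DirectoryTableBase;  // page directory real address
+//
+// N.B. PIDs are allocated in increments of 16 (see SwapProcessSlow), so
+// the VSIDs Pid+0 through Pid+7 and Pid+12 used above never collide with
+// those of another process.
+//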
+
+ LEAF_ENTRY_S(KiSwapProcess,_TEXT$00)
+
+//
+// Get the Context Swap lock. This lock is used to protect a
+// process's memory space; it serves double duty to protect access
+// to KiMasterSequence.
+//
+// N.B. It is already held if entry is via SwapProcessSlow, the
+// lock is ALWAYS released by this routine.
+//
+
+#if !defined(NT_UP)
+
+ lwz r.6, [toc]KiContextSwapLock(r.2)
+
+ ACQUIRE_SPIN_LOCK(r.6,r.3,r.5,LkCtxSw2,LkCtxSw2Spin)
+
+#endif
+
+//
+// If the process sequence number matches the system sequence number, then
+// use the process PID. Otherwise, allocate a new process PID.
+//
+// WARNING: if you change register usage in the following be sure to make
+// the same changes in SwapContext.
+//
+
+ lwz r.10,[toc]KiMasterSequence(r.toc) // get &KiMasterSequence
+ lwz r.9,PrProcessSequence(r.3) // get process sequence number
+ lwz r.11,0(r.10) // get master sequence number
+ lwz r.7,PrProcessPid(r.3) // get process PID
+ cmpw r.11,r.9 // master sequence == process sequence?
+ bne SwapProcessSlow // jif not equal, out of line
+
+
+//
+// Swap address space to the specified process.
+//
+
+spup: lwz r.5,PrDirectoryTableBase(r.3) // get page dir page real addr
+
+ DISABLE_INTERRUPTS(r.8,r.0) // disable interrupts
+
+#if !defined(NT_UP)
+
+ sync
+ li r.10, 0
+ stw r.10, 0(r.6) // release KiContextSwapLock
+
+#endif
+
+ stw r.5,KiPcr+PcPgDirRa(r.0) // store page dir page ra in PCR
+ mtsr 0,r.7 // set sreg 0
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 1,r.7 // set sreg 1
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 2,r.7 // set sreg 2
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 3,r.7 // set sreg 3
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 4,r.7 // set sreg 4
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 5,r.7 // set sreg 5
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 6,r.7 // set sreg 6
+ addi r.7,r.7,1 // add 1 to VSID
+ mtsr 7,r.7 // set sreg 7
+ addi r.7,r.7,12-7 // add 5 to VSID
+ mtsr 12,r.7 // set sreg 12
+ isync // context synchronize
+
+ ENABLE_INTERRUPTS(r.8) // enable interrupts
+
+#if COLLECT_PAGING_DATA
+ lwz r.10,[toc]KiFlushOnProcessSwap(r.toc)
+ lwz r.10,0(r.10)
+ cmpwi r.10,0
+ bne ..KeFlushCurrentTb
+#endif
+
+ ALTERNATE_EXIT(KiSwapProcess) // return
+
+//
+// We need a new PID. The dispatcher database lock is still held, so
+// we can update KiMasterPid without further protection.
+//
+
+SwapProcessSlow:
+ lwz r.8,[toc]KiMasterPid(r.toc) // get &KiMasterPid
+ lwz r.7,0(r.8) // get KiMasterPid
+ addi r.7,r.7,16 // bump master pid
+ rlwinm. r.7,r.7,0,0x007ffff0 // detect PID wrap
+ beq ..KxSwapProcess // jif PID wrap
+
+ stw r.11,PrProcessSequence(r.3) // save new process sequence
+//
+// control returns here from KxSwapProcess
+//
+
+spnp:
+
+#if !defined(NT_UP)
+
+ lwz r.6, [toc]KiContextSwapLock(r.2)
+
+#endif
+
+ stw r.7,0(r.8) // save new master PID
+ stw r.7,PrProcessPid(r.3) // save new process PID
+ b spup // continue with main line code
+
+#if !defined(NT_UP)
+
+ SPIN_ON_SPIN_LOCK(r.6,r.5,LkCtxSw2,LkCtxSw2Spin)
+
+#endif
+
+ DUMMY_EXIT(KiSwapProcess)
+
+//++
+//
+// VOID
+// KxSwapProcess (
+// IN PKPROCESS NewProcess
+// )
+//
+// Routine Description:
+//
+// This function is called (only) from KiSwapProcess when PID wrap has
+// occurred. KiSwapProcess is a LEAF function. The purpose of this
+// function is to allocate a stack frame and save data that needs to
+// be restored for KiSwapProcess. This routine is called approximately
+// once every 16 million new processes. The emphasis in KiSwapProcess
+// is to handle the other 16 million - 1 cases as fast as possible.
+//
+// Arguments:
+//
+// NewProcess (r3) - Supplies a pointer to a control object of
+// type process which represents the new process being switched to.
+// This must be saved and restored for KiSwapProcess.
+//
+// &KiMasterPid (r8) - Address of system global KiMasterPid
+// This must be restored for KiSwapProcess.
+//
+// &KiMasterSequence (r10) - Address of system global KiMasterSequence.
+//
+// KiMasterSequence (r11) - Current Value of the above variable.
+//
+// Return Value:
+//
+// None.
+//
+// Registers r3, r8 and the Link Register are restored. r7 contains
+// the new PID which will be 16.
+//
+//--
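+//
+// The wrap handling below can be summarized by the following C-style
+// sketch; this is an illustrative outline only, not code from this
+// module.
+//
+// KiMasterSequence += 1;
+// if (KiMasterSequence == 0) {
+//     KiMasterSequence = 2;                       // restart at 2 in case
+// }                                               // processes from system
+//                                                 // start are still running
+// NewProcess->ProcessSequence = KiMasterSequence;
+// KeFlushCurrentTb();                             // discard all stale
+//                                                 // translations
+// return to KiSwapProcess (spnp) with a new PID of 16, which becomes
+// both KiMasterPid and NewProcess->ProcessPid;
+//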
+
+ .struct 0
+ .space StackFrameHeaderLength
+spLR: .space 4 // link register save
+spR3: .space 4 // new process address save
+ .align 3 // ensure correct alignment
+spFrameLength:
+
+ SPECIAL_ENTRY_S(KxSwapProcess,_TEXT$00)
+
+ mflr r.0 // get link register
+ stwu r.sp,-spFrameLength(r.sp) // buy stack frame
+ stw r.3,spR3(r.sp) // save new process address
+ stw r.0,spLR(r.sp) // save swap process' return address
+
+ PROLOGUE_END(KxSwapProcess)
+
+//
+// PID wrap has occurred. On PowerPC we do not need to take the process
+// id wrap lock because TLB synchronization is handled by hardware.
+//
+
+ addic. r.11,r.11,1 // bump master sequence number
+ bne+ spnsw // jif sequence number did not wrap
+
+//
+// The master sequence number has wrapped; this is 4 billion * 16 million
+// processes... not too shabby. We start the sequence again at 2 in case
+// there are system processes that have been running since the system first
+// started.
+//
+
+ li r.11,2 // start again at 2
+
+spnsw: stw r.11,0(r.10) // save new master sequence number
+ stw r.11,PrProcessSequence(r.3) // save new process sequence num
+
+ bl ..KeFlushCurrentTb // flush entire HPT (and all processor
+ // TLBs)
+
+ lwz r.0,spLR(r.sp) // get swap process' return address
+ lwz r.3,spR3(r.sp) // get new process address
+ lwz r.8,[toc]KiMasterPid(r.toc) // get &KiMasterPid
+ addi r.sp,r.sp,spFrameLength // return stack frame
+ li r.7,16 // set new PID
+ mtlr r.0
+ b spnp // continue in KiSwapProcess
+
+ DUMMY_EXIT(KxSwapProcess)
+
+//++
+//
+// VOID
+// KiIdleLoop (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This is the idle loop for NT. This code runs in a thread for
+// each processor in the system. The idle thread runs at IRQL
+// DISPATCH_LEVEL and polls for work.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None. (This routine never returns).
+//
+// Non-volatile register usage is as follows.
+//
+// r.14 --unused - available --
+// r.15 Address of KdDebuggerEnabled
+// r.16 Kernel TOC (backup)
+// r.17 Idle loop MSR with Interrupts DISABLED
+// r.18 Idle loop MSR with Interrupts ENABLED
+// r.19 HalProcessorIdle entry point
+// r.20 HAL's TOC
+// r.21 Debugger poll count
+// r.22 Address of KeTickCount
+// r.23 Zero
+// r.24 Address of dispatcher database lock (MP) (backup for r28)
+// r.25 DpcListHead
+// r.26 --unused - available --
+// r.27 Address of Context Swap lock
+// r.28 Address of dispatcher database lock (MP)
+// r.29 Address of Processor Control Block
+//
+// When another thread is selected to run, SwapContext is called.
+// Normally, callers of SwapContext are responsible for saving and
+// restoring non-volatile regs r.14 and r.26 thru r.31. SwapContext
+// saves/restores gprs r.15 thru r.25. The idle loop never returns so
+// previous contents of r.14 and r.26 thru r.31 are not saved. The
+// idle loop pre-initializes the storage area where SwapContext would
+// normally save r.15 thru r.25 with values that the idle loop needs
+// in those registers upon return from SwapContext and skips over the
+// register save on the way into SwapContext (alternate entry point
+// SwapFromIdle). All callers to SwapContext pass the following
+// arguments-
+//
+// r.27 Address of Context Swap lock (&KiContextSwapLock)
+// r.28 Address of dispatcher database lock (&KiDispatcherLock)
+// r.29 Address of PRCB
+// r.30 Address of OLD thread object
+// r.31 Address of NEW thread object
+//
+// The idle loop does not have a fixed use for regs r.30 and r.31.
+// r.29 contains the correct value for this processor. r.14 and
+// r.26 contents are unknown and must be regenerated upon return
+// from SwapContext. The assignment of function to these registers
+// was chosen for easy regeneration of content.
+//
+// Note also that r.21 was assigned in the range of registers
+// restored by SwapContext so that it is reset to its initial
+// values whenever SwapContext is called.
+//
+//--
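+//
+// The loop itself can be summarized by the following C-style sketch;
+// this is an illustrative outline only, not code from this module.
+//
+// for (;;) {
+// #if DBG
+//     on processor 0, periodically poll KdPollBreakIn and break in
+//     to the debugger if requested;
+// #endif
+//     briefly enable, then disable interrupts;
+//     if (the per-processor DPC list is not empty) {
+//         KiProcessDpcList();
+//         PCR->DispatchInterrupt = 0;             // clear pending DPC int
+//     }
+//     if (Prcb->NextThread != NULL) {
+//         if (KiDispatcherLock cannot be acquired immediately) {
+//             continue;                           // try more DPCs instead
+//         }
+//         enable interrupts, make the next thread current, acquire
+//         KiContextSwapLock, set the thread Running, release the
+//         dispatcher lock, and call SwapFromIdle;
+//     } else {
+//         HalProcessorIdle();                     // returns with
+//     }                                           // interrupts enabled
+// }
+//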
+
+#define rDbg r.15
+#define rKTOC r.16
+#define rIntOff r.17
+#define rIntOn r.18
+#define rHalIdle r.19
+#define rHalToc r.20
+#define rDbgCount r.21
+#define rTickP r.22
+#define rZero r.23
+#define rDispLkSave r.24
+#define rDPCHEAD r.25
+
+#define rDispLk r.28
+#define rPrcb r.29
+
+
+ SPECIAL_ENTRY_S(KiIdleLoop,_TEXT$00)
+
+ mflr r.0 // get return address
+ stwu r.sp, -kscFrameLength(r.sp) // buy stack frame
+ stw r.0, kscLR(r.sp) // save return address
+
+ PROLOGUE_END(KiIdleLoop)
+
+
+//
+// Setup initial global register values
+//
+
+ ori rKTOC, r.toc, 0 // backup kernel's TOC
+ lwz rPrcb, KiPcr+PcPrcb(r.0) // Address of PCB to rPrcb
+ lwz rTickP, [toc]KeTickCount(r.toc) // Address of KeTickCount
+ lwz rDbg, [toc]KdDebuggerEnabled(r.toc)// Addr KdDebuggerEnabled
+ lwz rHalToc,[toc]__imp_HalProcessorIdle(r.toc)
+ lwz rHalToc,0(rHalToc)
+ lwz rHalIdle,0(rHalToc) // HalProcessorIdle entry
+ lwz rHalToc,4(rHalToc) // HAL's TOC
+ li rZero, 0 // Keep zero around, we use it
+ mfmsr rIntOff // get current machine state
+ rlwinm rIntOff, rIntOff, 0, 0xffff7fff // clear interrupt enable
+ ori rIntOn, rIntOff, 0x00008000 // set interrupt enable
+
+#if !defined(NT_UP)
+
+ lwz rDispLk, [toc]KiDispatcherLock(r.toc)// get &KiDispatcherLock
+ lwz r.27, [toc]KiContextSwapLock(r.toc) // get &KiContextSwapLock
+
+#endif
+
+ addi rDPCHEAD, rPrcb, PbDpcListHead // compute DPC listhead address
+ li rDbgCount, 0 // Clear breakin loop counter
+
+#if !defined(NT_UP)
+ ori rDispLkSave, rDispLk, 0 // copy &KiDispatcherLock
+#endif
+
+//
+// Registers 15 thru 25 are normally saved by SwapContext but the idle
+// loop uses an alternate entry that skips the save by SwapContext.
+// SwapContext will still restore them so we set up the stack so what
+// we want is what gets restored. This is especially useful for things
+// whose values need to be reset after SwapContext is called, eg rDbgCount.
+//
+
+ lwz r.4, [toc]__imp_KeLowerIrql(r.toc) // &&fd(KeLowerIrql)
+ stw r.15, swFrame + ExGpr15(r.sp)
+ stw r.16, swFrame + ExGpr16(r.sp)
+ stw r.17, swFrame + ExGpr17(r.sp)
+ lwz r.4, 0(r.4) // &fd(KeLowerIrql)
+ stw r.18, swFrame + ExGpr18(r.sp)
+ stw r.19, swFrame + ExGpr19(r.sp)
+ stw r.20, swFrame + ExGpr20(r.sp)
+ lwz r.5, 0(r.4) // &KeLowerIrql
+ stw r.21, swFrame + ExGpr21(r.sp)
+ stw r.22, swFrame + ExGpr22(r.sp)
+ stw r.23, swFrame + ExGpr23(r.sp)
+ stw r.24, swFrame + ExGpr24(r.sp)
+ stw r.25, swFrame + ExGpr25(r.sp)
+
+//
+// Control reaches here with IRQL at HIGH_LEVEL. Lower IRQL to
+// DISPATCH_LEVEL and set wait IRQL of idle thread.
+//
+
+ mtctr r.5
+ lwz r.toc, 4(r.4) // HAL's TOC
+ lwz r.11, KiPcr+PcCurrentThread(r.0) // Lower thread and processor
+ li r.3, DISPATCH_LEVEL // IRQL to DISPATCH_LEVEL.
+ stb r.3, ThWaitIrql(r.11)
+ bctrl
+ ori r.toc, rKTOC, 0 // restore our TOC
+
+//
+// In a multiprocessor system the boot processor proceeds directly into
+// the idle loop. As other processors start executing, however, they do
+// not enter the idle loop directly; instead they spin until all processors
+// have been started and the boot master allows them to proceed.
+//
+
+#if !defined(NT_UP)
+
+ lwz r.4, [toc]KiBarrierWait(r.toc)
+
+BarrierWait:
+ lwz r.3, 0(r.4) // get barrier wait value
+ cmpwi r.3, 0 // if ne spin until allowed
+ bne BarrierWait // to proceed.
+
+ lbz r.3, PbNumber(rPrcb) // get processor number
+ cmpwi cr.4, r.3, 0 // save "processor == 0 ?"
+
+#endif
+
+//
+// Set condition register and swap return values in the swap frame.
+//
+
+ mfcr r.3 // save condition register
+ stw r.3, swFrame + SwConditionRegister(r.sp)
+
+ bl FindIdleReturn
+FindIdleReturn:
+ mflr r.3
+ addi r.3, r.3, KiIdleReturn - FindIdleReturn
+ stw r.3, swFrame + SwSwapReturn(r.sp)// save return address
+
+//
+// The following loop is executed for the life of the system.
+//
+
+IdleLoop:
+
+#if DBG
+
+#if !defined(NT_UP)
+ bne cr.4, CheckDpcList // if ne, not processor zero
+#endif
+
+//
+// Check if the debugger is enabled, and whether it is time to poll
+// for a debugger breakin. (This check is only performed on cpu 0).
+//
+
+ subic. rDbgCount, rDbgCount, 1 // decrement poll counter
+ bge+ CheckDpcList // jif not time yet.
+ lbz r.3, 0(rDbg) // check if debugger enabled
+ li rDbgCount, 20 * 1000 // set breakin loop counter
+ cmpwi r.3, 0
+ beq+ CheckDpcList // jif debugger not enabled
+ bl ..KdPollBreakIn // check if breakin requested
+ cmpwi r.3, 0
+ beq+ CheckDpcList // jif no breakin request
+ li r.3, DBG_STATUS_CONTROL_C // send status to debugger
+ bl ..DbgBreakPointWithStatus
+
+#endif
+
+//
+// Disable interrupts and check if there is any work in the DPC list.
+//
+
+CheckDpcList:
+
+ mtmsr rIntOn // give interrupts a chance
+ isync // to interrupt spinning
+ mtmsr rIntOff // disable interrupts
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+
+
+//
+// Process the deferred procedure call list for the current processor.
+//
+
+ lwz r.3, LsFlink(rDPCHEAD) // get address of first entry
+ cmpw r.3, rDPCHEAD // is list empty?
+ beq CheckNextThread // if eq, DPC list is empty
+
+ ori r.31, rDPCHEAD, 0
+ bl ..KiProcessDpcList // process the DPC list
+
+//
+// Clear dispatch interrupt pending.
+//
+
+ stb rZero, KiPcr+PcDispatchInterrupt(r.0) // clear pending DPC interrupt
+
+#if DBG
+ li rDbgCount, 0 // clear breakin loop counter
+#endif
+
+//
+// Check if a thread has been selected to run on this processor
+//
+
+CheckNextThread:
+ lwz r.31, PbNextThread(rPrcb) // get address of next thread
+ cmpwi r.31, 0
+ beq Idle // jif no thread to execute
+
+//
+// A thread has been selected for execution on this processor. Acquire
+// dispatcher database lock, get the thread address again (it may have
+// changed), clear the address of the next thread in the processor block,
+// and call swap context to start execution of the selected thread.
+//
+// N.B. If the dispatcher database lock cannot be obtained immediately,
+// then attempt to process another DPC rather than spinning on the
+// dispatcher database lock.
+//
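+// Roughly, in C (illustrative sketch; assumes a try-acquire primitive
+// equivalent to the TRY_TO_ACQUIRE_SPIN_LOCK macro used below):
+//
+//     if (!TryToAcquireSpinLock(&KiDispatcherLock)) {
+//         goto CheckDpcList;          // do useful work instead of spinning
+//     }
+//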
+
+#if !defined(NT_UP)
+
+ TRY_TO_ACQUIRE_SPIN_LOCK(rDispLk, rDispLk, r.11, LkDisp, CheckDpcList)
+
+#endif
+
+ mtmsr rIntOn // enable interrupts
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+
+#if !defined(NT_UP)
+ lwz r.31, PbNextThread(rPrcb) // get next thread address
+#endif
+
+ lwz r.30, PbCurrentThread(rPrcb) // get current thread address
+ stw rZero, PbNextThread(rPrcb) // clear address of next thread
+ stw r.31, PbCurrentThread(rPrcb) // set new thread current
+
+//
+// Acquire the context swap lock so the address space of the old process
+// cannot be deleted and then release the dispatcher database lock. In
+// this case the old process is the system process, but the context swap
+// code releases the context swap lock so it must be acquired.
+//
+// N.B. This lock is used to protect the address space until the context
+// switch has sufficiently progressed to the point where the address
+// space is no longer needed. This lock is also acquired by the reaper
+// thread before it finishes thread termination.
+//
+
+#if !defined(NT_UP)
+
+ ACQUIRE_SPIN_LOCK(r.27,r.31,r.3,LkCtxSw1,LkCtxSw1Spin)
+
+#endif
+
+//
+// Set the new thread's state to Running before releasing the dispatcher lock.
+//
+
+ li r.3, Running // set state of new thread
+ stb r.3, ThState(r.31) // to running.
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.28,rZero)
+#endif
+
+ bl ..SwapFromIdle // swap context to new thread
+
+KiIdleReturn:
+
+#if !defined(NT_UP)
+
+ ori rDispLk, rDispLkSave, 0 // restore &KiDispatcherLock
+
+//
+// rDbgCount (r.21) will have been reset to 0 by the register restore
+// at the end of SwapContext.
+//
+// If processor 0, check for debugger breakin, otherwise just check for
+// DPCs again.
+//
+
+#endif
+
+ b IdleLoop
+
+//
+// There are no entries in the DPC list and a thread has not been selected
+// for execution on this processor. Call the HAL so power management can be
+// performed.
+//
+// N.B. The HAL is called with interrupts disabled. The HAL will return
+// with interrupts enabled.
+//
+
+Idle:
+ mtctr rHalIdle // set entry point
+ ori r.toc, rHalToc, 0 // set HAL's TOC
+ bctrl // call HalProcessorIdle
+ isync // give HAL's interrupt enable
+ // a chance to take effect
+ ori r.toc, rKTOC, 0 // restore ntoskrnl's TOC
+
+ b IdleLoop
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r.27,r.3,LkCtxSw1,LkCtxSw1Spin)
+#endif
+
+ DUMMY_EXIT(KiIdleLoop)
+
+#undef rDPCHEAD
+
+ SBTTL("Process Deferred Procedure Call List")
+//++
+//
+// Routine Description:
+//
+// This routine is called to process the given deferred procedure call
+// list.
+//
+// If called from KiDispatchInterrupt, we will have been switched to
+// the interrupt stack, the new stack pointer is in r.sp and entry is
+// at ..KiProcessDpcList.alt.
+//
+// If called from the idle loop, we run on the idle loop thread's
+// stack and no special action is needed. However, the idle loop
+// does not pass r.9 as we expect it and only passes r.0 = 0 on MP
+// systems. We take advantage of the separate entry points to set
+// these registers appropriately.
+//
+// N.B. Interrupts must be disabled on entry to this routine. Control
+// is returned to the caller with the same conditions true.
+//
+// Arguments:
+//
+// None.
+//
+// On entry:
+//
+// r.0 - Zero
+// r.9 - Machine State Register prior to disabling interrupts.
+// rPrcb r.29 - address of processor control block.
+// r.31 - address of DPC list head.
+//
+// On exit:
+//
+// r.0 - Zero
+// r.9 - Machine State Register prior to disabling interrupts.
+// rPrcb r.29 - address of processor control block.
+// r.31 - address of DPC list head.
+//
+// Return value:
+//
+// None.
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+dp.lr: .space 4
+dp.toc: .space 4
+
+#if DBG
+
+dp.func:.space 4
+dp.strt:.space 4
+dp.cnt: .space 4
+dp.time:.space 4
+
+#endif
+
+ .align 3 // ensure stack frame length is multiple of 8
+dp.framelen:
+
+ .text
+
+#if DBG
+
+# define rDpStart r.22
+# define rDpCount r.23
+# define rDpTime r.24
+
+#endif
+
+
+ SPECIAL_ENTRY_S(KiProcessDpcList,_TEXT$00)
+
+ stwu r.sp, -dp.framelen(r.sp) // buy stack frame
+
+ // see routine description for why we do the following.
+
+ ori r.9, rIntOn, 0 // get MSR interrupts enabled
+
+#if defined(NT_UP)
+ li r.0, 0
+#endif
+
+
+..KiProcessDpcList.alt:
+
+ mflr r.7 // save return address
+
+ // save regs we will use; we don't need to save 29 and 31 as they
+ // were saved by our caller and currently contain the values we want.
+
+ stw r.toc, dp.toc(r.sp)
+
+#if DBG
+
+ stw rDpTime, dp.time(r.sp)
+ stw rDpCount, dp.cnt(r.sp)
+ stw rDpStart, dp.strt(r.sp)
+
+#endif
+
+ stw r.7, dp.lr(r.sp) // save Link Register
+
+ PROLOGUE_END(KiProcessDpcList)
+
+DpcCallRestart:
+
+ stw r.sp, PbDpcRoutineActive(rPrcb) // set DPC routine active
+
+//
+// Process the DPC list.
+//
+
+DpcCall:
+
+#if !defined(NT_UP)
+
+ addi r.7, rPrcb, PbDpcLock // compute DPC lock address
+
+ ACQUIRE_SPIN_LOCK(r.7, r.7, r.0, spinlk2, spinlk2spin)
+
+#endif
+
+ lwz r.3, LsFlink(r.31) // get address of first entry
+ lwz r.12, LsFlink(r.3) // get addr of next entry
+ cmpw r.3, r.31 // is list empty?
+ subi r.3, r.3, DpDpcListEntry // subtract DpcListEntry offset
+ beq- UnlkDpc0 // if yes, release the lock.
+
+//
+// Get deferred routine address, this is done early as what
+// we actually have is a function descriptor's address and we
+// need to get the entry point address.
+//
+
+ lwz r.11, DpDeferredRoutine(r.3)
+ lwz r.8, PbDpcQueueDepth(rPrcb) // get DPC queue depth
+
+//
+// remove entry from list
+//
+
+ stw r.12, LsFlink(r.31) // set addr of next in header
+ stw r.31, LsBlink(r.12) // set addr of previous in next
+
+ lwz r.10, 0(r.11) // get DPC code address
+
+//
+// entry removed, set up arguments for DPC proc
+//
+// args are-
+// dpc object address (r.3)
+// deferred context (r.4)
+// system argument 1 (r.5)
+// system argument 2 (r.6)
+//
+// note, the arguments must be loaded from the DPC object BEFORE
+// the inserted flag is cleared to prevent the object being
+// overwritten before its time.
+//
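+// in C terms the required ordering is (illustrative sketch only):
+//
+//     DeferredRoutine = Dpc->DeferredRoutine;     // capture everything first
+//     DeferredContext = Dpc->DeferredContext;
+//     SystemArgument1 = Dpc->SystemArgument1;
+//     SystemArgument2 = Dpc->SystemArgument2;
+//     Dpc->Lock = NULL;                           // only now mark "not inserted";
+//                                                 // the owner may reuse the object
+//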
+
+ lwz r.4, DpDeferredContext(r.3)
+ lwz r.5, DpSystemArgument1(r.3)
+ lwz r.6, DpSystemArgument2(r.3)
+
+ subi r.8, r.8, 1 // decrement DPC queue depth
+ stw r.8, PbDpcQueueDepth(rPrcb) //
+ stw r.0, DpLock(r.3) // clear DPC inserted state
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.7, r.0)
+#endif
+
+ mtctr r.10 // ready address for branch
+
+ ENABLE_INTERRUPTS(r.9)
+
+ lwz r.toc, 4(r.11) // get DPC toc pointer
+
+#if DBG
+ lwz rDpStart, KiPcr2 + Pc2TickCountLow(r.0) // get current time
+ lwz rDpCount, PbInterruptCount(rPrcb)// get current interrupt count
+ lwz rDpTime, PbInterruptTime(rPrcb) // get current interrupt time
+ stw r.10, dp.func(r.sp)
+#endif
+
+ bctrl // call DPC routine
+
+ li r.0, 0 // reset zero constant
+
+#if DBG
+ lbz r.10, KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ cmplwi r.10, DISPATCH_LEVEL // check if < DISPATCH_LEVEL
+ blt DpcBadIrql // jif IRQL < DISPATCH_LEVEL
+
+DpcIrqlOk:
+
+ lwz r.12, KiPcr2 + Pc2TickCountLow(r.0) // calculate time spent in
+ sub r.12, r.12, rDpStart // r.12 = time
+ cmpwi r.12, 100
+ bge DpcTookTooLong // jif >= 1 second
+DpcTimeOk:
+#endif
+
+//
+// Check to determine if any more DPCs are available to process.
+//
+
+ DISABLE_INTERRUPTS(r.9, r.10)
+
+ lwz r.3, LsFlink(r.31) // get address of first entry
+ cmpw r.3, r.31 // is list empty?
+ bne- DpcCall // if no, process it
+
+//
+// Clear DpcRoutineActive, then check one last time that the DPC queue is
+// empty. This is required to close a race condition with the DPC queueing
+// code where it appears that a DPC routine is active (and thus an
+// interrupt is not requested), but this code has decided that the queue
+// is empty and is clearing DpcRoutineActive.
+//
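+// The equivalent C logic is (illustrative sketch only):
+//
+//     Prcb->DpcRoutineActive = 0;                 // claim "no DPC routine active"
+//     Prcb->DpcInterruptRequested = 0;
+//     if (!IsListEmpty(&Prcb->DpcListHead)) {     // a DPC may have been queued
+//         goto DpcCallRestart;                    // meanwhile; reclaim and drain
+//     }
+//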
+
+ stw r.0, PbDpcRoutineActive(rPrcb)
+ stw r.0, PbDpcInterruptRequested(rPrcb) // clear DPC interrupt requested
+ eieio // force writes out
+
+ lwz r.3, LsFlink(r.31)
+ cmpw r.3, r.31
+ bne- DpcCallRestart
+
+DpcDone:
+
+//
+// List is empty, restore non-volatile registers we have used.
+//
+
+ lwz r.10, dp.lr(r.sp) // get link register
+
+#if DBG
+ lwz rDpTime, dp.time(r.sp)
+ lwz rDpCount, dp.cnt(r.sp)
+ lwz rDpStart, dp.strt(r.sp)
+#endif
+
+
+//
+// Return to caller.
+//
+
+ lwz r.toc, dp.toc(r.sp) // restore kernel toc
+ mtlr r.10 // set return address
+ lwz r.sp, 0(r.sp) // release stack frame
+
+ blr // return
+
+UnlkDpc0:
+
+//
+// The DPC list became empty while we were acquiring the DPC queue lock.
+// Clear DPC routine active. The race condition mentioned above doesn't
+// exist here because we hold the DPC queue lock.
+//
+
+ stw r.0, PbDpcRoutineActive(rPrcb)
+
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r.7, r.0)
+#endif
+
+ b DpcDone
+
+//
+// DpcTookTooLong, DpcBadIrql
+//
+// Come here if it took >= 1 second to execute a DPC routine. This is way
+// too long, assume something is wrong and breakpoint.
+//
+// This code is out of line to avoid wasting cache space for something that
+// (hopefully) never happens.
+//
+
+#if DBG
+
+DpcTookTooLong:
+ lwz r.toc, dp.toc(r.sp) // restore kernel's TOC
+ lwz r.11, PbInterruptCount(rPrcb) // get current interrupt count
+ lwz r.10, PbInterruptTime(rPrcb) // get current interrupt time
+ lwz r.toc, dp.toc(r.sp) // restore our toc
+ sub r.11, r.11, rDpCount // compute number of interrupts
+ sub r.10, r.10, rDpTime // compute interrupt time
+ bl ..DbgBreakPoint // execute debug breakpoint
+ b DpcTimeOk // continue
+
+DpcBadIrql:
+ lwz r.toc, dp.toc(r.sp) // restore kernel's TOC
+ bl ..DbgBreakPoint // breakpoint
+ b DpcIrqlOk // continue
+
+#endif
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r.7, r.11, spinlk2, spinlk2spin)
+#endif
+
+ DUMMY_EXIT(KiProcessDpcList)
+
+ SBTTL("Dispatch Interrupt")
+
+#define rDPCHEAD r.31
+
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of a software interrupt generated
+// at DISPATCH_LEVEL. Its function is to process the Deferred Procedure Call
+// (DPC) list, and then perform a context switch if a new thread has been
+// selected for execution on the processor.
+//
+// This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+// database unlocked. When a return to the caller finally occurs, the
+// IRQL remains at DISPATCH_LEVEL, and the dispatcher database is still
+// unlocked.
+//
+// Arguments:
+//
+// None.
+//
+// Outputs:
+// ( for call to KiProcessDpcList )
+// r.3 - address of first dpc in list
+// r.0 - Zero
+// r.9 - Machine State Register prior to disabling interrupts.
+// rPrcb r.29 - address of processor control block.
+// rDPCHEAD r.31 - address of DPC listhead.
+//
+// ( for call to KiDispIntSwapContext )
+//
+// r.28 pointer to Dispatcher Database Lock
+// r.29 rPrcb pointer to the processor control block
+// r.31 NTH pointer to new thread
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+di.lr: .space 4
+di.28: .space 4
+di.29: .space 4
+di.30: .space 4
+di.31: .space 4
+
+ .align 3 // ensure stack frame length is multiple of 8
+di.framelen:
+
+ SPECIAL_ENTRY_S(KiDispatchInterrupt,_TEXT$00)
+
+ mflr r.0
+ stwu r.sp, -di.framelen(r.sp)
+ stw r.29, di.29(r.sp)
+ lwz rPrcb,KiPcr+PcPrcb(r.0)
+ stw r.30, di.30(r.sp)
+ stw r.31, di.31(r.sp)
+ stw r.0, di.lr(r.sp)
+ stw r.28, di.28(r.sp)
+
+ PROLOGUE_END(KiDispatchInterrupt)
+
+//
+// Setup commonly used constants
+//
+
+ lwz r.3, PbDpcBypassCount(rPrcb) // get DPC bypass count
+ li r.0, 0 // zero
+ addi rDPCHEAD, rPrcb, PbDpcListHead // compute DPC listhead address
+ addi r.3, r.3, 1 // increment DPC bypass count
+ stw r.3, PbDpcBypassCount(rPrcb) // store new DPC bypass count
+
+//
+// Process the deferred procedure call list.
+//
+
+PollDpcList:
+
+ DISABLE_INTERRUPTS(r.9, r.8)
+ lwz r.3, LsFlink(rDPCHEAD) // get address of first entry
+ stb r.0, KiPcr+PcDispatchInterrupt(r.0)
+ cmpw r.3, rDPCHEAD // list has entries?
+ beq- di.empty // jif list is empty
+
+//
+// Switch to the interrupt stack
+//
+
+ lwz r.6, KiPcr+PcInterruptStack(r.0)// get addr of interrupt stack
+ lwz r.28, KiPcr+PcInitialStack(r.0) // get current stack base
+ lwz r.30, KiPcr+PcStackLimit(r.0) // get current stack limit
+ subi r.4, r.6, KERNEL_STACK_SIZE // compute stack limit
+ stw r.sp,KiPcr+PcOnInterruptStack(r.0)// flag ON interrupt stack
+ stw r.sp, -dp.framelen(r.6) // save new back pointer
+
+//
+// N.B. Can't step thru the next two instructions.
+//
+
+ stw r.4, KiPcr+PcStackLimit(r.0) // set stack limit
+ stw r.6, KiPcr+PcInitialStack(r.0) // set current base to int stk
+ subi r.sp, r.6, dp.framelen // calc new sp
+
+ bl ..KiProcessDpcList.alt // process all DPCs for this
+ // processor.
+
+//
+// N.B. KiProcessDpcList left r.0, r.9 intact.
+//
+// Return from KiProcessDpcList switched back to the proper stack,
+// update PCR to reflect this.
+//
+
+ stw r.30, KiPcr+PcStackLimit(r.0) // restore stack limit
+ stw r.28, KiPcr+PcInitialStack(r.0) // set old stack current
+ stw r.0, KiPcr+PcOnInterruptStack(r.0)// clear ON interrupt stack
+
+di.empty:
+
+ ENABLE_INTERRUPTS(r.9)
+
+//
+// Check to determine if quantum end has occurred.
+//
+// N.B. If a new thread is selected as a result of processing a quantum
+// end request, then the new thread is returned with the dispatcher
+// database locked. Otherwise, NULL is returned with the dispatcher
+// database unlocked.
+//
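+// In outline (illustrative C sketch only):
+//
+//     if (PCR->QuantumEnd != 0) {
+//         PCR->QuantumEnd = 0;
+//         NewThread = KiQuantumEnd();     // returns with the dispatcher lock
+//                                         // held only if a thread is selected
+//         if (NewThread == NULL) {
+//             return;                     // nothing more to do
+//         }
+//         // fall into the thread switch path (di.Switch)
+//     }
+//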
+ lwz r.3, KiPcr+PcQuantumEnd(r.0) // get quantum end indicator
+ cmpwi r.3, 0 // if 0, no quantum end request
+ beq di.CheckForNewThread
+ stw r.0, KiPcr+PcQuantumEnd(r.0) // clear quantum end indicator
+
+ bl ..KiQuantumEnd // process quantum end
+ cmpwi r.3, 0 // new thread selected?
+ li r.0, 0 // reset r.0 to zero
+//
+// If KiQuantumEnd returned no new thread to run, the dispatcher
+// database is unlocked, get out.
+//
+
+ beq+ di.exit
+
+#if !defined(NT_UP)
+
+//
+// Even though the dispatcher database is already locked, we are expected
+// to pass the address of the lock in r.28.
+//
+
+ lwz r.28, [toc]KiDispatcherLock(r.toc)// get &KiDispatcherLock
+
+#endif
+
+ b di.Switch // switch to new thread
+
+//
+// Check to determine if a new thread has been selected for execution on
+// this processor.
+//
+
+di.CheckForNewThread:
+ lwz r.3, PbNextThread(rPrcb) // get address of next thread
+ cmpwi r.3, 0 // is there a new thread?
+ beq di.exit // no, branch.
+
+#if !defined(NT_UP)
+
+ lwz r.28, [toc]KiDispatcherLock(r.toc)// get &KiDispatcherLock
+
+
+//
+// Lock dispatcher database and reread address of next thread object since it
+// is possible for it to change in a multiprocessor system. (leave address
+// of lock in r.28).
+//
+
+ TRY_TO_ACQUIRE_SPIN_LOCK(r.28, r.28, r.11, di.spinlk, PollDpcList)
+
+#endif
+
+di.Switch:
+ lwz r.31, PbNextThread(rPrcb) // get thread address (again)
+ stw r.0, PbNextThread(rPrcb) // clear addr of next thread obj
+
+//
+// Reready current thread for execution and swap context to the
+// selected thread. We do this indirectly thru KiDispIntSwapContext
+// to avoid saving and restoring so many registers for the cases
+// when KiDispatchInterrupt does not switch threads.
+//
+
+ bl ..KiDispIntSwapContext // swap to new thread
+
+di.exit:
+
+ lwz r.0, di.lr(r.sp)
+ lwz r.31, di.31(r.sp)
+ lwz r.30, di.30(r.sp)
+ mtlr r.0
+ lwz r.29, di.29(r.sp)
+ lwz r.28, di.28(r.sp)
+ addi r.sp, r.sp, di.framelen
+
+ SPECIAL_EXIT(KiDispatchInterrupt)
+
+//++
+//
+// VOID
+// KiDispIntSwapContext (
+// IN PKTHREAD Thread
+// )
+//
+// Routine Description:
+//
+// This routine is called to perform a context switch to the specified
+// thread. The current (new previous) thread is re-readied for execution.
+//
+// Since this routine is called as a subroutine, all volatile registers are
+// considered free.
+//
+// Our caller has saved and will restore gprs 28 thru 31 and does not
+// care if we trash them.
+//
+// This routine is entered at IRQL DISPATCH_LEVEL with the dispatcher
+// database locked. When a return to the caller finally occurs, the
+// dispatcher database is unlocked.
+//
+// Arguments:
+//
+// r.28 pointer to Dispatcher Database Lock
+// r.29 rPrcb pointer to the processor control block
+// r.31 NTH pointer to new thread
+//
+// Outputs: ( for call to SwapContext )
+//
+// r.27 pointer to KiContextSwapLock
+// r.28 pointer to Dispatcher Database Lock
+// r.29 rPrcb pointer to processor control block
+// r.30 OTH pointer to old thread
+// r.31 NTH pointer to new thread
+//
+// Return Value:
+//
+// Wait completion status (r.3).
+//
+//--
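+
+// In outline (illustrative C sketch only):
+//
+//     OldThread = PCR->CurrentThread;
+//     Prcb->CurrentThread = NewThread;
+//     KiReadyThread(OldThread);           // re-ready the preempted thread
+//     SwapContext();                      // switch threads; the dispatcher
+//                                         // lock is released on the far side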
+
+ .struct 0
+ .space swFrameLength
+kdiscLR:.space 4
+ .align 3 // ensure 8 byte alignment
+kdiscFrameLength:
+
+ .align 6 // cache line alignment
+
+ SPECIAL_ENTRY_S(KiDispIntSwapContext,_TEXT$00)
+
+ mflr r.0 // get return address
+ lwz r.30, KiPcr+PcCurrentThread(r.0) // get current (old) thread
+ stwu r.sp, -kdiscFrameLength(r.sp) // buy stack frame
+ stw r.14, swFrame + ExGpr14(r.sp) // save gpr 14
+ stw r.26, swFrame + ExGpr26(r.sp) // save gprs 26 and 27
+ stw r.27, swFrame + ExGpr27(r.sp) //
+
+//
+// Gprs 28, 29, 30 and 31 saved/restored by KiDispatchInterrupt
+//
+
+ stw r.0, kdiscLR(r.sp) // save return address
+
+ PROLOGUE_END(KiDispIntSwapContext)
+
+ stw r.31, PbCurrentThread(rPrcb) // set new thread current
+
+//
+// Reready current thread and swap context to the selected thread.
+//
+
+ ori r.3, r.30, 0
+
+#if !defined(NT_UP)
+
+ lwz r.27, [toc]KiContextSwapLock(r.2)
+
+#endif
+
+ bl ..KiReadyThread // reready current thread
+ bl ..SwapContext // switch threads
+
+//
+// Restore registers and return.
+//
+
+ lwz r.0, kdiscLR(r.sp) // restore return address
+ lwz r.26, swFrame + ExGpr26(r.sp) // restore gpr 26 and 27
+ mtlr r.0 // set return address
+ lwz r.27, swFrame + ExGpr27(r.sp) //
+
+//
+// Gprs 28, 29, 30 and 31 saved/restored by KiDispatchInterrupt
+//
+
+ lwz r.14, swFrame + ExGpr14(r.sp) // restore gpr 14
+ addi r.sp, r.sp, kdiscFrameLength // return stack frame
+
+ SPECIAL_EXIT(KiDispIntSwapContext)
+
+//++
+//
+// VOID
+// KiRequestSoftwareInterrupt (KIRQL RequestIrql)
+//
+// Routine Description:
+//
+// This function requests a software interrupt at the specified IRQL
+// level.
+//
+// Arguments:
+//
+// RequestIrql (r.3) - Supplies the request IRQL value.
+// Allowable values are APC_LEVEL (1)
+// DPC_LEVEL (2)
+//
+// Return Value:
+//
+// None.
+//
+//--
+
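+//
+// In rough C terms (illustrative sketch; the two bytes at PcSoftwareInterrupt
+// are assumed to form a small array indexed 0 for APC_LEVEL, 1 for DPC_LEVEL,
+// matching the 1->0, 2->1 transform below):
+//
+//     PCR->SoftwareInterrupt[(RequestIrql >> 1) & 1] = TRUE;
+//     if (PCR->CurrentIrql < RequestIrql) {
+//         KiDispatchSoftwareInterrupt();  // reached via a jump, not a call
+//     }
+//
+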
+ LEAF_ENTRY(KiRequestSoftwareInterrupt)
+
+ lbz r.6, KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ rlwinm r.4, r.3, 31, 0x1 // transform 1 or 2 to 0 or 1
+ li r.5, 1 // non-zero value
+ cmpw r.6, r.3 // is current IRQL < requested IRQL?
+ stb r.5, KiPcr+PcSoftwareInterrupt(r.4) // set interrupt pending
+ blt ..KiDispatchSoftwareInterrupt // jump to dispatch interrupt if
+ // current IRQL low enough (note
+ // that this is a jump, not a call)
+
+ LEAF_EXIT(KiRequestSoftwareInterrupt)
diff --git a/private/ntos/ke/ppc/dmpstate.c b/private/ntos/ke/ppc/dmpstate.c
new file mode 100644
index 000000000..335d5078a
--- /dev/null
+++ b/private/ntos/ke/ppc/dmpstate.c
@@ -0,0 +1,797 @@
+/*++
+
+Copyright (c) 1993 IBM and Microsoft Corporation
+
+Module Name:
+
+ dmpstate.c
+
+Abstract:
+
+ This module implements the architecture specific routine that dumps
+ the machine state when a bug check occurs and no debugger is hooked
+ to the system. It is assumed that it is called from bug check.
+
+Author:
+
+ Chuck Bauman 19-Sep-1993
+
+Environment:
+
+ Kernel mode.
+
+Revision History:
+
+ Based on Dave Cutler's MIPS implementation
+
+ Tom Wood (twood) 19-Aug-1994
+ Update to use RtlVirtualUnwind even when there isn't a function table
+ entry. Add stack limit parameters to RtlVirtualUnwind.
+
+ Changed KiLookupFunctionEntry to deal with the indirect entries.
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiDisplayString (
+ IN ULONG Column,
+ IN ULONG Row,
+ IN PCHAR Buffer
+ );
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ );
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ );
+
+//
+// Define external data.
+//
+
+extern LIST_ENTRY PsLoadedModuleList;
+
+VOID
+KeDumpMachineState (
+ IN PKPROCESSOR_STATE ProcessorState,
+ IN PCHAR Buffer,
+ IN PULONG BugCheckParameters,
+ IN ULONG NumberOfParameters,
+ IN PKE_BUGCHECK_UNICODE_TO_ANSI UnicodeToAnsiRoutine
+ )
+
+/*++
+
+Routine Description:
+
+ This function formats and displays the machine state at the time of the
+ call to bug check.
+
+Arguments:
+
+ ProcessorState - Supplies a pointer to a processor state record.
+
+ Buffer - Supplies a pointer to a buffer to be used to output machine
+ state information.
+
+ BugCheckParameters - Supplies a pointer to an array of additional
+ bug check information.
+
+ NumberOfParameters - Supplies the size of the bug check parameters
+ array.
+
+ UnicodeToAnsiRoutine - Supplies a pointer to a routine to convert Unicode strings
+ to Ansi strings without touching paged translation tables.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PCONTEXT ContextRecord;
+ ULONG ControlPc;
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ ULONG DisplayColumn;
+ ULONG DisplayHeight;
+ ULONG DisplayRow;
+ ULONG DisplayWidth;
+ UNICODE_STRING DllName;
+ ULONG EstablisherFrame;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PVOID ImageBase;
+ ULONG Index;
+ BOOLEAN InFunction;
+ ULONG LastStack;
+ PLIST_ENTRY ModuleListHead;
+ PLIST_ENTRY NextEntry;
+ ULONG NextPc;
+ ULONG StackLimit;
+ UCHAR AnsiBuffer[ 32 ];
+ ULONG DateStamp;
+
+ //
+ // Call the HAL to force all external interrupts to be disabled
+ // at the interrupt controller. PowerPC optimization does not
+ // do this when raising to high level.
+ //
+ for (Index = 0; Index < MAXIMUM_VECTOR; Index++) {
+ HalDisableSystemInterrupt(Index, HIGH_LEVEL);
+ }
+
+ //
+ // Query display parameters.
+ //
+
+ HalQueryDisplayParameters(&DisplayWidth,
+ &DisplayHeight,
+ &DisplayColumn,
+ &DisplayRow);
+
+ //
+ // Display any addresses that fall within the range of any module in
+ // the loaded module list.
+ //
+
+ for (Index = 0; Index < NumberOfParameters; Index += 1) {
+ ImageBase = KiPcToFileHeader((PVOID)*BugCheckParameters,
+ &ImageBase,
+ &DataTableEntry);
+
+ if (ImageBase != NULL) {
+ sprintf(Buffer,
+ "*** %08lX has base at %08lX - %s\n",
+ *BugCheckParameters,
+ ImageBase,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+
+ HalDisplayString(Buffer);
+ }
+
+ BugCheckParameters += 1;
+ }
+
+ //
+ // Virtually unwind to the caller of bug check.
+ //
+
+ ContextRecord = &ProcessorState->ContextFrame;
+ LastStack = ContextRecord->Gpr1;
+ ControlPc = ContextRecord->Lr - 4;
+ NextPc = ControlPc;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if (FunctionEntry != NULL) {
+ NextPc = RtlVirtualUnwind(ControlPc,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL,
+ 0,
+ 0xffffffff);
+ }
+
+ //
+ // At this point the context record contains the machine state at the
+ // call to bug check.
+ //
+ // Put out the machine state at the time of the bugcheck.
+ //
+
+ sprintf(Buffer,
+ "\n Machine State at Call to Bug Check IAR:%08lX MSR:%08lX\n",
+ ContextRecord->Lr,
+ ContextRecord->Msr);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Format and output the integer registers.
+ //
+
+ sprintf(Buffer,
+ " R0:%8lX R1:%8lX R2:%8lX R3:%8lX R4:%8lX R5:%8lX\n",
+ ContextRecord->Gpr0,
+ ContextRecord->Gpr1,
+ ContextRecord->Gpr2,
+ ContextRecord->Gpr3,
+ ContextRecord->Gpr4,
+ ContextRecord->Gpr5);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ " R6:%8lX R7:%8lX R8:%8lX R9:%8lX R10:%8lX R11:%8lX\n",
+ ContextRecord->Gpr6,
+ ContextRecord->Gpr7,
+ ContextRecord->Gpr8,
+ ContextRecord->Gpr9,
+ ContextRecord->Gpr10,
+ ContextRecord->Gpr11);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "R12:%8lX R13:%8lX R14:%8lX R15:%8lX R16:%8lX R17:%8lX\n",
+ ContextRecord->Gpr12,
+ ContextRecord->Gpr13,
+ ContextRecord->Gpr14,
+ ContextRecord->Gpr15,
+ ContextRecord->Gpr16,
+ ContextRecord->Gpr17);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "R18:%8lX R19:%8lX R20:%8lX R21:%8lX R22:%8lX R23:%8lX\n",
+ ContextRecord->Gpr18,
+ ContextRecord->Gpr19,
+ ContextRecord->Gpr20,
+ ContextRecord->Gpr21,
+ ContextRecord->Gpr22,
+ ContextRecord->Gpr23);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "R24:%8lX R25:%8lX R26:%8lX R27:%8lX R28:%8lX R29:%8lX\n",
+ ContextRecord->Gpr24,
+ ContextRecord->Gpr25,
+ ContextRecord->Gpr26,
+ ContextRecord->Gpr27,
+ ContextRecord->Gpr28,
+ ContextRecord->Gpr29);
+
+ HalDisplayString(Buffer);
+
+ sprintf(Buffer,
+ "R30:%8lX R31:%8lX CR:%8lX CTR:%8lX XER:%8lX\n",
+ ContextRecord->Gpr30,
+ ContextRecord->Gpr31,
+ ContextRecord->Cr,
+ ContextRecord->Ctr,
+ ContextRecord->Xer);
+
+ HalDisplayString(Buffer);
+
+#if 0
+
+ //
+ // I'd much rather see a longer stack trace and skip the floating
+ // point stuff when the system crashes. plj
+ //
+
+ //
+ // Format and output the floating registers.
+ //
+ DumpFloat = (PULONG)(&ContextRecord->Fpr0);
+ sprintf(Buffer,
+ " F0- F3:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpr4);
+ sprintf(Buffer,
+ " F4- F7:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpr8);
+ sprintf(Buffer,
+ " F8-F11:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpr12);
+ sprintf(Buffer,
+ "F12-F15:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpr16);
+ sprintf(Buffer,
+ "F16-F19:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpr20);
+ sprintf(Buffer,
+ "F20-F23:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpr24);
+ sprintf(Buffer,
+ "F24-F27:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpr28);
+ sprintf(Buffer,
+ "F28-F31:%08lX%08lX %08lX%08lX %08lX%08lX %08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat,
+ *(DumpFloat+3),
+ *(DumpFloat+2),
+ *(DumpFloat+5),
+ *(DumpFloat+4),
+ *(DumpFloat+7),
+ *(DumpFloat+6));
+
+ HalDisplayString(Buffer);
+
+
+ DumpFloat = (PULONG)(&ContextRecord->Fpscr);
+ sprintf(Buffer,
+ " FPSCR:%08lX%08lX\n",
+ *(DumpFloat+1),
+ *DumpFloat);
+
+ HalDisplayString(Buffer);
+
+#define STAKWALK 4
+#else
+#define STAKWALK 8
+#endif
+
+ //
+ // Output short stack back trace with base address.
+ //
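+ //
+ // Each iteration prints one frame and then virtually unwinds a level: if a
+ // function table entry is found and the frame pointer is still below the
+ // limit taken from the current thread's kernel stack, RtlVirtualUnwind
+ // yields the next PC; otherwise the saved LR is used. The walk stops after
+ // STAKWALK frames or when neither the PC nor the stack pointer changes.
+ //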
+
+ DllName.Length = 0;
+ DllName.Buffer = L"";
+ if (FunctionEntry != NULL) {
+ StackLimit = (ULONG)KeGetCurrentThread()->KernelStack;
+ HalDisplayString("Callee-Sp Return-Ra Dll Base - Name\n");
+ for (Index = 0; Index < STAKWALK; Index += 1) {
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ sprintf(Buffer,
+ " %08lX %08lX : %08lX - %s\n",
+ ContextRecord->Gpr1,
+ NextPc + 4,
+ ImageBase,
+ (*UnicodeToAnsiRoutine)( (ImageBase != NULL) ? &DataTableEntry->BaseDllName : &DllName,
+ AnsiBuffer, sizeof( AnsiBuffer )));
+
+ HalDisplayString(Buffer);
+
+ if ((NextPc != ControlPc) || (ContextRecord->Gpr1 != LastStack)) {
+ ControlPc = NextPc;
+ LastStack = ContextRecord->Gpr1;
+ FunctionEntry = KiLookupFunctionEntry(ControlPc);
+ if ((FunctionEntry != NULL) && (LastStack < StackLimit)) {
+ NextPc = RtlVirtualUnwind(ControlPc,
+ FunctionEntry,
+ ContextRecord,
+ &InFunction,
+ &EstablisherFrame,
+ NULL,
+ 0,
+ 0xffffffff);
+ } else {
+ NextPc = ContextRecord->Lr;
+ }
+
+ } else {
+ break;
+ }
+ }
+ }
+
+ //
+ // Output the build number and other useful information.
+ //
+
+ sprintf(Buffer,
+ "\nIRQL : %d, DPC Active : %s, SYSVER 0x%08x\n",
+ KeGetCurrentIrql(),
+ KeIsExecutingDpc() ? "TRUE" : "FALSE",
+ NtBuildNumber);
+
+ HalDisplayString(Buffer);
+
+ //
+ // Output the processor id and the primary cache sizes.
+ //
+
+ sprintf(Buffer,
+ "Processor Id: %d.%d, Icache: %d, Dcache: %d",
+ PCR->ProcessorVersion,
+ PCR->ProcessorRevision,
+ PCR->FirstLevelIcacheSize,
+ PCR->FirstLevelDcacheSize);
+
+ HalDisplayString(Buffer);
+
+ //
+ // If the display width is greater than 80 + 24 (the size of a DLL
+ // name and base address), then display all the modules loaded in
+ // the system.
+ //
+
+ HalQueryDisplayParameters(&DisplayWidth,
+ &DisplayHeight,
+ &DisplayColumn,
+ &DisplayRow);
+
+ if (DisplayWidth > (80 + 24)) {
+ HalDisplayString("\n");
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ //
+ // Output display headers.
+ //
+
+ Index = 1;
+ KiDisplayString(80, Index, "Dll Base DateStmp - Name");
+ NextEntry = ModuleListHead->Flink;
+ if (NextEntry != NULL) {
+
+ //
+ // Scan the list of loaded modules and display their base
+ // address and name.
+ //
+
+ while (NextEntry != ModuleListHead) {
+ Index += 1;
+ DataTableEntry = CONTAINING_RECORD(NextEntry,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ if (MmDbgReadCheck(DataTableEntry->DllBase) != NULL) {
+ PIMAGE_NT_HEADERS NtHeaders;
+
+ NtHeaders = RtlImageNtHeader(DataTableEntry->DllBase);
+ DateStamp = NtHeaders->FileHeader.TimeDateStamp;
+
+ } else {
+ DateStamp = 0;
+ }
+ sprintf(Buffer,
+ "%08lX %08lx - %s",
+ DataTableEntry->DllBase,
+ DateStamp,
+ (*UnicodeToAnsiRoutine)( &DataTableEntry->BaseDllName, AnsiBuffer, sizeof( AnsiBuffer )));
+
+ KiDisplayString(80, Index, Buffer);
+ NextEntry = NextEntry->Flink;
+ if (Index > DisplayHeight) {
+ break;
+ }
+ }
+ }
+ }
+
+ //
+ // Reset the current display position.
+ //
+
+ HalSetDisplayParameters(DisplayColumn, DisplayRow);
+
+ //
+ // The system has crashed, if we are running without the Kernel
+ // debugger attached, attach it now.
+ //
+
+ KdInitSystem(NULL, FALSE);
+
+ return;
+}
+
+VOID
+KiDisplayString (
+ IN ULONG Column,
+ IN ULONG Row,
+ IN PCHAR Buffer
+ )
+
+/*++
+
+Routine Description:
+
+ This function displays a string starting at the specified column and row
+ position on the screen.
+
+Arguments:
+
+ Column - Supplies the starting column of where the string is displayed.
+
+ Row - Supplies the starting row of where the string is displayed.
+
+ Buffer - Supplies a pointer to the string that is displayed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Position the cursor and display the string.
+ //
+
+ HalSetDisplayParameters(Column, Row);
+ HalDisplayString(Buffer);
+ return;
+}
+
+PRUNTIME_FUNCTION
+KiLookupFunctionEntry (
+ IN ULONG ControlPc
+ )
+
+/*++
+
+Routine Description:
+
+ This function searches the currently active function tables for an entry
+ that corresponds to the specified PC value.
+
+Arguments:
+
+ ControlPc - Supplies the address of an instruction within the specified
+ function.
+
+Return Value:
+
+ If there is no entry in the function table for the specified PC, then
+ NULL is returned. Otherwise, the address of the function table entry
+ that corresponds to the specified PC is returned.
+
+--*/
+
+{
+
+ PLDR_DATA_TABLE_ENTRY DataTableEntry;
+ PRUNTIME_FUNCTION FunctionEntry;
+ PRUNTIME_FUNCTION FunctionTable;
+ ULONG SizeOfExceptionTable;
+ LONG High;
+ PVOID ImageBase;
+ LONG Low;
+ LONG Middle;
+
+ //
+ // Search for the image that includes the specified PC value.
+ //
+
+ ImageBase = KiPcToFileHeader((PVOID)ControlPc,
+ &ImageBase,
+ &DataTableEntry);
+
+ //
+ // If an image is found that includes the specified PC, then locate the
+ // function table for the image.
+ //
+
+ if (ImageBase != NULL) {
+ FunctionTable = (PRUNTIME_FUNCTION)RtlImageDirectoryEntryToData(
+ ImageBase, TRUE, IMAGE_DIRECTORY_ENTRY_EXCEPTION,
+ &SizeOfExceptionTable);
+
+ //
+ // If a function table is located, then search the function table
+ // for a function table entry for the specified PC.
+ //
+
+ if (FunctionTable != NULL) {
+
+ //
+ // Initialize search indices.
+ //
+
+ Low = 0;
+ High = (SizeOfExceptionTable / sizeof(RUNTIME_FUNCTION)) - 1;
+
+ //
+ // Perform binary search on the function table for a function table
+ // entry that subsumes the specified PC.
+ //
+
+ while (High >= Low) {
+
+ //
+ // Compute next probe index and test entry. If the specified PC
+ // is greater than or equal to the beginning address and less
+ // than the ending address of the function table entry, then
+ // return the address of the function table entry. Otherwise,
+ // continue the search.
+ //
+
+ Middle = (Low + High) >> 1;
+ FunctionEntry = &FunctionTable[Middle];
+ if (ControlPc < FunctionEntry->BeginAddress) {
+ High = Middle - 1;
+
+ } else if (ControlPc >= FunctionEntry->EndAddress) {
+ Low = Middle + 1;
+
+ } else {
+
+ //
+ // The capability exists for more than one function entry
+ // to map to the same function. This permits a function to
+ // have (within reason) discontiguous code segment(s). If
+ // PrologEndAddress is out of range, it is re-interpreted
+ // as a pointer to the primary function table entry for
+ // that function. The out of range test takes into account
+ // the redundant encoding of millicode and glue code.
+ //
+
+ if (((FunctionEntry->PrologEndAddress < FunctionEntry->BeginAddress) ||
+ (FunctionEntry->PrologEndAddress >= FunctionEntry->EndAddress)) &&
+ (FunctionEntry->PrologEndAddress & 3) == 0) {
+ FunctionEntry = (PRUNTIME_FUNCTION)FunctionEntry->PrologEndAddress;
+ }
+
+ return FunctionEntry;
+ }
+ }
+ }
+ }
+
+ //
+ // A function table entry for the specified PC was not found.
+ //
+
+ return NULL;
+}
+
+PVOID
+KiPcToFileHeader(
+ IN PVOID PcValue,
+ OUT PVOID *BaseOfImage,
+ OUT PLDR_DATA_TABLE_ENTRY *DataTableEntry
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the base of an image that contains the
+ specified PcValue. An image contains the PcValue if the PcValue lies
+ between the ImageBase and the ImageBase plus the size of the
+ virtual image.
+
+Arguments:
+
+ PcValue - Supplies a PcValue.
+
+ BaseOfImage - Returns the base address for the image containing the
+ PcValue. This value must be added to any relative addresses in
+ the headers to locate portions of the image.
+
+ DataTableEntry - Supplies a pointer to a variable that receives the
+ address of the data table entry that describes the image.
+
+Return Value:
+
+ NULL - No image was found that contains the PcValue.
+
+ NON-NULL - Returns the base address of the image that contains the
+ PcValue.
+
+--*/
+
+{
+
+ PLIST_ENTRY ModuleListHead;
+ PLDR_DATA_TABLE_ENTRY Entry;
+ PLIST_ENTRY Next;
+ ULONG Bounds;
+ PVOID ReturnBase, Base;
+
+ //
+ // If the module list has been initialized, then scan the list to
+ // locate the appropriate entry.
+ //
+
+ if (KeLoaderBlock != NULL) {
+ ModuleListHead = &KeLoaderBlock->LoadOrderListHead;
+
+ } else {
+ ModuleListHead = &PsLoadedModuleList;
+ }
+
+ ReturnBase = NULL;
+ Next = ModuleListHead->Flink;
+ if (Next != NULL) {
+ while (Next != ModuleListHead) {
+ Entry = CONTAINING_RECORD(Next,
+ LDR_DATA_TABLE_ENTRY,
+ InLoadOrderLinks);
+
+ Next = Next->Flink;
+ Base = Entry->DllBase;
+ Bounds = (ULONG)Base + Entry->SizeOfImage;
+ if ((ULONG)PcValue >= (ULONG)Base && (ULONG)PcValue < Bounds) {
+ *DataTableEntry = Entry;
+ ReturnBase = Base;
+ break;
+ }
+ }
+ }
+
+ *BaseOfImage = ReturnBase;
+ return ReturnBase;
+}
diff --git a/private/ntos/ke/ppc/exceptn.c b/private/ntos/ke/ppc/exceptn.c
new file mode 100644
index 000000000..a2ec6d81a
--- /dev/null
+++ b/private/ntos/ke/ppc/exceptn.c
@@ -0,0 +1,961 @@
+/*++
+
+Copyright (c) 1993 IBM Corporation and Microsoft Corporation
+
+Module Name:
+
+ exceptn.c
+
+Abstract:
+
+ This module implements the code necessary to dispatch exceptions to the
+ proper mode and invoke the exception dispatcher.
+
+Author:
+
+ Rick Simpson 2-Aug-1993
+ Adapted from MIPS version by David N. Cutler (davec) 3-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+#define _KXPPC_C_HEADER_
+#include "kxppc.h"
+
+BOOLEAN
+KiEmulateDcbz (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PKTRAP_FRAME TrapFrame
+ );
+
+//
+// Data misalignment exception (auto alignment fixup) control.
+//
+// If KiEnableAlignmentFaultExceptions is false, then no alignment
+// exceptions are raised and all misaligned user and kernel mode data
+// references are emulated.
+//
+// Otherwise if KiEnableAlignmentFaultExceptions is true, then the
+// current thread automatic alignment fixup enable determines whether
+// emulation is attempted in user mode.
+//
+// N.B. This default value may be reset from the Registry during init.
+//
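+//
+// Net effect (illustrative summary of the first-chance logic in
+// KiDispatchException below): a misaligned reference is emulated when
+//
+//     KiEnableAlignmentFaultExceptions == FALSE ||
+//     Thread->AutoAlignment || Process->AutoAlignment
+//
+// otherwise only dcbz is emulated (see KiEmulateDcbz) and other misaligned
+// accesses are dispatched as STATUS_DATATYPE_MISALIGNMENT exceptions.
+//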
+
+ULONG KiEnableAlignmentFaultExceptions = TRUE;
+
+//
+// Breakpoint is a trap word immediate with a TO field of all ones.
+//
+
+#define BREAK_INST (TRAP_INSTR | TO_BREAKPOINT)
+
+//
+// Define multiply overflow and divide by zero breakpoint instruction values.
+//
+
+#define DIVIDE_BREAKPOINT (TRAP_INSTR | TO_DIVIDE_BY_ZERO)
+#define UDIVIDE_BREAKPOINT (TRAP_INSTR | TO_UNCONDITIONAL_DIVIDE_BY_ZERO)
+
+//
+// Define external kernel breakpoint and breakin breakpoint instructions.
+//
+
+#define KERNEL_BREAKPOINT_INSTRUCTION (BREAK_INSTR | DEBUG_STOP_BREAKPOINT)
+#define KDDEBUG_BREAKPOINT (BREAK_INSTR | BREAKIN_BREAKPOINT)
+
+//
+// Define available hardware breakpoint register mask
+//
+ULONG KiBreakPoints;
+
+VOID
+KeContextFromKframes (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN OUT PCONTEXT ContextFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine moves the selected contents of the specified trap and exception frames
+ into the specified context frame according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame from which volatile context
+ should be copied into the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame from which context
+ should be copied into the context record.
+
+ ContextFrame - Supplies a pointer to the context frame that receives the
+ context copied from the trap and exception frames.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set machine state, instr address, link, count registers
+ //
+
+ ContextFrame->Msr = TrapFrame->Msr;
+ ContextFrame->Iar = TrapFrame->Iar;
+ ContextFrame->Lr = TrapFrame->Lr;
+ ContextFrame->Ctr = TrapFrame->Ctr;
+ }
+
+ //
+ // Set integer register contents if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Volatile integer regs in trap frame are 0..12
+ //
+
+ RtlMoveMemory (&ContextFrame->Gpr0, &TrapFrame->Gpr0,
+ sizeof (ULONG) * 13);
+
+ //
+ // Non-volatile integer regs in exception frame are 13..31
+ //
+
+ RtlMoveMemory (&ContextFrame->Gpr13, &ExceptionFrame->Gpr13,
+ sizeof (ULONG) * 19);
+
+ //
+ // The CR is made up of volatile and non-volatile fields,
+ // but the entire CR is saved in the trap frame
+ //
+
+ ContextFrame->Cr = TrapFrame->Cr;
+
+ //
+ // Fixed Point Exception Register (XER) is part of the
+ // integer state
+ //
+
+ ContextFrame->Xer = TrapFrame->Xer;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Volatile floating point regs in trap frame are 0..13
+ //
+
+ RtlMoveMemory(&ContextFrame->Fpr0, &TrapFrame->Fpr0,
+ sizeof(DOUBLE) * (14));
+
+ //
+ // Non-volatile floating point regs in exception frame are 14..31
+ //
+
+ RtlMoveMemory(&ContextFrame->Fpr14, &ExceptionFrame->Fpr14,
+ sizeof(DOUBLE) * (18));
+
+ //
+ // Set floating point status and control register.
+ //
+
+ ContextFrame->Fpscr = TrapFrame->Fpscr;
+ }
+
+ //
+ // Fetch Dr register contents if requested. Values may be trash.
+ //
+
+ if ((ContextFrame->ContextFlags & CONTEXT_DEBUG_REGISTERS) ==
+ CONTEXT_DEBUG_REGISTERS) {
+
+ ContextFrame->Dr0 = TrapFrame->Dr0;
+ ContextFrame->Dr1 = TrapFrame->Dr1;
+ ContextFrame->Dr2 = TrapFrame->Dr2;
+ ContextFrame->Dr3 = TrapFrame->Dr3;
+ ContextFrame->Dr6 = TrapFrame->Dr6;
+ ContextFrame->Dr6 |= KiBreakPoints;
+ ContextFrame->Dr5 = 0; // Zero initialize unused regs
+ ContextFrame->Dr4 = 0;
+
+ //
+ // If it's a user mode frame, and the thread doesn't have DRs set,
+ // and we just return the trash in the frame, we risk accidentally
+ // making the thread active with trash values on a set. Therefore,
+ // Dr7 must be set to the number of available data address breakpoint
+ // registers if we get a non-active user mode frame.
+ //
+
+ if (((TrapFrame->PreviousMode) != KernelMode) &&
+ (KeGetCurrentThread()->DebugActive)) {
+
+ ContextFrame->Dr7 = TrapFrame->Dr7;
+ } else {
+
+ ContextFrame->Dr7 = 0;
+ }
+ }
+
+ return;
+}
+
+VOID
+KeContextToKframes (
+ IN OUT PKTRAP_FRAME TrapFrame,
+ IN OUT PKEXCEPTION_FRAME ExceptionFrame,
+ IN PCONTEXT ContextFrame,
+ IN ULONG ContextFlags,
+ IN KPROCESSOR_MODE PreviousMode
+ )
+
+/*++
+
+Routine Description:
+
+ This routine moves the selected contents of the specified context frame into
+ the specified trap and exception frames according to the specified context
+ flags.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame that receives the volatile
+ context from the context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame that receives
+ the nonvolatile context from the context record.
+
+ ContextFrame - Supplies a pointer to a context frame that contains the
+ context that is to be copied into the trap and exception frames.
+
+ ContextFlags - Supplies the set of flags that specify which parts of the
+ context frame are to be copied into the trap and exception frames.
+
+ PreviousMode - Supplies the processor mode for which the trap and exception
+ frames are being built.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Set control information if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL) {
+
+ //
+ // Set instruction address, link, count, and machine state registers
+ //
+
+ TrapFrame->Iar = ContextFrame->Iar;
+ TrapFrame->Lr = ContextFrame->Lr;
+ TrapFrame->Ctr = ContextFrame->Ctr;
+ TrapFrame->Msr = SANITIZE_MSR(ContextFrame->Msr, PreviousMode);
+ }
+
+ //
+ // Set integer registers contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER) {
+
+ //
+ // Volatile integer regs are 0..12
+ //
+
+ RtlMoveMemory(&TrapFrame->Gpr0, &ContextFrame->Gpr0,
+ sizeof(ULONG) * (13));
+
+ //
+ // Non-volatile integer regs are 13..31
+ //
+
+ RtlMoveMemory(&ExceptionFrame->Gpr13, &ContextFrame->Gpr13,
+ sizeof(ULONG) * (19));
+
+ //
+ // Copy the Condition Reg and Fixed Point Exception Reg
+ //
+
+ TrapFrame->Cr = ContextFrame->Cr;
+ TrapFrame->Xer = ContextFrame->Xer;
+ }
+
+ //
+ // Set floating register contents if specified.
+ //
+
+ if ((ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT) {
+
+ //
+ // Volatile floating point regs are 0..13
+ //
+
+ RtlMoveMemory(&TrapFrame->Fpr0, &ContextFrame->Fpr0,
+ sizeof(DOUBLE) * (14));
+
+ //
+ // Non-volatile floating point regs are 14..31
+ //
+
+ RtlMoveMemory(&ExceptionFrame->Fpr14, &ContextFrame->Fpr14,
+ sizeof(DOUBLE) * (18));
+
+ //
+ // Set floating point status and control register.
+ //
+
+ TrapFrame->Fpscr = SANITIZE_FPSCR(ContextFrame->Fpscr, PreviousMode);
+ }
+
+ //
+ // Set debug register state if specified. If previous mode is user
+ // mode (i.e. it's a user frame we're setting) and if the effect will be to
+ // cause at least one of the debug register enable bits in Dr7
+ // to be set then set DebugActive to the enable bit mask.
+ //
+
+ if ((ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS) {
+
+ //
+ // Set the debug control register for the 601 and 604
+ // indicating the number of address breakpoints supported.
+ //
+
+ TrapFrame->Dr0 = SANITIZE_DRADDR(ContextFrame->Dr0, PreviousMode);
+ TrapFrame->Dr1 = SANITIZE_DRADDR(ContextFrame->Dr1, PreviousMode);
+ TrapFrame->Dr2 = SANITIZE_DRADDR(ContextFrame->Dr2, PreviousMode);
+ TrapFrame->Dr3 = SANITIZE_DRADDR(ContextFrame->Dr3, PreviousMode);
+ TrapFrame->Dr6 = SANITIZE_DR6(ContextFrame->Dr6, PreviousMode);
+ TrapFrame->Dr7 = SANITIZE_DR7(ContextFrame->Dr7, PreviousMode);
+
+ if (PreviousMode != KernelMode) {
+ KeGetPcr()->DebugActive = KeGetCurrentThread()->DebugActive =
+ (UCHAR)(TrapFrame->Dr7 & DR7_ACTIVE);
+ }
+ }
+
+ return;
+}
+
+VOID
+KiDispatchException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN KPROCESSOR_MODE PreviousMode,
+ IN BOOLEAN FirstChance
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to dispatch an exception to the proper mode and
+ to cause the exception dispatcher to be called.
+
+ If the exception is a data misalignment, this is the first chance for
+ handling the exception, and the current thread has enabled automatic
+ alignment fixup, then an attempt is made to emulate the unaligned
+ reference.
+
+ If the exception is a floating exception (N.B. the pseudo status
+ STATUS_FLOAT_STACK_CHECK is used to signify this), we convert the
+ exception code to the correct STATUS based on the FPSCR.
+ It is up to the handler to figure out what to do to emulate/repair
+ the operation.
+
+ If the exception is neither a data misalignment nor a floating point
+ exception and the previous mode is kernel, then the exception
+ dispatcher is called directly to process the exception. Otherwise the
+ exception record, exception frame, and trap frame contents are copied
+ to the user mode stack. The contents of the exception frame and trap
+ are then modified such that when control is returned, execution will
+ commence in user mode in a routine which will call the exception
+ dispatcher.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ PreviousMode - Supplies the previous processor mode.
+
+ FirstChance - Supplies a boolean variable that specifies whether this
+ is the first (TRUE) or second (FALSE) time that this exception has
+ been processed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ CONTEXT ContextFrame;
+ EXCEPTION_RECORD ExceptionRecord1;
+ LONG Length;
+ BOOLEAN UserApcPending;
+
+ //
+ // If the exception is a data misalignment, this is the first chance for
+ // handling the exception, and the current thread has enabled automatic
+ // alignment fixup, then attempt to emulate the unaligned reference.
+ //
+ // We always emulate dcbz, even if the thread hasn't enabled automatic
+ // alignment fixup. This is because the hardware declares an alignment
+ // fault if dcbz is attempted on noncached memory.
+ //
+
+ if (ExceptionRecord->ExceptionCode == STATUS_DATATYPE_MISALIGNMENT) {
+ if (FirstChance != FALSE) {
+
+ //
+ // If alignment fault exceptions are not enabled, then no exception
+ // should be raised and the data reference should be emulated.
+ //
+
+ if ((KiEnableAlignmentFaultExceptions == FALSE) ||
+ (KeGetCurrentThread()->AutoAlignment != FALSE) ||
+ (KeGetCurrentThread()->ApcState.Process->AutoAlignment != FALSE)) {
+ if (KiEmulateReference(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+ } else {
+ if (KiEmulateDcbz(ExceptionRecord, ExceptionFrame, TrapFrame) != FALSE) {
+ KeGetCurrentPrcb()->KeAlignmentFixupCount += 1;
+ goto Handled2;
+ }
+ }
+ }
+ }
+
+ //
+ // If the exception is a breakpoint, then translate it to an appropriate
+ // exception code if it is a division by zero or an integer overflow
+ // caused by multiplication.
+ //
+
+ if (ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) {
+
+ ULONG Instr = ExceptionRecord->ExceptionInformation[0];
+
+ if ((Instr & 0xffe0ffff) == DIVIDE_BREAKPOINT ||
+ (Instr & 0xffe0ffff) == UDIVIDE_BREAKPOINT) {
+ ExceptionRecord->ExceptionCode = STATUS_INTEGER_DIVIDE_BY_ZERO;
+ } else if (Instr == KDDEBUG_BREAKPOINT) {
+ TrapFrame->Iar += 4;
+ }
+ }
+
+ //
+ // If the exception is a floating point exception, then the
+ // ExceptionCode was set to STATUS_FLOAT_STACK_CHECK. We now sort
+ // that out and set a more correct STATUS code. We clear the
+ // exception enable bit in the FPSCR of the exception being reported
+ // to eliminate floating point exception recursion.
+ //
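+ //
+ // The mapping applied below pairs each enable bit with its status bit:
+ //
+ //     XE & XX -> STATUS_FLOAT_INEXACT_RESULT
+ //     ZE & ZX -> STATUS_FLOAT_DIVIDE_BY_ZERO
+ //     UE & UX -> STATUS_FLOAT_UNDERFLOW
+ //     OE & OX -> STATUS_FLOAT_OVERFLOW
+ //     otherwise -> STATUS_FLOAT_INVALID_OPERATION
+ //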
+
+ if (ExceptionRecord->ExceptionCode == STATUS_FLOAT_STACK_CHECK) {
+
+ PFPSCR Fpscr = (PFPSCR)(&TrapFrame->Fpscr);
+
+ if ((Fpscr->XE == 1) && (Fpscr->XX == 1)) {
+
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INEXACT_RESULT;
+ Fpscr->XE = 0;
+
+ }
+ else if ((Fpscr->ZE == 1) && (Fpscr->ZX == 1)) {
+
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_DIVIDE_BY_ZERO;
+ Fpscr->ZE = 0;
+
+ }
+ else if ((Fpscr->UE == 1) && (Fpscr->UX == 1)) {
+
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_UNDERFLOW;
+ Fpscr->UE = 0;
+
+ }
+
+ else if ((Fpscr->OE == 1) && (Fpscr->OX == 1)) {
+
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_OVERFLOW;
+ Fpscr->OE = 0;
+
+ }
+ else {
+
+ // Must be some form of Invalid Operation
+
+ ExceptionRecord->ExceptionCode = STATUS_FLOAT_INVALID_OPERATION;
+ Fpscr->VE = 0;
+ }
+ }
+
+ //
+ // Move machine state from trap and exception frames to a context frame,
+ // and increment the number of exceptions dispatched.
+ //
+
+ ContextFrame.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS;
+ KeContextFromKframes(TrapFrame, ExceptionFrame, &ContextFrame);
+ KeGetCurrentPrcb()->KeExceptionDispatchCount += 1;
+
+ //
+ // Select the method of handling the exception based on the previous mode.
+ //
+
+ if (PreviousMode == KernelMode) {
+
+ //
+ // Previous mode was kernel.
+ //
+ // If this is the first chance, the kernel debugger is active, and
+ // the exception is a kernel breakpoint, then give the kernel debugger
+ // a chance to handle the exception.
+ //
+ // If this is the first chance and the kernel debugger is not active
+ // or does not handle the exception, then attempt to find a frame
+ // handler to handle the exception.
+ //
+ // If this is the second chance or the exception is not handled, then
+ // if the kernel debugger is active, then give the kernel debugger a
+ // second chance to handle the exception. If the kernel debugger does
+ // not handle the exception, then bug check.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // If the kernel debugger is active, the exception is a breakpoint,
+ // and the breakpoint is handled by the kernel debugger, then give
+ // the kernel debugger a chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ ((ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
+ (ExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP)) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ KernelMode) != FALSE)) {
+
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ KernelMode,
+ FALSE)) != FALSE) {
+
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (RtlDispatchException(ExceptionRecord, &ContextFrame) != FALSE) {
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ if (KiDebugRoutine != NULL) {
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ PreviousMode,
+ TRUE)) != FALSE) {
+ goto Handled1;
+ }
+ }
+
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+
+ } else {
+
+ //
+ // Previous mode was user.
+ //
+ // If this is the first chance, the kernel debugger is active, the
+ // exception is a kernel breakpoint, and the current process is not
+ // being debugged, or the current process is being debugged, but the
+ // breakpoint is not a kernel breakpoint instruction, then give
+ // the kernel debugger a chance to handle the exception.
+ //
+ // If this is the first chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // transfer the exception information to the user stack, transition to
+ // user mode, and attempt to dispatch the exception to a frame based
+ // handler. If a frame based handler handles the exception, then continue
+ // execution. Otherwise, execute the raise exception system service
+ // which will call this routine a second time to process the exception.
+ //
+ // If this is the second chance and the current process has a debugger
+ // port, then send a message to the debugger port and wait for a reply.
+ // If the debugger handles the exception, then continue execution. Else
+ // if the current process has a subsystem port, then send a message to
+ // the subsystem port and wait for a reply. If the subsystem handles the
+ // exception, then continue execution. Else terminate the thread.
+ //
+
+ if (FirstChance != FALSE) {
+
+ //
+ // If the kernel debugger is active, the exception is a kernel
+ // breakpoint, and the current process is not being debugged,
+ // or the current process is being debugged, but the breakpoint
+ // is not a kernel breakpoint instruction, then give the kernel
+ // debugger a chance to handle the exception.
+ //
+
+ if ((KiDebugRoutine != NULL) &&
+ ((ExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
+ (ExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP)) &&
+ (KdIsThisAKdTrap(ExceptionRecord,
+ &ContextFrame,
+ UserMode) != FALSE) &&
+ ((PsGetCurrentProcess()->DebugPort == NULL) ||
+ ((PsGetCurrentProcess()->DebugPort != NULL) &&
+ (ExceptionRecord->ExceptionInformation[0] !=
+ KERNEL_BREAKPOINT_INSTRUCTION)))) {
+
+ if (((KiDebugRoutine) (TrapFrame,
+ ExceptionFrame,
+ ExceptionRecord,
+ &ContextFrame,
+ UserMode,
+ FALSE)) != FALSE) {
+
+ goto Handled1;
+ }
+ }
+
+ //
+ // This is the first chance to handle the exception.
+ //
+
+ if (DbgkForwardException(ExceptionRecord, TRUE, FALSE)) {
+ TrapFrame->Fpscr = SANITIZE_FPSCR(TrapFrame->Fpscr, UserMode);
+ goto Handled2;
+ }
+
+ //
+ // Transfer exception information to the user stack, transition
+ // to user mode, and attempt to dispatch the exception to a frame
+ // based handler.
+ //
+ // We are running on the kernel stack now. On the user stack, we
+ // build a stack frame containing the following:
+ //
+ // | |
+ // |-----------------------------------|
+ // | |
+ // | Stack frame header |
+ // | |
+ // |- - - - - - - - - - - - - - - - - -|
+ // | |
+ // | Exception record |
+ // | |
+ // |- - - - - - - - - - - - - - - - - -|
+ // | |
+ // | Context record |
+ // | |
+ // | |
+ // | |
+ // |- - - - - - - - - - - - - - - - - -|
+ // | Saved TOC for backtrack |
+ // |- - - - - - - - - - - - - - - - - -|
+ // | |
+ // | |
+ // | STK_SLACK_SPACE |
+ // | |
+ // | |
+ // | |
+ // |- - - - - - - - - - - - - - - - - -|
+ // | |
+ // | User's stack frame |
+ // | |
+ // | |
+ //
+ // This stack frame is for KiUserExceptionDispatcher, the assembly
+        // language routine that effects transfer in user mode to
+ // RtlDispatchException. KiUserExceptionDispatcher is passed
+ // pointers to the Exception Record and Context Record as
+ // parameters.
+
+ repeat:
+ try {
+
+ //
+ // Compute positions on user stack of items shown above
+ //
+
+ ULONG Length = (sizeof (STACK_FRAME_HEADER) + sizeof (EXCEPTION_RECORD) +
+ sizeof (CONTEXT) + sizeof (ULONG) + STK_SLACK_SPACE + 7) & (~7);
+
+ ULONG UserStack = (ContextFrame.Gpr1 & (~7)) - Length;
+ ULONG ExceptSlot = UserStack + sizeof (STACK_FRAME_HEADER);
+ ULONG ContextSlot = ExceptSlot + sizeof (EXCEPTION_RECORD);
+ ULONG TocSlot = ContextSlot + sizeof (CONTEXT);
+
+ //
+            // Probe user stack area for writability and then transfer the
+ // exception record and context record to the user stack area.
+ //
+
+ ProbeForWrite((PCHAR) UserStack, ContextFrame.Gpr1 - UserStack, sizeof(QUAD));
+ RtlMoveMemory((PVOID) ExceptSlot, ExceptionRecord, sizeof (EXCEPTION_RECORD));
+ RtlMoveMemory((PVOID) ContextSlot, &ContextFrame, sizeof (CONTEXT));
+
+ //
+ // Fill in TOC value as if it had been saved by prologue to
+ // KiUserExceptionDispatcher
+ //
+
+ *((PULONG) TocSlot) = ContextFrame.Gpr2;
+
+ //
+ // Set back chain from newly-constructed stack frame
+ //
+
+ *((PULONG) UserStack) = ContextFrame.Gpr1;
+
+ //
+ // Set address of exception record, context record,
+ // and the new stack pointer in the current trap frame.
+ //
+
+ TrapFrame->Gpr1 = UserStack; // Stack pointer
+ TrapFrame->Gpr3 = ExceptSlot; // First parameter
+ TrapFrame->Gpr4 = ContextSlot; // Second parameter
+
+ //
+ // Sanitize the floating status register so a recursive
+ // exception will not occur.
+ //
+
+ TrapFrame->Fpscr = SANITIZE_FPSCR(ContextFrame.Fpscr, UserMode);
+
+ //
+ // Set the execution address and TOC pointer of the exception
+ // routine that will call the exception dispatcher and then return
+ // to the trap handler. The trap handler will restore the exception
+ // and trap frame context and continue execution in the routine
+ // that will call the exception dispatcher.
+ //
+
+ {
+ PULONG FnDesc = (PULONG) KeUserExceptionDispatcher;
+ TrapFrame->Iar = FnDesc[0];
+ TrapFrame->Gpr2 = FnDesc[1];
+ }
+
+ return;
+
+ //
+ // If an exception occurs, then copy the new exception information
+ // to an exception record and handle the exception.
+ //
+
+ } except (KiCopyInformation(&ExceptionRecord1,
+ (GetExceptionInformation())->ExceptionRecord)) {
+
+ //
+ // If the exception is a stack overflow, then attempt
+ // to raise the stack overflow exception. Otherwise,
+ // the user's stack is not accessible, or is misaligned,
+ // and second chance processing is performed.
+ //
+
+ if (ExceptionRecord1.ExceptionCode == STATUS_STACK_OVERFLOW) {
+ ExceptionRecord1.ExceptionAddress = ExceptionRecord->ExceptionAddress;
+ RtlMoveMemory((PVOID)ExceptionRecord,
+ &ExceptionRecord1, sizeof(EXCEPTION_RECORD));
+ goto repeat;
+ }
+ }
+ }
+
+ //
+ // This is the second chance to handle the exception.
+ //
+
+ UserApcPending = KeGetCurrentThread()->ApcState.UserApcPending;
+ if (DbgkForwardException(ExceptionRecord, TRUE, TRUE)) {
+ TrapFrame->Fpscr = SANITIZE_FPSCR(TrapFrame->Fpscr, UserMode);
+ goto Handled2;
+
+ } else if (DbgkForwardException(ExceptionRecord, FALSE, TRUE)) {
+ //
+ // If a user APC was not previously pending and one is now
+ // pending, then the thread has been terminated and the PC
+ // must be forced to a legal address so an infinite loop does
+ // not occur for the case where a jump to an unmapped address
+ // occurred.
+ //
+
+ if ((UserApcPending == FALSE) &&
+ (KeGetCurrentThread()->ApcState.UserApcPending != FALSE)) {
+// TEMPORARY .... PAT
+// Commenting out reference to USPCR (a known legal address ..
+// TrapFrame->Iar = (ULONG)USPCR;
+ }
+
+ TrapFrame->Fpscr = SANITIZE_FPSCR(TrapFrame->Fpscr, UserMode);
+ goto Handled2;
+
+ } else {
+ ZwTerminateProcess(NtCurrentProcess(), ExceptionRecord->ExceptionCode);
+ KeBugCheckEx(KMODE_EXCEPTION_NOT_HANDLED,
+ ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionRecord->ExceptionAddress,
+ ExceptionRecord->ExceptionInformation[0],
+ ExceptionRecord->ExceptionInformation[1]);
+ }
+ }
+
+ //
+ // Move machine state from context frame to trap and exception frames and
+ // then return to continue execution with the restored state.
+ //
+
+Handled1:
+ KeContextToKframes(TrapFrame, ExceptionFrame, &ContextFrame,
+ ContextFrame.ContextFlags, PreviousMode);
+
+ //
+ // Exception was handled by the debugger or the associated subsystem
+ // and state was modified, if necessary, using the get state and set
+ // state capabilities. Therefore the context frame does not need to
+    // be transferred to the trap and exception frames.
+ //
+
+Handled2:
+ return;
+}
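+
+//
+// Illustrative sketch only, not part of this module: the user-stack frame
+// layout in KiDispatchException above relies on two rounding idioms.
+// (x + 7) & ~7 rounds a byte count up to the next 8-byte multiple, and
+// Gpr1 & ~7 rounds the user stack pointer down to an 8-byte boundary
+// before the frame is carved out below it. The helper names and example
+// values are hypothetical.
+//
+
+static ULONG
+SketchRoundFrameLengthUp (ULONG Size)
+{
+    return (Size + 7) & ~7;              // e.g. a 13 byte total becomes 16
+}
+
+static ULONG
+SketchUserStackBase (ULONG Gpr1, ULONG Length)
+{
+    return (Gpr1 & ~7) - Length;         // align down, then grow downward
+}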
+
+ULONG
+KiCopyInformation (
+ IN OUT PEXCEPTION_RECORD ExceptionRecord1,
+ IN PEXCEPTION_RECORD ExceptionRecord2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called from an exception filter to copy the exception
+ information from one exception record to another when an exception occurs.
+
+Arguments:
+
+ ExceptionRecord1 - Supplies a pointer to the destination exception record.
+
+ ExceptionRecord2 - Supplies a pointer to the source exception record.
+
+Return Value:
+
+ A value of EXCEPTION_EXECUTE_HANDLER is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Copy one exception record to another and return value that causes
+ // an exception handler to be executed.
+ //
+
+ RtlMoveMemory((PVOID)ExceptionRecord1,
+ (PVOID)ExceptionRecord2,
+ sizeof(EXCEPTION_RECORD));
+
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+NTSTATUS
+KeRaiseUserException(
+ IN NTSTATUS ExceptionCode
+ )
+
+/*++
+
+Routine Description:
+
+ This function causes an exception to be raised in the calling thread's user-mode
+ context. It does this by editing the trap frame the kernel was entered with to
+ point to trampoline code that raises the requested exception.
+
+Arguments:
+
+ ExceptionCode - Supplies the status value to be used as the exception
+ code for the exception that is to be raised.
+
+Return Value:
+
+ The status value that should be returned by the caller.
+
+--*/
+
+{
+ PKTRAP_FRAME TrapFrame;
+ PULONG FnDesc;
+
+ ASSERT(KeGetPreviousMode() == UserMode);
+
+ TrapFrame = KeGetCurrentThread()->TrapFrame;
+ FnDesc = (PULONG)KeRaiseUserExceptionDispatcher;
+
+ TrapFrame->Iar = FnDesc[0];
+ TrapFrame->Gpr2 = FnDesc[1];
+
+ return(ExceptionCode);
+}
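+
+//
+// Illustrative sketch only: the two loads in KeRaiseUserException (and the
+// equivalent code in KiDispatchException above) assume the PowerPC ABI
+// convention in which a function pointer addresses a two-word descriptor
+// rather than code. The structure and helper below are hypothetical
+// stand-ins that merely model that layout.
+//
+
+typedef struct _SKETCH_FUNCTION_DESCRIPTOR {
+    ULONG EntryPoint;                    // loaded into TrapFrame->Iar
+    ULONG TocValue;                      // loaded into TrapFrame->Gpr2
+} SKETCH_FUNCTION_DESCRIPTOR;
+
+static VOID
+SketchLoadDescriptor (
+    IN SKETCH_FUNCTION_DESCRIPTOR *FnDesc,
+    OUT PULONG Iar,
+    OUT PULONG Gpr2
+    )
+{
+    *Iar = FnDesc->EntryPoint;           // where user mode execution resumes
+    *Gpr2 = FnDesc->TocValue;            // TOC register for the target routine
+}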
diff --git a/private/ntos/ke/ppc/flush.c b/private/ntos/ke/ppc/flush.c
new file mode 100644
index 000000000..aa72223e1
--- /dev/null
+++ b/private/ntos/ke/ppc/flush.c
@@ -0,0 +1,940 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ flush.c
+
+Abstract:
+
+    This module implements PowerPC machine dependent kernel functions to flush
+ the data and instruction caches and to flush I/O buffers.
+
+Author:
+
+ David N. Cutler (davec) 26-Apr-1990
+
+Modified by:
+
+ Pat Carr (patcarr@pets.sps.mot.com) 15-Aug-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced prototypes.
+//
+
+VOID
+KiChangeColorPageTarget (
+ IN PULONG SignalDone,
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN PVOID PageFrame
+ );
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiSweepIcacheRangeTarget (
+ IN PULONG SignalDone,
+ IN PVOID BaseAddress,
+ IN PVOID Length,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ );
+
+VOID
+KeChangeColorPage (
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN ULONG PageFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This routine changes the color of a page.
+
+Arguments:
+
+    NewColor - Supplies the page aligned virtual address of the new color
+        of the page to change.
+
+ OldColor - Supplies the page aligned virtual address of the old color
+ of the page to change.
+
+ PageFrame - Supplies the page frame number of the page that is changed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= SYNCH_LEVEL);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the change color
+ // parameters to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiChangeColorPageTarget,
+ (PVOID)NewColor,
+ (PVOID)OldColor,
+ (PVOID)PageFrame);
+ }
+
+#endif
+
+#ifdef COLORED_PAGES
+ //
+ // Change the color of the page on the current processor.
+ //
+
+ HalChangeColorPage(NewColor, OldColor, PageFrame);
+
+#endif
+
+ //
+ // Wait until all target processors have finished changing the color
+ // of the page.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
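+
+//
+// Illustrative sketch only: the request/acknowledge handshake that the
+// KiIpiSendPacket, KiIpiSignalPacketDone, and KiIpiStallOnPacketTargets
+// calls in this module perform. Everything below is a hypothetical
+// stand-in, not the kernel's IPI machinery; a real implementation needs
+// interlocked updates, while plain volatile here is only for illustration.
+//
+
+static volatile ULONG SketchBarrier;     // nonzero while targets are working
+
+static VOID
+SketchSourceSide (ULONG TargetSet)
+{
+    SketchBarrier = TargetSet;           // publish the request to the targets
+
+    //
+    // ... perform the local part of the operation here ...
+    //
+
+    while (SketchBarrier != 0) {         // stall until every target has
+    }                                    // cleared its bit
+}
+
+static VOID
+SketchTargetSide (ULONG MyBit)
+{
+    //
+    // ... perform the requested operation here ...
+    //
+
+    SketchBarrier &= ~MyBit;             // signal completion (the real code
+}                                        // uses interlocked operations)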
+
+VOID
+KiChangeColorPageTarget (
+ IN PULONG SignalDone,
+ IN PVOID NewColor,
+ IN PVOID OldColor,
+ IN PVOID PageFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for changing the color of a page.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+    NewColor - Supplies the page aligned virtual address of the new color
+        of the page to change.
+
+ OldColor - Supplies the page aligned virtual address of the old color
+ of the page to change.
+
+ PageFrame - Supplies the page frame number of the page that is changed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Change the color of the page on the current processor and clear
+ // change color packet address to signal the source to continue.
+ //
+
+#if !defined(NT_UP)
+
+#ifdef COLORED_PAGES
+ HalChangeColorPage(NewColor, OldColor, (ULONG)PageFrame);
+#endif
+
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepDcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the data cache on all processors that are currently
+ running threads which are children of the current process or flushes the
+ data cache on all processors in the host configuration.
+
+    N.B. PowerPC maintains cache coherency across processors; however, in
+    this routine the range of addresses being flushed is unknown, so we
+    must still broadcast the request to the other processors.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which data
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= SYNCH_LEVEL);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepDcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Sweep the data cache on the current processor.
+ //
+
+ HalSweepDcache();
+
+ //
+    // Wait until all target processors have finished sweeping their
+ // data cache.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KiSweepDcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the data cache on target
+ processors.
+
+    N.B. PowerPC maintains cache coherency in the D-Cache across all
+    processors, so this routine should not be needed; it is provided in
+    case it is actually used.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the data cache on the current processor and clear the sweep
+ // data cache packet address to signal the source to continue.
+ //
+
+#if !defined(NT_UP)
+
+ HalSweepDcache();
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcache (
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the instruction cache on all processors that are
+ currently running threads which are children of the current process or
+ flushes the instruction cache on all processors in the host configuration.
+
+    N.B. Although PowerPC maintains cache coherency across processors, we
+    use the flash invalidate function (h/w) for I-Cache sweeps, which does
+    not maintain coherency, so we still do the MP I-Cache flush in s/w. plj.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+
+ ASSERT(KeGetCurrentIrql() <= SYNCH_LEVEL);
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+#if !defined(NT_UP)
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepIcacheTarget,
+ NULL,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Sweep the instruction cache on the current processor.
+ //
+ // If the processor is not a 601, flush the data cache first.
+ //
+
+ if ( ( (KeGetPvr() >> 16 ) & 0xffff ) > 1 ) {
+ HalSweepDcache();
+ }
+
+ HalSweepIcache();
+
+ //
+ // Wait until all target processors have finished sweeping their
+ // instruction caches.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
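+
+//
+// Illustrative sketch only: the KeGetPvr test used above and below. The
+// upper 16 bits of the PowerPC processor version register identify the
+// processor family; a value of 1 is the 601, whose unified cache makes the
+// extra data cache sweep unnecessary. The helper name is hypothetical.
+//
+
+static BOOLEAN
+SketchHasSplitCaches (ULONG Pvr)
+{
+    return (BOOLEAN)(((Pvr >> 16) & 0xffff) > 1);    // FALSE for the 601, TRUE otherwise
+}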
+
+VOID
+KiSweepIcacheTarget (
+ IN PULONG SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping the instruction cache on
+ target processors.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Parameter1 - Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Sweep the instruction cache on the current processor and clear
+ // the sweep instruction cache packet address to signal the source
+ // to continue.
+ //
+ // If the processor is not a 601, flush the data cache first.
+ //
+
+ if ( ( (KeGetPvr() >> 16 ) & 0xffff ) > 1 ) {
+ HalSweepDcache();
+ }
+
+
+#if !defined(NT_UP)
+
+ HalSweepIcache();
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeSweepIcacheRange (
+ IN BOOLEAN AllProcessors,
+ IN PVOID BaseAddress,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ This function is used to flush a range of virtual addresses from the
+ primary instruction cache on all processors that are currently running
+ threads which are children of the current process or flushes the range
+ of virtual addresses from the primary instruction cache on all
+ processors in the host configuration.
+
+Arguments:
+
+ AllProcessors - Supplies a boolean value that determines which instruction
+ caches are flushed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Offset;
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+ ULONG ProcessorType;
+ ULONG DcacheAlignment;
+ ULONG IcacheAlignment;
+
+ ASSERT(KeGetCurrentIrql() <= SYNCH_LEVEL);
+
+ //
+ // If the length of the range is greater than the size of the primary
+ // instruction cache, then flush the entire cache.
+ //
+    // N.B. It is assumed that the sizes of the primary instruction and
+    //      data caches are the same.
+ //
+
+ if (Length > PCR->FirstLevelIcacheSize) {
+ KeSweepIcache(AllProcessors);
+ return;
+ }
+
+ ProcessorType = KeGetPvr() >> 16;
+
+ if (ProcessorType != 1) {
+
+        // PowerPC 601 has a unified cache; all others have dual caches.
+ // Flush the Dcache prior to sweeping the Icache in case we need
+ // to fetch a modified instruction currently Dcache resident.
+
+ DcacheAlignment = PCR->DcacheAlignment;
+ Offset = (ULONG)BaseAddress & DcacheAlignment;
+ HalSweepDcacheRange(
+ (PVOID)((ULONG)BaseAddress & ~DcacheAlignment),
+ (Offset + Length + DcacheAlignment) & ~DcacheAlignment);
+ }
+
+#if 0
+
+ //
+ // PowerPC h/w maintains coherency across processors. No need
+ // to send IPI request.
+ //
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ //
+ // Compute the set of target processors, and send the sweep range
+ // parameters to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiSweepIcacheRangeTarget,
+ (PVOID)BaseAddress,
+ (PVOID)Length,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Flush the specified range of virtual addresses from the primary
+ // instruction cache.
+ //
+
+ IcacheAlignment = PCR->IcacheAlignment;
+ Offset = (ULONG)BaseAddress & IcacheAlignment;
+ HalSweepIcacheRange((PVOID)((ULONG)BaseAddress & ~IcacheAlignment),
+ (Offset + Length + IcacheAlignment) & ~IcacheAlignment);
+
+ //
+ // Wait until all target processors have finished sweeping the specified
+ // range of addresses from the instruction cache.
+ //
+
+#if 0
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
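+
+//
+// Illustrative sketch only: how KeSweepIcacheRange widens the request to
+// whole cache blocks. The alignment values taken from the PCR are assumed
+// to be the block size minus one, i.e. a mask, as used above. With a
+// hypothetical 32-byte block (mask 0x1f), a base of 0x1234 and a length of
+// 100, the sweep starts at 0x1220 and covers 128 bytes.
+//
+
+static VOID
+SketchBlockAlignRange (
+    IN ULONG Base,
+    IN ULONG Length,
+    IN ULONG AlignmentMask,
+    OUT PULONG SweepBase,
+    OUT PULONG SweepLength
+    )
+{
+    ULONG Offset = Base & AlignmentMask;
+
+    *SweepBase = Base & ~AlignmentMask;
+    *SweepLength = (Offset + Length + AlignmentMask) & ~AlignmentMask;
+}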
+
+VOID
+KiSweepIcacheRangeTarget (
+ IN PULONG SignalDone,
+ IN PVOID BaseAddress,
+ IN PVOID Length,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for sweeping a range of addresses from the
+ instruction cache.
+
+ N.B. This routine is not used on PowerPC as the h/w can be relied
+ upon to maintain cache coherency.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ BaseAddress - Supplies a pointer to the base of the range that is flushed.
+
+ Length - Supplies the length of the range that is flushed if the base
+ address is specified.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+#if 0
+
+ ULONG Offset;
+    ULONG IcacheAlignment;
+
+    //
+ // Sweep the specified instruction cache range on the current processor.
+ //
+
+ IcacheAlignment = PCR->IcacheAlignment;
+ Offset = (ULONG)(BaseAddress) & IcacheAlignment;
+ HalSweepIcacheRange((PVOID)((ULONG)(BaseAddress) & ~IcacheAlignment),
+ (Offset + (ULONG)Length + IcacheAlignment) & ~IcacheAlignment);
+
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
+
+VOID
+KeFlushIoBuffers (
+ IN PMDL Mdl,
+ IN BOOLEAN ReadOperation,
+ IN BOOLEAN DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the I/O buffer specified by the memory descriptor
+ list from the data cache on all processors.
+
+Arguments:
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KAFFINITY TargetProcessors;
+ ULONG MaxLocalSweep;
+
+ ASSERT(KeGetCurrentIrql() <= SYNCH_LEVEL);
+
+ //
+ // If the operation is a DMA operation, then check if the flush
+ // can be avoided because the host system supports the right set
+ // of cache coherency attributes. Otherwise, the flush can also
+ // be avoided if the operation is a programmed I/O and not a page
+ // read.
+ //
+
+ if (DmaOperation != FALSE) {
+ if (ReadOperation != FALSE) {
+
+#if DBG
+
+ //
+ // Yes, it's a DMA operation, and yes, it's a read. PPC
+ // I-Caches do not snoop so this code is here only in debug
+ // systems to ensure KiDmaIoCoherency is set reasonably.
+ //
+
+ if ((KiDmaIoCoherency & DMA_READ_ICACHE_INVALIDATE) != 0) {
+
+ ASSERT((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0);
+
+ return;
+ }
+
+#endif
+
+ //
+ // If the operation is NOT a page read, then the read will
+ // not affect the I-Cache. The PPC architecture ensures the
+ // D-Cache will remain coherent.
+ //
+
+ if ((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) {
+ ASSERT((KiDmaIoCoherency & DMA_READ_DCACHE_INVALIDATE) != 0);
+ return;
+ }
+
+ } else if ((KiDmaIoCoherency & DMA_WRITE_DCACHE_SNOOP) != 0) {
+ return;
+ }
+
+ } else if ((Mdl->MdlFlags & MDL_IO_PAGE_READ) == 0) {
+ return;
+ }
+
+ //
+ // If the processor has a unified cache (currently the only
+ // PowerPC to fall into this category is a 601) then there
+ // are no problems with the I-Cache not snooping and D-Cache
+ // coherency is architected.
+ //
+
+ if ((KeGetPvr() >> 16) == 1) {
+ return;
+ }
+
+ //
+ // Either the operation is a DMA operation and the right coherency
+    // attributes are not supported by the host system, or the operation
+ // is programmed I/O and a page read.
+ //
+ // If the amount of data to sweep is large, sweep the entire
+    // data and instruction caches on all processors; otherwise,
+ // sweep the explicit range covered by the mdl.
+ //
+ // Sweeping the range covered by the mdl will be broadcast
+ // to the other processors by the PPC h/w coherency mechanism.
+ // (1 DCBST + 1 ICBI per block)
+ // Sweeping the entire D-Cache involves (potentially) loading
+ // and broadcasting a DCBST for each block in the D-Cache on
+ // every processor.
+ //
+ // For this reason we only sweep all if the amount to flush
+ // is greater than the First Level D Cache size * number of
+ // processors in the system.
+ //
+
+ MaxLocalSweep = PCR->FirstLevelDcacheSize;
+
+#if !defined(NT_UP)
+
+ MaxLocalSweep *= KeNumberProcessors;
+
+#endif
+
+ if (Mdl->ByteCount > MaxLocalSweep) {
+
+ //
+ // Raise IRQL to synchronization level to prevent a context switch.
+ //
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+#if !defined(NT_UP)
+
+ //
+ // Compute the set of target processors and send the sweep parameters
+ // to the target processors, if any, for execution.
+ //
+
+ TargetProcessors = KeActiveProcessors & PCR->NotMember;
+ if (TargetProcessors != 0) {
+
+ KiIpiSendPacket(TargetProcessors,
+ KiFlushIoBuffersTarget,
+ (PVOID)Mdl,
+ (PVOID)((ULONG)ReadOperation),
+ (PVOID)((ULONG)DmaOperation));
+
+ }
+
+#endif
+
+ //
+ // Flush the caches on the current processor.
+ //
+
+ HalSweepDcache();
+
+ HalSweepIcache();
+
+ //
+ // Wait until all target processors have finished
+ // flushing their caches.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Lower IRQL to its previous level and return.
+ //
+
+ KeLowerIrql(OldIrql);
+
+ return;
+ }
+
+ //
+ // The amount of data to be flushed is sufficiently small that it
+ // should be done on this processor only, allowing the h/w to ensure
+ // coherency.
+ //
+
+ HalFlushIoBuffers(Mdl, ReadOperation, DmaOperation);
+
+}
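+
+//
+// Illustrative sketch only: the sweep-all versus sweep-range policy used by
+// KeFlushIoBuffers above, in isolation. Sweeping the entire data cache costs
+// roughly one cache worth of work on every processor, so it is only chosen
+// when the transfer is larger than that combined cost. The helper name and
+// example figures are hypothetical.
+//
+
+static BOOLEAN
+SketchShouldSweepEverything (
+    IN ULONG ByteCount,                  // Mdl->ByteCount
+    IN ULONG DcacheSize,                 // PCR->FirstLevelDcacheSize
+    IN ULONG Processors                  // KeNumberProcessors (1 if NT_UP)
+    )
+{
+    //
+    // e.g. a 512KB transfer on four 32KB caches (128KB total) sweeps all;
+    // a 4KB transfer flushes only the range described by the MDL.
+    //
+
+    return (BOOLEAN)(ByteCount > DcacheSize * Processors);
+}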
+
+VOID
+KiFlushIoBuffersTarget (
+ IN PULONG SignalDone,
+ IN PVOID Mdl,
+ IN PVOID ReadOperation,
+ IN PVOID DmaOperation
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for flushing an I/O buffer on target
+ processors. On PowerPC this routine is only called when it has
+ been determined that it is more efficient to sweep the entire
+ cache than to sweep the range specified in the mdl.
+
+Arguments:
+
+    SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ Mdl - Supplies a pointer to a memory descriptor list that describes the
+ I/O buffer location.
+
+ ReadOperation - Supplies a boolean value that determines whether the I/O
+ operation is a read into memory.
+
+ DmaOperation - Supplies a boolean value that determines whether the I/O
+ operation is a DMA operation.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Flush the caches on the current processor.
+ //
+
+#if !defined(NT_UP)
+
+ HalSweepDcache();
+
+ HalSweepIcache();
+
+ KiIpiSignalPacketDone(SignalDone);
+
+#endif
+
+ return;
+}
diff --git a/private/ntos/ke/ppc/flushtb.c b/private/ntos/ke/ppc/flushtb.c
new file mode 100644
index 000000000..ef0ab8654
--- /dev/null
+++ b/private/ntos/ke/ppc/flushtb.c
@@ -0,0 +1,274 @@
+/*++
+
+Copyright (c) 1992-1994 Microsoft Corporation
+Copyright (c) 1994 IBM Corporation
+
+Module Name:
+
+ flushtb.c
+
+Abstract:
+
+ This module implements machine dependent functions to flush the
+ translation buffer and synchronize PIDs in an MP system.
+
+Author:
+
+ David N. Cutler (davec) 13-May-1989
+
+ Modified for PowerPC by Mark Mergen (mergen@watson.ibm.com) 25-Aug-1994
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+VOID
+KeFlushEntireTb (
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes the entire translation buffer (TB) on all
+ processors that are currently running threads which are children
+ of the current process or flushes the entire translation buffer
+ on all processors in the host configuration.
+
+ N.B. The entire translation buffer on all processors in the host
+ configuration is always flushed since PowerPC TB is tagged by
+ VSID and translations are held across context switch boundaries.
+
+Arguments:
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+
+ ASSERT(KeGetCurrentIrql() <= SYNCH_LEVEL);
+
+#if !defined(NT_UP)
+
+ //
+ // Raise IRQL to synchronization level to avoid a possible context switch.
+ //
+
+ OldIrql = KeRaiseIrqlToSynchLevel();
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushEntireTb);
+
+#endif
+
+ //
+ // Flush TB on current processor.
+ // PowerPC hardware broadcasts if MP.
+ //
+
+ KeFlushCurrentTb();
+
+#if !defined(NT_UP)
+
+ //
+ // Lower IRQL to previous level.
+ //
+
+ KeLowerIrql(OldIrql);
+
+#endif
+
+ return;
+}
+
+VOID
+KeFlushMultipleTb (
+ IN ULONG Number,
+ IN PVOID *Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE *PtePointer OPTIONAL,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes multiple entries from the translation buffer
+ on all processors that are currently running threads which are
+ children of the current process or flushes multiple entries from
+ the translation buffer on all processors in the host configuration.
+
+ N.B. The specified translation entries on all processors in the host
+ configuration are always flushed since PowerPC TB is tagged by
+ VSID and translations are held across context switch boundaries.
+
+Arguments:
+
+ Number - Supplies the number of TB entries to flush.
+
+ Virtual - Supplies a pointer to an array of virtual addresses that
+ are within the pages whose translation buffer entries are to be
+ flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies an optional pointer to an array of pointers to
+ page table entries that receive the specified page table entry
+ value.
+
+    PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // If a page table entry address array is specified, then set the
+ // specified page table entries to the specific value.
+ //
+
+ if (ARGUMENT_PRESENT(PtePointer)) {
+ for (Index = 0; Index < Number; Index += 1) {
+ *PtePointer[Index] = PteValue;
+ }
+ }
+
+#if !defined(NT_UP)
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushSingleTb);
+
+#endif
+
+ //
+ // Flush the specified entries from the TB on the current processor.
+ // PowerPC hardware broadcasts if MP.
+ //
+
+ for (Index = 0; Index < Number; Index += 1) {
+ KiFlushSingleTb(Invalid, Virtual[Index]);
+ }
+
+ return;
+}
+
+HARDWARE_PTE
+KeFlushSingleTb (
+ IN PVOID Virtual,
+ IN BOOLEAN Invalid,
+ IN BOOLEAN AllProcessors,
+ IN PHARDWARE_PTE PtePointer,
+ IN HARDWARE_PTE PteValue
+ )
+
+/*++
+
+Routine Description:
+
+ This function flushes a single entry from the translation buffer
+ on all processors that are currently running threads which are
+ children of the current process or flushes a single entry from
+ the translation buffer on all processors in the host configuration.
+
+ N.B. The specified translation entry on all processors in the host
+ configuration is always flushed since PowerPC TB is tagged by
+ VSID and translations are held across context switch boundaries.
+
+Arguments:
+
+ Virtual - Supplies a virtual address that is within the page whose
+ translation buffer entry is to be flushed.
+
+ Invalid - Supplies a boolean value that specifies the reason for
+ flushing the translation buffer.
+
+ AllProcessors - Supplies a boolean value that determines which
+ translation buffers are to be flushed.
+
+ PtePointer - Supplies a pointer to the page table entry which
+ receives the specified value.
+
+    PteValue - Supplies the new page table entry value.
+
+Return Value:
+
+ The previous contents of the specified page table entry is returned
+ as the function value.
+
+--*/
+
+{
+
+ HARDWARE_PTE OldPte;
+
+ ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Collect call data.
+ //
+
+#if defined(_COLLECT_FLUSH_SINGLE_CALLDATA_)
+
+ RECORD_CALL_DATA(&KiFlushSingleCallData);
+
+#endif
+
+ //
+ // Capture the previous contents of the page table entry and set the
+ // page table entry to the new value.
+ //
+
+ OldPte = *PtePointer;
+ *PtePointer = PteValue;
+
+#if !defined(NT_UP)
+
+ IPI_INSTRUMENT_COUNT(KeGetCurrentPrcb()->Number, FlushSingleTb);
+
+#endif
+
+ //
+ // Flush the specified entry from the TB on the current processor.
+ // PowerPC hardware broadcasts if MP.
+ //
+
+ KiFlushSingleTb(Invalid, Virtual);
+
+ //
+ // Return the previous page table entry value.
+ //
+
+ return OldPte;
+}
diff --git a/private/ntos/ke/ppc/genppc.c b/private/ntos/ke/ppc/genppc.c
new file mode 100644
index 000000000..c893ef81f
--- /dev/null
+++ b/private/ntos/ke/ppc/genppc.c
@@ -0,0 +1,990 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ genppc.c
+
+Abstract:
+
+ This module implements a program which generates PPC machine dependent
+ structure offset definitions for kernel structures that are accessed in
+ assembly code.
+
+Author:
+
+ David N. Cutler (davec) 27-Mar-1990
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+#pragma hdrstop
+
+#define HEADER_FILE
+
+#include "excpt.h"
+#include "ctype.h"
+#include "stdio.h"
+#include "stdarg.h"
+#include "stdlib.h"
+
+#if defined(_M_IX86) // IBMCDB
+
+#define _RESTORE_P386_DEF _M_IX86
+#define _M_PPC 1
+#define R4000 1
+#undef i386
+#undef _X86_
+#undef _M_IX86
+
+#endif
+
+#include "nt.h"
+#include "ntdef.h"
+#include "ntkeapi.h"
+#include "ntppc.h"
+#include "ntimage.h"
+#include "ntseapi.h"
+#include "ntobapi.h"
+#include "ntlpcapi.h"
+#include "ntioapi.h"
+#include "ntmmapi.h"
+#include "ntldr.h"
+#include "ntpsapi.h"
+#include "ntexapi.h"
+#include "ntnls.h"
+#include "ntrtl.h"
+#include "nturtl.h"
+#include "ntcsrmsg.h"
+#include "ntcsrsrv.h"
+#include "ntosdef.h"
+#include "ntxcapi.h"
+#include "ppc.h"
+#include "arc.h"
+#include "ke.h"
+#include "ex.h"
+#include "ps.h"
+#include "bugcodes.h"
+#include "ntstatus.h"
+#include "exboosts.h"
+#include "ppcdef.h"
+#include "setjmp.h"
+
+#if defined(_RESTORE_P386_DEF) // IBMCDB
+
+#undef _MSC_VER
+#define _M_IX86 _RESTORE_P386_DEF
+
+#endif
+
+//
+// Define architecture specific generation macros.
+//
+
+#define genAlt(Name, Type, Member) \
+ dumpf("#define " #Name " 0x%lx\n", OFFSET(Type, Member))
+
+#define genCom(Comment) \
+ dumpf("\n"); \
+ dumpf("//\n"); \
+ dumpf("// " Comment "\n"); \
+ dumpf("//\n"); \
+ dumpf("\n")
+
+#define genDef(Prefix, Type, Member) \
+ dumpf("#define " #Prefix #Member " 0x%lx\n", OFFSET(Type, Member))
+
+#define genVal(Name, Value) \
+ dumpf("#define " #Name " 0x%lx\n", Value)
+
+#define genSpc() dumpf("\n");
+
+//
+// Define member offset computation macro.
+//
+
+#define OFFSET(type, field) ((LONG)(&((type *)0)->field))
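+
+//
+// Illustrative sketch only: what the macros above produce. OFFSET() is the
+// classic offsetof() idiom, computing a member's byte offset by "indexing"
+// a null pointer. The structure below is hypothetical; for it,
+// genDef(Ex, SKETCH_EXAMPLE, Second) would emit
+//     #define ExSecond 0x4
+// into whichever output files are currently enabled.
+//
+
+typedef struct _SKETCH_EXAMPLE {
+    LONG First;                          // offset 0x0
+    LONG Second;                         // offset 0x4 on this 32-bit target
+} SKETCH_EXAMPLE;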
+
+FILE *KsPpc;
+FILE *HalPpc;
+
+//
+// EnableInc(a) - Enables output to goto specified include file
+//
+
+#define EnableInc(a) OutputEnabled |= a;
+
+//
+// DisableInc(a) - Disables output to goto specified include file
+//
+
+#define DisableInc(a) OutputEnabled &= ~a;
+
+ULONG OutputEnabled;
+
+#define KSPPC 0x01
+#define HALPPC 0x02
+
+#define KERNEL KSPPC
+#define HAL HALPPC
+
+VOID dumpf (const char *format, ...);
+
+
+//
+// This routine returns the bit number, counting right to left from bit 0,
+// of the least significant set bit in a field (32 if no bit is set).
+//
+
+LONG
+t (
+ IN ULONG z
+ )
+
+{
+ LONG i;
+
+ for (i = 0; i < 32; i += 1) {
+ if ((z >> i) & 1) {
+ break;
+ }
+ }
+ return i;
+}
+
+//
+// This program generates the PPC machine dependent assembler offset
+// definitions.
+//
+
+VOID
+main (argc, argv)
+ int argc;
+ char *argv[];
+{
+
+ char *outName;
+ LONG EventOffset;
+
+ //
+ // Create file for output.
+ //
+
+    outName = argc >= 2 ? argv[1] : "\\nt\\public\\sdk\\inc\\ksppc.h";
+ KsPpc = fopen( outName, "w" );
+
+ if (KsPpc == NULL) {
+ fprintf( stderr, "GENPPC: Cannot open %s for writing.\n", outName);
+ perror("GENPPC");
+ exit(1);
+ }
+
+ fprintf( stderr, "GENPPC: Writing %s header file.\n", outName );
+
+ outName = argc >= 3 ? argv[2] : "\\nt\\private\\ntos\\inc\\halppc.h";
+
+ HalPpc = fopen( outName, "w" );
+
+ if (HalPpc == NULL) {
+ fprintf( stderr, "GENPPC: Cannot open %s for writing.\n", outName);
+ perror("GENPPC");
+ exit(1);
+ }
+
+ fprintf( stderr, "GENPPC: Writing %s header file.\n", outName );
+
+ //
+ // Include statement for PPC architecture static definitions.
+ //
+
+ EnableInc (KSPPC | HALPPC);
+
+ dumpf("#include \"kxppc.h\"\n");
+
+ DisableInc (HALPPC);
+
+ //
+ // Include architecture independent definitions.
+ //
+
+#include "..\genxx.inc"
+
+ //
+ // Generate architecture dependent definitions.
+ //
+ // Processor control register structure definitions.
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Processor Control Registers Structure Offset Definitions");
+
+ genVal(PCR_MINOR_VERSION, PCR_MINOR_VERSION);
+ genVal(PCR_MAJOR_VERSION, PCR_MAJOR_VERSION);
+
+ genSpc();
+
+ genDef(Pc, KPCR, MinorVersion);
+ genDef(Pc, KPCR, MajorVersion);
+ genDef(Pc, KPCR, InterruptRoutine);
+ genDef(Pc, KPCR, PcrPage2);
+ genDef(Pc, KPCR, Kseg0Top);
+ genDef(Pc, KPCR, FirstLevelDcacheSize);
+ genDef(Pc, KPCR, FirstLevelDcacheFillSize);
+ genDef(Pc, KPCR, FirstLevelIcacheSize);
+ genDef(Pc, KPCR, FirstLevelIcacheFillSize);
+ genDef(Pc, KPCR, SecondLevelDcacheSize);
+ genDef(Pc, KPCR, SecondLevelDcacheFillSize);
+ genDef(Pc, KPCR, SecondLevelIcacheSize);
+ genDef(Pc, KPCR, SecondLevelIcacheFillSize);
+ genDef(Pc, KPCR, Prcb);
+ genDef(Pc, KPCR, Teb);
+ genDef(Pc, KPCR, DcacheAlignment);
+ genDef(Pc, KPCR, DcacheFillSize);
+ genDef(Pc, KPCR, IcacheAlignment);
+ genDef(Pc, KPCR, IcacheFillSize);
+ genDef(Pc, KPCR, ProcessorVersion);
+ genDef(Pc, KPCR, ProcessorRevision);
+ genDef(Pc, KPCR, ProfileInterval);
+ genDef(Pc, KPCR, ProfileCount);
+ genDef(Pc, KPCR, StallExecutionCount);
+ genDef(Pc, KPCR, StallScaleFactor);
+ genDef(Pc, KPCR, CachePolicy);
+ genDef(Pc, KPCR, IcacheMode);
+ genDef(Pc, KPCR, DcacheMode);
+ genDef(Pc, KPCR, IrqlMask);
+ genDef(Pc, KPCR, IrqlTable);
+ genDef(Pc, KPCR, CurrentIrql);
+ genDef(Pc, KPCR, Number);
+ genDef(Pc, KPCR, SetMember);
+ genDef(Pc, KPCR, CurrentThread);
+ genDef(Pc, KPCR, AlignedCachePolicy);
+ genDef(Pc, KPCR, SoftwareInterrupt);
+ genDef(Pc, KPCR, ApcInterrupt);
+ genDef(Pc, KPCR, DispatchInterrupt);
+ genDef(Pc, KPCR, NotMember);
+ genDef(Pc, KPCR, SystemReserved);
+ genDef(Pc, KPCR, HalReserved);
+
+ DisableInc (HALPPC);
+
+ genDef(Pc, KPCR, FirstLevelActive);
+ genDef(Pc, KPCR, SystemServiceDispatchStart);
+ genDef(Pc, KPCR, SystemServiceDispatchEnd);
+ genDef(Pc, KPCR, InterruptStack);
+ genDef(Pc, KPCR, QuantumEnd);
+ genDef(Pc, KPCR, InitialStack);
+ genDef(Pc, KPCR, PanicStack);
+ genDef(Pc, KPCR, BadVaddr);
+ genDef(Pc, KPCR, StackLimit);
+ genDef(Pc, KPCR, SavedStackLimit);
+ genDef(Pc, KPCR, SavedV0);
+ genDef(Pc, KPCR, SavedV1);
+ genDef(Pc, KPCR, DebugActive);
+ genDef(Pc, KPCR, GprSave);
+ genDef(Pc, KPCR, SiR0);
+ genDef(Pc, KPCR, SiR2);
+ genDef(Pc, KPCR, SiR3);
+ genDef(Pc, KPCR, SiR4);
+ genDef(Pc, KPCR, SiR5);
+ genDef(Pc, KPCR, PgDirRa);
+ genDef(Pc, KPCR, OnInterruptStack);
+ genDef(Pc, KPCR, SavedInitialStack);
+
+ genVal(ProcessorControlRegisterLength, ((sizeof(KPCR) + 15) & ~15));
+
+ genSpc();
+
+ genDef(Pc2, KUSER_SHARED_DATA, TickCountLow);
+ genDef(Pc2, KUSER_SHARED_DATA, TickCountMultiplier);
+ genDef(Pc2, KUSER_SHARED_DATA, InterruptTime);
+ genDef(Pc2, KUSER_SHARED_DATA, SystemTime);
+
+ //
+ // Offsets to elements within the InterruptRoutine table.
+ //
+
+ genSpc();
+
+ genVal(IrPmiVector, sizeof(unsigned) * PMI_VECTOR);
+ genVal(IrMachineCheckVector, sizeof(unsigned) * MACHINE_CHECK_VECTOR);
+ genVal(IrDeviceVector, sizeof(unsigned) * EXTERNAL_INTERRUPT_VECTOR);
+ genVal(IrDecrementVector, sizeof(unsigned) * DECREMENT_VECTOR);
+
+ //
+ // Processor block structure definitions.
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Processor Block Structure Offset Definitions");
+
+ genVal(PRCB_MINOR_VERSION, PRCB_MINOR_VERSION);
+ genVal(PRCB_MAJOR_VERSION, PRCB_MAJOR_VERSION);
+
+ genSpc();
+
+ genDef(Pb, KPRCB, MinorVersion);
+ genDef(Pb, KPRCB, MajorVersion);
+ genDef(Pb, KPRCB, CurrentThread);
+ genDef(Pb, KPRCB, NextThread);
+ genDef(Pb, KPRCB, IdleThread);
+ genDef(Pb, KPRCB, Number);
+ genDef(Pb, KPRCB, SetMember);
+ genDef(Pb, KPRCB, RestartBlock);
+ genDef(Pb, KPRCB, PcrPage);
+ genDef(Pb, KPRCB, SystemReserved);
+ genDef(Pb, KPRCB, HalReserved);
+
+ DisableInc (HALPPC);
+
+ genDef(Pb, KPRCB, DpcTime);
+ genDef(Pb, KPRCB, InterruptTime);
+ genDef(Pb, KPRCB, KernelTime);
+ genDef(Pb, KPRCB, UserTime);
+ genDef(Pb, KPRCB, AdjustDpcThreshold);
+ genDef(Pb, KPRCB, InterruptCount);
+ genDef(Pb, KPRCB, ApcBypassCount);
+ genDef(Pb, KPRCB, DpcBypassCount);
+ genDef(Pb, KPRCB, IpiFrozen);
+ genDef(Pb, KPRCB, ProcessorState);
+ genAlt(PbAlignmentFixupCount, KPRCB, KeAlignmentFixupCount);
+ genAlt(PbContextSwitches, KPRCB, KeContextSwitches);
+ genAlt(PbDcacheFlushCount, KPRCB, KeDcacheFlushCount);
+ genAlt(PbExceptionDispatchCount, KPRCB, KeExceptionDispatchCount);
+ genAlt(PbFirstLevelTbFills, KPRCB, KeFirstLevelTbFills);
+ genAlt(PbFloatingEmulationCount, KPRCB, KeFloatingEmulationCount);
+ genAlt(PbIcacheFlushCount, KPRCB, KeIcacheFlushCount);
+ genAlt(PbSecondLevelTbFills, KPRCB, KeSecondLevelTbFills);
+ genAlt(PbSystemCalls, KPRCB, KeSystemCalls);
+ genDef(Pb, KPRCB, CurrentPacket);
+ genDef(Pb, KPRCB, TargetSet);
+ genDef(Pb, KPRCB, WorkerRoutine);
+ genDef(Pb, KPRCB, RequestSummary);
+ genDef(Pb, KPRCB, SignalDone);
+ genDef(Pb, KPRCB, DpcInterruptRequested);
+ genDef(Pb, KPRCB, MaximumDpcQueueDepth);
+ genDef(Pb, KPRCB, MinimumDpcRate);
+ genDef(Pb, KPRCB, IpiCounts);
+ genDef(Pb, KPRCB, StartCount);
+ genDef(Pb, KPRCB, DpcLock);
+ genDef(Pb, KPRCB, DpcListHead);
+ genDef(Pb, KPRCB, DpcQueueDepth);
+ genDef(Pb, KPRCB, DpcCount);
+ genDef(Pb, KPRCB, DpcLastCount);
+ genDef(Pb, KPRCB, DpcRequestRate);
+ genDef(Pb, KPRCB, DpcRoutineActive);
+ genVal(ProcessorBlockLength, ((sizeof(KPRCB) + 15) & ~15));
+
+ //
+ // Immediate interprocessor command definitions.
+ //
+
+ genCom("Immediate Interprocessor Command Definitions");
+
+ genVal(IPI_APC, IPI_APC);
+ genVal(IPI_DPC, IPI_DPC);
+ genVal(IPI_FREEZE, IPI_FREEZE);
+ genVal(IPI_PACKET_READY, IPI_PACKET_READY);
+
+ //
+ // Interprocessor interrupt count structure offset definitions.
+ //
+
+ genCom("Interprocessor Interrupt Count Structure Offset Definitions");
+
+ genDef(Ic, KIPI_COUNTS, Freeze);
+ genDef(Ic, KIPI_COUNTS, Packet);
+ genDef(Ic, KIPI_COUNTS, DPC);
+ genDef(Ic, KIPI_COUNTS, APC);
+ genDef(Ic, KIPI_COUNTS, FlushSingleTb);
+ genDef(Ic, KIPI_COUNTS, FlushEntireTb);
+ genDef(Ic, KIPI_COUNTS, ChangeColor);
+ genDef(Ic, KIPI_COUNTS, SweepDcache);
+ genDef(Ic, KIPI_COUNTS, SweepIcache);
+ genDef(Ic, KIPI_COUNTS, SweepIcacheRange);
+ genDef(Ic, KIPI_COUNTS, FlushIoBuffers);
+
+ //
+ // Context frame offset definitions and flag definitions.
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Context Frame Offset and Flag Definitions");
+
+ genVal(CONTEXT_FULL, CONTEXT_FULL);
+ genVal(CONTEXT_CONTROL, CONTEXT_CONTROL);
+ genVal(CONTEXT_FLOATING_POINT, CONTEXT_FLOATING_POINT);
+ genVal(CONTEXT_INTEGER, CONTEXT_INTEGER);
+
+ genSpc();
+
+ genDef(Cx, CONTEXT, Fpr0);
+ genDef(Cx, CONTEXT, Fpr1);
+ genDef(Cx, CONTEXT, Fpr2);
+ genDef(Cx, CONTEXT, Fpr3);
+ genDef(Cx, CONTEXT, Fpr4);
+ genDef(Cx, CONTEXT, Fpr5);
+ genDef(Cx, CONTEXT, Fpr6);
+ genDef(Cx, CONTEXT, Fpr7);
+ genDef(Cx, CONTEXT, Fpr8);
+ genDef(Cx, CONTEXT, Fpr9);
+ genDef(Cx, CONTEXT, Fpr10);
+ genDef(Cx, CONTEXT, Fpr11);
+ genDef(Cx, CONTEXT, Fpr12);
+ genDef(Cx, CONTEXT, Fpr13);
+ genDef(Cx, CONTEXT, Fpr14);
+ genDef(Cx, CONTEXT, Fpr15);
+ genDef(Cx, CONTEXT, Fpr16);
+ genDef(Cx, CONTEXT, Fpr17);
+ genDef(Cx, CONTEXT, Fpr18);
+ genDef(Cx, CONTEXT, Fpr19);
+ genDef(Cx, CONTEXT, Fpr20);
+ genDef(Cx, CONTEXT, Fpr21);
+ genDef(Cx, CONTEXT, Fpr22);
+ genDef(Cx, CONTEXT, Fpr23);
+ genDef(Cx, CONTEXT, Fpr24);
+ genDef(Cx, CONTEXT, Fpr25);
+ genDef(Cx, CONTEXT, Fpr26);
+ genDef(Cx, CONTEXT, Fpr27);
+ genDef(Cx, CONTEXT, Fpr28);
+ genDef(Cx, CONTEXT, Fpr29);
+ genDef(Cx, CONTEXT, Fpr30);
+ genDef(Cx, CONTEXT, Fpr31);
+ genDef(Cx, CONTEXT, Fpscr);
+ genDef(Cx, CONTEXT, Gpr0);
+ genDef(Cx, CONTEXT, Gpr1);
+ genDef(Cx, CONTEXT, Gpr2);
+ genDef(Cx, CONTEXT, Gpr3);
+ genDef(Cx, CONTEXT, Gpr4);
+ genDef(Cx, CONTEXT, Gpr5);
+ genDef(Cx, CONTEXT, Gpr6);
+ genDef(Cx, CONTEXT, Gpr7);
+ genDef(Cx, CONTEXT, Gpr8);
+ genDef(Cx, CONTEXT, Gpr9);
+ genDef(Cx, CONTEXT, Gpr10);
+ genDef(Cx, CONTEXT, Gpr11);
+ genDef(Cx, CONTEXT, Gpr12);
+ genDef(Cx, CONTEXT, Gpr13);
+ genDef(Cx, CONTEXT, Gpr14);
+ genDef(Cx, CONTEXT, Gpr15);
+ genDef(Cx, CONTEXT, Gpr16);
+ genDef(Cx, CONTEXT, Gpr17);
+ genDef(Cx, CONTEXT, Gpr18);
+ genDef(Cx, CONTEXT, Gpr19);
+ genDef(Cx, CONTEXT, Gpr20);
+ genDef(Cx, CONTEXT, Gpr21);
+ genDef(Cx, CONTEXT, Gpr22);
+ genDef(Cx, CONTEXT, Gpr23);
+ genDef(Cx, CONTEXT, Gpr24);
+ genDef(Cx, CONTEXT, Gpr25);
+ genDef(Cx, CONTEXT, Gpr26);
+ genDef(Cx, CONTEXT, Gpr27);
+ genDef(Cx, CONTEXT, Gpr28);
+ genDef(Cx, CONTEXT, Gpr29);
+ genDef(Cx, CONTEXT, Gpr30);
+ genDef(Cx, CONTEXT, Gpr31);
+ genDef(Cx, CONTEXT, Cr);
+ genDef(Cx, CONTEXT, Xer);
+ genDef(Cx, CONTEXT, Msr);
+ genDef(Cx, CONTEXT, Iar);
+ genDef(Cx, CONTEXT, Lr);
+ genDef(Cx, CONTEXT, Ctr);
+ genDef(Cx, CONTEXT, ContextFlags);
+ genDef(Cx, CONTEXT, Dr0);
+ genDef(Cx, CONTEXT, Dr1);
+ genDef(Cx, CONTEXT, Dr2);
+ genDef(Cx, CONTEXT, Dr3);
+ genDef(Cx, CONTEXT, Dr4);
+ genDef(Cx, CONTEXT, Dr5);
+ genDef(Cx, CONTEXT, Dr6);
+ genDef(Cx, CONTEXT, Dr7);
+
+ genVal(ContextFrameLength, (sizeof(CONTEXT) + 15) & (~15));
+
+ //
+ // Call/return stack frame header offset definitions.
+ //
+
+ genCom("Call/Return Stack Frame Header Offset Definitions and Length");
+
+ genDef(Cr, STACK_FRAME_HEADER, BackChain);
+ genDef(Cr, STACK_FRAME_HEADER, GlueSaved1);
+ genDef(Cr, STACK_FRAME_HEADER, GlueSaved2);
+ genDef(Cr, STACK_FRAME_HEADER, Reserved1);
+ genDef(Cr, STACK_FRAME_HEADER, Spare1);
+ genDef(Cr, STACK_FRAME_HEADER, Spare2);
+
+ genDef(Cr, STACK_FRAME_HEADER, Parameter0);
+ genDef(Cr, STACK_FRAME_HEADER, Parameter1);
+ genDef(Cr, STACK_FRAME_HEADER, Parameter2);
+ genDef(Cr, STACK_FRAME_HEADER, Parameter3);
+ genDef(Cr, STACK_FRAME_HEADER, Parameter4);
+ genDef(Cr, STACK_FRAME_HEADER, Parameter5);
+ genDef(Cr, STACK_FRAME_HEADER, Parameter6);
+ genDef(Cr, STACK_FRAME_HEADER, Parameter7);
+
+ genVal(StackFrameHeaderLength, (sizeof(STACK_FRAME_HEADER) + 7) & (~7));
+
+ //
+ // Exception frame offset definitions.
+ //
+
+ genCom("Exception Frame Offset Definitions and Length");
+
+ genDef(Ex, KEXCEPTION_FRAME, Gpr13);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr14);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr15);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr16);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr17);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr18);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr19);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr20);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr21);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr22);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr23);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr24);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr25);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr26);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr27);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr28);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr29);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr30);
+ genDef(Ex, KEXCEPTION_FRAME, Gpr31);
+
+ genDef(Ex, KEXCEPTION_FRAME, Fpr14);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr15);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr16);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr17);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr18);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr19);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr20);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr21);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr22);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr23);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr24);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr25);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr26);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr27);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr28);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr29);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr30);
+ genDef(Ex, KEXCEPTION_FRAME, Fpr31);
+
+ genVal(ExceptionFrameLength, (sizeof(KEXCEPTION_FRAME) + 7) & (~7));
+
+ //
+ // Swap Frame offset definitions.
+ //
+
+ DisableInc (HALPPC);
+
+ genCom("Swap Frame Definitions and Length");
+
+ genDef(Sw, KSWAP_FRAME, ConditionRegister);
+ genDef(Sw, KSWAP_FRAME, SwapReturn);
+
+ genVal(SwapFrameLength, (sizeof(KSWAP_FRAME) + 7) & (~7));
+
+ EnableInc (HALPPC);
+
+ //
+ // Jump buffer offset definitions.
+ //
+
+ DisableInc (HALPPC);
+
+ genCom("Jump Offset Definitions and Length");
+
+ genDef(Jb, _JUMP_BUFFER, Fpr14);
+ genDef(Jb, _JUMP_BUFFER, Fpr15);
+ genDef(Jb, _JUMP_BUFFER, Fpr16);
+ genDef(Jb, _JUMP_BUFFER, Fpr17);
+ genDef(Jb, _JUMP_BUFFER, Fpr18);
+ genDef(Jb, _JUMP_BUFFER, Fpr19);
+ genDef(Jb, _JUMP_BUFFER, Fpr20);
+ genDef(Jb, _JUMP_BUFFER, Fpr21);
+ genDef(Jb, _JUMP_BUFFER, Fpr22);
+ genDef(Jb, _JUMP_BUFFER, Fpr23);
+ genDef(Jb, _JUMP_BUFFER, Fpr24);
+ genDef(Jb, _JUMP_BUFFER, Fpr25);
+ genDef(Jb, _JUMP_BUFFER, Fpr26);
+ genDef(Jb, _JUMP_BUFFER, Fpr27);
+ genDef(Jb, _JUMP_BUFFER, Fpr28);
+ genDef(Jb, _JUMP_BUFFER, Fpr29);
+ genDef(Jb, _JUMP_BUFFER, Fpr30);
+ genDef(Jb, _JUMP_BUFFER, Fpr31);
+
+ genDef(Jb, _JUMP_BUFFER, Gpr1);
+ genDef(Jb, _JUMP_BUFFER, Gpr2);
+ genDef(Jb, _JUMP_BUFFER, Gpr13);
+ genDef(Jb, _JUMP_BUFFER, Gpr14);
+ genDef(Jb, _JUMP_BUFFER, Gpr15);
+ genDef(Jb, _JUMP_BUFFER, Gpr16);
+ genDef(Jb, _JUMP_BUFFER, Gpr17);
+ genDef(Jb, _JUMP_BUFFER, Gpr18);
+ genDef(Jb, _JUMP_BUFFER, Gpr19);
+ genDef(Jb, _JUMP_BUFFER, Gpr20);
+ genDef(Jb, _JUMP_BUFFER, Gpr21);
+ genDef(Jb, _JUMP_BUFFER, Gpr22);
+ genDef(Jb, _JUMP_BUFFER, Gpr23);
+ genDef(Jb, _JUMP_BUFFER, Gpr24);
+ genDef(Jb, _JUMP_BUFFER, Gpr25);
+ genDef(Jb, _JUMP_BUFFER, Gpr26);
+ genDef(Jb, _JUMP_BUFFER, Gpr27);
+ genDef(Jb, _JUMP_BUFFER, Gpr28);
+ genDef(Jb, _JUMP_BUFFER, Gpr29);
+ genDef(Jb, _JUMP_BUFFER, Gpr30);
+ genDef(Jb, _JUMP_BUFFER, Gpr31);
+
+ genDef(Jb, _JUMP_BUFFER, Cr);
+ genDef(Jb, _JUMP_BUFFER, Iar);
+ genDef(Jb, _JUMP_BUFFER, Type);
+
+ //
+ // Trap frame offset definitions.
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Trap Frame Offset Definitions and Length");
+
+ genDef(Tr, KTRAP_FRAME, TrapFrame);
+ genDef(Tr, KTRAP_FRAME, OldIrql);
+ genDef(Tr, KTRAP_FRAME, PreviousMode);
+ genDef(Tr, KTRAP_FRAME, SavedApcStateIndex);
+ genDef(Tr, KTRAP_FRAME, SavedKernelApcDisable);
+ genDef(Tr, KTRAP_FRAME, ExceptionRecord);
+
+ genDef(Tr, KTRAP_FRAME, Gpr0);
+ genDef(Tr, KTRAP_FRAME, Gpr1);
+ genDef(Tr, KTRAP_FRAME, Gpr2);
+ genDef(Tr, KTRAP_FRAME, Gpr3);
+ genDef(Tr, KTRAP_FRAME, Gpr4);
+ genDef(Tr, KTRAP_FRAME, Gpr5);
+ genDef(Tr, KTRAP_FRAME, Gpr6);
+ genDef(Tr, KTRAP_FRAME, Gpr7);
+ genDef(Tr, KTRAP_FRAME, Gpr8);
+ genDef(Tr, KTRAP_FRAME, Gpr9);
+ genDef(Tr, KTRAP_FRAME, Gpr10);
+ genDef(Tr, KTRAP_FRAME, Gpr11);
+ genDef(Tr, KTRAP_FRAME, Gpr12);
+
+ genDef(Tr, KTRAP_FRAME, Fpr0);
+ genDef(Tr, KTRAP_FRAME, Fpr1);
+ genDef(Tr, KTRAP_FRAME, Fpr2);
+ genDef(Tr, KTRAP_FRAME, Fpr3);
+ genDef(Tr, KTRAP_FRAME, Fpr4);
+ genDef(Tr, KTRAP_FRAME, Fpr5);
+ genDef(Tr, KTRAP_FRAME, Fpr6);
+ genDef(Tr, KTRAP_FRAME, Fpr7);
+ genDef(Tr, KTRAP_FRAME, Fpr8);
+ genDef(Tr, KTRAP_FRAME, Fpr9);
+ genDef(Tr, KTRAP_FRAME, Fpr10);
+ genDef(Tr, KTRAP_FRAME, Fpr11);
+ genDef(Tr, KTRAP_FRAME, Fpr12);
+ genDef(Tr, KTRAP_FRAME, Fpr13);
+
+ genDef(Tr, KTRAP_FRAME, Fpscr);
+ genDef(Tr, KTRAP_FRAME, Cr);
+ genDef(Tr, KTRAP_FRAME, Xer);
+ genDef(Tr, KTRAP_FRAME, Msr);
+ genDef(Tr, KTRAP_FRAME, Iar);
+ genDef(Tr, KTRAP_FRAME, Lr);
+ genDef(Tr, KTRAP_FRAME, Ctr);
+
+ genDef(Tr, KTRAP_FRAME, Dr0);
+ genDef(Tr, KTRAP_FRAME, Dr1);
+ genDef(Tr, KTRAP_FRAME, Dr2);
+ genDef(Tr, KTRAP_FRAME, Dr3);
+ genDef(Tr, KTRAP_FRAME, Dr4);
+ genDef(Tr, KTRAP_FRAME, Dr5);
+ genDef(Tr, KTRAP_FRAME, Dr6);
+ genDef(Tr, KTRAP_FRAME, Dr7);
+
+ genVal(TrapFrameLength, (sizeof(KTRAP_FRAME) + 7) & (~7));
+
+ //
+ // Usermode callout frame definitions
+ //
+
+ DisableInc (HALPPC);
+
+ genCom("Usermode callout frame definitions");
+
+ genDef(Cu, KCALLOUT_FRAME, Frame);
+ genDef(Cu, KCALLOUT_FRAME, CbStk);
+ genDef(Cu, KCALLOUT_FRAME, TrFr);
+ genDef(Cu, KCALLOUT_FRAME, InStk);
+ genDef(Cu, KCALLOUT_FRAME, TrIar);
+ genDef(Cu, KCALLOUT_FRAME, TrToc);
+ genDef(Cu, KCALLOUT_FRAME, R3);
+ genDef(Cu, KCALLOUT_FRAME, R4);
+ genDef(Cu, KCALLOUT_FRAME, Lr);
+ genDef(Cu, KCALLOUT_FRAME, Gpr);
+ genDef(Cu, KCALLOUT_FRAME, Fpr);
+
+ genVal(CuFrameLength, sizeof(KCALLOUT_FRAME));
+
+ genCom("Usermode callout user frame definitions");
+
+ genDef(Ck, UCALLOUT_FRAME, Frame);
+ genDef(Ck, UCALLOUT_FRAME, Buffer);
+ genDef(Ck, UCALLOUT_FRAME, Length);
+ genDef(Ck, UCALLOUT_FRAME, ApiNumber);
+ genDef(Ck, UCALLOUT_FRAME, Lr);
+ genDef(Ck, UCALLOUT_FRAME, Toc);
+
+ genVal(CkFrameLength, sizeof(UCALLOUT_FRAME));
+
+ //
+ // Exception stack frame definitions
+ //
+
+ genCom("Exception stack frame frame definitions");
+
+ genVal(STK_SLACK_SPACE, STK_SLACK_SPACE);
+ genAlt(TF_BASE, KEXCEPTION_STACK_FRAME, TrapFrame);
+ genAlt(KERN_SYS_CALL_FRAME, KEXCEPTION_STACK_FRAME, ExceptionFrame);
+ genAlt(EF_BASE, KEXCEPTION_STACK_FRAME, ExceptionFrame);
+ genDef(Ef, KEXCEPTION_STACK_FRAME, Lr);
+ genDef(Ef, KEXCEPTION_STACK_FRAME, Cr);
+ genAlt(USER_SYS_CALL_FRAME, KEXCEPTION_STACK_FRAME, SlackSpace);
+ genAlt(STACK_DELTA_NEWSTK, KEXCEPTION_STACK_FRAME, SlackSpace);
+ genVal(STACK_DELTA, sizeof(KEXCEPTION_STACK_FRAME));
+
+ EnableInc (HALPPC);
+
+ //
+ // Processor State Frame offsets relative to base
+ //
+
+ genCom("Processor State Frame Offset Definitions");
+
+ genDef(Ps, KPROCESSOR_STATE, ContextFrame);
+ genDef(Ps, KPROCESSOR_STATE, SpecialRegisters);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr0);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr1);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr2);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr3);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr4);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr5);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr6);
+ genDef(Sr, KSPECIAL_REGISTERS, KernelDr7);
+ genDef(Sr, KSPECIAL_REGISTERS, Sprg0);
+ genDef(Sr, KSPECIAL_REGISTERS, Sprg1);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr0);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr1);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr2);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr3);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr4);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr5);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr6);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr7);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr8);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr9);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr10);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr11);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr12);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr13);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr14);
+ genDef(Sr, KSPECIAL_REGISTERS, Sr15);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT0L);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT0U);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT1L);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT1U);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT2L);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT2U);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT3L);
+ genDef(Sr, KSPECIAL_REGISTERS, DBAT3U);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT0L);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT0U);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT1L);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT1U);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT2L);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT2U);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT3L);
+ genDef(Sr, KSPECIAL_REGISTERS, IBAT3U);
+ genDef(Sr, KSPECIAL_REGISTERS, Sdr1);
+
+ genVal(ProcessorStateLength, ((sizeof(KPROCESSOR_STATE) + 15) & ~15));
+
+ //
+ // Loader Parameter Block offset definitions.
+ //
+
+ genCom("Loader Parameter Block Offset Definitions");
+
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, LoadOrderListHead);
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, MemoryDescriptorListHead);
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, KernelStack);
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, Prcb);
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, Process);
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, Thread);
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, RegistryLength);
+ genDef(Lpb, LOADER_PARAMETER_BLOCK, RegistryBase);
+ genAlt(LpbInterruptStack, LOADER_PARAMETER_BLOCK, u.Ppc.InterruptStack);
+ genAlt(LpbFirstLevelDcacheSize, LOADER_PARAMETER_BLOCK, u.Ppc.FirstLevelDcacheSize);
+ genAlt(LpbFirstLevelDcacheFillSize, LOADER_PARAMETER_BLOCK, u.Ppc.FirstLevelDcacheFillSize);
+ genAlt(LpbFirstLevelIcacheSize, LOADER_PARAMETER_BLOCK, u.Ppc.FirstLevelIcacheSize);
+ genAlt(LpbFirstLevelIcacheFillSize, LOADER_PARAMETER_BLOCK, u.Ppc.FirstLevelIcacheFillSize);
+ genAlt(LpbHashedPageTable, LOADER_PARAMETER_BLOCK, u.Ppc.HashedPageTable);
+ genAlt(LpbPanicStack, LOADER_PARAMETER_BLOCK, u.Ppc.PanicStack);
+ genAlt(LpbPcrPage, LOADER_PARAMETER_BLOCK, u.Ppc.PcrPage);
+ genAlt(LpbPdrPage, LOADER_PARAMETER_BLOCK, u.Ppc.PdrPage);
+ genAlt(LpbSecondLevelDcacheSize, LOADER_PARAMETER_BLOCK, u.Ppc.SecondLevelDcacheSize);
+ genAlt(LpbSecondLevelDcacheFillSize, LOADER_PARAMETER_BLOCK, u.Ppc.SecondLevelDcacheFillSize);
+ genAlt(LpbSecondLevelIcacheSize, LOADER_PARAMETER_BLOCK, u.Ppc.SecondLevelIcacheSize);
+ genAlt(LpbSecondLevelIcacheFillSize, LOADER_PARAMETER_BLOCK, u.Ppc.SecondLevelIcacheFillSize);
+ genAlt(LpbPcrPage2, LOADER_PARAMETER_BLOCK, u.Ppc.PcrPage2);
+ genAlt(LpbIcacheMode, LOADER_PARAMETER_BLOCK, u.Ppc.IcacheMode);
+ genAlt(LpbDcacheMode, LOADER_PARAMETER_BLOCK, u.Ppc.DcacheMode);
+ genAlt(LpbNumberCongruenceClasses, LOADER_PARAMETER_BLOCK, u.Ppc.NumberCongruenceClasses);
+ genAlt(LpbKseg0Top, LOADER_PARAMETER_BLOCK, u.Ppc.Kseg0Top);
+ genAlt(LpbHashedPageTableSize, LOADER_PARAMETER_BLOCK, u.Ppc.HashedPageTableSize);
+ genAlt(LpbKernelKseg0PagesDescriptor, LOADER_PARAMETER_BLOCK, u.Ppc.KernelKseg0PagesDescriptor);
+ genAlt(LpbMinimumBlockLength, LOADER_PARAMETER_BLOCK, u.Ppc.MinimumBlockLength);
+ genAlt(LpbMaximumBlockLength, LOADER_PARAMETER_BLOCK, u.Ppc.MaximumBlockLength);
+
+ //
+ // Memory Allocation Descriptor offset definitions.
+ //
+
+ genCom("Memory Allocation Descriptor Offset Definitions");
+
+ genDef(Mad, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
+ genDef(Mad, MEMORY_ALLOCATION_DESCRIPTOR, MemoryType);
+ genDef(Mad, MEMORY_ALLOCATION_DESCRIPTOR, BasePage);
+ genDef(Mad, MEMORY_ALLOCATION_DESCRIPTOR, PageCount);
+
+ DisableInc (HALPPC);
+
+ //
+ // Address space layout definitions
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Address Space Layout Definitions");
+
+ genVal(KUSEG_BASE, KUSEG_BASE);
+ genVal(KSEG0_BASE, KSEG0_BASE);
+ dumpf("#define KSEG1_BASE PCR->Kseg0Top\n");
+ dumpf("#define KSEG2_BASE KSEG1_BASE\n");
+
+ DisableInc (HALPPC);
+
+ genVal(SYSTEM_BASE, SYSTEM_BASE);
+ genVal(PDE_BASE, PDE_BASE);
+ genVal(PTE_BASE, PTE_BASE);
+
+ //
+ // Page table and page directory entry definitions
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Page Table and Directory Entry Definitions");
+
+ genVal(PAGE_SIZE, PAGE_SIZE);
+ genVal(PAGE_SHIFT, PAGE_SHIFT);
+ genVal(PDI_SHIFT, PDI_SHIFT);
+ genVal(PTI_SHIFT, PTI_SHIFT);
+
+ DisableInc (HALPPC);
+
+ //
+ // Breakpoint instruction definitions
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Breakpoint Definitions");
+
+ genVal(USER_BREAKPOINT, USER_BREAKPOINT);
+ genVal(KERNEL_BREAKPOINT, KERNEL_BREAKPOINT);
+ genVal(BREAKIN_BREAKPOINT, BREAKIN_BREAKPOINT);
+
+ DisableInc (HALPPC);
+
+ genVal(BRANCH_TAKEN_BREAKPOINT, BRANCH_TAKEN_BREAKPOINT);
+ genVal(BRANCH_NOT_TAKEN_BREAKPOINT, BRANCH_NOT_TAKEN_BREAKPOINT);
+ genVal(SINGLE_STEP_BREAKPOINT, SINGLE_STEP_BREAKPOINT);
+ genVal(DIVIDE_OVERFLOW_BREAKPOINT, DIVIDE_OVERFLOW_BREAKPOINT);
+ genVal(DIVIDE_BY_ZERO_BREAKPOINT, DIVIDE_BY_ZERO_BREAKPOINT);
+ genVal(RANGE_CHECK_BREAKPOINT, RANGE_CHECK_BREAKPOINT);
+ genVal(STACK_OVERFLOW_BREAKPOINT, STACK_OVERFLOW_BREAKPOINT);
+ genVal(MULTIPLY_OVERFLOW_BREAKPOINT, MULTIPLY_OVERFLOW_BREAKPOINT);
+ genVal(DEBUG_PRINT_BREAKPOINT, DEBUG_PRINT_BREAKPOINT);
+ genVal(DEBUG_PROMPT_BREAKPOINT, DEBUG_PROMPT_BREAKPOINT);
+ genVal(DEBUG_STOP_BREAKPOINT, DEBUG_STOP_BREAKPOINT);
+ genVal(DEBUG_LOAD_SYMBOLS_BREAKPOINT, DEBUG_LOAD_SYMBOLS_BREAKPOINT);
+ genVal(DEBUG_UNLOAD_SYMBOLS_BREAKPOINT, DEBUG_UNLOAD_SYMBOLS_BREAKPOINT);
+
+ //
+ // Miscellaneous definitions
+ //
+
+ EnableInc (HALPPC);
+
+ genCom("Miscellaneous Definitions");
+
+ genVal(Executive, Executive);
+ genVal(KernelMode, KernelMode);
+ genVal(FALSE, FALSE);
+ genVal(TRUE, TRUE);
+ genVal(UNCACHED_POLICY, UNCACHED_POLICY);
+ genVal(KiPcr, KIPCR);
+ genVal(KiPcr2, KIPCR2);
+
+ DisableInc (HALPPC);
+
+ genVal(BASE_PRIORITY_THRESHOLD, BASE_PRIORITY_THRESHOLD);
+ genVal(EVENT_PAIR_INCREMENT, EVENT_PAIR_INCREMENT);
+ genVal(LOW_REALTIME_PRIORITY, LOW_REALTIME_PRIORITY);
+ genVal(KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
+ genVal(KERNEL_LARGE_STACK_COMMIT, KERNEL_LARGE_STACK_COMMIT);
+ genVal(MM_USER_PROBE_ADDRESS, MM_USER_PROBE_ADDRESS);
+ genVal(ROUND_TO_NEAREST, ROUND_TO_NEAREST);
+ genVal(ROUND_TO_ZERO, ROUND_TO_ZERO);
+ genVal(ROUND_TO_PLUS_INFINITY, ROUND_TO_PLUS_INFINITY);
+ genVal(ROUND_TO_MINUS_INFINITY, ROUND_TO_MINUS_INFINITY);
+ genVal(CLOCK_QUANTUM_DECREMENT, CLOCK_QUANTUM_DECREMENT);
+ genVal(READY_SKIP_QUANTUM, READY_SKIP_QUANTUM);
+ genVal(THREAD_QUANTUM, THREAD_QUANTUM);
+ genVal(WAIT_QUANTUM_DECREMENT, WAIT_QUANTUM_DECREMENT);
+ genVal(ROUND_TRIP_DECREMENT_COUNT, ROUND_TRIP_DECREMENT_COUNT);
+
+ //
+ // Close header file.
+ //
+
+ fprintf(stderr, " Finished\n");
+ return;
+}
+
+VOID
+dumpf (const char *format, ...)
+{
+ va_list(arglist);
+
+ va_start(arglist, format);
+
+ if (OutputEnabled & KSPPC) {
+ vfprintf (KsPpc, format, arglist);
+ }
+
+ if (OutputEnabled & HALPPC) {
+ vfprintf (HalPpc, format, arglist);
+ }
+
+ va_end(arglist);
+}
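
For context, the EnableInc/DisableInc bracketing used throughout the definitions above simply toggles bits in the OutputEnabled mask that dumpf tests, so a given value can be routed to the kernel header, the HAL header, or both. A minimal sketch of that arrangement (the KSPPC/HALPPC values, the mask type, and the stream setup shown here are assumptions; the real definitions live elsewhere in this generator):

    #include <stdio.h>

    #define KSPPC   0x01                        /* route output to ksppc.h  */
    #define HALPPC  0x02                        /* route output to halppc.h */

    static unsigned long OutputEnabled = KSPPC; /* kernel header always on  */
    static FILE *KsPpc;                         /* ksppc.h output stream    */
    static FILE *HalPpc;                        /* halppc.h output stream   */

    #define EnableInc(a)    (OutputEnabled |= (a))
    #define DisableInc(a)   (OutputEnabled &= ~(a))
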
diff --git a/private/ntos/ke/ppc/getsetrg.c b/private/ntos/ke/ppc/getsetrg.c
new file mode 100644
index 000000000..f3d08f49f
--- /dev/null
+++ b/private/ntos/ke/ppc/getsetrg.c
@@ -0,0 +1,516 @@
+/*++
+
+Copyright (c) 1993 IBM Corporation and Microsoft Corporation
+
+Module Name:
+
+ getsetrg.c
+
+Abstract:
+
+ This module implements the code necessary to get and set register values.
+ These routines are used during the emulation of unaligned data references
+ and floating point exceptions.
+
+Author:
+
+ Rick Simpson 6-Aug-1993
+
+ Based on MIPS version by David N. Cutler (davec) 17-Jun-1991
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+ULONG
+KiGetRegisterValue (
+ IN ULONG Register,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to get the value of a register from the specified
+ exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ returned. Only GPRs (integer regs) are supported, numbered 0..31.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The value of the specified register is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the GP register number.
+ //
+
+ switch (Register) {
+ case 0:
+ return TrapFrame->Gpr0;
+ case 1:
+ return TrapFrame->Gpr1;
+ case 2:
+ return TrapFrame->Gpr2;
+ case 3:
+ return TrapFrame->Gpr3;
+ case 4:
+ return TrapFrame->Gpr4;
+ case 5:
+ return TrapFrame->Gpr5;
+ case 6:
+ return TrapFrame->Gpr6;
+ case 7:
+ return TrapFrame->Gpr7;
+ case 8:
+ return TrapFrame->Gpr8;
+ case 9:
+ return TrapFrame->Gpr9;
+ case 10:
+ return TrapFrame->Gpr10;
+ case 11:
+ return TrapFrame->Gpr11;
+ case 12:
+ return TrapFrame->Gpr12;
+ case 13:
+ return ExceptionFrame->Gpr13;
+ case 14:
+ return ExceptionFrame->Gpr14;
+ case 15:
+ return ExceptionFrame->Gpr15;
+ case 16:
+ return ExceptionFrame->Gpr16;
+ case 17:
+ return ExceptionFrame->Gpr17;
+ case 18:
+ return ExceptionFrame->Gpr18;
+ case 19:
+ return ExceptionFrame->Gpr19;
+ case 20:
+ return ExceptionFrame->Gpr20;
+ case 21:
+ return ExceptionFrame->Gpr21;
+ case 22:
+ return ExceptionFrame->Gpr22;
+ case 23:
+ return ExceptionFrame->Gpr23;
+ case 24:
+ return ExceptionFrame->Gpr24;
+ case 25:
+ return ExceptionFrame->Gpr25;
+ case 26:
+ return ExceptionFrame->Gpr26;
+ case 27:
+ return ExceptionFrame->Gpr27;
+ case 28:
+ return ExceptionFrame->Gpr28;
+ case 29:
+ return ExceptionFrame->Gpr29;
+ case 30:
+ return ExceptionFrame->Gpr30;
+ case 31:
+ return ExceptionFrame->Gpr31;
+ }
+ return(0); // to eliminate a compiler warning
+}
+
+VOID
+KiSetRegisterValue (
+ IN ULONG Register,
+ IN ULONG Value,
+ OUT PKEXCEPTION_FRAME ExceptionFrame,
+ OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the value of a register in the specified
+ exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ stored. This routine handles only GPRs (integer regs), numbered 0..31.
+
+ Value - Supplies the value to be stored in the specified register.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the GP register number.
+ //
+
+ switch (Register) {
+
+ case 0:
+ TrapFrame->Gpr0 = Value;
+ return;
+ case 1:
+ TrapFrame->Gpr1 = Value;
+ return;
+ case 2:
+ TrapFrame->Gpr2 = Value;
+ return;
+ case 3:
+ TrapFrame->Gpr3 = Value;
+ return;
+ case 4:
+ TrapFrame->Gpr4 = Value;
+ return;
+ case 5:
+ TrapFrame->Gpr5 = Value;
+ return;
+ case 6:
+ TrapFrame->Gpr6 = Value;
+ return;
+ case 7:
+ TrapFrame->Gpr7 = Value;
+ return;
+ case 8:
+ TrapFrame->Gpr8 = Value;
+ return;
+ case 9:
+ TrapFrame->Gpr9 = Value;
+ return;
+ case 10:
+ TrapFrame->Gpr10 = Value;
+ return;
+ case 11:
+ TrapFrame->Gpr11 = Value;
+ return;
+ case 12:
+ TrapFrame->Gpr12 = Value;
+ return;
+ case 13:
+ ExceptionFrame->Gpr13 = Value;
+ return;
+ case 14:
+ ExceptionFrame->Gpr14 = Value;
+ return;
+ case 15:
+ ExceptionFrame->Gpr15 = Value;
+ return;
+ case 16:
+ ExceptionFrame->Gpr16 = Value;
+ return;
+ case 17:
+ ExceptionFrame->Gpr17 = Value;
+ return;
+ case 18:
+ ExceptionFrame->Gpr18 = Value;
+ return;
+ case 19:
+ ExceptionFrame->Gpr19 = Value;
+ return;
+ case 20:
+ ExceptionFrame->Gpr20 = Value;
+ return;
+ case 21:
+ ExceptionFrame->Gpr21 = Value;
+ return;
+ case 22:
+ ExceptionFrame->Gpr22 = Value;
+ return;
+ case 23:
+ ExceptionFrame->Gpr23 = Value;
+ return;
+ case 24:
+ ExceptionFrame->Gpr24 = Value;
+ return;
+ case 25:
+ ExceptionFrame->Gpr25 = Value;
+ return;
+ case 26:
+ ExceptionFrame->Gpr26 = Value;
+ return;
+ case 27:
+ ExceptionFrame->Gpr27 = Value;
+ return;
+ case 28:
+ ExceptionFrame->Gpr28 = Value;
+ return;
+ case 29:
+ ExceptionFrame->Gpr29 = Value;
+ return;
+ case 30:
+ ExceptionFrame->Gpr30 = Value;
+ return;
+ case 31:
+ ExceptionFrame->Gpr31 = Value;
+ return;
+
+ }
+}
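
As a usage illustration, the alignment and floating point emulation code is expected to combine these accessors roughly as in the hypothetical helper below; the helper name, BaseReg, DestReg, and Displacement are placeholders decoded from the faulting instruction, not names from this source tree.

    VOID
    KiEmulateLoadWordSketch (           // hypothetical, for illustration only
        IN ULONG BaseReg,
        IN ULONG DestReg,
        IN LONG Displacement,
        IN PKEXCEPTION_FRAME ExceptionFrame,
        IN PKTRAP_FRAME TrapFrame
        )
    {
        ULONG EffectiveAddress;
        ULONG Datum;

        //
        // Form the effective address from the base register plus the
        // instruction's displacement, copy the (possibly unaligned) word
        // with a byte-safe copy, and deposit it in the target register.
        //

        EffectiveAddress = KiGetRegisterValue(BaseReg, ExceptionFrame, TrapFrame) +
                           Displacement;
        RtlCopyMemory(&Datum, (PVOID)EffectiveAddress, sizeof(ULONG));
        KiSetRegisterValue(DestReg, Datum, ExceptionFrame, TrapFrame);
    }
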
+
+DOUBLE
+KiGetFloatRegisterValue (
+ IN ULONG Register,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to get the value of a floating point register
+ from the specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ returned. Only FPRs (float regs) are supported, numbered 0..31.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ The value of the specified register is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the FP register number.
+ //
+
+ switch (Register) {
+ case 0:
+ return TrapFrame->Fpr0;
+ case 1:
+ return TrapFrame->Fpr1;
+ case 2:
+ return TrapFrame->Fpr2;
+ case 3:
+ return TrapFrame->Fpr3;
+ case 4:
+ return TrapFrame->Fpr4;
+ case 5:
+ return TrapFrame->Fpr5;
+ case 6:
+ return TrapFrame->Fpr6;
+ case 7:
+ return TrapFrame->Fpr7;
+ case 8:
+ return TrapFrame->Fpr8;
+ case 9:
+ return TrapFrame->Fpr9;
+ case 10:
+ return TrapFrame->Fpr10;
+ case 11:
+ return TrapFrame->Fpr11;
+ case 12:
+ return TrapFrame->Fpr12;
+ case 13:
+ return TrapFrame->Fpr13;
+ case 14:
+ return ExceptionFrame->Fpr14;
+ case 15:
+ return ExceptionFrame->Fpr15;
+ case 16:
+ return ExceptionFrame->Fpr16;
+ case 17:
+ return ExceptionFrame->Fpr17;
+ case 18:
+ return ExceptionFrame->Fpr18;
+ case 19:
+ return ExceptionFrame->Fpr19;
+ case 20:
+ return ExceptionFrame->Fpr20;
+ case 21:
+ return ExceptionFrame->Fpr21;
+ case 22:
+ return ExceptionFrame->Fpr22;
+ case 23:
+ return ExceptionFrame->Fpr23;
+ case 24:
+ return ExceptionFrame->Fpr24;
+ case 25:
+ return ExceptionFrame->Fpr25;
+ case 26:
+ return ExceptionFrame->Fpr26;
+ case 27:
+ return ExceptionFrame->Fpr27;
+ case 28:
+ return ExceptionFrame->Fpr28;
+ case 29:
+ return ExceptionFrame->Fpr29;
+ case 30:
+ return ExceptionFrame->Fpr30;
+ case 31:
+ return ExceptionFrame->Fpr31;
+ }
+ return 0.0; // to eliminate a compiler warning
+}
+
+VOID
+KiSetFloatRegisterValue (
+ IN ULONG Register,
+ IN DOUBLE Value,
+ OUT PKEXCEPTION_FRAME ExceptionFrame,
+ OUT PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to set the value of a floating point register
+ in the specified exception or trap frame.
+
+Arguments:
+
+ Register - Supplies the number of the register whose value is to be
+ stored. This routine handles only Fprs (float regs), numbered 0..31.
+
+ Value - Supplies the value to be stored in the specified register.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Dispatch on the FP register number.
+ //
+
+ switch (Register) {
+
+ case 0:
+ TrapFrame->Fpr0 = Value;
+ return;
+ case 1:
+ TrapFrame->Fpr1 = Value;
+ return;
+ case 2:
+ TrapFrame->Fpr2 = Value;
+ return;
+ case 3:
+ TrapFrame->Fpr3 = Value;
+ return;
+ case 4:
+ TrapFrame->Fpr4 = Value;
+ return;
+ case 5:
+ TrapFrame->Fpr5 = Value;
+ return;
+ case 6:
+ TrapFrame->Fpr6 = Value;
+ return;
+ case 7:
+ TrapFrame->Fpr7 = Value;
+ return;
+ case 8:
+ TrapFrame->Fpr8 = Value;
+ return;
+ case 9:
+ TrapFrame->Fpr9 = Value;
+ return;
+ case 10:
+ TrapFrame->Fpr10 = Value;
+ return;
+ case 11:
+ TrapFrame->Fpr11 = Value;
+ return;
+ case 12:
+ TrapFrame->Fpr12 = Value;
+ return;
+ case 13:
+ TrapFrame->Fpr13 = Value;
+ return;
+ case 14:
+ ExceptionFrame->Fpr14 = Value;
+ return;
+ case 15:
+ ExceptionFrame->Fpr15 = Value;
+ return;
+ case 16:
+ ExceptionFrame->Fpr16 = Value;
+ return;
+ case 17:
+ ExceptionFrame->Fpr17 = Value;
+ return;
+ case 18:
+ ExceptionFrame->Fpr18 = Value;
+ return;
+ case 19:
+ ExceptionFrame->Fpr19 = Value;
+ return;
+ case 20:
+ ExceptionFrame->Fpr20 = Value;
+ return;
+ case 21:
+ ExceptionFrame->Fpr21 = Value;
+ return;
+ case 22:
+ ExceptionFrame->Fpr22 = Value;
+ return;
+ case 23:
+ ExceptionFrame->Fpr23 = Value;
+ return;
+ case 24:
+ ExceptionFrame->Fpr24 = Value;
+ return;
+ case 25:
+ ExceptionFrame->Fpr25 = Value;
+ return;
+ case 26:
+ ExceptionFrame->Fpr26 = Value;
+ return;
+ case 27:
+ ExceptionFrame->Fpr27 = Value;
+ return;
+ case 28:
+ ExceptionFrame->Fpr28 = Value;
+ return;
+ case 29:
+ ExceptionFrame->Fpr29 = Value;
+ return;
+ case 30:
+ ExceptionFrame->Fpr30 = Value;
+ return;
+ case 31:
+ ExceptionFrame->Fpr31 = Value;
+ return;
+
+ }
+}
diff --git a/private/ntos/ke/ppc/initkr.c b/private/ntos/ke/ppc/initkr.c
new file mode 100644
index 000000000..365402448
--- /dev/null
+++ b/private/ntos/ke/ppc/initkr.c
@@ -0,0 +1,742 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ initkr.c
+
+Abstract:
+
+ This module contains the code to initialize the kernel data structures
+ and to initialize the idle thread, its process, and the processor control
+ block.
+
+Author:
+
+ David N. Cutler (davec) 11-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Put all code for kernel initialization in the INIT section. It will be
+// deallocated by memory management when phase 1 initialization is completed.
+//
+
+#if defined(ALLOC_PRAGMA)
+
+#pragma alloc_text(INIT, KiInitializeKernel)
+
+#endif
+
+#if !defined(NT_UP)
+VOID
+KiPhase0SyncIoMap (
+ VOID
+ );
+#endif
+
+VOID
+KiSetDbatInvalid(
+ IN ULONG Number
+ );
+
+VOID
+KiSetDbat(
+ IN ULONG Number,
+ IN ULONG PhysicalAddress,
+ IN ULONG VirtualAddress,
+ IN ULONG Length,
+ IN ULONG Coherence
+ );
+
+ULONG
+KiInitExceptionFilter(
+ IN PEXCEPTION_POINTERS ExceptionPointers
+ )
+{
+#if DBG
+ DbgPrint("KE: Phase0 Exception Pointers = %x\n",ExceptionPointers);
+ DbgPrint("Code %x Addr %lx Info0 %x Info1 %x Info2 %x Info3 %x\n",
+ ExceptionPointers->ExceptionRecord->ExceptionCode,
+ (ULONG)ExceptionPointers->ExceptionRecord->ExceptionAddress,
+ ExceptionPointers->ExceptionRecord->ExceptionInformation[0],
+ ExceptionPointers->ExceptionRecord->ExceptionInformation[1],
+ ExceptionPointers->ExceptionRecord->ExceptionInformation[2],
+ ExceptionPointers->ExceptionRecord->ExceptionInformation[3]
+ );
+ DbgBreakPoint();
+#endif
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+VOID
+KiInitializeKernel (
+ IN PKPROCESS Process,
+ IN PKTHREAD Thread,
+ IN PVOID IdleStack,
+ IN PKPRCB Prcb,
+ IN CCHAR Number,
+ IN PLOADER_PARAMETER_BLOCK LoaderBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function gains control after the system has been bootstrapped and
+ before the system has been initialized. Its function is to initialize
+ the kernel data structures, initialize the idle thread and process objects,
+ initialize the processor control block, call the executive initialization
+ routine, and then return to the system startup routine. This routine is
+ also called to initialize the processor specific structures when a new
+ processor is brought on line.
+
+Arguments:
+
+ Process - Supplies a pointer to a control object of type process for
+ the specified processor.
+
+ Thread - Supplies a pointer to a dispatcher object of type thread for
+ the specified processor.
+
+ IdleStack - Supplies a pointer to the base of the real kernel stack for
+ the idle thread on the specified processor.
+
+ Prcb - Supplies a pointer to a processor control block for the specified
+ processor.
+
+ Number - Supplies the number of the processor that is being
+ initialized.
+
+ LoaderBlock - Supplies a pointer to the loader parameter block.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ LONG Index;
+ KIRQL OldIrql;
+ PRESTART_BLOCK RestartBlock;
+ KPCR *Pcr;
+
+ //
+ // Before the hashed page table is set up, the PCR must be referred
+ // to by its SPRG.1 address, not by its 0xffffd000 address.
+ //
+
+ Pcr = (PKPCR)PCRsprg1;
+
+#if !defined(NT_UP)
+
+ if (Number != 0) {
+ KiPhase0SyncIoMap();
+ }
+
+#endif
+
+ //
+ // Set processor version and revision in PCR
+ //
+
+ Pcr->ProcessorRevision = KeGetPvr() & 0xFFFF;
+ Pcr->ProcessorVersion = (KeGetPvr() >> 16);
+
+ //
+ // Set global processor architecture, level and revision. The
+ // latter two are the least common denominator on an MP system.
+ //
+
+ KeProcessorArchitecture = PROCESSOR_ARCHITECTURE_PPC;
+ KeFeatureBits = 0;
+ if (KeProcessorLevel == 0 ||
+ KeProcessorLevel > (USHORT)Pcr->ProcessorVersion
+ ) {
+ KeProcessorLevel = (USHORT)Pcr->ProcessorVersion;
+ }
+ if (KeProcessorRevision == 0 ||
+ KeProcessorRevision > (USHORT)Pcr->ProcessorRevision
+ ) {
+ KeProcessorRevision = (USHORT)Pcr->ProcessorRevision;
+ }
+
+ //
+ // Perform platform dependent processor initialization.
+ //
+
+ HalInitializeProcessor(Number);
+ HalSweepIcache();
+
+ //
+ // Save the address of the loader parameter block.
+ //
+
+ KeLoaderBlock = LoaderBlock;
+
+ //
+ // Initialize the processor block.
+ //
+
+ Prcb->MinorVersion = PRCB_MINOR_VERSION;
+ Prcb->MajorVersion = PRCB_MAJOR_VERSION;
+ Prcb->BuildType = 0;
+
+#if DBG
+
+ Prcb->BuildType |= PRCB_BUILD_DEBUG;
+
+#endif
+
+#if defined(NT_UP)
+
+ Prcb->BuildType |= PRCB_BUILD_UNIPROCESSOR;
+
+#endif
+
+ Prcb->CurrentThread = Thread;
+ Prcb->NextThread = (PKTHREAD)NULL;
+ Prcb->IdleThread = Thread;
+ Prcb->SetMember = 1 << Number;
+ Prcb->PcrPage = LoaderBlock->u.Ppc.PcrPage;
+ Prcb->ProcessorState.SpecialRegisters.Sprg0 =
+ LoaderBlock->u.Ppc.PcrPage << PAGE_SHIFT;
+ Prcb->ProcessorState.SpecialRegisters.Sprg1 = (ULONG)Pcr;
+
+#if !defined(NT_UP)
+
+ Prcb->TargetSet = 0;
+ Prcb->WorkerRoutine = NULL;
+ Prcb->RequestSummary = 0;
+ Prcb->IpiFrozen = 0;
+
+#if NT_INST
+
+ Prcb->IpiCounts = &KiIpiCounts[Number];
+
+#endif
+
+#endif
+
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+
+ //
+ // Initialize DPC listhead and lock.
+ //
+
+ InitializeListHead(&Prcb->DpcListHead);
+ KeInitializeSpinLock(&Prcb->DpcLock);
+
+ //
+ // Set address of processor block.
+ //
+
+ KiProcessorBlock[Number] = Prcb;
+
+ //
+ // Initialize the idle thread initial kernel stack value.
+ //
+
+ Pcr->InitialStack = IdleStack;
+ Pcr->StackLimit = (PVOID)((ULONG)IdleStack - KERNEL_STACK_SIZE);
+
+ //
+ // Initialize all interrupt vectors to transfer control to the unexpected
+ // interrupt routine.
+ //
+ // N.B. This interrupt object is never actually "connected" to an interrupt
+ // vector via KeConnectInterrupt. It is initialized and then connected
+ // by simply storing the address of the dispatch code in the interrupt
+ // vector.
+ //
+
+ if (Number == 0) {
+
+ //
+ // Initialize the address of the interrupt dispatch routine.
+ //
+
+ KxUnexpectedInterrupt.DispatchAddress = KiUnexpectedInterrupt;
+
+ //
+ // Copy the interrupt dispatch function descriptor into the interrupt
+ // object.
+ //
+
+ KxUnexpectedInterrupt.DispatchCode[0] =
+ *(PULONG)(KxUnexpectedInterrupt.DispatchAddress);
+ KxUnexpectedInterrupt.DispatchCode[1] =
+ *(((PULONG)(KxUnexpectedInterrupt.DispatchAddress))+1);
+
+ //
+ // Initialize the context swap spinlock.
+ //
+
+ KeInitializeSpinLock(&KiContextSwapLock);
+
+ //
+ // Set the default DMA I/O coherency attributes. PowerPC
+ // architecture dictates that the D-Cache is fully coherent
+ // but the I-Cache doesn't snoop.
+ //
+
+ KiDmaIoCoherency = DMA_READ_DCACHE_INVALIDATE | DMA_WRITE_DCACHE_SNOOP;
+ }
+
+ for (Index = 0; Index < MAXIMUM_VECTOR; Index += 1) {
+ Pcr->InterruptRoutine[Index] =
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+
+ //
+ // Initialize the profile count and interval.
+ //
+
+ Pcr->ProfileCount = 0;
+ Pcr->ProfileInterval = 0x200000;
+
+ //
+ // Initialize the passive release, APC, and DPC interrupt vectors.
+ //
+
+ Pcr->InterruptRoutine[0] = KiUnexpectedInterrupt;
+// Pcr->InterruptRoutine[APC_LEVEL] = KiApcInterrupt;
+// Pcr->InterruptRoutine[DISPATCH_LEVEL] = KiDispatchInterrupt;
+
+ // On PowerPC, APC and dispatch level interrupts are handled explicitly,
+ // so calls through the dispatch table for these levels are treated as no-ops.
+
+ Pcr->InterruptRoutine[APC_LEVEL] = KiUnexpectedInterrupt;
+ Pcr->InterruptRoutine[DISPATCH_LEVEL] = KiUnexpectedInterrupt;
+
+ Pcr->ReservedVectors = (1 << PASSIVE_LEVEL) |
+ (1 << APC_LEVEL) |
+ (1 << DISPATCH_LEVEL);
+
+ //
+ // Initialize the set member for the current processor, set IRQL to
+ // APC_LEVEL, and set the processor number.
+ //
+
+ Pcr->CurrentIrql = APC_LEVEL;
+ Pcr->SetMember = 1 << Number;
+ Pcr->NotMember = ~Pcr->SetMember;
+ Pcr->Number = Number;
+
+ //
+ // Set the initial stall execution scale factor. This value will be
+ // recomputed later by the HAL.
+ //
+
+ Pcr->StallScaleFactor = 50;
+
+ //
+ // Set address of process object in thread object.
+ //
+
+ Thread->ApcState.Process = Process;
+
+ //
+ // Set the appropriate member in the active processors set.
+ //
+
+ SetMember(Number, KeActiveProcessors);
+
+ //
+ // Set the number of processors based on the maximum of the current
+ // number of processors and the current processor number.
+ //
+
+ if ((Number + 1) > KeNumberProcessors) {
+ KeNumberProcessors = Number + 1;
+ }
+
+ //
+ // If the initial processor is being initialized, then initialize the
+ // per system data structures.
+ //
+
+ if (Number == 0) {
+
+ //
+ // Initialize the kernel debugger.
+ //
+
+ if (KdInitSystem(LoaderBlock, FALSE) == FALSE) {
+ KeBugCheck(PHASE0_INITIALIZATION_FAILED);
+ }
+
+ //
+ // Sweep both the D and the I caches.
+ //
+
+ HalSweepDcache();
+ HalSweepIcache();
+
+ //
+ // Ensure there are NO stale entries in the TLB by
+ // flushing the HPT/TLB even though the HPT is fresh.
+ //
+
+ KeFlushCurrentTb();
+
+#if DBG
+ if ((PCR->IcacheMode) || (PCR->DcacheMode))
+ {
+ DbgPrint("****** Dynamic Cache Mode Kernel Invocation\n");
+ if (PCR->IcacheMode)
+ DbgPrint("****** Icache is OFF\n");
+ if (PCR->DcacheMode)
+ DbgPrint("****** Dcache is OFF\n");
+ }
+#endif
+
+ //
+ // Initialize the address of the restart block for the boot master.
+ //
+
+ Prcb->RestartBlock = SYSTEM_BLOCK->RestartBlock;
+
+ //
+ // Initialize processor block array.
+ //
+
+ for (Index = 1; Index < MAXIMUM_PROCESSORS; Index += 1) {
+ KiProcessorBlock[Index] = (PKPRCB)NULL;
+ }
+
+ //
+ // Perform architecture independent initialization.
+ //
+
+ KiInitSystem();
+
+ //
+ // Initialize idle thread process object and then set:
+ //
+ // 1. all the quantum values to the maximum possible.
+ // 2. the process in the balance set.
+ // 3. the active processor mask to the specified processor.
+ //
+
+ KeInitializeProcess(Process,
+ (KPRIORITY)0,
+ (KAFFINITY)(0xffffffff),
+ (PULONG)(PDE_BASE + ((PDE_BASE >> PDI_SHIFT - 2) & 0xffc)),
+ FALSE);
+
+ Process->ThreadQuantum = MAXCHAR;
+
+ }
+
+ //
+ // Initialize idle thread object and then set:
+ //
+ // 1. the initial kernel stack to the specified idle stack.
+ // 2. the next processor number to the specified processor.
+ // 3. the thread priority to the highest possible value.
+ // 4. the state of the thread to running.
+ // 5. the thread affinity to the specified processor.
+ // 6. the specified processor member in the process active processors
+ // set.
+ //
+
+ KeInitializeThread(Thread, (PVOID)((ULONG)IdleStack - PAGE_SIZE),
+ (PKSYSTEM_ROUTINE)KeBugCheck,
+ (PKSTART_ROUTINE)NULL,
+ (PVOID)NULL, (PCONTEXT)NULL, (PVOID)NULL, Process);
+
+ Thread->InitialStack = IdleStack;
+ Thread->StackBase = IdleStack;
+ Thread->StackLimit = (PVOID)((ULONG)IdleStack - KERNEL_STACK_SIZE);
+ Thread->NextProcessor = Number;
+ Thread->Priority = HIGH_PRIORITY;
+ Thread->State = Running;
+ Thread->Affinity = (KAFFINITY)(1 << Number);
+ Thread->WaitIrql = DISPATCH_LEVEL;
+
+ //
+ // If the current processor is 0, then set the appropriate bit in the
+ // active summary of the idle process.
+ //
+
+ if (Number == 0) {
+ SetMember(Number, Process->ActiveProcessors);
+ }
+
+ //
+ // Execute the executive initialization.
+ //
+
+ try {
+ ExpInitializeExecutive(Number, LoaderBlock);
+
+ } except (KiInitExceptionFilter(GetExceptionInformation())) {
+ KeBugCheck (PHASE0_EXCEPTION);
+ }
+
+ //
+ // If the initial processor is being initialized, then compute the
+ // timer table reciprocal value and reset the PRCB values for the
+ // controllable DPC behavior in order to reflect any registry
+ // overrides.
+ //
+
+ if (Number == 0) {
+ KiTimeIncrementReciprocal = KiComputeReciprocal((LONG)KeMaximumIncrement,
+ &KiTimeIncrementShiftCount);
+ Prcb->MaximumDpcQueueDepth = KiMaximumDpcQueueDepth;
+ Prcb->MinimumDpcRate = KiMinimumDpcRate;
+ Prcb->AdjustDpcThreshold = KiAdjustDpcThreshold;
+ }
+
+ //
+ // Raise IRQL to dispatch level and set the priority of the idle thread
+ // to zero. This will have the effect of immediately causing the phase
+ // one initialization thread to get scheduled for execution. The idle
+ // thread priority is then set to the lowest realtime priority. This is
+ // necessary so that mutexes acquired at DPC level do not cause the active
+ // matrix to get corrupted.
+ //
+
+ KeRaiseIrqlToDpcLevel(&OldIrql);
+ KeSetPriorityThread(Thread, (KPRIORITY)0);
+ Thread->Priority = LOW_REALTIME_PRIORITY;
+
+ //
+ // Raise IRQL to the highest level.
+ //
+
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+
+ //
+ // If a restart block exists for the current processor, then set boot
+ // completed.
+ //
+ // N.B. Firmware on uniprocessor machines configured for MP operation
+ // can have a restart block address of NULL.
+ //
+
+#if !defined(NT_UP)
+
+ RestartBlock = Prcb->RestartBlock;
+ if (RestartBlock != NULL) {
+ RestartBlock->BootStatus.BootFinished = 1;
+ }
+
+ //
+ // If the current processor is not 0, then set the appropriate bit in
+ // idle summary.
+ //
+
+ if (Number != 0) {
+ SetMember(Number, KiIdleSummary);
+ }
+
+#endif
+
+ return;
+}
+
+#define NumBats 3
+#define RoundDownTo8MB(x) ((x) & ~(0x7fffff))
+#define VirtBase 0xb0000000
+#define EightMeg(x) ((x) << 23)
+
+
+static struct {
+ ULONG PhysBase;
+ ULONG RefCount;
+} AllocatedBats[NumBats];
+
+PVOID
+KePhase0MapIo (
+ IN PVOID PhysicalAddress,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ Assign a Virtual to Physical address translation using PowerPC
+ Block Address Translation registers for use by the HAL prior to
+ being able to achieve the same thing using MmMapIo.
+
+ Up to three BATs can be used for this function. This routine is
+ extremely simplistic. It will attempt to assign the required
+ address range within an existing BAT register if possible. In
+ deference to the 601, the maximum range covered by a BAT is
+ limited to 8MB. On processors that support separate Instruction
+ and Data BATs, only the Data BAT will be set. Caching in the
+ region is disabled.
+
+ In the first pass at this, a full 8MB will be allocated.
+
+ The HAL should call KePhase0DeleteIoMap with the same parameters
+ to release the BAT when it is able to acquire the same physical
+ memory using MM.
+
+ WARNING: This code is NOT applicable for an MP solution. Further
+ study of this problem is required, specifically, a way of ensuring
+ a change to BATs on one processor is reflected on other processors.
+
+
+Arguments:
+
+ PhysicalAddress - Address to which the HAL needs access.
+
+ Length - Number of bytes.
+
+Return Value:
+
+ Virtual address corresponding to the requested physical address or 0
+ if unable to allocate.
+
+--*/
+
+{
+ ULONG Base, Offset;
+ LONG i, FreeBat = -1;
+
+ // The maximum allocation we allow is 8MB starting at an 8MB
+ // boundary.
+
+ Base = RoundDownTo8MB((ULONG)PhysicalAddress);
+ Offset = (ULONG)PhysicalAddress - Base;
+
+ // Check that the length is acceptable.
+
+ if ( (Offset + Length) > 0x800000 ) {
+ return (PVOID)0;
+ }
+
+ for ( i = 0 ; i < NumBats ; i++ ) {
+ if ( AllocatedBats[i].RefCount ) {
+ if ( Base == AllocatedBats[i].PhysBase ) {
+ // We have a match, reuse this bat
+ AllocatedBats[i].RefCount++;
+ return (PVOID)(VirtBase + EightMeg(i) + Offset);
+ }
+ } else {
+ // This index is not allocated, remember.
+ FreeBat = i;
+ }
+ }
+
+ // No match, we need to allocate another bat (if one is available).
+
+ if ( FreeBat == -1 ) {
+ return (PVOID)0;
+ }
+
+ AllocatedBats[FreeBat].PhysBase = Base;
+ AllocatedBats[FreeBat].RefCount = 1;
+ KiSetDbat(FreeBat + 1, Base, VirtBase + EightMeg(FreeBat), 0x800000, 6);
+ return (PVOID)(VirtBase + EightMeg(FreeBat) + Offset);
+}
+
+VOID
+KePhase0DeleteIoMap (
+ IN PVOID PhysicalAddress,
+ IN ULONG Length
+ )
+
+/*++
+
+Routine Description:
+
+ Removes a Virtual to Physical address translation that was
+ established with KePhase0MapIo.
+
+Arguments:
+
+ PhysicalAddress - Address to which the HAL needs access.
+
+ Length - Number of bytes. (Ignored)
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ ULONG Base;
+ LONG i;
+
+ Base = RoundDownTo8MB((ULONG)PhysicalAddress);
+
+ for ( i = 0 ; i < NumBats ; i++ ) {
+ if ( AllocatedBats[i].RefCount ) {
+ if ( Base == AllocatedBats[i].PhysBase ) {
+ // We have a match, detach from this bat
+ if ( --AllocatedBats[i].RefCount == 0 ) {
+ KiSetDbatInvalid(i+1);
+ }
+ return;
+ }
+ }
+ }
+
+ // if we get here we were called to detach from memory
+ // we don't have. This is insane.
+
+ KeBugCheck(MEMORY_MANAGEMENT);
+}
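
A minimal usage sketch from the HAL side (the physical address and length below are placeholders, not values taken from this source):

    PVOID Va;

    //
    // Map 4KB of device registers before MM is available; the translation
    // comes out of one of the three 8MB BAT windows.
    //

    Va = KePhase0MapIo((PVOID)0x80000000, 0x1000);
    if (Va != NULL) {

        // ... access the device through Va ...

        //
        // Once the same range has a permanent mapping (for example via
        // MmMapIoSpace), hand the BAT back.
        //

        KePhase0DeleteIoMap((PVOID)0x80000000, 0x1000);
    }
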
+
+
+#if !defined(NT_UP)
+
+VOID
+KiPhase0SyncIoMap (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This routine runs on all processors other than zero to
+ ensure that all processors have the same Block Address
+ Translations (BATs) established during phase 0.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ LONG i;
+
+ for ( i = 0 ; i < NumBats ; i++ ) {
+ if ( AllocatedBats[i].RefCount ) {
+ KiSetDbat(i + 1,
+ AllocatedBats[i].PhysBase,
+ VirtBase + EightMeg(i),
+ 0x800000,
+ 6);
+ }
+ }
+}
+
+#endif
+
diff --git a/private/ntos/ke/ppc/intobj.c b/private/ntos/ke/ppc/intobj.c
new file mode 100644
index 000000000..8d80849ad
--- /dev/null
+++ b/private/ntos/ke/ppc/intobj.c
@@ -0,0 +1,469 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ intobj.c
+
+Abstract:
+
+ This module implements the kernel interrupt object. Functions are provided
+ to initialize, connect, and disconnect interrupt objects.
+
+Author:
+
+ Peter L. Johnston (plj@vnet.ibm.com) 5-Oct-1993
+ Based on original code by David N. Cutler (davec) 3-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ 29-Jun-94 plj Updated for Daytona. (post Beta 2)
+
+--*/
+
+#include "ki.h"
+
+
+VOID
+KeInitializeInterrupt (
+ IN PKINTERRUPT Interrupt,
+ IN PKSERVICE_ROUTINE ServiceRoutine,
+ IN PVOID ServiceContext,
+ IN PKSPIN_LOCK SpinLock OPTIONAL,
+ IN ULONG Vector,
+ IN KIRQL Irql,
+ IN KIRQL SynchronizeIrql,
+ IN KINTERRUPT_MODE InterruptMode,
+ IN BOOLEAN ShareVector,
+ IN CCHAR ProcessorNumber,
+ IN BOOLEAN FloatingSave
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel interrupt object. The service routine,
+ service context, spin lock, vector, IRQL, Synchronized IRQL, and floating
+ context save flag are initialized.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+ ServiceRoutine - Supplies a pointer to a function that is to be
+ executed when an interrupt occurs via the specified interrupt
+ vector.
+
+ ServiceContext - Supplies a pointer to an arbitrary data structure which is
+ to be passed to the function specified by the ServiceRoutine parameter.
+
+ SpinLock - Supplies a pointer to an executive spin lock.
+
+ Vector - Supplies the index of the entry in the Interrupt Dispatch Table
+ that is to be associated with the ServiceRoutine function.
+
+ Irql - Supplies the request priority of the interrupting source.
+
+ SynchronizeIrql - The request priority that the interrupt should be
+ synchronized with.
+
+ InterruptMode - Supplies the mode of the interrupt; LevelSensitive or
+ Latched.
+
+ ShareVector - Supplies a boolean value that specifies whether the
+ vector can be shared with other interrupt objects or not. If FALSE
+ then the vector may not be shared; if TRUE, it may be.
+
+ ProcessorNumber - Supplies the number of the processor to which the
+ interrupt will be connected.
+
+ FloatingSave - Supplies a boolean value that determines whether the
+ floating point registers and pipe line are to be saved before calling
+ the ServiceRoutine function.
+ (currently ignored on PowerPC).
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+ //
+ // Initialize standard control object header.
+ //
+
+ Interrupt->Type = InterruptObject;
+ Interrupt->Size = sizeof(KINTERRUPT);
+
+ //
+ // Initialize the address of the service routine, the service context,
+ // the address of the spin lock, the address of the actual spin lock
+ // that will be used, the vector number, the IRQL of the interrupting
+ // source, the Synchronized IRQL of the interrupt object, the interrupt
+ // mode, the processor number, and the floating context save flag.
+ //
+
+ Interrupt->ServiceRoutine = ServiceRoutine;
+ Interrupt->ServiceContext = ServiceContext;
+
+ Interrupt->SpinLock = 0;
+ if (ARGUMENT_PRESENT(SpinLock)) {
+ Interrupt->ActualLock = SpinLock;
+ } else {
+ Interrupt->ActualLock = &Interrupt->SpinLock;
+ }
+
+ Interrupt->Vector = Vector;
+ Interrupt->Irql = Irql;
+ Interrupt->SynchronizeIrql = SynchronizeIrql;
+ Interrupt->Mode = InterruptMode;
+ Interrupt->ShareVector = ShareVector;
+ Interrupt->Number = ProcessorNumber;
+ Interrupt->FloatingSave = FloatingSave;
+
+ //
+ // Set the connected state of the interrupt object to FALSE.
+ //
+
+ Interrupt->Connected = FALSE;
+ return;
+}
+
+BOOLEAN
+KeConnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function connects an interrupt object to the interrupt vector
+ specified by the interrupt object. If the interrupt object is already
+ connected, or an attempt is made to connect to an interrupt that cannot
+ be connected, then a value of FALSE is returned. Else the specified
+ interrupt object is connected to the interrupt vector, the connected
+ state is set to TRUE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is already connected or an attempt is made to
+ connect to an interrupt vector that cannot be connected, then a value
+ of FALSE is returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ KIRQL Irql;
+ CHAR Number;
+ KIRQL OldIrql;
+ KIRQL PreviousIrql;
+ ULONG Vector;
+
+ //
+ // If the interrupt object is already connected, the interrupt vector
+ // number is invalid, an attempt is being made to connect to a vector
+ // that cannot be connected, the interrupt request level is invalid,
+ // the processor number is invalid, or the interrupt vector is less
+ // than or equal to the highest level and is not equal to the specified
+ // IRQL, then do not connect the interrupt object. Else connect the interrupt
+ // object to the specified vector and establish the proper interrupt
+ // dispatcher.
+ //
+
+ Connected = FALSE;
+ Irql = Interrupt->Irql;
+ Number = Interrupt->Number;
+ Vector = Interrupt->Vector;
+ if (
+ (Vector < MAXIMUM_VECTOR) && // will fit in interrupt table
+ (Irql <= HIGH_LEVEL) && // is at a reasonable priority
+ (Number < KeNumberProcessors) && // can run on a cpu we have
+ (
+ (Vector > HIGH_LEVEL) || // and is either EISA or
+ ((PCR->ReservedVectors & (1 << Vector)) == 0) // is NOT reserved
+ )
+ ) {
+
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1 << Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the specified interrupt vector is not connected, then
+ // connect the interrupt vector to the interrupt dispatcher
+ // and set the new interrupt mode and enable masks.
+ // Else if the interrupt is
+ // already chained, then add the new interrupt object at the end
+ // of the chain. If the interrupt vector is not chained, then
+ // start a chain with the previous interrupt object at the front
+ // of the chain. The interrupt mode of all interrupt objects in
+ // a chain must be the same.
+ //
+
+ if (Interrupt->Connected == FALSE) {
+ if (PCR->InterruptRoutine[Vector] ==
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode)) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ if (Interrupt->FloatingSave) {
+ Interrupt->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupt->Irql == Interrupt->SynchronizeIrql) {
+#if defined(NT_UP)
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)Interrupt->ServiceRoutine;
+#else
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+#endif
+
+ } else {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ //
+ // Copy the function descriptor for the Dispatch routine
+ // into DispatchCode. This will be used by KiInterruptEx-
+ // ception to dispatch the interrupt.
+ //
+
+ Interrupt->DispatchCode[0] =
+ *(PULONG)(Interrupt->DispatchAddress);
+ Interrupt->DispatchCode[1] =
+ *(((PULONG)(Interrupt->DispatchAddress))+1);
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)Interrupt->DispatchCode;
+
+ HalEnableSystemInterrupt(Vector, Irql, Interrupt->Mode);
+
+ } else if (Interrupt->ShareVector) {
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+ if (Interruptx->ShareVector &&
+ Interrupt->Mode == Interruptx->Mode) {
+ Connected = TRUE;
+ Interrupt->Connected = TRUE;
+ KeRaiseIrql((KIRQL)(max(Irql, SYNCH_LEVEL)), &PreviousIrql);
+ if (Interruptx->DispatchAddress !=
+ (PKINTERRUPT_ROUTINE)KiChainedDispatch) {
+ InitializeListHead(&Interruptx->InterruptListEntry);
+ Interruptx->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiChainedDispatch;
+ Interruptx->DispatchCode[0] =
+ *(PULONG)KiChainedDispatch;
+ Interruptx->DispatchCode[1] =
+ *(((PULONG)KiChainedDispatch)+1);
+ }
+
+ InsertTailList(&Interruptx->InterruptListEntry,
+ &Interrupt->InterruptListEntry);
+ KeLowerIrql(PreviousIrql);
+ }
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+ }
+
+ //
+ // Return whether interrupt was connected to the specified vector.
+ //
+
+ return Connected;
+}
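
For orientation, a caller (in practice IoConnectInterrupt performs this sequence on a driver's behalf) would initialize and connect an interrupt object roughly as follows; DeviceIsr, DeviceExtension, Vector, and Irql are placeholders:

    KINTERRUPT InterruptObject;

    KeInitializeInterrupt(&InterruptObject,
                          DeviceIsr,            // PKSERVICE_ROUTINE (placeholder)
                          DeviceExtension,      // service context (placeholder)
                          NULL,                 // use the interrupt's own spin lock
                          Vector,               // from HalGetInterruptVector
                          Irql,                 // interrupt source IRQL
                          Irql,                 // synchronize at the source IRQL
                          LevelSensitive,       // interrupt mode
                          TRUE,                 // vector may be shared
                          0,                    // connect on processor 0
                          FALSE);               // no floating state save

    if (KeConnectInterrupt(&InterruptObject) == FALSE) {
        // the vector was reserved, out of range, or otherwise unusable
    }
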
+
+BOOLEAN
+KeDisconnectInterrupt (
+ IN PKINTERRUPT Interrupt
+ )
+
+/*++
+
+Routine Description:
+
+ This function disconnects an interrupt object from the interrupt vector
+ specified by the interrupt object. If the interrupt object is not
+ connected, then a value of FALSE is returned. Else the specified interrupt
+ object is disconnected from the interrupt vector, the connected state is
+ set to FALSE, and TRUE is returned as the function value.
+
+Arguments:
+
+ Interrupt - Supplies a pointer to a control object of type interrupt.
+
+Return Value:
+
+ If the interrupt object is not connected, then a value of FALSE is
+ returned. Else a value of TRUE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Connected;
+ PKINTERRUPT Interruptx;
+ PKINTERRUPT Interrupty;
+ KIRQL Irql;
+ KIRQL OldIrql;
+ KIRQL PreviousIrql;
+ ULONG Vector;
+
+ //
+ // Set system affinity to the specified processor.
+ //
+
+ KeSetSystemAffinityThread((KAFFINITY)(1 << Interrupt->Number));
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the interrupt object is connected, then disconnect it from the
+ // specified vector.
+ //
+
+ Connected = Interrupt->Connected;
+ if (Connected != FALSE) {
+ Irql = Interrupt->Irql;
+ Vector = Interrupt->Vector;
+
+ //
+ // If the specified interrupt vector is not connected to the chained
+ // interrupt dispatcher, then disconnect it by setting its dispatch
+ // address to the unexpected interrupt routine. Else remove the
+ // interrupt object from the interrupt chain. If there is only
+ // one entry remaining in the list, then reestablish the dispatch
+ // address.
+ //
+
+ Interruptx = CONTAINING_RECORD(PCR->InterruptRoutine[Vector],
+ KINTERRUPT,
+ DispatchCode[0]);
+
+ if (Interruptx->DispatchAddress ==
+ (PKINTERRUPT_ROUTINE)KiChainedDispatch) {
+ KeRaiseIrql((KIRQL)(max(Irql, SYNCH_LEVEL)), &PreviousIrql);
+ if (Interrupt == Interruptx) {
+ Interruptx = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT, InterruptListEntry);
+ Interruptx->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiChainedDispatch;
+ Interruptx->DispatchCode[0] = *(PULONG)KiChainedDispatch;
+ Interruptx->DispatchCode[1] = *(((PULONG)KiChainedDispatch)+1);
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)Interruptx->DispatchCode;
+
+ }
+
+ RemoveEntryList(&Interrupt->InterruptListEntry);
+ Interrupty = CONTAINING_RECORD(Interruptx->InterruptListEntry.Flink,
+ KINTERRUPT,
+ InterruptListEntry);
+
+ if (Interruptx == Interrupty) {
+ if (Interrupt->FloatingSave) {
+ Interrupt->DispatchAddress = KiFloatingDispatch;
+
+ } else {
+ if (Interrupt->Irql == Interrupt->SynchronizeIrql) {
+#if defined(NT_UP)
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)Interrupt->ServiceRoutine;
+#else
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchSame;
+#endif
+
+ } else {
+ Interrupt->DispatchAddress =
+ (PKINTERRUPT_ROUTINE)KiInterruptDispatchRaise;
+ }
+ }
+
+ //
+ // Copy the function descriptor for the Dispatch routine
+ // into DispatchCode. This will be used by KiInterruptEx-
+ // ception to dispatch the interrupt.
+ //
+ Interrupty->DispatchCode[0] =
+ *(PULONG)(Interrupty->DispatchAddress);
+ Interrupty->DispatchCode[1] =
+ *(((PULONG)(Interrupty->DispatchAddress))+1);
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)Interrupty->DispatchCode;
+
+ }
+
+ KeLowerIrql(PreviousIrql);
+
+ } else {
+ HalDisableSystemInterrupt(Vector, Irql);
+ PCR->InterruptRoutine[Vector] =
+ (PKINTERRUPT_ROUTINE)(&KxUnexpectedInterrupt.DispatchCode);
+ }
+#ifdef NOTDEF
+ KeSweepIcache(TRUE);
+#endif
+ Interrupt->Connected = FALSE;
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Set system affinity back to the original value.
+ //
+
+ KeRevertToUserAffinityThread();
+
+ //
+ // Return whether interrupt was disconnected from the specified vector.
+ //
+
+ return Connected;
+}
diff --git a/private/ntos/ke/ppc/intsup.s b/private/ntos/ke/ppc/intsup.s
new file mode 100644
index 000000000..258a99f13
--- /dev/null
+++ b/private/ntos/ke/ppc/intsup.s
@@ -0,0 +1,862 @@
+// TITLE("Interrupt Support")
+//++
+//
+// Copyright (c) 1995 Microsoft Corporation and IBM Corporation
+//
+// Module Name:
+//
+// xxintsup.s
+//
+// Abstract:
+//
+// This module implements the PowerPC machine dependent code for
+// interrupt handling.
+//
+// Author:
+//
+// Chuck Lenzmeier (chuckl) 18-Feb-1995
+// Adapted from C code by Peter L. Johnston (plj@vnet.ibm.com) August 1993
+// Adapted from code by David N. Cutler (davec) 1-Apr-1991
+//
+// Environment:
+//
+// Kernel mode only, IRQL DISPATCH_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+ .extern __imp_KeLowerIrql
+ .extern __imp_KeRaiseIrql
+
+#if !defined(NT_UP) && SPINDBG1
+ .extern ..KiAcquireSpinLockDbg
+#endif
+
+ SBTTL("Synchronize Execution")
+//++
+//
+// BOOLEAN
+// KeSynchronizeExecution (
+// IN PKINTERRUPT Interrupt,
+// IN PKSYNCHRONIZE_ROUTINE SynchronizeRoutine,
+// IN PVOID SynchronizeContext
+// )
+//
+// Routine Description:
+//
+// This function synchronizes the execution of the specified routine
+// with the execution of the service routine associated with the
+// specified interrupt object.
+//
+// Arguments:
+//
+// Interrupt (r3) - Supplies a pointer to a control object of type interrupt.
+//
+// SynchronizeRoutine (r4) - Supplies a pointer to a function whose execution
+// is to be synchronized with the execution of the service routine associated
+// with the specified interrupt object.
+//
+// SynchronizeContext (r5) - Supplies a pointer to an arbitrary data structure
+// which is to be passed to the function specified by the SynchronizeRoutine
+// parameter.
+//
+// Return Value:
+//
+// The value returned by the SynchronizeRoutine function is returned as
+// the function value.
+//
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+kseLR: .space 4
+kseR31: .space 4
+kseR4: .space 4
+kseR5: .space 4
+kseIrql: .space 4
+kseToc: .space 4
+ .align 3 // ensure 8 byte alignment
+kseFrameLength:
+
+//
+// N.B. We are a bit footloose with the TOC pointer in this routine.
+// We do not restore it immediately after the call to KeRaiseIrql,
+// and only restore it on return from the synchronize routine. (And
+// we only restore it then because the call to KeLowerIrql needs it.)
+//
+
+ SPECIAL_ENTRY_S(KeSynchronizeExecution,_TEXT$00)
+
+ mflr r0 // get return address
+ stwu sp, -kseFrameLength(sp) // allocate stack frame
+ stw r31, kseR31(sp)
+ stw r0, kseLR(sp) // save return address
+
+ PROLOGUE_END(KeSynchronizeExecution)
+
+ stw r4, kseR4(sp) // save synchronization routine address
+ lwz r6, [toc]__imp_KeRaiseIrql(rtoc) // &&function descriptor
+ stw r5, kseR5(sp) // save synchronization routine context
+ lwz r6, 0(r6) // &function descriptor
+ stw rtoc, kseToc(sp) // save our TOC
+ lwz r5, 0(r6) // &KeRaiseIrql
+
+//
+// Raise IRQL to the synchronization level and acquire the associated
+// spin lock.
+//
+
+#if !defined(NT_UP)
+ lwz r31, InActualLock(r3) // get address of spin lock
+#endif
+
+ lwz rtoc, 4(r6) // HAL's TOC
+ lbz r3, InSynchronizeIrql(r3) // get synchronization IRQL
+ mtctr r5
+ addi r4, sp, kseIrql // compute address to save IRQL
+ bctrl
+ // N.B. skip restoring the TOC
+
+ lwz r4, kseR4(sp) // get synchronize routine descriptor
+
+#if !defined(NT_UP)
+ ACQUIRE_SPIN_LOCK(r31, r31, r5, kse_lock, kse_lock_spin)
+#endif
+
+//
+// Call specified routine passing the specified context parameter.
+//
+ lwz r5, 0(r4) // get synchronize routine address
+ lwz rtoc, 4(r4) // get synchronize routine TOC
+ mtctr r5 // put routine address in CTR
+ lwz r3, kseR5(sp) // get synchronize routine context
+ bctrl // call specified routine
+
+//
+// Release spin lock, lower IRQL to its previous level, and return the value
+// returned by the specified routine.
+//
+
+ lwz rtoc, kseToc(sp) // restore our TOC
+#if !defined(NT_UP)
+ li r5, 0 // get a 0 for spin lock
+#endif
+ lwz r6, [toc]__imp_KeLowerIrql(rtoc) // &&function descriptor
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r31, r5)
+#endif
+ lwz r6, 0(r6) // &function descriptor
+ ori r31, r3, 0 // save return value
+ lwz r5, 0(r6) // &KeLowerIrql
+
+ lbz r3, kseIrql(sp) // get saved IRQL
+ mtctr r5
+ lwz rtoc, 4(r6) // HAL's TOC
+ bctrl
+ lwz rtoc, kseToc(sp) // restore our TOC
+
+ ori r3, r31, 0 // set return value
+
+ lwz r0, kseLR(sp) // get return address
+ lwz r31, kseR31(sp) // restore r31
+ mtlr r0 // set return address
+ addi sp, sp, kseFrameLength // return stack frame
+
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r31, r5, kse_lock, kse_lock_spin)
+#endif
+
+ DUMMY_EXIT(KeSynchronizeExecution)
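
In outline, the routine above performs the following sequence (illustrative C only; the assembly also manages the TOC, reaches KeRaiseIrql/KeLowerIrql through their import descriptors, and the spin lock operations drop out on UP builds):

    BOOLEAN Status;
    KIRQL OldIrql;

    KeRaiseIrql(Interrupt->SynchronizeIrql, &OldIrql);  // block the interrupt source
    KiAcquireSpinLock(Interrupt->ActualLock);           // serialize with the ISR
    Status = SynchronizeRoutine(SynchronizeContext);    // run the caller's routine
    KiReleaseSpinLock(Interrupt->ActualLock);
    KeLowerIrql(OldIrql);
    return Status;
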
+
+ SBTTL("Chained Dispatch")
+//++
+//
+// VOID
+// KiChainedDispatch (
+// IN PKINTERRUPT Interrupt,
+// IN PVOID ServiceContext,
+// IN PVOID TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as a result of an interrupt being generated
+// via a vector that is connected to more than one interrupt object. Its
+// function is to walk the list of connected interrupt objects and call
+// each interrupt service routine. If the mode of the interrupt is latched,
+// then a complete traversal of the chain must be performed.
+//
+// Arguments:
+//
+// Interrupt (r3) - Supplies a pointer to the Interrupt Object.
+//
+// ServiceContext (r4) - Supplies a pointer to the Service Context associated
+// with this Interrupt Object.
+//
+// TrapFrame (r5) - Supplies the address of the Trap Frame created as a
+// result of this interrupt.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+kcdLR: .space 4
+#if !defined(NT_UP)
+kcdR24: .space 4
+#endif
+kcdR25: .space 4
+kcdR26: .space 4
+kcdR27: .space 4
+kcdR28: .space 4
+kcdR29: .space 4
+kcdR30: .space 4
+kcdR31: .space 4
+kcdIrql: .space 4
+kcdToc: .space 4
+ .align 3 // ensure 8 byte alignment
+kcdFrameLength:
+
+//
+// N.B. We are a bit footloose with the TOC pointer in this routine.
+// We only ensure the TOC is correct when we explicitly need it
+// to be correct. We do not restore it on exit. This is safe
+// because we know that our caller called us via a function
+// descriptor and will therefore restore its own TOC when we return.
+//
+
+ SPECIAL_ENTRY(KiChainedDispatch)
+
+ mflr r0 // get return address
+ stwu sp, -kcdFrameLength(sp) // allocate stack frame
+#if !defined(NT_UP)
+ stw r24, kcdR24(sp)
+#endif
+ stw r25, kcdR25(sp)
+ stw r26, kcdR26(sp)
+ stw r27, kcdR27(sp)
+ stw r28, kcdR28(sp)
+ stw r29, kcdR29(sp)
+ stw r30, kcdR30(sp)
+ stw r31, kcdR31(sp)
+ stw r0, kcdLR(sp) // save return address
+
+ PROLOGUE_END(KiChainedDispatch)
+
+ ori r25, r5, 0 // save trap frame address
+ stw rtoc, kcdToc(sp) // save our TOC
+
+//
+// Initialize loop variables.
+//
+
+ addi r31, r3, InInterruptListEntry // set address of listhead
+ ori r30, r31, 0 // set address of first entry
+ li r29, 0 // clear floating state saved flag
+ lbz r28, InMode(r3) // get mode of interrupt
+ lbz r27, InIrql(r3) // get interrupt source IRQL
+
+//
+// Walk the list of connected interrupt objects and call the respective
+// interrupt service routines.
+//
+
+kcd10:
+
+ lbz r8, InFloatingSave(r3) // get floating save flag
+ cmpwi r29, 0 // floating state already saved?
+ cmpwi cr7, r8, 0 // interrupt uses floating state?
+ bne kcd20 // if ne, floating state already saved
+ beq cr7, kcd20 // if eq, don't save floating state
+
+//
+// Save volatile floating registers in trap frame.
+//
+
+ li r29, 1 // set floating state saved flag
+ SAVE_VOLATILE_FLOAT_STATE(r25) // save volatile floating state
+
+kcd20:
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+ lbz r26, InSynchronizeIrql(r3) // get synchronization IRQL
+ cmpw r27, r26 // IRQL levels the same?
+ beq kcd25 // if eq, IRQL levels are the same
+
+ lwz r6, [toc]__imp_KeRaiseIrql(rtoc) // &&function descriptor
+ ori r3, r26, 0 // set synchronization IRQL
+ lwz r6, 0(r6) // &function descriptor
+ addi r4, sp, kcdIrql // compute address to save IRQL
+ lwz r5, 0(r6) // &KeRaiseIrql
+ lwz rtoc, 4(r6) // HAL's TOC
+ mtctr r5
+ bctrl
+ // N.B. skip restoring the TOC
+
+ subi r3, r30, InInterruptListEntry // recompute interrupt object address
+
+kcd25:
+
+ lwz r5, InServiceRoutine(r3) // get service routine descriptor
+
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if !defined(NT_UP)
+
+ lwz r24, InActualLock(r3) // get address of spin lock
+ ACQUIRE_SPIN_LOCK(r24, r24, r7, kcd_lock, kcd_lock_spin)
+#endif
+
+ lwz r6, 0(r5) // get address of service routine
+ lwz rtoc, 4(r5) // get service routine TOC
+ mtctr r6 // put routine address in CTR
+ lwz r4, InServiceContext(r3) // get service context
+ ori r5, r25, 0 // pass &TrapFrame
+ bctrl // call service routine
+
+//
+// Release the service routine spin lock. Lower IRQL to the interrupt source
+// level if synchronization level is not the same as the interrupt source level.
+//
+
+ lwz rtoc, kcdToc(sp) // restore our TOC
+#if !defined(NT_UP)
+ li r4, 0 // get a 0 for spin lock
+#endif
+ cmpw r27, r26 // IRQL levels the same?
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r24, r4)
+#endif
+ ori r26, r3, 0 // save service routine status
+ beq kcd35 // if eq, IRQL levels are the same
+
+ lwz r6, [toc]__imp_KeLowerIrql(rtoc) // &&function descriptor
+ ori r3, r27, 0 // set interrupt source IRQL
+ lwz r6, 0(r6) // &function descriptor
+ lwz r5, 0(r6) // &KeLowerIrql
+ lwz rtoc, 4(r6) // HAL's TOC
+ mtctr r5
+ bctrl
+ lwz rtoc, kcdToc(sp) // restore our TOC (for next loop)
+
+kcd35:
+
+//
+// Get next list entry and check for end of loop.
+//
+
+ lwz r30, LsFlink(r30) // get next interrupt object address
+ cmpwi r26, 0 // interrupt handled?
+ cmpwi cr7, r28, 0 // level sensitive interrupt?
+ cmpw cr6, r30, r31 // end of list?
+ beq kcd40 // if eq, interrupt not handled
+ beq cr7,kcd50 // if eq, level sensitive interrupt
+kcd40:
+ subi r3, r30, InInterruptListEntry // compute interrupt object address
+ bne cr6,kcd10 // if ne, not end of list
+kcd50:
+
+//
+// Either the interrupt is level sensitive and has been handled or the end of
+// the interrupt object chain has been reached. Check to determine if floating
+// machine state needs to be restored.
+//
+
+ cmpwi r29, 0 // floating state saved?
+ beq kcd60 // if eq, floating state not saved
+
+//
+// Restore volatile floating registers from trap frame.
+//
+
+ RESTORE_VOLATILE_FLOAT_STATE(r25) // restore volatile floating state
+
+kcd60:
+
+//
+// Restore nonvolatile registers, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ lwz r0, kcdLR(sp) // get return address
+ lwz r31, kcdR31(sp) // restore r31
+ lwz r30, kcdR30(sp) // restore r30
+ lwz r29, kcdR29(sp) // restore r29
+ lwz r28, kcdR28(sp) // restore r28
+ lwz r27, kcdR27(sp) // restore r27
+ lwz r26, kcdR26(sp) // restore r26
+ lwz r25, kcdR25(sp) // restore r25
+#if !defined(NT_UP)
+ lwz r24, kcdR24(sp) // restore r24
+#endif
+ mtlr r0 // set return address
+ addi sp, sp, kcdFrameLength // return stack frame
+
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r24, r7, kcd_lock, kcd_lock_spin)
+#endif
+
+ DUMMY_EXIT(KiChainedDispatch)
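
The loop above is, in effect, the following walk of the interrupt chain (illustrative C only; the per-entry IRQL raise/lower, the one-time volatile floating state save, and the trap frame argument to the service routine are elided):

    PLIST_ENTRY ListHead, Entry;
    KINTERRUPT_MODE Mode;
    BOOLEAN Handled;

    ListHead = &Interrupt->InterruptListEntry;
    Entry = ListHead;
    Mode = Interrupt->Mode;             // the same for every object in a chain
    do {
        Interrupt = CONTAINING_RECORD(Entry, KINTERRUPT, InterruptListEntry);
        KiAcquireSpinLock(Interrupt->ActualLock);
        Handled = Interrupt->ServiceRoutine(Interrupt, Interrupt->ServiceContext);
        KiReleaseSpinLock(Interrupt->ActualLock);
        Entry = Entry->Flink;
    } while ((Handled == FALSE || Mode == Latched) && (Entry != ListHead));
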
+
+ SBTTL("Floating Dispatch")
+//++
+//
+// VOID
+// KiFloatingDispatch (
+// IN PKINTERRUPT Interrupt,
+// IN PVOID ServiceContext,
+// IN PVOID TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to save the volatile floating machine state and then call the specified
+// interrupt service routine.
+//
+// N.B. On entry to this routine only the volatile integer registers have
+// been saved.
+//
+// Arguments:
+//
+// Interrupt (r3) - Supplies a pointer to the Interrupt Object.
+//
+// ServiceContext (r4) - Supplies a pointer to the Service Context associated
+// with this Interrupt Object.
+//
+// TrapFrame (r5) - Supplies the address of the Trap Frame created as a
+// result of this interrupt.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+kfdLR: .space 4
+#if !defined(NT_UP)
+kfdR29: .space 4
+#endif
+kfdR30: .space 4
+kfdR31: .space 4
+kfdIrql: .space 4
+kfdToc: .space 4
+ .align 3 // ensure 8 byte alignment
+kfdFrameLength:
+
+//
+// N.B. We are a bit footloose with the TOC pointer in this routine.
+// We only ensure the TOC is correct when we explicitly need it
+// to be correct. We do not restore it on exit. This is safe
+// because we know that our caller called us via a function
+// descriptor and will therefore restore its own TOC when we return.
+//
+
+ SPECIAL_ENTRY(KiFloatingDispatch)
+
+ mflr r0 // get return address
+ stwu sp, -kfdFrameLength(sp) // allocate stack frame
+#if !defined(NT_UP)
+ stw r29, kfdR29(sp)
+#endif
+ stw r30, kfdR30(sp)
+ stw r31, kfdR31(sp)
+ stw r0, kfdLR(sp) // save return address
+
+ PROLOGUE_END(KiFloatingDispatch)
+
+ ori r30, r5, 0 // save trap frame address
+ stw rtoc, kfdToc(sp) // save our TOC
+
+//
+// Save volatile floating registers in trap frame.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE(r5) // save volatile floating state
+
+//
+// Raise IRQL to synchronization level if synchronization level is not
+// equal to the interrupt source level.
+//
+
+ ori r31, r3, 0 // save address of interrupt object
+ lbz r4, InIrql(r3) // get interrupt source IRQL
+ lbz r8, InSynchronizeIrql(r3) // get synchronization IRQL
+ cmpw r8, r4 // IRQL levels the same?
+ beq kfd10 // if eq, IRQL levels are the same
+
+ lwz r6, [toc]__imp_KeRaiseIrql(rtoc) // &&function descriptor
+ ori r3, r8, 0 // set synchronization IRQL
+ lwz r6, 0(r6) // &function descriptor
+ addi r4, sp, kfdIrql // compute address to save IRQL
+ lwz r8, 0(r6) // &KeLowerIrql
+ lwz rtoc, 4(r6) // HAL's TOC
+ mtctr r8
+ bctrl
+ // N.B. skip restoring the TOC
+
+ ori r3, r31, 0 // restore address of interrupt object
+
+kfd10:
+
+ lwz r8, InServiceRoutine(r31) // get service routine descriptor
+
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+#if !defined(NT_UP)
+ lwz r29, InActualLock(r31) // get address of spin lock
+ ACQUIRE_SPIN_LOCK(r29, r29, r7, kfd_lock, kfd_lock_spin)
+#endif
+
+ lwz r6, 0(r8) // get address of service routine
+ lwz rtoc, 4(r8) // get service routine TOC
+ mtctr r6 // put routine address in CTR
+ lwz r4, InServiceContext(r31) // get service context
+ bctrl // call service routine
+
+//
+// Release the service routine spin lock. Lower IRQL to the interrupt source
+// level if synchronization level is not the same as the interrupt source level.
+//
+
+ lwz rtoc, kfdToc(sp) // restore our TOC
+#if !defined(NT_UP)
+ li r6, 0 // get a 0 for spin lock
+#endif
+ lbz r3, InIrql(r31) // get interrupt source IRQL
+ lbz r4, InSynchronizeIrql(r31) // get synchronization IRQL
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r29, r6)
+#endif
+ cmpw r3, r4 // IRQL levels the same?
+ beq kfd30 // if eq, IRQL levels are the same
+
+ lwz r6, [toc]__imp_KeLowerIrql(rtoc) // &&function descriptor
+ lwz r6, 0(r6) // &function descriptor
+ lwz r5, 0(r6) // &KeLowerIrql
+ lwz rtoc, 4(r6) // HAL's TOC
+ mtctr r5
+ bctrl
+
+kfd30:
+
+//
+// Restore volatile floating registers from trap frame.
+//
+
+ RESTORE_VOLATILE_FLOAT_STATE(r30) // restore volatile floating state
+
+//
+// Restore nonvolatile registers, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ lwz r0, kfdLR(sp) // get return address
+#if !defined(NT_UP)
+ lwz r29, kfdR29(sp) // restore r29
+#endif
+ lwz r30, kfdR30(sp) // restore r30
+ lwz r31, kfdR31(sp) // restore r31
+ mtlr r0 // set return address
+ addi sp, sp, kfdFrameLength // return stack frame
+
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r29, r7, kfd_lock, kfd_lock_spin)
+#endif
+
+ DUMMY_EXIT(KiFloatingDispatch)
+
+ SBTTL("Interrupt Dispatch - Raise IRQL")
+//++
+//
+// VOID
+// KiInterruptDispatchRaise (
+// IN PKINTERRUPT Interrupt,
+// IN PVOID ServiceContext,
+// IN PVOID TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// N.B. This routine raises the interrupt level to the synchronization
+// level specified in the interrupt object.
+//
+// Arguments:
+//
+// Interrupt (r3) - Supplies a pointer to the Interrupt Object.
+//
+// ServiceContext (r4) - Supplies a pointer to the Service Context associated
+// with this Interrupt Object.
+//
+// TrapFrame (r5) - Supplies the address of the Trap Frame created as a
+// result of this interrupt.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+kidrLR: .space 4
+kidrR31: .space 4
+kidrIrql: .space 4
+kidrToc: .space 4
+ .align 3 // ensure 8 byte alignment
+kidrFrameLength:
+
+//
+// N.B. We are a bit footloose with the TOC pointer in this routine.
+// We only ensure the TOC is correct when we explicitly need it
+// to be correct. We do not restore it on exit. This is safe
+// because we know that our caller called us via a function
+// descriptor and will therefore restore its own TOC when we return.
+//
+
+ SPECIAL_ENTRY(KiInterruptDispatchRaise)
+
+ mflr r0 // get return address
+ stwu sp, -kidrFrameLength(sp) // allocate stack frame
+ stw r31, kidrR31(sp)
+ lwz r6, [toc]__imp_KeRaiseIrql(rtoc) // &&function descriptor
+ stw r0, kidrLR(sp) // save return address
+
+ PROLOGUE_END(KiInterruptDispatchRaise)
+
+ lwz r6, 0(r6) // &function descriptor
+ stw rtoc, kidrToc(sp) // save our TOC
+
+//
+// Raise IRQL to synchronization level.
+//
+
+ lwz r8, 0(r6) // &KeRaiseIrql
+ ori r31, r3, 0 // save address of interrupt object
+ lwz rtoc, 4(r6) // HAL's TOC
+ lbz r3, InSynchronizeIrql(r3) // get synchronization IRQL
+ mtctr r8
+ addi r4, sp, kidrIrql // compute address to save IRQL
+ bctrl
+ // N.B. skip restoring the TOC
+
+ lwz r8, InServiceRoutine(r31) // get service routine descriptor
+ ori r3, r31, 0 // restore address of interrupt object
+ lwz r4, InServiceContext(r31) // get service context
+
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+ lwz r6, 0(r8) // get address of service routine
+
+#if !defined(NT_UP)
+ lwz r31, InActualLock(r31) // get address of spin lock
+ ACQUIRE_SPIN_LOCK(r31, r31, r7, kidr_lock, kidr_lock_spin)
+#endif
+
+ mtctr r6 // put routine address in CTR
+ lwz rtoc, 4(r8) // get service routine TOC
+ bctrl // call service routine
+
+//
+// Release the service routine spin lock. Lower IRQL to the previous level.
+//
+
+ lwz rtoc, kidrToc(sp) // restore our TOC
+#if !defined(NT_UP)
+ li r4, 0 // get a 0 for spin lock
+#endif
+ lwz r6, [toc]__imp_KeLowerIrql(rtoc) // &&function descriptor
+#if !defined(NT_UP)
+ RELEASE_SPIN_LOCK(r31, r4)
+#endif
+ lwz r6, 0(r6) // &function descriptor
+ lwz r8, 0(r6) // &KeLowerIrql
+ lwz rtoc, 4(r6) // HAL's TOC
+ mtctr r8
+ lbz r3,kidrIrql(sp) // get previous IRQL
+ bctrl
+ // N.B. skip restoring the TOC
+
+//
+// Restore nonvolatile registers, retrieve return address, deallocate
+// stack frame, and return.
+//
+
+ lwz r0, kidrLR(sp) // get return address
+ lwz r31, kidrR31(sp) // restore r31
+ mtlr r0 // set return address
+ addi sp, sp, kidrFrameLength // return stack frame
+
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r31, r7, kidr_lock, kidr_lock_spin)
+#endif
+
+ DUMMY_EXIT(KiInterruptDispatchRaise)
+
+ SBTTL("Interrupt Dispatch - Same IRQL")
+//++
+//
+// VOID
+// KiInterruptDispatchSame (
+// IN PKINTERRUPT Interrupt,
+// IN PVOID ServiceContext,
+// IN PVOID TrapFrame
+// )
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is connected to an interrupt object. Its function is
+// to directly call the specified interrupt service routine.
+//
+// Note: On PowerPC, if uniprocessor, this routine is bypassed completely.
+// The Interrupt Object is initialized such that KiInterruptException will
+// dispatch directly to the service routine.
+//
+// Arguments:
+//
+// Interrupt (r3) - Supplies a pointer to the Interrupt Object.
+//
+// ServiceContext (r4) - Supplies a pointer to the Service Context associated
+// with this Interrupt Object.
+//
+// TrapFrame (r5) - Supplies the address of the Trap Frame created as a
+// result of this interrupt.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ .space StackFrameHeaderLength
+kidsLR: .space 4
+kidsR31: .space 4
+ .align 3 // ensure 8 byte alignment
+kidsFrameLength:
+
+//
+// N.B. We do not save/restore the TOC pointer in this routine.
+// This is safe because our caller called us via a function
+// descriptor and will therefore restore the TOC when we return.
+//
+
+#if defined(NT_UP)
+
+ LEAF_ENTRY(KiInterruptDispatchSame)
+
+ lwz r8, InServiceRoutine(r3) // get service routine descriptor
+ lwz r6, 0(r8) // get address of service routine
+ lwz rtoc, 4(r8) // get service routine TOC
+ mtctr r6 // put routine address in CTR
+ lwz r4, InServiceContext(r3) // get service context
+ bctr // jump to service routine
+
+ DUMMY_EXIT(KiInterruptDispatchSame)
+
+#else
+
+ SPECIAL_ENTRY(KiInterruptDispatchSame)
+
+ mflr r0 // get return address
+ stwu sp, -kidsFrameLength(sp) // allocate stack frame
+ stw r31, kidsR31(sp)
+ stw r0, kidsLR(sp) // save return address
+
+ PROLOGUE_END(KiInterruptDispatchSame)
+
+ lwz r8, InServiceRoutine(r3) // get service routine descriptor
+
+//
+// Acquire the service routine spin lock and call the service routine.
+//
+
+ lwz r31, InActualLock(r3) // get address of spin lock
+ lwz r6, 0(r8) // get address of service routine
+ ACQUIRE_SPIN_LOCK(r31, r31, r7, kids_lock, kids_lock_spin)
+
+ lwz rtoc, 4(r8) // get service routine TOC
+ mtctr r6 // put routine address in CTR
+ lwz r4, InServiceContext(r3) // get service context
+ bctrl // call service routine
+
+//
+// Release the service routine spin lock. Restore nonvolatile registers,
+// retrieve return address, deallocate stack frame, and return.
+//
+
+ li r4, 0 // get a 0 for spin lock
+ lwz r0, kidsLR(sp) // get return address
+ RELEASE_SPIN_LOCK(r31, r4)
+ lwz r31, kidsR31(sp) // restore r31
+ mtlr r0 // set return address
+ addi sp, sp, kidsFrameLength // return stack frame
+
+ blr
+
+ SPIN_ON_SPIN_LOCK(r31, r7, kids_lock, kids_lock_spin)
+
+ DUMMY_EXIT(KiInterruptDispatchSame)
+
+#endif // else defined(NT_UP)
+
+ SBTTL("Unexpected Interrupt")
+//++
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interrupt being generated
+// via a vector that is not connected to an interrupt object. Its function
+// is to report the error and dismiss the interrupt.
+//
+// Arguments:
+//
+// Interrupt (r3) - Supplies a pointer to the Interrupt Object.
+//
+// ServiceContext (r4) - Supplies a pointer to the Service Context associated
+// with this Interrupt Object.
+//
+// TrapFrame (r5) - Supplies the address of the Trap Frame created as a
+// result of this interrupt.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiUnexpectedInterrupt)
+
+ LEAF_EXIT(KiUnexpectedInterrupt) // ****** temp ******
diff --git a/private/ntos/ke/ppc/ipi.c b/private/ntos/ke/ppc/ipi.c
new file mode 100644
index 000000000..8c9cabc6a
--- /dev/null
+++ b/private/ntos/ke/ppc/ipi.c
@@ -0,0 +1,205 @@
+/*++
+
+Copyright (c) 1993 Microsoft Corporation
+
+Module Name:
+
+    ipi.c
+
+Abstract:
+
+    This module implements the PowerPC specific MP routines.
+
+Author:
+
+ Pat Carr 15-Aug-1994
+
+Based on MIPS version authored by:
+
+ David N. Cutler 24-Apr-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward reference function prototypes.
+//
+
+VOID
+KiRestoreProcessorControlState (
+ IN PKPROCESSOR_STATE ProcessorState
+ );
+
+VOID
+KiSaveProcessorControlState (
+ IN PKPROCESSOR_STATE ProcessorState
+ );
+
+VOID
+KiRestoreProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the current
+ processor context structure in the processor block to the
+ specified trap and exception frames.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from the processor context structure
+ // to the specified trap and exception frames
+ //
+ Prcb = KeGetCurrentPrcb();
+
+#if !defined(NT_UP)
+
+ KeContextToKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame,
+ CONTEXT_FULL,
+ KernelMode);
+
+#endif
+
+ //
+ // Restore the current processor control state.
+ // Currently, the primary use is to allow the kernel
+ // debugger to set hardware debug registers. Still
+ // investigating whether this is required for MP systems.
+ //
+
+ KiRestoreProcessorControlState(&Prcb->ProcessorState);
+ return;
+}
+
+VOID
+KiSaveProcessorState (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves processor register state from the specified trap
+ and exception frames to the processor context structure in the current
+ processor block.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Get the address of the current processor block and move the
+ // specified register state from specified trap and exception
+ // frames to the current processor context structure.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ Prcb->ProcessorState.ContextFrame.ContextFlags = CONTEXT_FULL |
+ CONTEXT_DEBUG_REGISTERS;
+ KeContextFromKframes(TrapFrame,
+ ExceptionFrame,
+ &Prcb->ProcessorState.ContextFrame);
+
+ //
+ // Save the current processor control state.
+ //
+ Prcb->ProcessorState.SpecialRegisters.KernelDr6 =
+ Prcb->ProcessorState.ContextFrame.Dr6;
+ KiSaveProcessorControlState(&Prcb->ProcessorState);
+ return;
+}
+
+BOOLEAN
+KiIpiServiceRoutine (
+ IN PKTRAP_FRAME TrapFrame,
+ IN PKEXCEPTION_FRAME ExceptionFrame
+ )
+
+/*++
+
+Routine Description:
+
+
+ This function is called at IPI_LEVEL to process any outstanding
+    interprocessor requests for the current processor.
+
+Arguments:
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ ExceptionFrame - Supplies a pointer to an exception frame
+
+Return Value:
+
+    A value of TRUE is returned if one or more requests were serviced.
+ Otherwise, FALSE is returned.
+
+--*/
+
+{
+
+ ULONG RequestSummary;
+
+ //
+ // Process any outstanding interprocessor requests.
+ //
+
+ RequestSummary = KiIpiProcessRequests();
+
+ //
+ // If freeze is requested, then freeze target execution.
+ //
+
+ if ((RequestSummary & IPI_FREEZE) != 0) {
+ KiFreezeTargetExecution(TrapFrame, ExceptionFrame);
+ }
+
+ //
+ // Return whether any requests were processed.
+ //
+
+ return (RequestSummary & ~IPI_FREEZE) != 0;
+}
diff --git a/private/ntos/ke/ppc/irql.s b/private/ntos/ke/ppc/irql.s
new file mode 100644
index 000000000..e69b443e9
--- /dev/null
+++ b/private/ntos/ke/ppc/irql.s
@@ -0,0 +1,98 @@
+// TITLE("Manipulate Interrupt Request Level")
+//++
+//
+// Copyright (c) 1995 Microsoft Corporation
+//
+// Module Name:
+//
+// irql.s
+//
+// Abstract:
+//
+// This module implements the code necessary to lower and raise the current
+// Interrupt Request Level (IRQL). On PowerPC, hardware IRQL levels are
+// managed in the HAL. This module implements routines that deal with
+// software-only IRQLs.
+//
+// Author:
+//
+// Chuck Lenzmeier (chuckl) 12-Oct-1995
+// based on code by David N. Cutler (davec)
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+ SBTTL("Raise Interrupt Request Level to DPC Level")
+//++
+//
+// VOID
+// KeRaiseIrqlToDpcLevel (
+// PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH level and returns
+// the old IRQL value.
+//
+// Arguments:
+//
+//    OldIrql (r3) - Supplies a pointer to a variable that receives the old
+// IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KeRaiseIrqlToDpcLevel)
+
+ li r4,DISPATCH_LEVEL // set new IRQL value
+ RAISE_SOFTWARE_IRQL(r4,r3,r5)
+
+ LEAF_EXIT(KeRaiseIrqlToDpcLevel)
+
+ SBTTL("Raise Interrupt Request Level to DPC Level")
+//++
+//
+// KIRQL
+// KfRaiseIrqlToDpcLevel (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH level and returns
+// the old IRQL value.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The previous IRQL is returned as the function value.
+//
+//--
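+
+//
+// In C terms this is roughly (illustrative sketch only; PcCurrentIrql is
+// the PCR field used below):
+//
+//      OldIrql = PCR->CurrentIrql;
+//      PCR->CurrentIrql = DISPATCH_LEVEL;
+//      return OldIrql;
+//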
+
+ LEAF_ENTRY(KfRaiseIrqlToDpcLevel)
+
+//
+// On PPC, synchronization level is the same as dispatch level.
+//
+
+ ALTERNATE_ENTRY(KeRaiseIrqlToSynchLevel)
+
+ li r4,DISPATCH_LEVEL // set new IRQL value
+ lbz r3,KiPcr+PcCurrentIrql(0) // get current IRQL
+ stb r4,KiPcr+PcCurrentIrql(0) // set new IRQL
+
+ LEAF_EXIT(KfRaiseIrqlToDpcLevel)
diff --git a/private/ntos/ke/ppc/miscasm.s b/private/ntos/ke/ppc/miscasm.s
new file mode 100644
index 000000000..508e408b6
--- /dev/null
+++ b/private/ntos/ke/ppc/miscasm.s
@@ -0,0 +1,656 @@
+//++
+//
+// Copyright (c) 1993 IBM Corporation
+//
+// Module Name:
+//
+// miscasm.s
+//
+// Abstract:
+//
+// This module implements machine dependent miscellaneous kernel functions.
+//
+// Author:
+//
+// Rick Simpson July 26, 1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+// plj September 15, 1993 Added routines KiDisableInterrupts and
+// KiRestoreInterrupts.
+// Mark Mergen 09/93-10/93 Ke/KiFlush/FillTb KiSwapProcess subroutines.
+// Pat Carr 11/93 Mods for 603: Ke/KiFlush/FillTb routines.
+// Ying Chan 02/94 Mods for 604: Ke/KiFlush/FillTb routines.
+// plj 09/94 MP support + use PIDs for VSIDs
+// plj 02/95 KiSwapProcess moved to ctxswap.s
+// patcarr 02/95 Added support for 603+, 604+
+//
+//--
+
+//list(off)
+#include "ksppc.h"
+//list(on)
+
+// Globals referenced within this file:
+
+ .globl ..KiContinue
+ .globl ..KeTestAlertThread
+ .globl ..KiExceptionExit
+ .globl ..KiRaiseException
+
+
+//++
+//
+// KPCR *
+// KiGetPcr ()
+//
+// Routine Description:
+//
+// This function returns the effective address of the Processor
+// Control Region (KPCR *). This address is constant, and
+// this routine merely copies that constant into GPR 3 and returns.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Effective address of this processor's PCR.
+//
+//--
+
+ LEAF_ENTRY (KiGetPcr)
+
+ KIPCR(r.3)
+
+ LEAF_EXIT (KiGetPcr)
+
+//++
+//
+// void
+// KiSetDbat
+//
+// Routine Description:
+//
+// Writes a set of values to DBAT n
+//
+// No validation of parameters is done. Protection is set for kernel
+// mode access only.
+//
+// Arguments:
+//
+// r.3 Number of DBAT
+// r.4 Physical address
+// r.5 Virtual Address
+// r.6 Length (in bytes)
+// r.7 Coherence Requirements (WIM)
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY (KiSetDbat)
+
+ mfpvr r.9 // different format for
+ // 601 vs other 6xx processors
+ cmpwi cr.5, r.3, 1
+ cmpwi cr.6, r.3, 2
+ cmpwi cr.7, r.3, 3
+
+ rlwinm. r.10, r.9, 0, 0xfffe0000// Check for 601
+
+        // Calculate the mask (i.e., the BSM).  If we knew the number passed
+        // in was always a power of two we could just subtract 1 and shift
+        // right 17 bits.  But to be sure we will use a slightly more complex
+        // algorithm that will always generate a correct mask.
+ //
+ // the mask is given by
+ //
+ // ( 1 << ( 32 - 17 - cntlzw(Length - 1) ) ) - 1
+ // == ( 1 << ( 15 - cntlzw(Length - 1) ) ) - 1
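+        //
+        // For example (an illustrative check, not part of the original notes):
+        // Length = 8MB = 0x00800000 gives Length - 1 = 0x007fffff,
+        // cntlzw(0x007fffff) = 9, and (1 << (15 - 9)) - 1 = 0x3f, which is
+        // the BSM/BL value for an 8MB block.
+        //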
+
+ addi r.6, r.6, -1
+ oris r.6, r.6, 1 // ensure min length 128KB
+ ori r.6, r.6, 0xffff
+ cntlzw r.6, r.6
+ subfic r.6, r.6, 15
+ li r.10, 1
+ slw r.6, r.10, r.6
+ addi r.6, r.6, -1
+
+ beq cr.0, KiSetDbat601
+
+ // processor is not a 601.
+
+ rlwinm r.7, r.7, 3, 0x38 // position WIM (G = 0)
+ rlwinm r.6, r.6, 2, 0x1ffc // restrict BAT maximum (non 601)
+ // after left shifting by 2.
+
+ // if caching is Inhibited, set the Guard bit as well.
+
+ rlwimi r.7, r.7, 30, 0x8 // copy G bit from I bit.
+ ori r.6, r.6, 0x2 // Valid (bit) in supervisor state only
+ ori r.7, r.7, 2 // PP = 0x2
+ or r.5, r.5, r.6 // = Virt addr | BL | Vs | Vp
+ or r.4, r.4, r.7 // = Phys addr | WIMG | 0 | PP
+
+ beq cr.5, KiSetDbat1
+ beq cr.6, KiSetDbat2
+ beq cr.7, KiSetDbat3
+
+KiSetDbat0:
+ mtdbatl 0, r.4
+ mtdbatu 0, r.5
+ b KiSetDbatExit
+
+KiSetDbat1:
+ mtdbatl 1, r.4
+ mtdbatu 1, r.5
+ b KiSetDbatExit
+
+KiSetDbat2:
+ mtdbatl 2, r.4
+ mtdbatu 2, r.5
+ b KiSetDbatExit
+
+KiSetDbat3:
+ mtdbatl 3, r.4
+ mtdbatu 3, r.5
+ b KiSetDbatExit
+
+ // 601 has different format BAT registers and actually only has
+        // one set, unlike other PowerPC processors which have separate
+ // Instruction and Data BATs. The 601 BAT registers are set
+ // with the mtibat[u|l] instructions.
+
+KiSetDbat601:
+
+ rlwinm r.7, r.7, 3, 0x70 // position WIMG (601 has no G bit)
+ rlwinm r.6, r.6, 0, 0x3f // restrict BAT maximum (601 = 8MB)
+ ori r.6, r.6, 0x40 // Valid bit
+ ori r.7, r.7, 4 // Ks = 0 | Ku = 1 | PP = 0b00
+ or r.4, r.4, r.6 // = Phys addr | Valid | BL
+ or r.5, r.5, r.7 // = Virt addr | WIM | Ks | Ku | PP
+
+ beq cr.5, KiSet601Bat1
+ beq cr.6, KiSet601Bat2
+ beq cr.7, KiSet601Bat3
+
+KiSet601Bat0:
+ mtibatl 0, r.4
+ mtibatu 0, r.5
+ b KiSetDbatExit
+
+KiSet601Bat1:
+ mtibatl 1, r.4
+ mtibatu 1, r.5
+ b KiSetDbatExit
+
+KiSet601Bat2:
+ mtibatl 2, r.4
+ mtibatu 2, r.5
+ b KiSetDbatExit
+
+KiSet601Bat3:
+ mtibatl 3, r.4
+ mtibatu 3, r.5
+
+KiSetDbatExit:
+ isync
+ LEAF_EXIT(KiSetDbat)
+
+//++
+//
+// void
+// KiSetDbatInvalid(BAT)
+//
+// Routine Description:
+//
+// Clears the valid bit(s) in DBAT n
+//
+// Arguments:
+//
+// r.3 Number of DBAT
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiSetDbatInvalid)
+
+ mfpvr r.9 // different format for
+ // 601 vs other 6xx processors
+ cmpwi cr.5, r.3, 1
+ cmpwi cr.6, r.3, 2
+ cmpwi cr.7, r.3, 3
+
+ rlwinm. r.10, r.9, 0, 0xfffe0000// Check for 601
+
+ li r.0, 0 // no valid bit
+
+ beq cr.0, KiInvalidateBat601
+
+ // processor is not a 601.
+
+ beq cr.5, KiInvalidateDbat1
+ beq cr.6, KiInvalidateDbat2
+ beq cr.7, KiInvalidateDbat3
+
+KiInvalidateDbat0:
+ mtdbatu 0, r.0
+ b KiSetDbatInvalidExit
+
+KiInvalidateDbat1:
+ mtdbatu 1, r.0
+ b KiSetDbatInvalidExit
+
+KiInvalidateDbat2:
+ mtdbatu 2, r.0
+ b KiSetDbatInvalidExit
+
+KiInvalidateDbat3:
+ mtdbatu 3, r.0
+ b KiSetDbatInvalidExit
+
+ // 601 has different format BAT registers and actually only has
+        // one set, unlike other PowerPC processors which have separate
+ // Instruction and Data BATs. The 601 BAT registers are set
+ // with the mtibat[u|l] instructions.
+
+KiInvalidateBat601:
+
+ beq cr.5, KiInvalidate601Bat1
+ beq cr.6, KiInvalidate601Bat2
+ beq cr.7, KiInvalidate601Bat3
+
+KiInvalidate601Bat0:
+ mtibatl 0, r.0
+ b KiSetDbatInvalidExit
+
+KiInvalidate601Bat1:
+ mtibatl 1, r.0
+ b KiSetDbatInvalidExit
+
+KiInvalidate601Bat2:
+ mtibatl 2, r.0
+ b KiSetDbatInvalidExit
+
+KiInvalidate601Bat3:
+ mtibatl 3, r.0
+
+KiSetDbatInvalidExit:
+ isync
+ LEAF_EXIT(KiSetDbatInvalid)
+
+//++
+//
+// ULONG
+// KiGetPvr ()
+//
+// Routine Description:
+//
+// Returns the contents of PVR, the Processor Version Register.
+// This is a read-only register, so there is no corresponding
+// function to write the PVR.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Contents of PVR
+//
+//--
+
+ LEAF_ENTRY (KiGetPvr)
+
+ mfpvr r.3 // read PVR
+
+ LEAF_EXIT (KiGetPvr)
+
+//++
+//
+// BOOLEAN
+// KiDisableInterrupts (VOID)
+//
+// Routine Description:
+//
+// This function disables interrupts and returns whether interrupts
+// were previously enabled.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// A boolean value that determines whether interrupts were previously
+// enabled (TRUE) or disabled (FALSE).
+//
+//--
+
+ LEAF_ENTRY(KiDisableInterrupts)
+
+ DISABLE_INTERRUPTS(r.3, r.4) // turn off interrupts, old MSR
+ // in r.3
+ extrwi r.3, r.3, 1, MSR_EE // isolate enable bit
+ LEAF_EXIT(KiDisableInterrupts)
+
+
+//++
+//
+// VOID
+// KiRestoreInterrupts (IN BOOLEAN Enable)
+//
+// Routine Description:
+//
+// This function restores the interrupt enable that was returned by
+// the disable interrupts function.
+//
+// Arguments:
+//
+// Enable (r.3) - Supplies the interrupt enable value.
+//
+// Return Value:
+//
+// None.
+//
+//--
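+
+//
+// Typical usage of this pair of routines (illustrative only):
+//
+//      Enable = KiDisableInterrupts();
+//      ... code that must run with interrupts disabled ...
+//      KiRestoreInterrupts(Enable);
+//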
+
+ LEAF_ENTRY(KiRestoreInterrupts)
+
+ mfmsr r.4 // get current processor state
+ insrwi r.4, r.3, 1, MSR_EE // insert external interrupt
+ // enable/disable
+ mtmsr r.4 // set new state
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+
+ LEAF_EXIT(KiRestoreInterrupts)
+
+//++
+//
+// NTSTATUS
+// NtContinue (
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN TestAlert
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to continue execution after
+// an exception has occurred. Its function is to transfer information from
+// the specified context record into the trap frame that was built when the
+// system service was executed, and then exit the system as if an exception
+// had occurred.
+//
+// Arguments:
+//
+// ContextRecord (r.3) - Supplies a pointer to a context record.
+//
+// TestAlert (r.4) - Supplies a boolean value that specifies whether alert
+// should be tested for the previous processor mode.
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record is misaligned or is not accessible, then the appropriate
+// status code is returned.
+//
+//--
+
+ .struct 0
+con_cr_hdr: .space StackFrameHeaderLength
+con_ex_frame: .space ExceptionFrameLength
+con_tr_frame: .space 4
+con_test_alert: .space 4
+con_saved_lr: .space 4
+ .align 3
+con_cr_length:
+
+ NESTED_ENTRY (NtContinue,con_cr_length,0,0)
+ PROLOGUE_END (NtContinue)
+
+ stw r.4, con_test_alert (r.sp) // save test alert argument
+ stw r.12, con_tr_frame (r.sp) // save the trap frame address
+
+//
+// Transfer information from the context frame to the exception and trap
+// frames.
+//
+ // r.3 points to Context (1st parm)
+ la r.4, con_ex_frame (r.sp) // r.4 = addr of Exception Frame (2nd parm)
+ ori r.5, r.12, 0 // Pass the real trap frame
+ bl ..KiContinue // transfer context to kernel frames
+
+//
+// If the kernel continuation routine returns success, then exit via the
+// exception exit code. Otherwise return to the system service dispatcher.
+//
+
+ cmpwi r.3, 0 // test return value
+ bne con_20 // branch if non-zero (failed)
+
+//
+// Check to determine if alert should be tested for the previous processor
+// mode and restore the previous mode in the thread object.
+//
+
+ lwz r.4, KiPcr+PcCurrentThread(r.0) // get current thread address
+ lwz r.5, con_test_alert (r.sp) // get test alert argument
+ lwz r.12, con_tr_frame (r.sp) // get trap frame address
+ cmpwi r.5, 0 // test test alert flag
+ lwz r.6, TrTrapFrame (r.12) // get old trap frame address
+ lbz r.7, TrPreviousMode (r.12) // get old previous mode
+ lbz r.3, ThPreviousMode (r.4) // get current previous mode
+ stw r.6, ThTrapFrame (r.4) // restore old trap frame address
+ stb r.7, ThPreviousMode (r.4) // restore old previous mode
+ beq con_10 // if flag zero, don't test for alert
+ bl ..KeTestAlertThread // test alert for current thread
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+con_10:
+ la r.3, con_ex_frame (r.sp) // parm 1 = Exception Frame addr
+ lwz r.4, con_tr_frame (r.sp) // set the original trap frame addr
+ b ..KiExceptionExit // finish in exception exit
+
+//
+// Context record is misaligned or not accessible.
+//
+
+con_20:
+ NESTED_EXIT (NtContinue,con_cr_length,0,0)
+
+//++
+//
+// NTSTATUS
+// NtRaiseException (
+// IN PEXCEPTION_RECORD ExceptionRecord,
+// IN PCONTEXT ContextRecord,
+// IN BOOLEAN FirstChance
+// )
+//
+// Routine Description:
+//
+// This routine is called as a system service to raise an exception.
+// The exception can be raised as a first or second chance exception.
+//
+// Arguments:
+//
+// ExceptionRecord (r.3) - Supplies a pointer to an exception record.
+//
+// ContextRecord (r.4) - Supplies a pointer to a context record.
+//
+// FirstChance (r.5) - Supplies a boolean value that determines whether
+// this is the first (TRUE) or second (FALSE) chance for dispatching
+// the exception.
+//
+// N.B. Register r.12 is assumed to contain the address of a trap frame.
+// (HACK!) See above description of NtContinue().
+//
+// Return Value:
+//
+// Normally there is no return from this routine. However, if the specified
+// context record or exception record is misaligned or is not accessible,
+// then the appropriate status code is returned.
+//
+//--
+
+ .struct 0
+rai_cr_hdr: .space StackFrameHeaderLength
+rai_ex_frame: .space ExceptionFrameLength
+rai_tr_frame: .space 4
+rai_saved_lr: .space 4
+ .align 3
+rai_cr_length:
+
+ NESTED_ENTRY (NtRaiseException,rai_cr_length,0,0)
+ PROLOGUE_END (NtRaiseException)
+
+ stw r.12, rai_tr_frame (r.sp) // save incoming Trap Frame pointer
+
+ ori r.7, r.5, 0 // move "first chance" arg to 5th position
+
+//
+// Save the nonvolatile machine state so that it can be restored by exception
+// exit if it is not overwritten by the specified context record.
+//
+
+ la r.5, rai_ex_frame (r.sp) // point r.5 to the Exception Frame
+
+ stw r.13, ExGpr13 (r.5) // save non-volatile GPRs
+ stw r.14, ExGpr14 (r.5)
+ stw r.15, ExGpr15 (r.5)
+ stw r.16, ExGpr16 (r.5)
+ stw r.17, ExGpr17 (r.5)
+ stw r.18, ExGpr18 (r.5)
+ stw r.19, ExGpr19 (r.5)
+ stw r.20, ExGpr20 (r.5)
+ stw r.21, ExGpr21 (r.5)
+ stw r.22, ExGpr22 (r.5)
+ stw r.23, ExGpr23 (r.5)
+ stw r.24, ExGpr24 (r.5)
+ stw r.25, ExGpr25 (r.5)
+ stw r.26, ExGpr26 (r.5)
+ stw r.27, ExGpr27 (r.5)
+ stw r.28, ExGpr28 (r.5)
+ stw r.29, ExGpr29 (r.5)
+ stw r.30, ExGpr30 (r.5)
+ stw r.31, ExGpr31 (r.5)
+
+ stfd f.14, ExFpr14 (r.5) // save non-volatile FPRs
+ stfd f.15, ExFpr15 (r.5)
+ stfd f.16, ExFpr16 (r.5)
+ stfd f.17, ExFpr17 (r.5)
+ stfd f.18, ExFpr18 (r.5)
+ stfd f.19, ExFpr19 (r.5)
+ stfd f.20, ExFpr20 (r.5)
+ stfd f.21, ExFpr21 (r.5)
+ stfd f.22, ExFpr22 (r.5)
+ stfd f.23, ExFpr23 (r.5)
+ stfd f.24, ExFpr24 (r.5)
+ stfd f.25, ExFpr25 (r.5)
+ stfd f.26, ExFpr26 (r.5)
+ stfd f.27, ExFpr27 (r.5)
+ stfd f.28, ExFpr28 (r.5)
+ stfd f.29, ExFpr29 (r.5)
+ stfd f.30, ExFpr30 (r.5)
+ stfd f.31, ExFpr31 (r.5)
+
+//
+// Call the raise exception kernel routine which will marshall the arguments
+// and then call the exception dispatcher.
+//
+// r.3 addr of Exception Record
+// r.4 addr of Context Record
+// r.5 addr of Exception Frame
+// r.6 addr of Trap Frame
+// r.7 "first chance" boolean
+//
+
+ ori r.6, r.12, 0 // move Trap Frame pointer into call arg
+ bl ..KiRaiseException // call Raise Exception routine
+
+//
+// If the raise exception routine returns success, then exit via the exception
+// exit code. Otherwise return to the system service dispatcher.
+//
+
+ lwz r.5, KiPcr+PcCurrentThread(r.0) // get current thread address
+ lwz r.4, rai_tr_frame (r.sp) // parm 2 = Trap Frame addr
+ lwz r.6, TrTrapFrame (r.4) // get old trap frame address
+ cmpwi r.3, 0 // test return value
+ stw r.6, ThTrapFrame (r.5) // restore old trap frame address
+ bne rai10 // branch if dispatch not successful
+
+//
+// Exit the system via exception exit which will restore the nonvolatile
+// machine state.
+//
+
+ la r.3, rai_ex_frame (r.sp) // parm 1 = Exception Frame addr
+ b ..KiExceptionExit // finish in Exception Exit
+
+//
+// The context or exception record is misaligned or not accessible, or the
+// exception was not handled.
+//
+
+rai10:
+ NESTED_EXIT (NtRaiseException,rai_cr_length,0,0)
+
+//++
+//
+// PKTHREAD
+// KeGetCurrentThread (VOID)
+//
+// Routine Description:
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Returns a pointer to the executing thread object.
+//
+//--
+
+ LEAF_ENTRY(KeGetCurrentThread)
+ lwz r.3, KiPcr+PcCurrentThread(r.0)
+ LEAF_EXIT(KeGetCurrentThread) // return
+
+//++
+//
+// KIRQL
+// KeGetCurrentIrql (VOID)
+//
+// Routine Description:
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+//    The current IRQL of the processor is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(KeGetCurrentIrql)
+ lbz r.3, KiPcr+PcCurrentIrql(r.0)
+ LEAF_EXIT(KeGetCurrentIrql) // return
+
diff --git a/private/ntos/ke/ppc/mpipi.s b/private/ntos/ke/ppc/mpipi.s
new file mode 100644
index 000000000..13f2137b4
--- /dev/null
+++ b/private/ntos/ke/ppc/mpipi.s
@@ -0,0 +1,657 @@
+// TITLE("Interprocessor Interrupt support routines")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+// Copyright (c) 1994 Motorola
+// Copyright (c) 1994 IBM Corporation
+//
+// Module Name:
+//
+// mpipi.s
+//
+// Abstract:
+//
+// This module implements the PPC specific functions required to
+// support multiprocessor systems.
+//
+// Author:
+//
+// Pat Carr
+//
+// Based on: ke\mips\x4mpipi.s, authored by David N. Cutler (davec)
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+//list(off)
+#include "ksppc.h"
+//list(on)
+
+ .extern ..KiFreezeTargetExecution
+ .extern __imp_HalRequestIpi
+
+ .extern KiProcessorBlock
+
+
+ SBTTL("Interprocess Interrupt Processing")
+//++
+//
+// VOID
+// KeIpiInterrupt (
+// IN PKTRAP_FRAME TrapFrame
+// );
+//
+// Routine Description:
+//
+// This routine is entered as the result of an interprocessor interrupt.
+// Its function is to process all interprocess immediate and packet
+// requests.
+//
+// This routine is entered at IPI_LEVEL.
+//
+// Arguments:
+//
+// TrapFrame (r.3) - Supplies a pointer to a trap frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .struct 0
+ipi_int_hdr: .space StackFrameHeaderLength
+ipi_int_ex_frm: .space ExceptionFrameLength
+ .align 3
+ipi_int_frm_len:
+
+
+ SPECIAL_ENTRY_S(KeIpiInterrupt, _TEXT$01)
+
+#if !defined(NT_UP)
+
+ stw r.31, ExGpr31+ipi_int_ex_frm-ipi_int_frm_len(r.sp)
+ stw r.30, ExGpr30+ipi_int_ex_frm-ipi_int_frm_len(r.sp)
+ mflr r.30
+ stwu r.sp, -ipi_int_frm_len(r.sp)
+
+#endif
+
+ PROLOGUE_END(KeIpiInterrupt)
+
+#if !defined(NT_UP)
+
+ ori r.31, r.3, 0 // save address of trap frame
+
+//
+// Process all interprocessor requests.
+//
+// N.B. KiIpiProcessRequests returns condition register bit 29 set
+// if freeze requested.
+//
+
+ bl ..KiIpiProcessRequests // process requests
+ bf 29, ipi_int_fini // jif no freeze requested
+
+//
+// Save the volatile floating state.
+//
+
+ SAVE_VOLATILE_FLOAT_STATE(r.31)
+
+//
+// Save the nonvolatile state: integer registers and floating registers
+//
+
+ la r.4, ipi_int_ex_frm(r.sp) // address of exception frame
+
+ stw r.13, ExGpr13(r.4) // save non-volatile GPRs
+ stw r.14, ExGpr14(r.4)
+ stw r.15, ExGpr15(r.4)
+ stw r.16, ExGpr16(r.4)
+ stw r.17, ExGpr17(r.4)
+ stw r.18, ExGpr18(r.4)
+ stw r.19, ExGpr19(r.4)
+ stw r.20, ExGpr20(r.4)
+ stw r.21, ExGpr21(r.4)
+ stw r.22, ExGpr22(r.4)
+ stw r.23, ExGpr23(r.4)
+ stw r.24, ExGpr24(r.4)
+ stw r.25, ExGpr25(r.4)
+ stw r.26, ExGpr26(r.4)
+ stw r.27, ExGpr27(r.4)
+ stw r.28, ExGpr28(r.4)
+ stw r.29, ExGpr29(r.4)
+
+ stfd f.14, ExFpr14(r.4) // save non-volatile FPRs
+ stfd f.15, ExFpr15(r.4)
+ stfd f.16, ExFpr16(r.4)
+ stfd f.17, ExFpr17(r.4)
+ stfd f.18, ExFpr18(r.4)
+ stfd f.19, ExFpr19(r.4)
+ stfd f.20, ExFpr20(r.4)
+ stfd f.21, ExFpr21(r.4)
+ stfd f.22, ExFpr22(r.4)
+ stfd f.23, ExFpr23(r.4)
+ stfd f.24, ExFpr24(r.4)
+ stfd f.25, ExFpr25(r.4)
+ stfd f.26, ExFpr26(r.4)
+ stfd f.27, ExFpr27(r.4)
+ stfd f.28, ExFpr28(r.4)
+ stfd f.29, ExFpr29(r.4)
+ stfd f.30, ExFpr30(r.4)
+ stfd f.31, ExFpr31(r.4)
+
+//
+// Freeze the execution of the current processor.
+//
+
+ ori r.3, r.31, 0 // address of trap frame
+// la r.4, ipi_int_ex_frm(r.sp) // address of exception frame
+ bl ..KiFreezeTargetExecution // freeze current processor execution
+
+//
+// Restore the nonvolatile state: floating registers and integer registers
+//
+
+ la r.3, ipi_int_ex_frm(r.sp) // address of exception frame
+
+ lfd f.14, ExFpr14 (r.3) // restore non-volatile FPRs
+ lfd f.15, ExFpr15 (r.3)
+ lfd f.16, ExFpr16 (r.3)
+ lfd f.17, ExFpr17 (r.3)
+ lfd f.18, ExFpr18 (r.3)
+ lfd f.19, ExFpr19 (r.3)
+ lfd f.20, ExFpr20 (r.3)
+ lfd f.21, ExFpr21 (r.3)
+ lfd f.22, ExFpr22 (r.3)
+ lfd f.23, ExFpr23 (r.3)
+ lfd f.24, ExFpr24 (r.3)
+ lfd f.25, ExFpr25 (r.3)
+ lfd f.26, ExFpr26 (r.3)
+ lfd f.27, ExFpr27 (r.3)
+ lfd f.28, ExFpr28 (r.3)
+ lfd f.29, ExFpr29 (r.3)
+ lfd f.30, ExFpr30 (r.3)
+ lfd f.31, ExFpr31 (r.3)
+
+ lwz r.14, ExGpr14 (r.3) // restore non-volatile GPRs
+ lwz r.15, ExGpr15 (r.3)
+ lwz r.16, ExGpr16 (r.3)
+ lwz r.17, ExGpr17 (r.3)
+ lwz r.18, ExGpr18 (r.3)
+ lwz r.19, ExGpr19 (r.3)
+ lwz r.20, ExGpr20 (r.3)
+ lwz r.21, ExGpr21 (r.3)
+ lwz r.22, ExGpr22 (r.3)
+ lwz r.23, ExGpr23 (r.3)
+ lwz r.24, ExGpr24 (r.3)
+ lwz r.25, ExGpr25 (r.3)
+ lwz r.26, ExGpr26 (r.3)
+ lwz r.27, ExGpr27 (r.3)
+ lwz r.28, ExGpr28 (r.3)
+ lwz r.29, ExGpr29 (r.3)
+
+//
+// Restore the volatile floating state.
+//
+
+ RESTORE_VOLATILE_FLOAT_STATE(r.31)
+
+ipi_int_fini:
+ mtlr r.30
+ lwz r.31, ExGpr31+ipi_int_ex_frm(r.sp)
+ lwz r.30, ExGpr30+ipi_int_ex_frm(r.sp)
+ addi r.sp, r.sp, ipi_int_frm_len
+
+#endif
+ SPECIAL_EXIT(KeIpiInterrupt)
+
+ SBTTL("Processor Request")
+//++
+//
+// ULONG
+// KiIpiProcessRequests (
+// VOID
+// );
+//
+// Routine Description:
+//
+// This routine processes interprocessor requests and returns a summary
+// of the requests that were processed.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// The request summary is returned as the function value.
+// CR.7 contains the 4 LSBs of request summary, specifically,
+// CR bit 29 is set if freeze is requested.
+//
+//--
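+
+//
+// Roughly equivalent C (illustrative sketch; the interlocked exchange is
+// the lwarx/stwcx. sequence below, and the packet call uses the sending
+// processor's PRCB fields):
+//
+//      Source = Prcb->SignalDone;
+//      if (Source != NULL) {
+//          Prcb->SignalDone = NULL;
+//          Source->WorkerRoutine(Source, Source->CurrentPacket[0],
+//                                Source->CurrentPacket[1],
+//                                Source->CurrentPacket[2]);
+//      }
+//      RequestSummary = interlocked exchange of Prcb->RequestSummary with 0;
+//      if (RequestSummary & IPI_APC)  request APC software interrupt;
+//      if (RequestSummary & IPI_DPC)  request DPC software interrupt;
+//      return RequestSummary;
+//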
+ .struct 0
+ .space StackFrameHeaderLength
+PrTocSave: .space 4
+PrLrSave: .space 4
+Pr30Save: .space 4
+ .align 3
+PrFrameLength:
+
+ SPECIAL_ENTRY_S(KiIpiProcessRequests, _TEXT$01)
+
+#if !defined(NT_UP)
+
+ mflr r.0 // get return address
+ stwu r.sp, -PrFrameLength(r.sp) // buy stack frame
+ stw r.30, Pr30Save(r.sp) // save reg 30
+ stw r.toc, PrTocSave(r.sp) // save our toc
+ lwz r.30, KiPcr+PcPrcb(r.0) // get processor control block
+ stw r.0, PrLrSave(r.sp) // save return address
+
+#endif
+
+ PROLOGUE_END(KiIpiProcessRequests)
+
+#if !defined(NT_UP)
+
+//
+// Check for Packet ready.
+//
+// If a packet is ready, then get the address of the requested function
+// and call the function passing the address of the packet as a parameter.
+//
+// N.B. We do not need to check/clear the SignalDone field using
+// atomic operations because this processor is the only processor
+// attempting to clear this field and only clears it when it takes
+// work from it. Other processors will only write to this field
+// when it is zero (they must use atomic operations to update it).
+//
+
+kipr_10:
+ lwz r.3, PbSignalDone(r.30) // get packet source prcb
+ li r.6, 0 // either way we need zero
+ cmpwi r.3, 0 // check for packet ready
+ addi r.30, r.30, PbRequestSummary
+ beq kipr_20 // if eq, no packet ready
+
+//
+// Packet ready. Clear SignalDone then call the requested function.
+// r.3 now contains the address of the PRCB of the processor which
+// made the request. That PRCB contains the function address and
+// parameters.
+//
+
+ stw r.6, PbSignalDone-PbRequestSummary(r.30)
+ lwz r.6, PbWorkerRoutine(r.3)// get &worker function fn desc
+ lwz r.4, PbCurrentPacket(r.3)// get request parameters
+ lwz r.0, 0(r.6) // get worker entry point
+ lwz r.5, PbCurrentPacket+4(r.3)
+ mtlr r.0 // set entry address
+ lwz r.toc, 4(r.6) // get worker's toc
+ lwz r.6, PbCurrentPacket+8(r.3)
+ blrl // call worker routine
+ lwz r.0, PrLrSave(r.sp) // get return address
+ lwz r.toc, PrTocSave(r.sp) // restore kernel toc
+ li r.6, 0 // need zero again
+
+#if NT_INST
+
+//
+// Increment number of packet requests
+//
+
+ lwz r.3, PbIpiCounts-PbRequestSummary+IcPacket(r.30)
+ addi r.3, r.3, 1
+ stw r.3, PbIpiCounts-PbRequestSummary+IcPacket(r.30)
+
+#endif
+
+ mtlr r.0 // reset return address
+
+//
+// Read request summary and write a zero result interlocked.
+//
+
+kipr_20:
+ lwarx r.3, 0, r.30 // get request summary
+ stwcx. r.6, 0, r.30 // zero request summary
+ bne- kipr_20 // if ne, store conditional failed
+
+//
+// WARNING: For speed we are just going to move the request summary
+// into the condition register field 7. The following code
+// is dependent on the following values-
+//
+// IPI_APC 1 (condition register bit 31)
+// IPI_DPC 2 (condition register bit 30)
+// IPI_FREEZE 4 (condition register bit 29)
+//
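+// (Illustrative note: mtcrf 0x01 copies bits 28..31 of r.3 into CR field 7,
+// i.e. CR bits 28..31, so IPI_APC (0x1) lands in CR bit 31, IPI_DPC (0x2)
+// in CR bit 30, and IPI_FREEZE (0x4) in CR bit 29.)
+//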
+
+ mtcrf 0x01, r.3 // set appropriate CR bits
+ li r.4, 1 // will need 1 if apc or dpc
+
+//
+// Check for APC interrupt request.
+//
+// If an APC interrupt is requested, then request a software interrupt at
+// APC level on the current processor.
+//
+
+
+ bf 31, kipr_25 // jif no APC requested
+ stb r.4, KiPcr+PcApcInterrupt(r.0) // set APC interrupt request
+
+#if NT_INST
+
+//
+// Increment number of APC requests
+//
+
+ lwz r.5, PbIpiCounts-PbRequestSummary+IcAPC(r.30)
+ addi r.5, r.5, 1
+ stw r.5, PbIpiCounts-PbRequestSummary+IcAPC(r.30)
+
+#endif
+
+//
+// Check for DPC interrupt request.
+//
+// If a DPC interrupt is requested, then request a software interrupt at
+// DPC level on the current processor.
+//
+
+kipr_25:
+ bf 30, kipr_30 // jif no DPC requested
+ stb r.4, KiPcr+PcDispatchInterrupt(r.0) // set DPC interrupt request
+
+#if NT_INST
+
+//
+// Increment number of DPC requests
+//
+
+ lwz r.5, PbIpiCounts-PbRequestSummary+IcDPC(r.30)
+ addi r.5, r.5, 1
+ stw r.5, PbIpiCounts-PbRequestSummary+IcDPC(r.30)
+
+#endif
+
+//
+// Set function return value, restore registers, and return.
+//
+
+kipr_30:
+
+#if NT_INST
+
+ bf 29, kipr_40 // jif no freeze requested
+
+//
+// Increment number of freeze requests
+//
+
+ lwz r.5, PbIpiCounts-PbRequestSummary+IcFreeze(r.30)
+ addi r.5, r.5, 1
+ stw r.5, PbIpiCounts-PbRequestSummary+IcFreeze(r.30)
+
+#endif
+
+kipr_40:
+
+//
+// N.B. Returning RequestSummary in r.3 (history), CR bit 29 set
+// if freeze requested.
+//
+
+ lwz r.30, Pr30Save(r.sp) // restore reg 30
+ addi r.sp, r.sp, PrFrameLength
+
+#endif
+
+ SPECIAL_EXIT(KiIpiProcessRequests)
+
+ SBTTL("Send Interprocess Request")
+//++
+//
+// VOID
+// KiIpiSend (
+// IN KAFINITY TargetProcessors,
+// IN KIPI_REQUEST IpiRequest
+// );
+//
+// Routine Description:
+//
+// This routine requests the specified operation on the target set of
+// processors.
+//
+// Arguments:
+//
+// TargetProcessors (r.3) - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// IpiRequest (r.4) - Supplies the request operation mask.
+//
+// Return Value:
+//
+// None.
+//
+//--
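+
+//
+// Roughly equivalent C (illustrative sketch; the atomic OR is the
+// lwarx/stwcx. loop below):
+//
+//      Remaining = TargetProcessors;
+//      for (i = 0; Remaining != 0; i += 1, Remaining >>= 1) {
+//          if (Remaining & 1) {
+//              atomically OR IpiRequest into KiProcessorBlock[i]->RequestSummary;
+//          }
+//      }
+//      HalRequestIpi(TargetProcessors);
+//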
+
+ .struct 0
+ .space StackFrameHeaderLength
+SpTocSave: .space 4
+Sp31Save: .space 4
+ .align 3
+SpFrameLength:
+
+ SPECIAL_ENTRY_S(KiIpiSend, _TEXT$01)
+
+#if !defined(NT_UP)
+
+ lwz r.9, [toc]__imp_HalRequestIpi(r.toc)
+ stwu r.sp, -SpFrameLength(r.sp) // buy stack frame
+ stw r.31, Sp31Save(r.sp) // save r.31
+ mflr r.31 // save return address in r.31
+ lwz r.9, 0(r.9) // get HalRequestIpi fn descr
+ rlwinm. r.5, r.3, 0, 1 // check if cpu 0 is a target
+ lwz r.7, [toc]KiProcessorBlock(r.toc)// get &processor block array
+ stw r.2, SpTocSave(r.sp) // save kernel's TOC
+
+#endif
+
+ PROLOGUE_END(KiIpiSend)
+
+#if !defined(NT_UP)
+
+ ori r.6, r.3, 0 // copy target processors
+ lwz r.0, 0(r.9) // get HalRequestIpi entry
+ li r.10,PbRequestSummary // offset to RequestSummary in PRCB
+
+kis10: beq kis30 // if eq, target not specified
+
+ lwz r.5, 0(r.7) // get target processor block address
+
+kis20: lwarx r.8, r.10, r.5 // get request summary of target
+ or r.8, r.8, r.4 // merge current request with summary
+ stwcx. r.8, r.10, r.5 // store request summary
+ bne- kis20 // if ne, store conditional failed
+
+kis30: addi r.7, r.7, 4 // advance to next array element
+ srwi. r.6, r.6, 1 // shift out target bit
+ beq kis40 // if eq, no more targets requested
+ rlwinm. r.5, r.6, 0, 1 // check if target bit set
+ b kis10
+
+kis40: mtlr r.0 // set HalRequestIpi entry
+ lwz r.2, 4(r.9) // get HAL's toc
+ blrl // request IPI interrupt on targets
+
+ mtlr r.31 // set return address
+ lwz r.toc, SpTocSave(r.sp) // restore kernel's toc
+ lwz r.31, Sp31Save(r.sp) // restore r.31
+ addi r.sp, r.sp, SpFrameLength
+
+#endif
+
+ SPECIAL_EXIT(KiIpiSend)
+
+ SBTTL("Send Interprocess Request Packet")
+//++
+//
+// VOID
+// KiIpiSendPacket (
+// IN KAFINITY TargetProcessors,
+// IN PKIPI_WORKER WorkerFunction,
+// IN PVOID Parameter1,
+// IN PVOID Parameter2,
+// IN PVOID Parameter3
+// );
+//
+// Routine Description:
+//
+// This routine executes the specified worker function on the specified
+// set of processors.
+//
+// Arguments:
+//
+// TargetProcessors - Supplies the set of processors on which the
+// specified operation is to be executed.
+//
+// WorkerFunction - Supplies the address of the worker function.
+//
+// Parameter1 - Parameter3 - Supplies worker function specific parameters.
+//
+// Return Value:
+//
+// None.
+//
+//--
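+
+//
+// Roughly equivalent C (illustrative sketch; the "wait and claim" step is
+// the ACQUIRE_SPIN_LOCK on each target's SignalDone field, as described in
+// the comments below):
+//
+//      Prcb->TargetSet = TargetProcessors;
+//      Prcb->WorkerRoutine = WorkerFunction;
+//      Prcb->CurrentPacket[0..2] = Parameter1..Parameter3;
+//      for each target processor i in TargetProcessors {
+//          wait until KiProcessorBlock[i]->SignalDone == 0, then set it to Prcb;
+//      }
+//      HalRequestIpi(TargetProcessors);
+//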
+
+ SPECIAL_ENTRY_S(KiIpiSendPacket, _TEXT$01)
+
+#if !defined(NT_UP)
+
+ lwz r.9, [toc]__imp_HalRequestIpi(r.toc)
+ stwu r.sp, -SpFrameLength(r.sp) // buy stack frame
+ stw r.31, Sp31Save(r.sp) // save r.31
+ mflr r.31 // save return address in r.31
+ lwz r.9, 0(r.9) // get HalRequestIpi fn descr
+ stw r.2, SpTocSave(r.sp) // save kernel's TOC
+
+#endif
+
+ PROLOGUE_END(KiIpiSendPacket)
+
+#if !defined(NT_UP)
+
+ lwz r.12, KiPcr+PcPrcb(r.0) // get this processor's PRCB
+ ori r.11, r.3, 0 // copy target processor set
+ lwz r.0, 0(r.9) // get HalRequestIpi entry addr
+
+//
+// Store function address and parameters in the packet area of the PRCB on
+// the current processor.
+//
+
+ stw r.3,PbTargetSet(r.12) // set target processor set
+ stw r.4,PbWorkerRoutine(r.12) // set worker function address
+ stw r.5,PbCurrentPacket(r.12) // store worker function parameters
+ stw r.6,PbCurrentPacket+4(r.12)//
+ stw r.7,PbCurrentPacket+8(r.12)//
+
+// GPRs r.4, - r.7 now available ...
+
+ lwz r.4, [toc]KiProcessorBlock(r.toc)// get &processor block array
+ mtlr r.0 // set addr of HalRequestIpi entry
+
+//
+// Ensure above stores complete w.r.t. memory prior to allowing any
+// processor to begin this request.
+//
+
+ eieio
+
+//
+// Loop through the target processors and send the packet to the specified
+// recipients.
+//
+
+kisp10:
+ lwz r.10, 0(r.4) // get target processor block address
+ rlwinm. r.8, r.11, 0, 1 // check if target bit set
+ srwi r.11, r.11, 1 // shift out target processor
+ addi r.10, r.10, PbSignalDone // get packet lock address
+ beq kisp30 // if eq, target not specified
+
+//
+// PowerPC uses the SignalDone field in the PRCB to indicate packet
+// status. Non zero implies packet busy. This saves us having to
+// update both the RequestSummary and the SignalDOne fields in an
+// atomic manner.
+//
+// N.B. We write this like it's a spin lock, even though it isn't, quite.
+//
+
+ ACQUIRE_SPIN_LOCK(r.10, r.12, r.6, kisp20, kisp40)
+
+kisp30: cmpwi r.11, 0
+ addi r.4, r.4, 4 // advance to get array element
+ bne kisp10 // if ne, more targets requested
+
+ lwz r.2, 4(r.9) // get HAL's toc
+ blrl // call HalRequestIpi
+
+ mtlr r.31 // set return address
+ lwz r.toc, SpTocSave(r.sp) // restore kernel's toc
+ lwz r.31, Sp31Save(r.sp) // restore r.31
+ addi r.sp, r.sp, SpFrameLength
+
+ blr
+
+ SPIN_ON_SPIN_LOCK(r.10, r.6, kisp20, kisp40)
+#endif
+
+ DUMMY_EXIT(KiIpiSendPacket)
+
+ SBTTL("Signal Packet Done")
+//++
+//
+// VOID
+// KeIpiSignalPacketDone (
+// IN PVOID SignalDone
+// );
+//
+// Routine Description:
+//
+// This routine signals that a processor has completed a packet by
+// clearing the calling processor's set member of the requesting
+// processor's packet.
+//
+// Arguments:
+//
+// SignalDone (r.3) - Supplies a pointer to the processor block of the
+// sending processor.
+//
+// Return Value:
+//
+// None.
+//
+//--
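+
+//
+// N.B. PcNotMember in the PCR appears to hold the complement of this
+// processor's affinity bit, so the single "and" below clears our bit
+// in the sender's target set.
+//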
+
+ LEAF_ENTRY(KiIpiSignalPacketDone)
+
+ li r.9, PbTargetSet // offset to target set in prcb
+ lwz r.4, KiPcr+PcNotMember(r.0) // get processor set member
+sigdn: lwarx r.5, r.9, r.3 // get request target set
+ and r.5, r.5, r.4 // clear processor set member
+ stwcx. r.5, r.9, r.3 // store target set
+ bne- sigdn // if ne, store conditional failed
+
+ LEAF_EXIT(KiIpiSignalPacketDone)
diff --git a/private/ntos/ke/ppc/pcr.s b/private/ntos/ke/ppc/pcr.s
new file mode 100644
index 000000000..17bdaaba5
--- /dev/null
+++ b/private/ntos/ke/ppc/pcr.s
@@ -0,0 +1,80 @@
+// TITLE("PCR access")
+//++
+//
+// Copyright (c) 1995 Microsoft Corporation
+//
+// Module Name:
+//
+// pcr.s
+//
+// Abstract:
+//
+// This module implements the routines for accessing PCR fields.
+// Specifically, routines that need multiple-instruction access
+// to the PCR and its related structures, and need to run with
+// interrupts disabled.
+//
+// Author:
+//
+// Chuck Lenzmeier (chuckl) 5-Apr-95
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+
+ SBTTL("KeIsExecutingDpc")
+//++
+//
+// BOOLEAN
+// KeIsExecutingDpc (
+// VOID
+// )
+//
+// Routine Description:
+//
+// This function returns the DPC Active flag on the current processor.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// Current DPC Active flag. This flag indicates if a DPC routine is
+// currently running on this processor.
+//
+//--
+
+ LEAF_ENTRY(KeIsExecutingDpc)
+
+#if !defined(NT_UP)
+ DISABLE_INTERRUPTS(r9, r8)
+#endif
+
+//
+// Get the DPC active indicator and return a BOOLEAN. Note that
+// DpcRoutineActive holds 0 or some nonzero value (usually the
+// stack pointer), so it does not conform to the C TRUE/FALSE values.
+//
+
+ lwz r4, KiPcr+PcPrcb(r0) // get PRCB address
+ li r3, FALSE // assume DPC not active
+ lwz r4, PbDpcRoutineActive(r4) // get DPC active indicator
+ cmpwi r4, 0 // DPC active?
+ beq kied10 // branch if not
+ li r3, TRUE // indicate DPC active
+kied10:
+
+#if !defined(NT_UP)
+ ENABLE_INTERRUPTS(r9)
+#endif
+
+ LEAF_EXIT(KeIsExecutingDpc) // return
+
diff --git a/private/ntos/ke/ppc/procstat.s b/private/ntos/ke/ppc/procstat.s
new file mode 100644
index 000000000..d1ff66650
--- /dev/null
+++ b/private/ntos/ke/ppc/procstat.s
@@ -0,0 +1,309 @@
+//++
+//
+// Copyright (c) 1989 Microsoft Corporation
+//
+// Module Name:
+//
+//    procstat.s
+//
+// Abstract:
+//
+// This module implements procedures for saving and restoring
+// processor control state.
+//
+// These procedures support debugging of UP and MP systems.
+//
+// Author:
+//
+// Chuck Bauman (v-cbaum@microsoft.com) 7-Nov-1994
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+// Supported PPC versions
+
+ .set PV601, 1
+ .set PV603, 3
+ .set PV604, 4
+ .set PV603p, 6
+ .set PV603pp, 7
+ .set PV613, 8
+ .set PV604p, 9
+
+
+// 601 special purpose register names
+ .set hid1, 1009
+
+// special purpose register names (601, 603 and 604)
+ .set iabr, 1010
+
+// special purpose register names (601, 604)
+ .set dabr, 1013
+
+ .extern KiBreakPoints
+
+//++
+//
+// KiSaveProcessorControlState(
+// PKPROCESSOR_STATE ProcessorState
+// );
+//
+// Routine Description:
+//
+// This routine saves the control subset of the processor state.
+// Called by the debug subsystem, and KiSaveProcessorState()
+//
+// N.B. This procedure will save the debug registers and then turn off
+// the appropriate debug registers at the hardware. This prevents
+// recursive hardware trace breakpoints and allows debuggers
+// to work.
+//
+// Arguments:
+//
+// ProcessorState (r.3)
+//
+// Return Value:
+//
+// None.
+//
+//--
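+
+//
+// N.B. The Dr0/Dr1/Dr6/Dr7 names follow the x86 debug register convention
+// used by the kernel debugger; on PowerPC they are backed by the IABR
+// (instruction address breakpoint) and DABR (data address breakpoint)
+// special purpose registers, as the code below shows.
+//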
+
+ LEAF_ENTRY(KiSaveProcessorControlState)
+
+ lwz r.5, [toc]KiBreakPoints(r.2) // Available Breakpoints Addr
+ addi r.3, r.3, PsSpecialRegisters
+ lwz r.5, 0(r.5) // # available breakpoints
+ lwz r.7, SrKernelDr7(r.3) // Get Dr state
+ lwz r.6, SrKernelDr6(r.3)
+ rlwinm. r.7, r.7, 0, 0xff // KD set Drs?
+ or r.5, r.5, r.6 // Return # DRs in Dr6
+ stw r.5, SrKernelDr6(r.3) // return allowed Drs
+ beq getsregs // Leave if no DR set
+
+ mfpvr r.4
+ li r.9, 0 // Initialize Dr7
+ li r.8, 0 // Turn off Drs
+ rlwinm r.4, r.4, 16, 0xffff // isolate processor type
+ cmpwi r.4, PV604
+ beq ss.604 // jif 604
+ cmpwi r.4, PV603p
+ beq ss.603 // jif Stretch (603+)
+ cmpwi r.4, PV604p
+ beq ss.604 // jif Sirocco (604+)
+ cmpwi r.4, PV603
+ beq ss.603 // jif 603
+ cmpwi r.4, PV603pp
+ beq ss.603 // jif 603++
+ cmpwi r.4, PV613
+ beq ss.604 // jif 613
+ cmpwi r.4, PV601
+ li r.10, 0x0080 // Normal, run mode (601)
+ beq ss.601 // jif 601
+ stw r.9, SrKernelDr7(r.3) // Drs not supported
+ b getsregs // return
+ // No DRs supported
+
+ss.601: // 601 SPECIFIC
+ mtspr hid1, r.10 // turn off trace mode
+
+ss.604: // 601/604 SPECIFIC
+ mfspr r.4, iabr // Load the IABR (Dr0)
+ rlwinm. r.4, r.4, 0, 0xfffffffc // IABR(DR0) set?
+ stw r.4, SrKernelDr0(r.3)
+ mfspr r.4, dabr // Load the DABR (Dr1)
+ beq ssnoiabr.1 // jiff Dr0 not set
+ li r.9, 0x2 // Set GE0 in Dr7
+
+ssnoiabr.1:
+ rlwimi r.9, r.4, 19, 11, 11 // Interchange R/W1 bits
+ rlwimi r.9, r.4, 21, 10, 10 // and move to Dr7
+ rlwinm. r.4, r.4, 0, 0xfffffff8 // Sanitize Dr1
+ stw r.4, SrKernelDr1(r.3) // Store Dr1 in trap frame
+ beq ssnodabr.1 // jiff Dr1 not set
+ ori r.9, r.9, 0x8 // Set GE1 in Dr7
+
+ssnodabr.1:
+ stw r.9, SrKernelDr7(r.3) // Initialize if no DRs set
+ rlwinm. r.5, r.9, 0, 0xf // Any DRs set?
+ mtspr dabr, r.8
+ mtspr iabr, r.8 // Turn off DRs
+ isync
+ ori r.9, r.9, 0x200 // Set GE bit in Dr7
+ beq getsregs // exit if not set
+ stw r.9, SrKernelDr7(r.3)
+ b getsregs
+
+ss.603: // 603 SPECIFIC
+ mfspr r.4, iabr // Load the IABR (Dr0)
+ rlwinm. r.4, r.4, 0, 0xfffffffc // Sanitize Dr0
+ beq ssnoiabr.3 // jiff Dr0 not set
+ li r.9, 0x202 // Initialize Dr7
+
+ssnoiabr.3:
+ stw r.4, SrKernelDr0(r.3) // Store Dr0
+ stw r.9, SrKernelDr7(r.3)
+ mtspr iabr, r.8 // Turn off DRs
+
+getsregs:
+ mfsr r.4, 0
+ mfsr r.5, 1
+ mfsr r.6, 2
+ mfsr r.7, 3
+ stw r.4, SrSr0(r.3)
+ mfsr r.4, 4
+ stw r.5, SrSr1(r.3)
+ mfsr r.5, 5
+ stw r.6, SrSr2(r.3)
+ mfsr r.6, 6
+ stw r.7, SrSr3(r.3)
+ mfsr r.7, 7
+ stw r.4, SrSr4(r.3)
+ mfsr r.4, 8
+ stw r.5, SrSr5(r.3)
+ mfsr r.5, 9
+ stw r.6, SrSr6(r.3)
+ mfsr r.6, 10
+ stw r.7, SrSr7(r.3)
+ mfsr r.7, 11
+ stw r.4, SrSr8(r.3)
+ mfsr r.4, 12
+ stw r.5, SrSr9(r.3)
+ mfsr r.5, 13
+ stw r.6, SrSr10(r.3)
+ mfsr r.6, 14
+ stw r.7, SrSr11(r.3)
+ mfsr r.7, 15
+ stw r.4, SrSr12(r.3)
+ stw r.5, SrSr13(r.3)
+ stw r.6, SrSr14(r.3)
+ stw r.7, SrSr15(r.3)
+
+ mfsdr1 r.0
+ stw r.0, SrSdr1(r.3)
+
+ mfibatl r.4, 0
+ mfibatu r.5, 0
+ mfibatl r.6, 1
+ mfibatu r.7, 1
+ stw r.4, SrIBAT0L(r.3)
+ mfibatl r.4, 2
+ stw r.5, SrIBAT0U(r.3)
+ mfibatu r.5, 2
+ stw r.6, SrIBAT1L(r.3)
+ mfibatl r.6, 3
+ stw r.7, SrIBAT1U(r.3)
+ mfibatu r.7, 3
+ stw r.4, SrIBAT2L(r.3)
+ stw r.5, SrIBAT2U(r.3)
+ stw r.6, SrIBAT3L(r.3)
+ stw r.7, SrIBAT3U(r.3)
+
+ mfpvr r.4
+ cmpwi r.4, PV601
+ beqlr // exit if 601 (no DBATs)
+
+ mfdbatl r.4, 0
+ mfdbatu r.5, 0
+ mfdbatl r.6, 1
+ mfdbatu r.7, 1
+ stw r.4, SrDBAT0L(r.3)
+ mfdbatl r.4, 2
+ stw r.5, SrDBAT0U(r.3)
+ mfdbatu r.5, 2
+ stw r.6, SrDBAT1L(r.3)
+ mfdbatl r.6, 3
+ stw r.7, SrDBAT1U(r.3)
+ mfdbatu r.7, 3
+ stw r.4, SrDBAT2L(r.3)
+ stw r.5, SrDBAT2U(r.3)
+ stw r.6, SrDBAT3L(r.3)
+ stw r.7, SrDBAT3U(r.3)
+
+ LEAF_EXIT(KiSaveProcessorControlState)
+
+
+//++
+//
+// KiRestoreProcessorControlState(
+//     PKPROCESSOR_STATE ProcessorState
+// );
+//
+// Routine Description:
+//
+// This routine restores the control subset of the processor state.
+// (Restores the same information as KiRestoreProcessorState EXCEPT that
+// the data in the TrapFrame/ExceptionFrame (i.e. the Context record) is NOT restored.)
+// Called by the debug subsystem, and KiRestoreProcessorState()
+//
+// Arguments:
+//
+//     ProcessorState (r.3)
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY(KiRestoreProcessorControlState)
+
+ addi r.3, r.3, PsSpecialRegisters
+ lwz r.4, SrKernelDr7(r.3) // Active DRs
+ mfpvr r.7
+ rlwinm. r.6, r.4, 0, 0xff // Drs Set?
+ bne DRon // jif DRs active
+ blr // exit No DRs set
+
+DRon:
+ rlwinm r.7, r.7, 16, 0xffff // isolate processor type
+ lwz r.5, SrKernelDr1 (r.3) // Get kernel DABR (Dr1)
+ lwz r.6, SrKernelDr0 (r.3) // Get kernel IABR (Dr0)
+ ori r.5, r.5, 0x4 // Sanitize DABR (Dr1) 604
+ ori r.6, r.6, 0x3 // Sanitize IABR (Dr0) 604
+ cmpwi r.7, PV604
+ beq rs.604 // jif 604
+ cmpwi r.7, PV603p
+ beq rs.603 // jif 603+
+ cmpwi r.7, PV604p
+ beq rs.604 // jif 604+
+ cmpwi r.7, PV603
+ beq rs.603 // jif 603
+ cmpwi r.7, PV603pp
+ beq rs.603 // jif 603++
+ cmpwi r.7, PV613
+ beq rs.604 // jif 613
+ cmpwi r.7, PV601
+ lis r.10, 0x6080 // Full cmp. trace mode (601)
+ beq rs.601 // jif 601
+ blr // return
+ // No DRs supported
+
+rs.601: // 601 SPECIFIC
+        rlwinm  r.6, r.6, 0, 0xfffffffc // Sanitize IABR (Dr0), undo 604
+        rlwinm  r.5, r.5, 0, 0xfffffff8 // Sanitize DABR (Dr1), undo 604
+ mtspr hid1, r.10 // turn on full cmp
+
+rs.604:
+ rlwinm. r.9, r.4, 0, 0x0000000c // LE1/GE1 set?
+ beq rsnodabr.1 // jiff Dr1 not set
+ rlwimi r.5, r.4, 13, 30, 30 // Interchange R/W1 bits
+ rlwimi r.5, r.4, 11, 31, 31
+ mtspr dabr, r.5
+
+rsnodabr.1:
+ rlwinm. r.4, r.4, 0, 0x00000003 // LE0/GE0 set?
+ beqlr
+ mtspr iabr, r.6
+ isync
+ blr
+
+rs.603: // 603 SPECIFIC
+ rlwinm r.6, r.6, 0, 0xfffffffc // Sanitize IABR
+ ori r.6, r.6, 0x2
+ mtspr iabr, r.6
+
+ LEAF_EXIT(KiRestoreProcessorControlState)
diff --git a/private/ntos/ke/ppc/real0.s b/private/ntos/ke/ppc/real0.s
new file mode 100644
index 000000000..e270bcdd8
--- /dev/null
+++ b/private/ntos/ke/ppc/real0.s
@@ -0,0 +1,6289 @@
+//--------------------------------------------------------------------------
+//
+// real0.s
+//
+// Zeroth-level interrupt handling code for PowerPC Little-Endian.
+// This code must reside in real storage beginning at location 0.
+//
+//--------------------------------------------------------------------------
+//--------------------------------------------------------------------------
+
+ .file "real0.s"
+
+//--------------------------------------------------------------------------
+//
+// Author: Rick Simpson
+// IBM Thomas J. Watson Research Center
+// Yorktown Heights, NY
+// simpson@watson.ibm.com
+//
+// Peter Johnston
+// IBM - Kirkland Programming Center
+// 3600 Carillon Point
+// Kirkland, WA 98033
+// plj@vnet.ibm.com
+//
+// Mark Mergen
+// IBM Thomas J. Watson Research Center
+// Yorktown Heights, NY 10598
+// mergen@watson.ibm.com
+//
+// Pat Carr
+// RISC Software, Motorola SPS
+// Austin TX 78735
+// patcarr@pets.sps.mot.com
+//
+// Ying Chan
+// RISC Software, Motorola SPS
+// Austin TX 78735
+// zulc@pets.sps.mot.com
+//
+//--------------------------------------------------------------------------
+//
+// Fixed mapping of low storage,
+// from PowerPC Operating Environment Architecture
+//
+// 0x0000 - 0x00FF (reserved)
+// 0x0100 - 0x01FF System Reset interrupt handler
+// 0x0200 - 0x02FF Machine Check interrupt handler
+// 0x0300 - 0x03FF Data Storage interrupt handler
+// 0x0400 - 0x04FF Instruction Storage interrupt handler
+// 0x0500 - 0x05FF External interrupt handler
+// 0x0600 - 0x06FF Alignment interrupt handler
+// 0x0700 - 0x07FF Program interrupt handler (a.k.a. program check)
+// 0x0800 - 0x08FF Floating Point Unavailable interrupt handler
+// 0x0900 - 0x09FF Decrementer interrupt handler
+// 0x0A00 - 0x0BFF (reserved)
+// 0x0C00 - 0x0CFF System Call interrupt handler
+// 0x0D00 - 0x0DFF Trace interrupt handler
+// 0x0E00 - 0x0EFF Floating Point Assist interrupt handler
+// 0x0F00 - 0x0FFF PMI handler
+//
+// The next several handlers are specific to the 603:
+//
+// 0x1000 - 0x10FF Instruction Translation Miss handler
+// 0x1100 - 0x11FF Data Store Translation Miss handler -- Load
+// 0x1200 - 0x12FF Data Store Translation Miss handler -- Store
+// 0x1300 - 0x13FF Instruction Address Breakpoint handler
+// 0x1400 - 0x14FF System Management Interrupt handler
+//
+// 0x1500 - 0x2FFF (reserved)
+//
+// This module is loaded into low-storage at real memory address 0.
+// System memory's address space begins at address 0x80000000 which
+// is mapped to real memory address 0 (see KiSystemInitialization()).
+// This module is designed to be entered in real mode and to switch
+// to virtual mode asap. Code is compiled to run at VMA 0x80000000.
+//
+//--------------------------------------------------------------------------
+
+#define mtbatl mtibatl
+#define mtbatu mtibatu
+
+#include "ksppc.h"
+
+// Symbolic names for SPRG registers
+
+ .set sprg.0, 0
+ .set sprg.1, 1
+ .set sprg.2, 2
+ .set sprg.3, 3
+
+// Names for the four bits of a CR field
+
+ .set LT, 0
+ .set GT, 1
+ .set EQ, 2
+ .set OV, 3
+
+// 601 special purpose register names
+ .set hid1, 1009
+
+// special purpose register names (601, 603 and 604)
+ .set hid0, 1008
+ .set iabr, 1010
+
+// special purpose register names (601, 604)
+ .set dabr, 1013
+
+// 603 special purpose register names
+
+ .set dmiss, 976
+ .set imiss, 980
+ .set icmp, 981
+ .set rpa, 982
+
+// 604 hid0 bits
+
+ .set h0_604_ice, 0x8000 // I-Cache Enable
+ .set h0_604_dce, 0x4000 // D-Cache Enable
+ .set h0_604_icl, 0x2000 // I-Cache Lock
+ .set h0_604_dcl, 0x1000 // D-Cache Lock
+ .set h0_604_icia, 0x0800 // I-Cache Invalidate All
+ .set h0_604_dcia, 0x0400 // D-Cache Invalidate All
+ .set h0_604_sse, 0x0080 // Super Scalar Enable
+ .set h0_604_bhte, 0x0004 // Branch History Table enable
+
+ .set h0_604_prefered, h0_604_ice+h0_604_dce+h0_604_sse+h0_604_bhte
+
+// 613 hid0 bits
+
+ .set h0_613_ice, 0x8000 // I-Cache Enable
+ .set h0_613_dce, 0x4000 // D-Cache Enable
+ .set h0_613_icia, 0x0800 // I-Cache Invalidate All
+ .set h0_613_dcia, 0x0400 // D-Cache Invalidate All
+ .set h0_613_sge, 0x0080 // Store Gathering enable
+ .set h0_613_dcfa, 0x0040 // Data Cache Flush Assist
+ .set h0_613_btic, 0x0020 // Branch Target Instr enable
+ .set h0_613_bhte, 0x0004 // Branch History Table enable
+
+//
+// What we're really going to want in hid0 is
+//
+// h0_613_ice+h0_613_dce+h0_613_sge+h0_613_dcfa+h0_613_btic+h0_613_bhte
+//
+// but these are untried features with currently unknown performance impact,
+// so for now use
+//
+// h0_613_ice+h0_613_dce+h0_613_btic+h0_613_bhte
+//
+ .set h0_613_preferred, h0_613_ice+h0_613_dce+h0_613_btic+h0_613_bhte
+
+// Known PPC versions:
+
+ .set PV601, 1
+ .set PV603, 3
+ .set PV604, 4
+ .set PV603p, 6 // 603e, Stretch
+ .set PV603pp, 7 // 603ev, Valiant
+ .set PV613, 8 // 613, aka Arthur
+ .set PV604p, 9 // 604+
+ .set PV620, 20
+
+//--------------------------------------------------------------------------
+//
+// Globally-used constants and variables
+//
+//--------------------------------------------------------------------------
+
+// external variables
+
+ .extern KdpOweBreakpoint
+ .extern KeGdiFlushUserBatch
+ .extern KeServiceDescriptorTable
+ .extern KeTickCount
+ .extern KiBreakPoints
+ .extern PsWatchEnabled
+
+
+// external procedures in ntoskrnl
+
+ .extern ..DbgBreakPoint
+ .extern ..KdSetOwedBreakpoints
+ .extern ..KeBugCheck
+ .extern ..KeBugCheckEx
+ .extern ..KiDeliverApc
+ .extern ..KiDispatchException
+ .extern ..KiDispatchSoftwareIntDisabled
+ .extern ..KiIdleLoop
+ .extern ..KiInitializeKernel
+ .extern ..MmAccessFault
+ .extern ..PsConvertToGuiThread
+ .extern ..PsWatchWorkingSet
+ .extern ..RtlpRestoreContextRfiJump
+
+
+//--------------------------------------------------------------------------
+//--------------------------------------------------------------------------
+//
+// Beginning of fixed-storage area (real page 0)
+//
+// Zeroth-Level Interrupt Handlers
+//
+//
+// These routines are located at hardware-mandated addresses. Each
+// one is the target of a particular interrupt: the hardware saves
+// the current instruction address in SRR0, the MSR (or most of it)
+// in SRR1, and branches to the start of one of these routines.
+//
+// When entered, each of these routines is running with Instruction
+// Relocate OFF, Data Relocate OFF, and External Interrupts disabled.
+// It is the task of each Zeroth-Level Interrupt Handler to get back
+// into "relocate on" (both IR and DR) as soon as possible. Turning
+// on IR is tricky because the kernel is not mapped V=R. The "ZLIHs"
+// must be in Real Page 0, but Virtual Page 0 belongs to user space;
+// the kernel resides at Virtual address 0x80000000. If the ZLIH
+// just turns on IR and DR, it will suddenly start executing code at
+// address 0x100 or so in user space (in supervisor state!). On the
+// other hand, the ZLIH can't branch to the kernel at 0x80000000,
+// because that address doesn't even exist while IR is off.
+//
+// The trick is to save the incoming SRR0 and SRR1, load up SRR0 and
+// SRR1 with a "new PSW" pointing to a First-Level Interrupt Handler
+// in the kernel's virtual space, and use "return from interrupt" to
+// both set IR (and DR) to 1 and branch to the proper virtual address
+// all in one go.
+//
+// The code assembled here must reside at Real Address 0, and also in
+// the kernel's virtual space (presumably at 0x80000000, but that is
+// not required).
+//
+//--------------------------------------------------------------------------
+//--------------------------------------------------------------------------
+
+//
+// The following macros, zlih() and short_zlih(), generate the body of the
+// code for the Zeroth-Level Interrupt Handlers.
+//
+// Each must be preceded by an ".org" to the proper machine-mandated
+// address. The ".org" can be followed by special fast-path interrupt
+// handling code, if appropriate, before the zlih() or short_zlih() is
+// coded. (Only System Call and D/I Storage interrupts do this at present.)
+//
+// zlih(code)
+// short_zlih(code)
+//
+// code: constant identifying the exception type
+//
+// on Entry
+// MSR: External interrupts disabled
+// Instruction Relocate OFF
+// Data Relocate OFF
+// SRR0: Next instruction address at time of interrupt
+// SRR1: MSR at time of interrupt
+//
+// Exits to First-Level Interrupt Handler, with
+// MSR: External interrupts disabled
+// Instruction Relocate ON
+// Data Relocate ON
+// GP registers:
+// r.2: Constant identifying the interrupt type
+// r.3: Saved SRR0 (interrupt address)
+// r.4: Saved SRR1 (MSR value)
+// r.5: -available-
+// r.11: -available-
+// In the PCR:
+// PcGprSave[0]: Saved r.2
+// PcGprSave[1]: Saved r.3
+// PcGprSave[2]: Saved r.4
+// PcGprSave[3]: Saved r.5
+// PcGprSave[5]: Saved r.11
+//
+// Nothing is left in the SPRG's
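+//
+// Schematically, the body generated by zlih() does the following (a
+// sketch, not literal code; register roles as listed above):
+//
+//     save r.2 - r.5 and r.11 into the PCR (SPRG2 used as scratch);
+//     r.3  = SRR0;                         // interrupt address
+//     r.4  = SRR1;                         // MSR at time of interrupt
+//     SRR0 = common_exception_entry;       // virtual address of the FLIH
+//     SRR1 = FLIH_MSR with MSR[PM] propagated from r.4;
+//     r.2  = interrupt code;
+//     rfi;                                 // load MSR from SRR1, jump to SRR0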
+
+ .set PCR_SAVE2, PcGprSave + 0
+ .set PCR_SAVE3, PcGprSave + 4
+ .set PCR_SAVE4, PcGprSave + 8
+ .set PCR_SAVE5, PcGprSave + 12
+ .set PCR_SAVE6, PcGprSave + 16
+ .set PCR_SAVE11, PcGprSave + 20
+
+ .set FLIH_MSR, 0x00013031 // ILE, FP, ME, IR, DR, LE bits in MSR
+ .set INT_ENA, MASK_SPR(MSR_EE,1) // MSR External Interrupt Enable
+
+//
+// Note: propagate MSR[PM] bit into the MSR we load.
+//
+#if !DBG_STORE
+#define zlih(code) \
+ mtsprg sprg.2, r.5 ;\
+ mfsprg r.5, sprg.0 ;\
+ stw r.4, PCR_SAVE4 (r.5) ;\
+ lwz r.4, common_exception_entry.ptr - real0 (0) ;\
+ stw r.3, PCR_SAVE3 (r.5) ;\
+ mfsrr0 r.3 ;\
+ mtsrr0 r.4 ;\
+ mfsrr1 r.4 ;\
+ stw r.2, PCR_SAVE2 (r.5) ;\
+ lis r.2, FLIH_MSR >> 16 ;\
+ ori r.2, r.2, FLIH_MSR & 0xFFFF ;\
+ rlwimi r.2, r.4, 0, MSR_PM, MSR_PM ;\
+ mtsrr1 r.2 ;\
+ stw r.11, PCR_SAVE11 (r.5) ;\
+ mfsprg r.11, sprg.2 ;\
+ li r.2, code ;\
+ stw r.11, PCR_SAVE5 (r.5) ;\
+ rfi
+#else
+#define zlih(code) \
+ mtsprg sprg.2, r.5 ;\
+ mfsprg r.5, sprg.0 ;\
+ stw r.4, PCR_SAVE4 (r.5) ;\
+ stw r.3, PCR_SAVE3 (r.5) ;\
+ DBGSTORE_I_R(r3,r4,code) ;\
+ lwz r.4, common_exception_entry.ptr - real0 (0) ;\
+ mfsrr0 r.3 ;\
+ mtsrr0 r.4 ;\
+ mfsrr1 r.4 ;\
+ stw r.2, PCR_SAVE2 (r.5) ;\
+ lis r.2, FLIH_MSR >> 16 ;\
+ ori r.2, r.2, FLIH_MSR & 0xFFFF ;\
+ rlwimi r.2, r.4, 0, MSR_PM, MSR_PM ;\
+ mtsrr1 r.2 ;\
+ stw r.11, PCR_SAVE11 (r.5) ;\
+ mfsprg r.11, sprg.2 ;\
+ li r.2, code ;\
+ stw r.11, PCR_SAVE5 (r.5) ;\
+ rfi
+#endif
+
+#if !DBG_STORE
+#define short_zlih(code) \
+ mtsprg sprg.3, r.2 ;\
+ li r.2, code ;\
+ b short_zlih_continue
+#else
+#define short_zlih(code) \
+ mtsprg sprg.3, r.2 ;\
+ mtsprg sprg.2, r.3 ;\
+ DBGSTORE_I_R(r2,r3,code) ;\
+ mfsprg r.3, sprg.2 ;\
+ li r.2, code ;\
+ b short_zlih_continue
+#endif
+
+//--------------------------------------------------------------------------
+//
+// List of internal codes used to distinguish types of interrupts
+// within real0.s, before converting to standard Windows NT
+// "STATUS_..." code for KiDispatchException.
+//
+// These values are offsets into a branch table in common_exception_entry.
+// That table MUST be updated if any entries here are added/deleted/changed.
+//
+ .set CODE_MACHINE_CHECK, 0
+ .set CODE_EXTERNAL, 4
+ .set CODE_DECREMENTER, 8
+ .set CODE_STORAGE_ERROR, 12 // after dsi or isi tests
+ .set CODE_PAGE_FAULT, 16 // after dsi or isi hpt miss code
+ .set CODE_ALIGNMENT, 20
+ .set CODE_PROGRAM, 24
+ .set CODE_FP_UNAVAIL, 28
+ .set CODE_DIRECT_STORE, 32
+ .set CODE_SYSTEM_CALL, 36
+ .set CODE_TRACE, 40
+ .set CODE_FP_ASSIST, 44
+ .set CODE_RUN_MODE, 48
+ .set CODE_PANIC, 52
+ .set CODE_SYSTEM_MGMT, 56
+ .set CODE_DATA_BREAKPOINT,60
+ .set CODE_PMI, 64
+
+
+//
+// Code from here thru end_of_code_to_move is copied to low memory
+// at system initialization. This code is declared in the INIT
+// section so the space can be used for other purposes after system
+// initialization.
+//
+ .new_section INIT,"rcx6" // force 64 byte alignment
+ // for text in this module.
+ .section INIT,"rcx6"
+ .globl real0
+ .org 0
+real0:
+ .asciiz "PowerPC"
+
+//-------------------------------------------------------------
+//
+// Machine Check Interrupt
+//
+// The machine check zeroth-level interrupt handler is the same as the
+// handlers generated by the zlih() macro EXCEPT that we don't reenable
+// machine check exceptions.
+//
+//-------------------------------------------------------------
+
+ .org 0x200
+
+ mtsprg sprg.2, r.5
+ mfsprg r.5, sprg.0
+ stw r.4, PCR_SAVE4 (r.5)
+#if !DBG_STORE
+ lwz r.4, common_exception_entry.ptr - real0 (0)
+ stw r.3, PCR_SAVE3 (r.5)
+#else
+ stw r.3, PCR_SAVE3 (r.5)
+ DBGSTORE_I_R(r3,r4,0x200)
+ lwz r.4, common_exception_entry.ptr - real0 (0)
+#endif
+ mfsrr0 r.3
+ mtsrr0 r.4
+ mfsrr1 r.4
+ stw r.2, PCR_SAVE2 (r.5)
+ LWI(r.2,(FLIH_MSR&~MASK_SPR(MSR_ME,1))) // don't reenable machine check
+ rlwimi r.2, r.4, 0, MSR_PM, MSR_PM // Preserve MSR[PM] bit
+ mtsrr1 r.2
+ stw r.11, PCR_SAVE11 (r.5)
+ mfsprg r.11, sprg.2
+ li r.2, CODE_MACHINE_CHECK
+ stw r.11, PCR_SAVE5 (r.5)
+ rfi
+
+//-------------------------------------------------------------
+//
+// Data Storage Interrupt
+//
+//-------------------------------------------------------------
+
+ .set K_BASE,0x8000 // virtual address of kernel
+ .set SREG_INVAL,0x80 // software invalid sreg bit (really 0x00800000)
+ .set PTE_VALID,4 // software pte valid bit
+ .set HPT_LOCK,0x04fc // real addr of hpt lock word
+ .set PTE_CHANGE, 0x0080 // TLB Change bit
+ .set PTE_COHERENCY, 0x0010 // Coherency required (WIMG(M)=1)
+ .set PTE_GUARDED, 0x8 // Guarded Storage (WIMG[G] == 1)
+
+ .org 0x300
+
+ mtsprg sprg.2,r.1 // save gpr 1
+ mfsprg r.1,sprg.0 // get addr of processor ctl region
+ stw r.4,PcSiR4(r.1) // save gpr 4
+ stw r.2,PcSiR2(r.1) // save gpr 2
+ INC_CTR(CTR_DSI,r1,r2)
+ DBGSTORE_I_R(r2,r4,0x300)
+ mfcr r.4 // save condition reg
+ mfdsisr r.2 // get data stg int status reg
+ stw r.0,PcSiR0(r.1) // save gpr 0
+ andis. r.0,r.2,0x8cf0 // dsi other than page translation?
+ mfdar r.0 // get failing addr in data addr reg
+ rlwimi r.0,r.2,7,0x00000001 // save st/l in low failing addr bit
+ bne- dsioth // branch if yes
+
+ INC_CTR(CTR_DSI_HPT_MISS,r1,r2)
+
+ b tpte
+
+//-------------------------------------------------------------
+//
+// Instruction Storage Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0x400
+
+ mtsprg sprg.2,r.1 // save gpr 1
+ mfsprg r.1,sprg.0 // get addr of processor ctl region
+ stw r.4,PcSiR4(r.1) // save gpr 4
+ stw r.2,PcSiR2(r.1) // save gpr 2
+ INC_CTR(CTR_ISI,r1,r2)
+ DBGSTORE_I_R(r2,r4,0x400)
+ mfcr r.4 // save condition reg
+ mfsrr1 r.2 // get save/restore reg 1
+ stw r.0,PcSiR0(r.1) // save gpr 0
+ andis. r.0,r.2,0x1820 // isi other than page translation?
+ mfsrr0 r.0 // get failing addr in sav/res reg 0
+ bne isioth // branch if yes
+
+ INC_CTR(CTR_ISI_HPT_MISS,r1,r2)
+
+ b tpte // goto test page table entry
+
+ .org 0x4fc // HPT_LOCK in real0.s, miscasm.s
+ .long 0 // hash page table lock word
+
+//-------------------------------------------------------------
+//
+// External Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0x500
+
+ zlih(CODE_EXTERNAL)
+
+//-------------------------------------------------------------
+//
+// Alignment Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0x600
+
+ mtsprg sprg.2,r.2 // save r2
+ mtsprg sprg.3,r.1 // save r1
+ DBGSTORE_I_R(r1,r2,0x600)
+ mfsprg r.1,sprg.0 // get PCR addr
+ mfdar r.2 // save DAR
+ stw r.2,PcSavedV0(r.1) // in PCR
+ mfdsisr r.2 // save DSISR
+ stw r.2,PcSavedV1(r.1) // in PCR
+ mfsprg r.2,sprg.2 // reload r2
+ mfsprg r.1,sprg.3 // reload r1
+ zlih(CODE_ALIGNMENT)
+
+//-------------------------------------------------------------
+//
+// Program Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0x700
+
+ zlih(CODE_PROGRAM)
+
+
+//
+// The following word contains the absolute address of
+// an instruction in the routine SwapContext. It is
+// here so we can find it while we have almost no GPRs
+// available during the early stage of exception processing.
+//
+KepSwappingContextAddr:
+ .extern KepSwappingContext
+ .long KepSwappingContext
+
+//-------------------------------------------------------------
+//
+// Floating Point Unavailable Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0x800
+
+//
+// For now, we don't attempt to lock the floating point unit.
+// If a floating point instruction is issued with FP unavailable,
+// it will interrupt to this location. We turn on the FP availability
+// bit and resume execution.
+//
+
+ mtsprg sprg.2, r.3
+#if DBG_STORE
+ mtsprg sprg.3,r4
+ DBGSTORE_I_R(r3,r4,0x800)
+ mfsprg r4,sprg.3
+#endif
+ mfsrr1 r.3
+ ori r.3, r.3, 0x2000
+ mtsrr1 r.3
+ mfsprg r.3, sprg.2
+ rfi
+
+
+// zlih(CODE_FP_UNAVAIL)
+
+//-------------------------------------------------------------
+//
+// Decrementer Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0x900
+
+ zlih(CODE_DECREMENTER)
+
+//-------------------------------------------------------------
+//
+// Direct Store Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0xA00
+
+ zlih(CODE_DIRECT_STORE)
+
+//-------------------------------------------------------------
+//
+// System Call Interrupt
+//
+// Since System Call is really a "call", we need not preserve
+// volatile registers as the other interrupt handlers must.
+//
+// Also, return from system call is to address in Link Register
+// so no need to save srr0 (exception address).
+//
+// However, arguments are in r.3 thru r.10 so don't trash them.
+//
+// Incoming value in r.2 (normally the TOC pointer) indicates
+// the system service being requested.
+//
+//-------------------------------------------------------------
+
+ .org 0xC00
+
+ DBGSTORE_I_R(r12,r11,0xc00)
+ lwz r.0, system_service_dispatch.ptr-real0(0)
+ mfsrr1 r.12 // save previous mode
+ li r.11, FLIH_MSR & 0xffff // set low 16 bits of kernel mode
+ rlwimi r.11, r.12, 0, MSR_PM, MSR_PM // propagate MSR[PM]
+ mtsrr1 r.11
+ mtsrr0 r.0 // set kernel entry address
+ extrwi. r.11, r.12, 1, MSR_PR // extract user mode
+ rfi // enter kernel
+
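+//
+// In outline, the sequence above is (a sketch; system_service_dispatch
+// is the kernel-side dispatcher entered via the rfi):
+//
+//     r.12 = SRR1;                         // caller's MSR (previous mode)
+//     SRR1 = kernel-mode MSR (low half of FLIH_MSR, MSR[PM] propagated);
+//     SRR0 = system_service_dispatch;
+//     r.11 = caller's MSR[PR];             // non-zero if caller was user mode
+//     rfi;                                 // r.2 = service number, r.3-r.10 = arguments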
+
+//-------------------------------------------------------------
+//
+// Trace Interrupt
+//
+//-------------------------------------------------------------
+
+ .org 0xD00
+
+ zlih(CODE_TRACE)
+
+//--------------------------------------------------------------------------
+//
+// Floating Point Assist Zeroth-Level Interrupt Handler (optional; not 601)
+//
+//--------------------------------------------------------------------------
+
+ .org 0xE00
+
+ short_zlih(CODE_FP_ASSIST)
+
+
+//--------------------------------------------------------------------------
+//
+// PMI Interrupt (604)
+//
+// N.B. Some versions of the 604 do not turn off ENINT in MMCR0 when
+// signaling the PM interrupt. Therefore interrupts must not be
+// enabled before the spot in the (external) PM interrupt handler
+// where ENINT is turned off. This implies that one must not set
+// breakpoints or make calls to DbgPrint anywhere along the path
+// from here to the PM interrupt handler.
+//
+//--------------------------------------------------------------------------
+
+ .org 0xF00
+
+ short_zlih(CODE_PMI)
+
+//--------------------------------------------------------------------------
+//
+// Instruction Translation Miss (603 only)
+//
+//--------------------------------------------------------------------------
+
+ .org 0x1000
+
+ DBGSTORE_I_R(r1,r2,0x1000)
+ mfsprg r.1,sprg.0 // get physical address of PCR
+ INC_CTR(CTR_ITLB_MISS,r1,r2)
+ mfspr r.0,imiss // get faulting address
+ lwz r.2,PcPgDirRa(r.1) // get process' PDE page
+ mfsrin r.3,r.0 // get sreg of failing addr
+ andis. r.3,r.3,SREG_INVAL // sreg invalid?
+ bne stgerr603 // branch if yes
+ rlwimi r.2,r.0,12,0x00000ffc // calculate effective PDE address
+ lwz r.2,0(r.2) // get effective PDE
+ andi. r.3,r.2,PTE_VALID // check for valid PDE
+ beq pgf603 // invalid --> can't just load TLB
+ rlwinm r.2,r.2,0,0xfffff000 // get real addr of page table page
+ rlwimi r.2,r.0,22,0x00000ffc // calculate effective PTE address
+ lwz r.2,0(r.2) // get effective PTE
+ andi. r.3,r.2,PTE_VALID // check for valid PTE
+ beq pgf603 // invalid --> can't just load TLB
+
+ INC_CTR(CTR_ITLB_MISS_VALID_PTE,r1,r3)
+ mtspr rpa,r.2 // present translation information
+ mfsrr1 r.2
+ tlbli r.0 // set translation for fault addr
+ mtcrf 0x80,r.2 // restore CR0 at time of miss
+ rfi
+
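+//
+// The software reload above is, in effect (a sketch; EA is the missing
+// address from the imiss register):
+//
+//     if (segment register of EA has the software-invalid bit)  goto stgerr603;
+//     pde = PageDirectory[EA >> 22];               // PcPgDirRa is its real address
+//     if (!(pde & PTE_VALID))                      goto pgf603;
+//     pte = PageTable(pde)[(EA >> 12) & 0x3FF];
+//     if (!(pte & PTE_VALID))                      goto pgf603;
+//     rpa = pte;  tlbli EA;  rfi;                  // load the TLB and resume
+//
+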
+//--------------------------------------------------------------------------
+//
+// Data Load Translation Miss (603 only)
+//
+//--------------------------------------------------------------------------
+
+ .org 0x1100
+
+ DBGSTORE_I_R(r1,r2,0x1100)
+dtlbmiss:
+ mfsprg r.1,sprg.0 // get physical address of PCR
+ INC_CTR(CTR_DTLB_MISS,r1,r2)
+ mfspr r.0,dmiss // get faulting address
+ lwz r.2,PcPgDirRa(r.1) // get process' PDE page
+ mfsrin r.3,r.0 // get sreg of failing addr
+ andis. r.3,r.3,SREG_INVAL // sreg invalid?
+ bne dstgerr603 // branch if yes
+s15ok603:
+ rlwimi r.2,r.0,12,0x00000ffc // calculate effective PDE address
+ lwz r.2,0(r.2) // get effective PDE
+ andi. r.3,r.2,PTE_VALID // check for valid PDE
+ beq pgf603 // invalid --> can't just load TLB
+ rlwinm r.2,r.2,0,0xfffff000 // get real addr of page table page
+ rlwimi r.2,r.0,22,0x00000ffc // calculate effective PTE address
+ lwz r.2,0(r.2) // get effective PTE
+ andi. r.3,r.2,PTE_VALID // check for valid PTE
+ beq pgf603 // invalid --> can't just load TLB
+
+//
+// Blindly set the change bit in the PTE so the h/w won't feel obliged
+// to interrupt to let us know a page has been written to. Also, set
+// the Coherency required bit (WIMG(M)=1) because it should be set.
+//
+
+tlbld603:
+//
+// 603e/ev Errata 19 work around. The following instruction is modified
+// at init time to include PTE_GUARDED if this is a 603e/ev.
+//
+ ori r.2,r.2,PTE_CHANGE|PTE_COHERENCY
+ INC_CTR(CTR_DTLB_MISS_VALID_PTE,r1,r3)
+ mtspr rpa,r.2 // present translation information
+ mfsrr1 r.2
+ tlbld r.0 // set translation for fault addr
+ mtcrf 0x80,r.2 // restore CR0 at time of miss
+ rfi
+
+//
+//--------------------------------------------------------------------------
+//
+// Data Store Translation Miss or Change Bit == 0 Exception (603 only)
+//
+//--------------------------------------------------------------------------
+
+ .org 0x1200
+
+ DBGSTORE_I_R(r1,r2,0x1200)
+ b dtlbmiss
+
+
+//--------------------------------------------------------------------------
+//
+// Instruction Address Breakpoint (603 only)
+//
+//--------------------------------------------------------------------------
+
+ .org 0x1300
+
+ zlih(CODE_RUN_MODE)
+
+//--------------------------------------------------------------------------
+//
+// System Management Interrupt (603 only) -- Power Management
+//
+//--------------------------------------------------------------------------
+
+ .org 0x1400
+
+ zlih(CODE_SYSTEM_MGMT)
+
+//--------------------------------------------------------------------------
+//
+// Run Mode Zeroth-Level Interrupt Handler (601 specific)
+//
+//--------------------------------------------------------------------------
+
+ .org 0x2000
+
+ zlih(CODE_RUN_MODE)
+
+//--------------------------------------------------------------------------
+//
+// Reserved space from end of FLIHs to location 0x3000
+//
+//--------------------------------------------------------------------------
+
+ .org 0x3000
+
+//--------------------------------------------------------------------------
+//
+// Address constants needed in low memory (i.e., memory which can
+// be addressed absolutely without difficulty).
+//
+//--------------------------------------------------------------------------
+
+ .align 6 // ensure cache line alignment
+
+common_exception_entry.ptr:
+ .long common_exception_entry
+system_service_dispatch.ptr:
+ .long system_service_dispatch
+FpZero:
+ .double 0 // doubleword of 0's for clearing FP regs
+
+//--------------------------------------------------------------------------
+//--------------------------------------------------------------------------
+//
+// End fixed-storage area
+//
+// Beyond this point nothing need appear at machine-dictated addresses
+//
+//--------------------------------------------------------------------------
+//--------------------------------------------------------------------------
+
+//--------------------------------------------------------------------------
+//
+// Continuation of Data Storage and Instruction Storage interrupts
+//
+//--------------------------------------------------------------------------
+
+tpte: stw r.3,PcSiR3(r.1) // save gpr 3
+ stw r.5,PcSiR5(r.1) // save gpr 5
+ lwz r.2,PcPgDirRa(r.1) // get real addr of page dir page
+ mfsrin r.1,r.0 // get sreg of failing addr
+ andis. r.3,r.1,SREG_INVAL // sreg invalid?
+ bne stgerr // branch if yes
+s15ok:
+ rlwimi r.2,r.0,12,0x00000ffc // insert pde index in pd page addr
+ lwz r.2,0(r.2) // get page directory entry
+ andi. r.3,r.2,PTE_VALID // pde valid?
+ rlwinm r.5,r.2,0,0xfffff000 // get real addr of page table page
+ beq pfault // branch if not
+ rlwimi r.5,r.0,22,0x00000ffc // insert pte index in pt page addr
+ lwz r.2,0(r.5) // get page table entry
+ andi. r.3,r.2,PTE_VALID // pte valid?
+ beq pfault // branch if not
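+
+//
+// The hashed page table search that follows implements the PowerPC
+// primary hash (a sketch; at this point r.0 holds the failing EA with
+// the st/l bit, r.1 the segment register contents, r.2 the PTE):
+//
+//     hash  = VSID ^ ((EA >> 12) & 0xFFFF);          // vsid xor virtual page index
+//     group = HPT_base + ((hash & HPT_mask) << 6);   // 8 HPTEs per 64-byte group
+//     if a matching HPTE is already in the group, nothing to do;
+//     otherwise install (VSID/API word, PTE) in an invalid slot, or evict
+//     a victim chosen from the decrementer if the group is full.
+//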
+lpte:
+ mtsprg sprg.3,r.6 // save gpr 6
+ rlwinm r.3,r.0,20,0x0000ffff // align failing vpi with vsid
+ ori r.2,r.2,0x190 // force RC and M bits in PTE
+ xor r.3,r.1,r.3 // hash - exclusive or vsid with vpi
+ rlwimi r.1,r.0,3,0x7e000000 // insert api into reg with vsid
+ rlwinm r.1,r.1,7,0xffffffbf // align vsid,api as 1st word hpte
+ mfsdr1 r.6 // get storage description reg
+ oris r.1,r.1,0x8000 // set valid bit in 1st word hpte
+ rlwinm r.0,r.6,10,0x0007fc00 // align hpt mask with upper hash
+ ori r.0,r.0,0x03ff // append lower one bits to mask
+ and r.0,r.0,r.3 // take hash modulo hpt size
+ rlwinm r.0,r.0,6,0x01ffffc0 // align hash as hpt group offset
+#if !defined(NT_UP)
+ li r.3,HPT_LOCK // get hpt lock address
+getlk: lwarx r.5,0,r.3 // load and reserve lock word
+ cmpwi r.5,0 // is lock available?
+ mfsprg r.5,sprg.0 // get processor ctl region addr
+ bne- getlk_spin // loop if lock is unavailable
+ stwcx. r.5,0,r.3 // store conditional to lock word
+ bne- getlk_spin // loop if lost reserve
+ isync // context synchronize
+#endif // NT_UP
+ rlwinm r.3,r.6,0,0xffff0000 // get real addr of hash page table
+ or r.3,r.0,r.3 // or with offset to get group addr
+ INC_GRP_CTR_R(GRP_CTR_DSI_VALID_PTE,r3)
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+ ori r.3,r.3,0x38 // point to last entry in group
+ li r.6,0 // set no invalid hpte found
+#endif
+ b thpte // goto test hash page table entry
+
+#if !defined(NT_UP)
+getlk_spin:
+ lwz r.5,0(r.3)
+ cmpwi r.5,0
+ beq+ getlk
+ b getlk_spin
+#endif
+
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+sirem:
+ ori r.6,r.3,0 // remember invalid hpte address
+phpte: andi. r.0,r.3,0x003f // tested all hptes in prim group?
+ subi r.3,r.3,8 // decrement to previous hpte
+ beq sinom // branch if yes
+thpte: lwz r.0,4(r.3) // get 1st(be) word of hpte
+ andis. r.5,r.0,0x8000 // hpte valid?
+ beq sirem // jump if no to remember
+ cmplw r.1,r.0 // does hpte match search arg?
+ bne phpte // loop if no to previous hpte
+#if 0
+ lwz r2,0x3900(0)
+ addi r2,r2,1
+ stw r2,0x3900(0)
+#endif
+ INC_GRP_CTR_R(GRP_CTR_DSI_FOUND,r3)
+ b skiphpte // hpte already present -- nothing to do
+#else
+thpte: lwz r.0,4(r.3) // get 1st(be) word of hpte
+ andis. r.5,r.0,0x8000 // hpte valid?
+ beq siinv // jump if no
+ cmplw r.1,r.0 // does hpte match search arg?
+ bne sisto // jump if no
+ INC_GRP_CTR_R(GRP_CTR_DSI_FOUND,r3)
+ b skiphpte // hpte already present -- nothing to do
+sisto:
+ clrlwi r.0,r.0,1 // turn off valid bit
+ stw r.0,4(r.3) // invalidate 1st(be) wd victim hpte
+ sync // ensure 1st word stored
+siinv:
+ stw r.2,0(r.3) // store pte as 2nd(be) wd hpte
+ sync // ensure 2nd word stored
+ stw r.1,4(r.3) // store vsid,api as 1st(be) wd hpte
+#endif
+
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+sinom:
+ cmplwi r.6,0 // was an invalid hpte found?
+ beq primov // branch if not
+shpte: stw r.2,0(r.6) // store pte as 2nd(be) wd hpte
+ sync // ensure 2nd word stored
+ stw r.1,4(r.6) // store vsid,api as 1st(be) wd hpte
+#endif
+skiphpte:
+
+#if !defined(NT_UP)
+ li r.0,0 // get a zero value
+ sync // ensure all previous stores done
+ stw r.0,HPT_LOCK(0) // store zero in hpt lock word
+#endif // NT_UP
+ mfsprg r.6,sprg.3 // reload saved gpr 6
+ mfsprg r.1,sprg.0 // get addr of processor ctl region
+ mtcrf 0xff,r.4 // reload condition reg
+ lwz r.5,PcSiR5(r.1) // reload saved gpr 5
+ lwz r.4,PcSiR4(r.1) // reload saved gpr 4
+ lwz r.3,PcSiR3(r.1) // reload saved gpr 3
+ lwz r.2,PcSiR2(r.1) // reload saved gpr 2
+ lwz r.0,PcSiR0(r.1) // reload saved gpr 0
+ mfsprg r.1,sprg.2 // reload saved gpr 1
+ rfi // return from interrupt
+
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+primov: mfdec r.5 // get decrementer
+ addi r.6,r.3,8 // recompute primary hpt group addr
+ INC_GRP_CTR_R(GRP_CTR_DSI_FULL,r6)
+ rlwimi r.6,r.5,28,0x00000038 // choose 1 of 8 hptes as victim
+#if !defined(PRESERVE_HPTE_CONTENTS)
+ li r.0,0
+#else
+ lwz r.0,4(r.6) // get 1st(be) word of victim hpte
+ clrlwi r.0,r.0,1 // turn off valid bit
+#endif
+ stw r.0,4(r.6) // invalidate 1st(be) wd victim hpte
+ sync // ensure 1st word stored
+ b shpte
+#endif
+
+dsioth: rlwinm r.2,r.2,16,0x0000ffff // rotate dsisr bits into low half
+ cmplwi r.2,0x0a00 // dsi from protect against store?
+ beq+ sfault // branch if yes
+ andi. r.2,r.2,0x40 // check for data store bkp
+ bne dsbkp // branch if data store bkp
+ b isioth // goto join other error processing
+stgerr:
+ clrrwi r.3,r.0,PAGE_SHIFT // get page address of fault
+ mfsprg r.2,sprg.0 // get phys addr of processor ctl region
+ cmpwi r.3,0xffffd000 // is fault in PCR page?
+#if !COLLECT_PAGING_DATA
+ ori r.2, r.2, 1 // user readonly
+ beq lpte // branch if yes
+#else
+ bne stgerr_not_pcr
+ INC_CTR(CTR_PCR,r2,r3)
+ ori r.2, r.2, 1 // user readonly
+ b lpte
+stgerr_not_pcr:
+#endif
+ clrrwi r.2, r.2, 1 // get PCR address back
+ lwz r.2, PcPcrPage2(r.2) // get phys addr of PCR2
+ cmpwi r.3,0xffffe000 // is fault in PCR2 page?
+#if !COLLECT_PAGING_DATA
+ ori r.2, r.2, 1 // user readonly
+ beq lpte // branch if yes
+#else
+ bne stgerr_not_pcr2
+ mfsprg r.2,sprg.0 // get phys addr of processor ctl region
+ INC_CTR(CTR_PCR2,r2,r3)
+ lwz r.2, PcPcrPage2(r.2) // get phys addr of PCR2
+ ori r.2, r.2, 1 // user readonly
+ b lpte
+stgerr_not_pcr2:
+#endif
+ mfsprg r.2,sprg.0 // get phys addr of PCR
+ rlwinm r.3, r.3, 4, 0xf // Check sreg #
+ lwz r.2, PcPgDirRa(r.2) // Page Directory addr
+ cmpwi r.3, 0xf // sreg 15
+ beq s15ok
+
+ mfsprg r.1,sprg.0 // get addr of processor ctl region
+ lwz r.3,PcSiR3(r.1) // reload saved gpr 3
+ lwz r.5,PcSiR5(r.1) // reload saved gpr 5
+isioth: stw r.0,PcBadVaddr(r.1) // save failing addr and st/l bit
+ mtcrf 0xff,r.4 // reload condition reg
+ INC_CTR (CTR_STORAGE_ERROR,r1,r4)
+ lwz r.4,PcSiR4(r.1) // reload saved gpr 4
+ lwz r.2,PcSiR2(r.1) // reload saved gpr 2
+ lwz r.0,PcSiR0(r.1) // reload saved gpr 0
+ mfsprg r.1,sprg.2 // reload saved gpr 1
+ short_zlih(CODE_STORAGE_ERROR)
+
+pfault: mfsprg r.1,sprg.0 // get addr of processor ctl region
+ lwz r.3,PcSiR3(r.1) // reload saved gpr 3
+ lwz r.5,PcSiR5(r.1) // reload saved gpr 5
+sfault: stw r.0,PcBadVaddr(r.1) // save failing addr and st/l bit
+ mtcrf 0xff,r.4 // reload condition reg
+ INC_CTR (CTR_PAGE_FAULT,r1,r4)
+ lwz r.4,PcSiR4(r.1) // reload saved gpr 4
+ lwz r.2,PcSiR2(r.1) // reload saved gpr 2
+ lwz r.0,PcSiR0(r.1) // reload saved gpr 0
+ mfsprg r.1,sprg.2 // reload saved gpr 1
+ short_zlih(CODE_PAGE_FAULT)
+
+dsbkp: stw r.0,PcBadVaddr(r.1) // save failing addr and st/l bit
+ mtcrf 0xff,r.4 // reload condition reg
+ lwz r.4,PcSiR4(r.1) // reload saved gpr 4
+ lwz r.2,PcSiR2(r.1) // reload saved gpr 2
+ lwz r.0,PcSiR0(r.1) // reload saved gpr 0
+ mfsprg r.1,sprg.2 // reload saved gpr 1
+ short_zlih(CODE_DATA_BREAKPOINT)
+
+//--------------------------------------------------------------------------
+//
+// Continuation of Translation Miss interrupts (603)
+//
+//--------------------------------------------------------------------------
+
+pgf603:
+ INC_CTR (CTR_PAGE_FAULT,r1,r3)
+ mfsrr1 r.2
+ rlwimi r.0,r.2,16,0x00000001 // stuff in S/L bit
+ stw r.0,PcBadVaddr(r.1) // save fault address and st/l bit
+ mtcrf 0x80,r.2 // restore CR0
+ mfmsr r.2 // turn off use of temporary regs
+ rlwinm r.2,r.2,0x0,0xfffdffff // clear bit 14, MSR[TGPR]
+ mtmsr r.2 // now have access to "real" GPRs
+ isync
+ short_zlih(CODE_PAGE_FAULT)
+
+dstgerr603:
+ clrrwi r.3,r.0,PAGE_SHIFT // get page address of fault
+ ori r.2,r.1,0 // copy PCR physical address
+ cmpwi r.3,0xffffd000 // is fault in PCR page?
+ ori r.2, r.2, 1 // user readonly
+ beq tlbld603 // branch if yes
+ lwz r.2, PcPcrPage2(r.1) // get phys addr of PCR2
+ cmpwi r.3,0xffffe000 // is fault in PCR2 page?
+ ori r.2, r.2, 1 // user readonly
+ beq tlbld603 // branch if yes
+ lwz r.2, PcPgDirRa(r.1) // Page Directory addr
+ rlwinm r.3, r.3, 4, 0xf // Check sreg #
+ cmpwi r.3, 0xf // sreg 15
+ beq s15ok603
+
+stgerr603:
+ INC_CTR (CTR_STORAGE_ERROR,r1,r3)
+ mfsrr1 r.2
+ rlwimi r.0,r.2,16,0x00000001 // stuff S/L bit in fault address
+ stw r.0,PcBadVaddr(r.1) // save fault address and st/l bit
+ mtcrf 0x80,r.2 // restore CR0
+ mfmsr r.2 // turn off use of temporary regs
+ rlwinm r.2,r.2,0x0,0xfffdffff // clear bit 14, MSR[TGPR]
+ mtmsr r.2 // now have access to "real" GPRs
+ isync
+ short_zlih(CODE_STORAGE_ERROR)
+
+
+//--------------------------------------------------------------------------
+//
+// Short Zero Level Interrupt Continue (short_zlih_continue)
+//
+// Branched-to by the short_zlih() macro, with:
+// MSR: External interrupts disabled
+// Instruction Relocate OFF
+// Data Relocate OFF
+// SRR0: Next instruction address at time of interrupt
+// SRR1: MSR at time of interrupt
+// SPRG3: Saved r.2
+// r.2: Code number indicating type of interrupt
+//
+// Exits to common_exception_entry, with
+// MSR: External interrupts disabled
+// Instruction Relocate ON
+// Data Relocate ON
+// GP registers:
+// r.2: Constant identifying the exception type
+// r.3: Saved SRR0 (interrupt address)
+// r.4: Saved SRR1 (MSR value)
+// r.5: -available-
+// r.11: -available-
+// In the PCR:
+// PcGprSave[0]: Saved r.2
+// PcGprSave[1]: Saved r.3
+// PcGprSave[2]: Saved r.4
+// PcGprSave[3]: Saved r.5
+// PcGprSave[5]: Saved r.11
+//
+// Nothing is left in the SPRG's
+//
+//--------------------------------------------------------------------------
+
+short_zlih_continue:
+
+ mtsprg sprg.2, r.5 // stash r.5 temporarily
+ mfsprg r.5, sprg.0 // r.5 -> KiPcr (real address)
+ stw r.4, PCR_SAVE4 (r.5) // save r.4 in PCR
+ lwz r.4, common_exception_entry.ptr - real0 (0) // load virt addr of common code **TEMP**
+ stw r.3, PCR_SAVE3 (r.5) // save r.3 in PCR
+ mfsrr0 r.3 // save SRR0 (interrupt addr) in r.3
+ mtsrr0 r.4 // set branch address into SRR0
+ mfsrr1 r.4 // save SRR1 (MSR) in r.4
+ stw r.11, PCR_SAVE11 (r.5) // save r.11 in PCR
+ lis r.11, FLIH_MSR >> 16 // load new value for
+ ori r.11, r.11, FLIH_MSR & 0xFFFF // MSR
+ rlwimi r.11, r.4, 0, MSR_PM, MSR_PM // propagate MSR[PM]
+ mtsrr1 r.11 // set new MSR value into SRR1
+ mfsprg r.11, sprg.2 // fetch stashed r.5 value
+ stw r.11, PCR_SAVE5 (r.5) // save r.5 in PCR
+ mfsprg r.11, sprg.3 // fetch stashed r.2 value
+ stw r.11, PCR_SAVE2 (r.5) // save r.2 in PCR
+ rfi // turn on address translation,
+ // branch to common code
+//--------------------------------------------------------------------------
+//
+// End of low memory code. Code from the start of this module to here
+// must all be relocated together if relocation is required. Switch
+// code section to .text as remaining code in this module must exist
+// for the life of the system.
+//
+//--------------------------------------------------------------------------
+end_of_code_to_move:
+
+ .org 0x3800
+
+ .org 0x4000 // gen warning if above overflows.
+
+//
+// Code from here thru Kseg0CodeEnd is copied to KSEG0 at system
+// initialization. This code is declared in the INIT section so the
+// space can be used for other purposes after system initialization.
+//
+
+Kseg0CodeStart:
+
+ .align 6
+
+StartProcessor:
+
+ ori r.31, r.3, 0 // save address of LPB
+
+StartProcessor.LoadKiStartProcessorAddress:
+
+ lis r.30, 0 // load address of KiStartProcessor
+ ori r.30, r.30, 0 // (actual address filled in at
+ // init time by processor 0)
+ mfmsr r.11 // get current state
+ rlwinm r.11, r.11, 0, ~INT_ENA // clear interrupt enable
+ mtmsr r.11 // disable interrupts
+ rlwinm r.11, r.11, 0, ~(MASK_SPR(MSR_IR,1)|MASK_SPR(MSR_DR,1))
+ mtsrr1 r.11 // desired initial state
+ mtsrr0 r.30 // desired return address
+ rfi // switch to real mode and jump
+ // to KiStartProcessor
+
+ .align 6
+
+KiPriorityExitRfi:
+ mtsrr0 r.4 // set target address
+ mtsrr1 r.5 // set target state
+ lwz r.4, PCR_SAVE4 (r.6) // reload r.4, 5 and 6 from PCR
+ lwz r.5, PCR_SAVE5 (r.6)
+ lwz r.6, PCR_SAVE6 (r.6)
+ rfi // resume thread
+
+ .align 6
+
+KiServiceExitKernelRfi:
+ mtsrr0 r.11 // move caller's IAR to SRR0
+ mtsrr1 r.10 // move caller's MSR to SRR1
+ rfi // return from interrupt
+
+ .align 6
+
+KiServiceExitUserRfi:
+ mtsrr0 r.11 // move caller's IAR to SRR0
+ mtsrr1 r.10 // move caller's MSR to SRR1
+ li r.9, 0 // clear the last few
+ li r.12, 0
+ li r.11, 0
+ li r.10, 0 // volatile GP regs
+ rfi // return from interrupt
+
+ .align 6
+
+RtlpRestoreContextRfi:
+ mtsrr0 r.7 // set target address
+ mtsrr1 r.3 // set target state
+ lwz r.3, PCR_SAVE4 (r.8) // reload r.3, 7 and 8 from PCR
+ lwz r.7, PCR_SAVE5 (r.8)
+ lwz r.8, PCR_SAVE6 (r.8)
+ rfi // resume thread
+
+//++
+//
+// VOID
+// KiFlushSingleTb (
+// IN BOOLEAN Invalid,
+// IN PVOID Virtual
+// )
+//
+// Routine Description:
+//
+// This function flushes a single entry from the translation buffer.
+//
+// Arguments:
+//
+// Invalid (r3) - Supplies a boolean variable that determines the reason
+// that the TB entry is being flushed.
+//
+// Virtual (r4) - Supplies the virtual address of the entry that is to
+// be flushed from the translation buffer.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .align 6
+
+FlushSingleTb:
+
+ INC_CTR2(CTR_FLUSH_SINGLE,r5,r6)
+flst0: b flnhpt // default to no hpt
+
+ mfsrin r.5,r.4 // get sreg of virtual addr arg
+ rlwinm r.6,r.4,20,0x0000ffff // align arg vpi with vsid
+ xor r.6,r.5,r.6 // hash - exclusive or vsid with vpi
+ rlwimi r.5,r.4,3,0x7e000000 // insert api into reg with vsid
+ rlwinm r.5,r.5,7,0xffffffbf // align vsid,api as 1st word hpte
+ oris r.5,r.5,0x8000 // set valid bit in 1st word hpte
+ mfsdr1 r.7 // get storage description reg
+ rlwinm r.8,r.7,10,0x0007fc00 // align hpt mask with upper hash
+ ori r.8,r.8,0x03ff // append lower one bits to mask
+ and r.6,r.8,r.6 // take hash modulo hpt size
+ rlwinm r.6,r.6,6,0x01ffffc0 // align hash as hpt group offset
+ rlwinm r.7,r.7,0,0xffff0000 // get real addr of hash page table
+ oris r.7,r.7,K_BASE // or with kernel virtual address
+ or r.6,r.7,r.6 // or with offset to get group addr
+ INC_GRP_CTR(GRP_CTR_FLUSH_SINGLE,r6,r9,r10)
+
+#if !defined(NT_UP)
+
+ li r.9,HPT_LOCK // get hpt lock real address
+ oris r.9,r.9,K_BASE // or with kernel virtual address
+
+ DISABLE_INTERRUPTS(r.10,r.11) // disable ints while lock held
+
+flglk: lwarx r.7,0,r.9 // load and reserve lock word
+ cmpwi r.7,0 // is lock available?
+ mfsprg r.7,sprg.0 // get processor ctl region addr
+ bne- flglkw // loop if lock is unavailable
+ stwcx. r.7,0,r.9 // store conditional to lock word
+ bne- flglkw // loop if lost reserve
+ isync // context synchronize
+
+#endif // NT_UP
+
+ b fltst // goto test hash page table entry
+
+#if !defined(NT_UP)
+flglkw:
+ ENABLE_INTERRUPTS(r.10)
+flglkws:
+ lwz r.7,0(r.9)
+ cmpwi r.7,0
+ bne- flglkws
+ mtmsr r.11
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ b flglk
+#endif
+
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+flnxt: addi r.6,r.6,8 // increment to next hpte
+ andi. r.7,r.6,0x003f // tested all hptes in prim group?
+ beq flinv // branch if yes
+fltst: lwz r.7,4(r.6) // get 1st(be) word of hpte
+ cmplw r.5,r.7 // does hpte match search arg?
+ bne flnxt // loop if no to next hpte
+#else
+fltst: lwz r.7,4(r.6) // get 1st(be) word of hpte
+ cmplw r.5,r.7 // does hpte match search arg?
+ bne flinv // loop if no to next hpte
+#endif
+ INC_GRP_CTR(GRP_CTR_FLUSH_SINGLE_FOUND,r6,r5,r12)
+#if defined(PRESERVE_HPTE_CONTENTS)
+ clrlwi r.8,r.7,1 // turn off valid bit
+#endif
+ stw r.8,4(r.6) // invalidate 1st(be) wd match hpte
+ sync // ensure 1st word stored
+flinv: tlbie r.4 // invalidate tlb entry
+ sync // ensure invalidate done
+
+#if !defined(NT_UP)
+
+flst1: tlbsync // ensure broadcasts done
+flst2: sync // ensure tlbsync done
+
+ li r.7,0 // get a zero value
+ stw r.7,0(r.9) // store zero in hpt lock word
+
+ ENABLE_INTERRUPTS(r.10) // restore interrupt status
+
+#endif // NT_UP
+
+ blr // return
+
+flnhpt: tlbie r.4 // invalidate tlb entry
+ sync // ensure invalidate done
+
+ blr // return
+
+//++
+//
+// VOID
+// KeFillEntryTb (
+// IN HARDWARE_PTE Pte[],
+// IN PVOID Virtual,
+// IN BOOLEAN Invalid
+// )
+//
+// Routine Description:
+//
+// This function fills a translation buffer entry. If the entry is already
+// in the translation buffer, then the entry is overwritten. Otherwise, a
+// random entry is overwritten.
+//
+// Arguments:
+//
+// Pte (r3) - Supplies a pointer to the page table entry that is to be
+// written into the TB.
+//
+// Virtual (r4) - Supplies the virtual address of the entry that is to
+// be filled in the translation buffer.
+//
+// Invalid (r5) - Supplies a boolean value that determines whether the
+// TB entry should be invalidated.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .align 6
+
+FillEntryTb:
+
+ INC_CTR2(CTR_FILL_ENTRY,r6,r7)
+fiet0: b finhpt // default to no hpt
+
+ lwz r.3,0(r.3) // get page table entry
+ mfsrin r.5,r.4 // get sreg of virtual addr arg
+ ori r.3,r.3,0x0190 // set CR and M
+
+#if DBG
+
+ andi. r.6,r.3,PTE_VALID // pte valid?
+ bne fiptev // branch if yes
+ twi 31,0,KERNEL_BREAKPOINT // break into kernel debugger
+fiptev:
+
+#endif
+
+ rlwinm r.6,r.4,20,0x0000ffff // align arg vpi with vsid
+ xor r.6,r.5,r.6 // hash - exclusive or vsid with vpi
+ rlwimi r.5,r.4,3,0x7e000000 // insert api into reg with vsid
+ rlwinm r.5,r.5,7,0xffffffbf // align vsid,api as 1st word hpte
+ oris r.5,r.5,0x8000 // set valid bit in 1st word hpte
+ mfsdr1 r.7 // get storage description reg
+ rlwinm r.8,r.7,10,0x0007fc00 // align hpt mask with upper hash
+ ori r.8,r.8,0x03ff // append lower one bits to mask
+ and r.6,r.8,r.6 // take hash modulo hpt size
+ rlwinm r.6,r.6,6,0x01ffffc0 // align hash as hpt group offset
+ rlwinm r.7,r.7,0,0xffff0000 // get real addr of hash page table
+ oris r.7,r.7,K_BASE // or with kernel virtual address
+ or r.6,r.7,r.6 // or with offset to get group addr
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+ ori r.6,r.6,0x38 // point to last entry in group
+ li r.8,0 // set no invalid hpte found
+#endif
+ INC_GRP_CTR(GRP_CTR_FILL_ENTRY,r6,r9,r10)
+
+ DISABLE_INTERRUPTS(r.10,r.11) // disable interrupts
+
+#if !defined(NT_UP)
+
+ li r.9,HPT_LOCK // get hpt lock real address
+ oris r.9,r.9,K_BASE // or with kernel virtual address
+figlk:
+ lwarx r.7,0,r.9 // load and reserve lock word
+ cmpwi r.7,0 // is lock available?
+ mfsprg r.7,sprg.0 // get processor ctl region addr
+ bne- figlkw // loop if lock is unavailable
+ stwcx. r.7,0,r.9 // store conditional to lock word
+ bne- figlkw // loop if lost reserve
+ isync // context synchronize
+
+#endif // NT_UP
+
+ b fitst // goto test hash page table entry
+
+#if !defined(NT_UP)
+figlkw:
+ ENABLE_INTERRUPTS(r.10)
+figlkws:
+ lwz r.7,0(r.9)
+ cmpwi r.7,0
+ bne- figlkws
+ mtmsr r.11
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ b figlk
+#endif
+
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+firem:
+ ori r.8,r.6,0 // remember invalid hpte address
+fiprv:
+ andi. r.7,r.6,0x003f // tested all hptes in prim group?
+ subi r.6,r.6,8 // decrement to previous hpte
+ beq finom // branch if yes
+fitst:
+ lwz r.7,4(r.6) // get 1st(be) word of hpte
+ andis. r.11,r.7,0x8000 // hpte valid?
+ beq firem // jump if no to remember
+ cmplw r.5,r.7 // does hpte match search arg?
+ bne fiprv // loop if no to previous hpte
+ INC_GRP_CTR(GRP_CTR_FILL_ENTRY_FOUND,r6,r5,r7)
+ stw r.3,0(r.6) // store pte 2nd(be) wd match hpte
+ sync // ensure 2nd word stored
+#else
+fitst:
+ lwz r.7,4(r.6) // get 1st(be) word of hpte
+ andis. r.11,r.7,0x8000 // hpte valid?
+ beq fiinv // jump if no
+ clrlwi r.7,r.7,1 // turn off valid bit
+ stw r.7,4(r.6) // invalidate hpte
+ sync // ensure update done
+fiinv:
+ stw r.3,0(r.6) // store pte as 2nd(be) wd hpte
+ sync // ensure 2nd word stored
+ stw r.5,4(r.6) // store vsid,api as 1st(be) wd hpte
+#endif
+fiexi:
+ tlbie r.4 // invalidate tlb entry
+ sync // ensure invalidate done
+
+#if !defined(NT_UP)
+
+fiet1: tlbsync // ensure broadcasts done
+fiet2: sync // ensure tlbsync done
+
+ li r.7,0 // get a zero value
+ stw r.7,0(r.9) // store zero in hpt lock word
+
+#endif // NT_UP
+
+ ENABLE_INTERRUPTS(r.10) // enable interrupts
+
+ blr // return
+
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+finom:
+ cmplwi r.8,0 // was an invalid hpte found?
+ beq fipov // branch if not
+fisto:
+ stw r.3,0(r.8) // store pte as 2nd(be) wd hpte
+ sync // ensure 2nd word stored
+ stw r.5,4(r.8) // store vsid,api as 1st(be) wd hpte
+ b fiexi
+
+fipov:
+ mfdec r.7 // get decrementer
+ addi r.8,r.6,8 // recompute primary hpt group addr
+ rlwimi r.8,r.7,28,0x00000038 // choose 1 of 8 hptes as victim
+ INC_GRP_CTR(GRP_CTR_FILL_ENTRY_FULL,r8,r7,r12)
+#if !defined(PRESERVE_HPTE_CONTENTS)
+ li r.6,0
+#else
+ lwz r.6,4(r.8) // get 1st(be) word of victim hpte
+ clrlwi r.6,r.6,1 // turn off valid bit
+#endif
+ stw r.6,4(r.8) // invalidate 1st(be) wd victim hpte
+ sync // ensure 1st word stored
+ b fisto // goto store new hpte
+#endif
+
+finhpt:
+ andi. r.6,r.5,1 // is Invalid == TRUE?
+ beqlr // return if Invalid == FALSE
+ tlbie r.4 // invalidate tlb entry
+ sync // ensure invalidate done
+
+ blr // return
+
+//++
+//
+// VOID
+// KeFlushCurrentTb (
+// )
+//
+// Routine Description:
+//
+// This function flushes the entire translation buffer.
+//
+// Arguments:
+//
+// None.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ .align 6
+
+FlushCurrentTb:
+
+ INC_CTR2(CTR_FLUSH_CURRENT,r6,r7)
+flct0: li r.6, 128 // default to MAX num congruence
+ // classes.
+
+#if !defined(NT_UP)
+
+flct1: b felk // default to no hpt.
+
+#else
+
+flct1: b feloop // default to no hpt.
+
+#endif
+
+fehpt: mfsdr1 r.5 // get storage description reg
+ addi r.0,r.5,1 // add one to hpt mask
+ rlwinm r.0,r.0,10,0x000ffc00 // align as number of hpt groups
+ rlwinm r.5,r.5,0,0xffff0000 // get real addr of hash page table
+ mtctr r.0 // put number groups in count reg
+ oris r.5,r.5,K_BASE // or with kernel virtual address
+
+#if DBG
+
+ lbz r.3,KiPcr+PcDcacheMode(r.0) // get dcache mode information
+
+#endif
+
+#if !defined(NT_UP)
+
+felk: li r.9,HPT_LOCK // get hpt lock real address
+ oris r.9,r.9,K_BASE // or with kernel virtual address
+
+ DISABLE_INTERRUPTS(r.10,r.11) // disable ints while lock held
+
+feglk: lwarx r.7,0,r.9 // load and reserve lock word
+ cmpwi r.7,0 // is lock available?
+ li r.7,-1 // lock value - hpt clear
+ bne feglkw // jif lock is unavailable (wait)
+feglks: stwcx. r.7,0,r.9 // store conditional to lock word
+ bne- feglkw // loop if lost reserve
+ isync // context synchronize
+flct2: b feloop // default to no hpt
+
+#endif // NT_UP
+
+//
+// Zero the Hashed Page Table
+//
+
+ li r.0, 0
+fenxg:
+#if !defined(HPT_AS_TLB_RELOAD_BUFFER)
+#if !defined(PRESERVE_HPTE_CONTENTS)
+ stw r.0,4(r.5) // invalidate 1st(be) wd 1st hpte
+ stw r.0,12(r.5) // invalidate 1st(be) wd 2nd hpte
+ stw r.0,20(r.5) // invalidate 1st(be) wd 3rd hpte
+ stw r.0,28(r.5) // invalidate 1st(be) wd 4th hpte
+ stw r.0,36(r.5) // invalidate 1st(be) wd 5th hpte
+ stw r.0,44(r.5) // invalidate 1st(be) wd 6th hpte
+ stw r.0,52(r.5) // invalidate 1st(be) wd 7th hpte
+ stw r.0,60(r.5) // invalidate 1st(be) wd 8th hpte
+#else
+ lwz r.0,4(r.5) // get 1st(be) wd 1st hpte
+ lwz r.7,12(r.5) // get 1st(be) wd 2nd hpte
+ lwz r.8,20(r.5) // get 1st(be) wd 3rd hpte
+ lwz r.11,28(r.5) // get 1st(be) wd 4th hpte
+ clrlwi r.0,r.0,1 // turn off valid bit
+ clrlwi r.7,r.7,1 // turn off valid bit
+ clrlwi r.8,r.8,1 // turn off valid bit
+ clrlwi r.11,r.11,1 // turn off valid bit
+ stw r.0,4(r.5) // invalidate 1st(be) wd 1st hpte
+ stw r.7,12(r.5) // invalidate 1st(be) wd 2nd hpte
+ stw r.8,20(r.5) // invalidate 1st(be) wd 3rd hpte
+ stw r.11,28(r.5) // invalidate 1st(be) wd 4th hpte
+ lwz r.0,36(r.5) // get 1st(be) wd 5th hpte
+ lwz r.7,44(r.5) // get 1st(be) wd 6th hpte
+ lwz r.8,52(r.5) // get 1st(be) wd 7th hpte
+ lwz r.11,60(r.5) // get 1st(be) wd 8th hpte
+ clrlwi r.0,r.0,1 // turn off valid bit
+ clrlwi r.7,r.7,1 // turn off valid bit
+ clrlwi r.8,r.8,1 // turn off valid bit
+ clrlwi r.11,r.11,1 // turn off valid bit
+ stw r.0,36(r.5) // invalidate 1st(be) wd 5th hpte
+ stw r.7,44(r.5) // invalidate 1st(be) wd 6th hpte
+ stw r.8,52(r.5) // invalidate 1st(be) wd 7th hpte
+ stw r.11,60(r.5) // invalidate 1st(be) wd 8th hpte
+#endif
+#else
+#if defined(PRESERVE_HPTE_CONTENTS)
+ lwz r.0,4(r.5) // get 1st(be) wd 1st hpte
+ clrlwi r.0,r.0,1 // turn off valid bit
+#endif
+ stw r.0,4(r.5) // invalidate 1st(be) wd 1st hpte
+#endif
+ addi r.5,r.5,64 // increment to next hpt group addr
+ bdnz fenxg // loop through all groups
+ sync // ensure all stores done
+
+//
+// Invalidate all TLB entries
+//
+
+feloop: mtctr r.6 // put number classes in count reg
+fenxt: tlbie r.6 // invalidate tlb congruence class
+ addi r.6,r.6,4096 // increment to next class address
+ bdnz fenxt // loop through all classes
+ sync // ensure all invalidates done
+
+#if !defined(NT_UP)
+
+flct3: tlbsync // ensure broadcasts done
+flct4: sync // ensure tlbsync done
+
+ li r.7,0 // get a zero value
+ stw r.7,0(r.9) // store zero in hpt lock word
+
+ ENABLE_INTERRUPTS(r.10) // restore previous interrupt state
+
+ blr // return
+
+//
+// We come here if the hpt lock is held by another processor
+// when we first attempt to take it. If the lock value is < 0
+// then the lock is held by a processor that is clearing the entire
+// hpt (the other processor is in KeFlushCurrentTb). If this happens
+// then all we need do is wait for the lock to become available (we
+// don't actually need to take it) and return.
+//
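+// That is (a sketch):
+//
+//     while (*hpt_lock != 0)
+//         spin;                            // holder still owns the lock
+//     if (the value we saw was < 0)        // holder was KeFlushCurrentTb
+//         { sync; return; }                // whole-HPT clear already done for us
+//     otherwise re-disable interrupts and retry the lwarx/stwcx. acquisition;
+//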
+
+feglkw:
+ crmove 4,0 // move -ve bit to cr.1
+ ENABLE_INTERRUPTS(r.10)
+fegwt: lwz r.7,0(r.9) // load lock word
+ cmpwi r.7,0 // is lock available?
+ bne- fegwt // if not available, try again
+ li r.7,-1 // lock value - hpt clear
+ bt 4,feglk_done // jif already cleared
+ mtmsr r.11
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ b feglk
+
+feglk_done:
+ sync // wait till tlb catches up
+
+#endif // NT_UP
+
+ blr // return
+
+Kseg0CodeEnd:
+
+//--------------------------------------------------------------------------
+//
+// KiSystemStartup()
+//
+// This is the system entry point for processor 0. It will copy the
+// exception vectors to low real memory, re-initialize memory mapping
+// for KSEG0 (via BAT0), call the routine to initialize the kernel and
+// fall thru into the idle loop.
+//
+// Arguments:
+//
+// r3 - supplies address of loader parameter block.
+//
+// Return values:
+//
+// None. There is no return from this function.
+//
+// Remarks:
+//
+// ntoskrnl is assumed to be in memory covered by KSEG0.
+//
+//--------------------------------------------------------------------------
+ FN_TABLE(KiSystemBegin,0,0)
+
+ DUMMY_ENTRY_S(KiSystemBegin,INIT)
+
+ stwu r.sp, -(STK_MIN_FRAME+8) (r.sp) // This code is never
+ mflr r.0 // executed, it is here
+ stw r.0, -(STK_MIN_FRAME+8) (r.sp) // so the unwinder can
+ // have a good time.
+
+ PROLOGUE_END(KiSystemBegin)
+
+ ALTERNATE_ENTRY_S(KiSystemStartup,INIT)
+
+ ori r.31, r.3, 0 // save address of LPB
+
+//
+// Disable translation prior to initializing the BAT registers.
+//
+
+ bl ki.real // disable address translation
+c0start: // this address in r.30
+
+
+//
+// Move the kernel trap handlers. They need to start at physical
+// memory address zero.
+//
+// The trap handlers do NOT use relative branches outside of the
+// area bounded by the labels real0 thru end_of_code_to_move.
+//
+
+ li r.8, -4 // target address - 4
+ li r.9, (end_of_code_to_move-real0+3)/4 // num words to move
+ mtctr r.9
+ subi r.7, r.30, c0start-real0+4 // source address - 4
+
+//
+// Get physical address of ntoskrnl's TOC
+//
+
+ lwz r.toc, rm_toc_pointer-c0start(r.30) // get v address of toc
+ rlwinm r.toc, r.toc, 0, 0x7fffffff // cvt to phys addr
+
+//
+// Set the bit mask identifying the available breakpoint registers.
+// 601 = 1 data, 1 instr., 603 = 1 instr., and 604 = 1 data, 1 instr.
+// All others = 0 breakpoint registers.
+//
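+//
+// In effect (a sketch of the mask built below):
+//
+//     mask = 0x01000000;                   // one instruction address breakpoint
+//     if (601, 604, 604+ or 613)
+//         mask |= 0x10000000;              // plus one data address breakpoint
+//     if (processor type not recognized)
+//         mask = 0;                        // no breakpoint registers
+//     KiBreakPoints = mask;
+//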
+
+ mfpvr r.17 // get processor type & rev
+ rlwinm r.17, r.17, 16, 0xffff // isolate processor type
+ lis r.4,0x0100 // 0 = data addr bkpt register
+ // 1 = instruction addr bkpt register
+ cmpwi r.17, PV603 // 603?
+ cmpwi cr.1, r.17, PV603p // 603+?
+ cmpwi cr.7, r.17, PV603pp // 603++?
+ li r.6, 4 // Offset for 603 branch
+ beq setbkp // jif we are on a 603
+ beq cr.1,setbkp // jif we are on a 603+
+ beq cr.7,setbkp // jif we are on a 603++
+ oris r.4, r.4, 0x1000 // Add data bkpt
+ li r.6, 0 // Assume 601 branches (default)
+ cmpwi r.17, PV601
+ beq setbkp // jif 601
+ li r.6, 8 // Offset for 604 branch
+ cmpwi r.17, PV604
+ cmpwi cr.1, r.17, PV604p
+ beq setbkp // jif 604
+ beq cr.1,setbkp // jif 604+
+ cmpwi r.17, PV613
+ beq setbkp // jif 613
+ li r.4, 0 // Not a known chip. No DRs
+ li r.6, 12 // Offset for not supported branch
+
+setbkp: lwz r.5, [toc]KiBreakPoints(r.toc)
+ rlwinm r.5, r.5, 0, 0x7fffffff // get phys address of KiBreakPoints
+ stw r.4, 0(r.5)
+ cmpwi r.6, 0
+ lwz r.28, addr_common_exception_entry-c0start(r.30)
+ rlwinm r.28, r.28, 0, 0x7fffffff // base addr = common_exception_entry
+ beq kicopytraps // 601 branch is the default
+
+//
+// Processor is not a 601, change each of the Debug Register branch
+// tables for the appropriate processor.
+//
+
+ la r.5, BranchDr1-common_exception_entry(r.28) // 1st
+ lhzx r.4, r.5, r.6 // Load appropriate branch instruction
+ add r.4, r.4, r.6 // Modify branch relative to table
+ sth r.4, 0(r.5) // Replace 601 branch
+
+ la r.5, BranchDr2-common_exception_entry(r.28) // 2nd
+ lhzx r.4, r.5, r.6 // Load appropriate branch instruction
+ add r.4, r.4, r.6 // Modify branch relative to table
+ sth r.4, 0(r.5) // Replace 601 branch
+
+ la r.5, BranchDr3-common_exception_entry(r.28) // 3rd
+ lhzx r.4, r.5, r.6 // Load appropriate branch instruction
+ add r.4, r.4, r.6 // Modify branch relative to table
+ sth r.4, 0(r.5) // Replace 601 branch
+
+ la r.5, BranchDr4-common_exception_entry(r.28) // 4th
+ lhzx r.4, r.5, r.6 // Load appropriate branch instruction
+ add r.4, r.4, r.6 // Modify branch relative to table
+ sth r.4, 0(r.5) // Replace 601 branch
+
+kicopytraps:
+
+ lwzu r.10, 4(r.7) // copy trap handler to low
+ stwu r.10, 4(r.8) // memory (word by word)
+ bdnz kicopytraps
+
+//
+// 603e/ev Errata 19 work around.
+//
+// r.17 has the (shifted) PVR in it. If this is a 603e/603ev, turn on guarded
+// storage for data TLB entries by modifying the TLB miss handler.
+//
+
+ cmpwi r.17, PV603p // 603e?
+ beq do_errata_19_tlb // modify instruction if so
+ cmpwi r.17, PV603pp // 603ev?
+ bne skip_errata_19_tlb // skip modification if not
+do_errata_19_tlb:
+ lwz r.10, tlbld603-real0(0) // load TLB handler instruction
+ ori r.10, r.10, PTE_GUARDED // turn on guarded storage
+ stw r.10, tlbld603-real0(0) // store TLB handler instruction
+skip_errata_19_tlb:
+
+//
+// Force sdr1 (address of HPT) to 0 until HPT allocated. This is necessary
+// because some HALs use sdr1 to get an address in KSEG0 to use for cache
+// flushing. Setting it to 0 ensures that the calculated address will be
+// in KSEG0.
+//
+
+ li r.3, 0
+ mtsdr1 r.3
+
+//
+// Move the code that needs to be in KSEG0.
+//
+
+ rlwinm r.3, r.31, 0, 0x7fffffff // get real address of LPB
+ lwz r.8, LpbKernelKseg0PagesDescriptor(r.3)
+ clrlwi r.8, r.8, 1
+ lwz r.8, MadBasePage(r.8)
+ slwi r.8, r.8, PAGE_SHIFT
+ ori r.6, r.8, 0 // save KSEG0 code address
+ subi r.8, r.8, 4
+ li r.9, (Kseg0CodeEnd-Kseg0CodeStart+3) >> 2 // num words to move
+ mtctr r.9
+ subi r.7, r.30, c0start-Kseg0CodeStart+4 // source address - 4
+kicopykseg0:
+ lwzu r.10, 4(r.7) // copy trap handler to low
+ stwu r.10, 4(r.8) // memory (word by word)
+ bdnz kicopykseg0
+
+//
+// Fix the branches into KSEG0 code. These are built as relative
+// branches with an offset from Kseg0CodeStart, so we add the distance
+// from each branch instruction to the base of the KSEG0 code to the
+// offset field in the instruction, as sketched below.
+//
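+//
+// For each patched branch site this amounts to (a sketch; "site" is the
+// address of the branch instruction and "kseg0Base" the address the KSEG0
+// code was copied to, held in r.6):
+//
+//     ULONG insn = *site;
+//     ULONG off  = insn & 0x03FFFFFC;              // LI field of the I-form branch
+//     off        = off + kseg0Base - (ULONG)site;
+//     insn       = (insn & ~0x03FFFFFC) | (off & 0x03FFFFFC);
+//     *site      = insn;
+//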
+
+ la r.7, KiPriorityExitRfiJump1-common_exception_entry(r.28)
+ lwz r.4, 0(r.7)
+ rlwinm r.5, r.4, 0, 0x03fffffc
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
+
+ la r.7, KiPriorityExitRfiJump2-common_exception_entry(r.28)
+ lwz r.4, 0(r.7)
+ rlwinm r.5, r.4, 0, 0x03fffffc
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
+
+ la r.7, KiServiceExitKernelRfiJump-common_exception_entry(r.28)
+ lwz r.4, 0(r.7)
+ rlwinm r.5, r.4, 0, 0x03fffffc
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
+
+ la r.7, KiServiceExitUserRfiJump-common_exception_entry(r.28)
+ lwz r.4, 0(r.7)
+ rlwinm r.5, r.4, 0, 0x03fffffc
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
+
+ lwz r.7, addr_RtlpRestoreContextRfiJump-c0start(r.30)
+ clrlwi r.7, r.7, 1
+ lwz r.4, 0(r.7)
+ li r.5, RtlpRestoreContextRfi-Kseg0CodeStart
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
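+
+// As an aside (not part of the build), the fixup performed above can be
+// expressed in C roughly as follows. This is only an illustrative sketch;
+// the name FixKseg0Branch and the use of <stdint.h> are assumptions, not
+// anything defined by this module.
+//
+//      #include <stdint.h>
+//
+//      // Retarget a PPC I-form branch whose 24-bit LI field (mask
+//      // 0x03fffffc) was assembled as an offset from Kseg0CodeStart
+//      // rather than as a PC-relative displacement.
+//      void FixKseg0Branch(uint32_t *insn_va, uint32_t kseg0_base)
+//      {
+//          uint32_t insn = *insn_va;
+//          uint32_t disp = insn & 0x03fffffc;            // offset from Kseg0CodeStart
+//          disp = disp + kseg0_base - (uint32_t)insn_va; // make it PC-relative
+//          *insn_va = (insn & ~0x03fffffc) | (disp & 0x03fffffc);
+//      }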
+
+//
+// Change the function descriptor for KiStartProcessor so that it points
+// to Kseg0Code.StartProcessor. Fix up the instructions in
+// Kseg0Code.StartProcessor that load the address of the real
+// KiStartProcessor.
+//
+// N.B. The only reference to KiStartProcessor is in KeStartAllProcessors,
+// which references through the function descriptor. There are no
+// direct calls to KiStartProcessor.
+//
+
+ lwz r.5, [toc]KiStartProcessor(r.toc)
+ clrlwi r.5, r.5, 1
+ addi r.4, r.6, StartProcessor-Kseg0CodeStart
+ oris r.4, r.4, K_BASE
+ stw r.4, 0(r.5)
+
+ la r.7, cnstart-c0start(r.30)
+ addi r.4, r.6, StartProcessor.LoadKiStartProcessorAddress-Kseg0CodeStart
+ lwz r.5, 0(r.4)
+ rlwimi r.5, r.7, 16, 0xffff
+ stw r.5, 0(r.4)
+ lwz r.5, 4(r.4)
+ rlwimi r.5, r.7, 0, 0xffff
+ stw r.5, 4(r.4)
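+
+// Purely as an illustration: a call through KiStartProcessor goes via a
+// function descriptor whose first word is the entry address, so rewriting
+// that word redirects the call, and the lis/ori pair at
+// StartProcessor.LoadKiStartProcessorAddress is patched by splitting the
+// target address into 16-bit halves. A rough C sketch (the names FuncDesc
+// and PatchLisOri are hypothetical):
+//
+//      struct FuncDesc { uint32_t entry; uint32_t toc; };
+//
+//      // Patch a "lis rN,hi / ori rN,rN,lo" pair so that it loads target.
+//      // The 16-bit immediate occupies the low halfword of each instruction.
+//      void PatchLisOri(uint32_t pair[2], uint32_t target)
+//      {
+//          pair[0] = (pair[0] & 0xffff0000) | (target >> 16);    // lis
+//          pair[1] = (pair[1] & 0xffff0000) | (target & 0xffff); // ori
+//      }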
+
+//
+// Fix up the KiFlushSingleTb, KeFillEntryTb, and KeFlushCurrentTb functions.
+//
+// The first thing in KeFlushCurrentTb is a load immediate of the
+// number of TLB congruence classes for this processor. It is set
+// to a default of 128; we adjust it to the correct value now.
+//
+
+ lhz r.4, LpbNumberCongruenceClasses(r.3)
+ lwz r.7, flct0-Kseg0CodeStart(r.6) // load current li instruction
+ rlwimi r.7, r.4, 0, 0x0000ffff // merge number congruence classes
+ // into li instruction
+ stw r.7, flct0-Kseg0CodeStart(r.6) // replace instruction
+
+ lis r.7, 0x6000 // r.7 now contains a no-op
+
+//
+// Is this a 601? If so, the upper 16 bits of the PVR would contain
+// 0x0001 and we don't care about the lower 16 bits. By checking to
+// see if any of the uppermost bits are non-zero we can determine if it
+// is (or is not) a 601. If it is not a 601, the result of the follow-
+// ing instruction (in cr.0) will be "not equal". If this processor is
+// a 601 we remove the tlbsync/sync sequences in KiFlushSingleTb,
+// KeFillEntryTb and KeFlushCurrentTb.
+//
+
+#if !defined(NT_UP)
+
+ cmpwi r.17, PV601
+ bne ikms10 // jif not a 601
+
+ stw r.7, flst1-Kseg0CodeStart(r.6) // nop KiFlushSingleTb tlbsync
+ stw r.7, flst2-Kseg0CodeStart(r.6) // sync
+
+ stw r.7, fiet1-Kseg0CodeStart(r.6) // nop KeFillEntryTb tlbsync
+ stw r.7, fiet2-Kseg0CodeStart(r.6) // sync
+
+ stw r.7, flct3-Kseg0CodeStart(r.6) // nop KeFlushCurrentTb tlbsync
+ stw r.7, flct4-Kseg0CodeStart(r.6) // sync
+
+ikms10:
+
+#endif
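+
+// Expressed in C, the 601-specific patching above amounts to the sketch
+// below (illustrative only; NopTlbSyncIf601 is a hypothetical name and the
+// patch sites are the flst/fiet/flct labels used above):
+//
+//      // On a 601 the tlbsync/sync pairs are not wanted, so each of the
+//      // instructions is overwritten with 0x60000000 (ori r.0,r.0,0),
+//      // the canonical PowerPC no-op.
+//      void NopTlbSyncIf601(uint32_t pvr, uint32_t *sites[], int count)
+//      {
+//          if ((pvr >> 16) != 1)           // PVR version 0x0001 == 601
+//              return;                     // not a 601: leave the code alone
+//          for (int i = 0; i < count; i++)
+//              *sites[i] = 0x60000000;     // nop
+//      }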
+
+//
+// Fix KiFlushSingleTb, KeFillEntryTb and KeFlushCurrentTb HPT usage
+//
+
+ lhz r.5, LpbHashedPageTableSize(r.3)
+ cmpwi r.5, 0 // does this processor use a HPT?
+ beq ikms20 // if processor does not use an
+ // HPT, leave branches alone
+
+ stw r.7, flst0-Kseg0CodeStart(r.6) // allow fall-thru to HPT case.
+ stw r.7, fiet0-Kseg0CodeStart(r.6) // allow fall-thru to HPT case.
+ stw r.7, flct1-Kseg0CodeStart(r.6) // no-op branch around hpt clear
+
+#if !defined(NT_UP)
+ stw r.7, flct2-Kseg0CodeStart(r.6) // no-op branch around hpt clear
+#endif
+
+ikms20:
+
+//
+// Modify the branch instructions at KiFlushSingleTb, KeFillEntryTb, and
+// KeFlushCurrentTb so that they point to the real routines in KSEG0.
+//
+// Also, modify the function descriptors for KiFlushSingleTb,
+// KeFillEntryTb, and KeFlushCurrentTb so that they point to the real
+// routines in KSEG0.
+//
+
+ la r.7, FlushSingleTbJump-common_exception_entry(r.28)
+ lwz r.4, 0(r.7)
+ rlwinm r.5, r.4, 0, 0x03fffffc
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
+
+ la r.7, FillEntryTbJump-common_exception_entry(r.28)
+ lwz r.4, 0(r.7)
+ rlwinm r.5, r.4, 0, 0x03fffffc
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
+
+ la r.7, FlushCurrentTbJump-common_exception_entry(r.28)
+ lwz r.4, 0(r.7)
+ rlwinm r.5, r.4, 0, 0x03fffffc
+ add r.5, r.5, r.6
+ sub r.5, r.5, r.7
+ rlwimi r.4, r.5, 0, 0x03fffffc
+ stw r.4, 0(r.7)
+
+ oris r.5, r.6, K_BASE
+
+ lwz r.7, [toc]KiFlushSingleTb(r.toc)
+ clrlwi r.7, r.7, 1
+ la r.4, FlushSingleTb-Kseg0CodeStart(r.5)
+ stw r.4, 0(r.7)
+
+ lwz r.7, [toc]KeFillEntryTb(r.toc)
+ clrlwi r.7, r.7, 1
+ la r.4, FillEntryTb-Kseg0CodeStart(r.5)
+ stw r.4, 0(r.7)
+
+ lwz r.7, [toc]KeFlushCurrentTb(r.toc)
+ clrlwi r.7, r.7, 1
+ la r.4, FlushCurrentTb-Kseg0CodeStart(r.5)
+ stw r.4, 0(r.7)
+
+//
+// KeZeroPage defaults to using an FP store loop which is faster than
+// a dcbz loop on 603 class processors. If this is not one of those
+// processors, replace the branch at kzp.repl with an ISYNC instruction.
+//
+// N.B. The caches will get flushed (explicitly) before KeZeroPage is
+// ever used so we don't worry about it here.
+//
+
+ cmpwi cr.0, r.17, PV603
+ cmpwi cr.1, r.17, PV603p
+ cmpwi cr.7, r.17, PV603pp
+ beq cr.0, kzp.adjust.end // jif 603
+ beq cr.1, kzp.adjust.end // jif 603e
+ beq cr.7, kzp.adjust.end // jif 603ev
+
+ lis r.12, 0x4c00 // generate an isync instruction
+ ori r.12, r.12, 0x012c // 4c00012c.
+ stw r.12, kzp.repl-common_exception_entry(r.28)
+
+kzp.adjust.end:
+
+//
+// Set address of Processor Control Region for Kernel (KiPcr).
+//
+
+ lwz r.12, LpbPcrPage(r.3) // Get PCR page number
+ slwi r.12, r.12, PAGE_SHIFT // convert to real byte address
+ mtsprg sprg.0, r.12 // Real addr of KiPcr in SPRG 0
+ oris r.11, r.12, K_BASE // Virt addr of KiPcr in kernel
+ mtsprg sprg.1, r.11 // virtual space in SPRG 1.
+
+//
+// Initialize first process PD address, PDEs for PD and hyper PT.
+//
+
+ lwz r.1, LpbPdrPage(r.3) // pnum of PD,hyPT left by OS Ldr
+ slwi r.1, r.1, PAGE_SHIFT // make it a real address
+ stw r.1, PcPgDirRa(r.12) // store in PCR for HPT misses
+ b set_segment_registers
+
+//--------------------------------------------------------------------------
+//
+// KiStartProcessor()
+//
+// This is the system entry point for processors other than processor 0.
+// It will re-initialize memory mapping for KSEG0 (via BAT0), call the
+// routine to initialize the kernel and fall thru into the idle loop.
+//
+// Arguments:
+//
+// r3 - supplies address of loader parameter block.
+// r4 - supplies the per-processor virtual address of the PCR (not 0xffffd000)
+//
+// Return values:
+//
+// None. There is no return from this function.
+//
+// Remarks:
+//
+// ntoskrnl is assumed to be in memory covered by KSEG0.
+//
+//--------------------------------------------------------------------------
+
+ ALTERNATE_ENTRY_S(KiStartProcessor,INIT)
+
+cnstart: // this address in r.30
+
+ lwz r.toc, rm_toc_pointer-cnstart(r.30)// get kernel toc pointer
+ rlwinm r.toc, r.toc, 0, 0x7fffffff // cvt to phys addr
+
+//
+// Set address of Processor Control Region for Kernel (KiPcr)
+//
+
+ rlwinm r.3, r.31, 0, 0x7fffffff// get real address of LPB
+ lwz r.12, LpbPcrPage(r.3) // Get PCR page number
+ slwi r.12, r.12, PAGE_SHIFT // convert to real byte address
+ mtsprg sprg.0, r.12 // physical addr of KiPcr in SPRG 0
+ mtsprg sprg.1, r.4 // virtual addr of KiPcr in SPRG 1.
+
+//
+// Initialize PageDirectory address in this PCR.
+//
+
+ lwz r.1, LpbPdrPage(r.3) // pnum of PD,hyPT left by OS Ldr
+ slwi r.1, r.1, PAGE_SHIFT // make it a real address
+ stw r.1, PcPgDirRa(r.12) // store in PCR for HPT misses
+
+//
+// The following code is executed at startup on ALL processors.
+//
+
+set_segment_registers:
+
+//
+// Set the Storage Descriptor Register (address of the Hashed Page
+// Table) for this processor.
+//
+// Note: This implementation requires the HPT be allocated at an
+// address < 4GB on 64 bit PowerPC implementations. As it is
+// (currently) required that the HPT be addressable in KSEG0
+// this should not be of significant concern.
+//
+
+ lhz r.10, LpbHashedPageTableSize(r.3)
+ slwi r.10, r.10, PAGE_SHIFT
+ subi r.10, r.10, 1
+ lwz r.1, LpbHashedPageTable(r.3)
+ rlwimi r.1, r.10, 16, 0x1ff
+ mtsdr1 r.1
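+
+// For illustration only, the SDR1 value built above can be written in C
+// roughly as below (MakeSdr1 is a hypothetical name; this assumes the HPT
+// real address is aligned such that its low 16 bits are zero):
+//
+//      // SDR1 = HTABORG (upper 16 bits of the HPT real address) |
+//      //        HTABMASK (((size in bytes) - 1) >> 16, low 9 bits)
+//      uint32_t MakeSdr1(uint32_t hpt_real, uint32_t hpt_pages, int page_shift)
+//      {
+//          uint32_t bytes = hpt_pages << page_shift;
+//          return (hpt_real & 0xffff0000) | (((bytes - 1) >> 16) & 0x1ff);
+//      }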
+
+//
+// Invalidate most segment registers.
+//
+
+ lis r.0, SREG_INVAL // invalid segment register value
+ mtsr 0, r.0
+ mtsr 1, r.0
+ mtsr 2, r.0
+ mtsr 3, r.0
+ mtsr 4, r.0
+ mtsr 5, r.0
+ mtsr 6, r.0
+ mtsr 7, r.0
+ mtsr 11, r.0
+ mtsr 15, r.0 // temp set 15 invalid
+
+//
+// Initialize segment register 12. Assume initial PID = 0.
+//
+
+ li r.10, 12 // T=0, Ks,Kp=0, VSID=PID,12
+ mtsr 12, r.10
+
+//
+// Initialize the global segment registers 8, 9, 10, 13, 14
+//
+
+ li r.10, 8
+ oris r.10, r.10, 0x2000 // T=0 Ks=0 Kp=1 VSID=14
+ mtsr 8, r.10
+ li r.10, 9 // T=0 Ks,Kp=0 VSID=9
+ mtsr 9, r.10
+ li r.10, 10 // T=0 Ks,Kp=0 VSID=10
+ mtsr 10, r.10
+ li r.10, 13 // T=0 Ks,Kp=0 VSID=13
+ mtsr 13, r.10
+ li r.10, 14
+ oris r.10, r.10, 0x2000 // T=0 Ks=0 Kp=1 VSID=14
+ mtsr 14, r.10
+
+//
+// Set BAT0 so we have block address translation for the low part of KSEG0
+//
+// Virtual address layout is as follows
+//
+// 4GB --------------------------------- FFFFFFFF
+// | Non Paged Pool |
+// | |
+// - - - - - - - - - - - - - - - - -
+// | Paged Pool |
+// | | 90000000 *
+// -- ---------------------------------
+// | | |
+// BAT0 | | Kernel and HAL |
+// | | | 80000000 **
+// 2GB -- ---------------------------------
+// | |
+// | |
+// | |
+// | User Space |
+// | |
+// | |
+// | |
+// | |
+// | |
+// | |
+// | | 0
+// 0GB ---------------------------------
+//
+// * On MIPS this is C0000000; however, we can cover only
+// 256MB with a BAT, so why not allocate that space to
+// the paged pool?
+// ** Mapped to physical address 0.
+//
+// WARNING: 601 BAT registers are incompatible with other 60x
+// BAT registers.
+//
+// Set BAT0 to virtual 0x80000000, physical 0 for max size.
+// (max size = 8MB for 601, 256MB for other 60x processors).
+// BAT effective page size = 128KB, so BEPI = 2^31 / 2^17
+// = 2^(31-17) = 2^14 = 16K entries. But this field is
+// left justified, i.e. shifted left 17 bits into position in the
+// register; in other words, just take the base address and slam
+// it into BLPI without adjusting.
+//
+// Set the following control bits
+//
+// W Write Thru 0
+// I Inhibit Caching 0
+// M Memory Coherency 1
+//
+// want key = 0 for supervisor mode, 1 for user mode, so
+// Ks = 0
+// Ku = 1
+// want all access in supervisor mode, none in user mode, so
+// PP = 0b00
+//
+
+ lwz r.11, LpbKseg0Top(r.3) // Get VA of byte above KSEG0
+ stw r.11, PcKseg0Top(r.12) // Copy into PCR
+
+ mfpvr r.6 // check processor type
+ rlwinm r.6,r.6,16,16,31 // extract processor version
+ cmpwi r.6,PV601
+ bne bat_not_601
+
+ li r.10, 0b0010100 // WIM | Ks | Ku | PP
+ oris r.10, r.10, K_BASE // set BLPI
+
+//
+// Set BSM all 1s and Valid bit.
+// PBN (Physical Block Number) = 0.
+//
+
+ clrlwi r.11, r.11, 1 // get size of KSEG0 (turn off 0x80000000)
+ subi r.11, r.11, 1 // convert to mask
+ rlwinm r.11, r.11, 32-17, 0x3f // mask >> 17 == block length mask
+ ori r.11, r.11, 0x40 // set V
+
+ mtbatl 0, r.11 // set BAT0
+ mtbatu 0, r.10
+
+//
+// Clear Valid bit in BATs 1, 2 and 3
+//
+
+ li r.0, 0
+ mtbatl 1, r.0
+ mtbatl 2, r.0
+ mtbatl 3, r.0
+
+ b skip_bat_not_601
+
+bat_not_601:
+
+//
+// Clear Valid bits in ALL BATs prior to setting any of them.
+//
+
+ li r.0, 0
+ mtdbatu 0, r.0
+ mtdbatl 0, r.0
+ mtdbatu 1, r.0
+ mtdbatl 1, r.0
+ mtdbatu 2, r.0
+ mtdbatl 2, r.0
+ mtdbatu 3, r.0
+ mtdbatl 3, r.0
+
+ mtibatu 0, r.0
+ mtibatl 0, r.0
+ mtibatu 1, r.0
+ mtibatl 1, r.0
+ mtibatu 2, r.0
+ mtibatl 2, r.0
+ mtibatu 3, r.0
+ mtibatl 3, r.0
+
+ isync
+
+//
+// Set BAT0 to cover KSEG0.
+//
+// Set Block Effective Page Index (ie effective address) to 2GB,
+// BL to 8MB, Valid Supervisor mode, not Valid Problem mode.
+//
+
+ clrlwi r.11, r.11, 1 // get size of KSEG0 (turn off 0x80000000)
+ subi r.11, r.11, 1 // convert to mask
+ rlwinm r.11, r.11, 32-15, 0x1ffc // mask >> 17 << 2 == block length mask
+ ori r.11, r.11, 2 // set Vs (Vp is off)
+ oris r.11, r.11, K_BASE // set BEPI (0x80000000)
+
+//
+// BRPN (Block Real Page Number) = 0. PP for Supervisor
+// read/write.
+//
+// ------- W - Write thru 0
+// |------ I - Inhibit Cache 0
+// ||----- M - Memory Coherency REQUIRED
+// |||---- G - Guard bit 0
+// ||||--- Reserved
+// |||||
+ li r.10, 0b0010010 // BRPN | WIMG | PP
+
+ mtibatl 0, r.10 // set IBAT0
+ mtibatu 0, r.11
+
+//
+// 603e/ev Errata 19 work around.
+//
+// r.6 has the (shifted) PVR in it. If this is a 603e/603ev, turn on guarded
+// storage for the DBAT. It is illegal to set G=1 for the IBAT.
+//
+
+ cmpwi r.6, PV603p // 603e?
+ beq do_errata_19_bat // modify instruction if so
+ cmpwi r.6, PV603pp // 603ev?
+ bne skip_errata_19_bat // skip modification if not
+do_errata_19_bat:
+ ori r.10, r.10, PTE_GUARDED // turn on guarded storage
+skip_errata_19_bat:
+
+ mtdbatl 0, r.10 // set DBAT0
+ mtdbatu 0, r.11
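+
+// As a sketch only, the non-601 BAT pair built above corresponds roughly to
+// the following C (MakeKseg0Bat is a hypothetical name; kseg0_size is the
+// KSEG0 size derived from LpbKseg0Top above):
+//
+//      // BAT pair covering KSEG0: virtual 0x80000000 -> physical 0.
+//      void MakeKseg0Bat(uint32_t kseg0_size, uint32_t *batu, uint32_t *batl)
+//      {
+//          uint32_t bl = ((kseg0_size - 1) >> 17) << 2;   // block length mask
+//          *batu = 0x80000000 | (bl & 0x1ffc) | 0x2;      // BEPI | BL | Vs
+//          *batl = 0x00000012;                            // BRPN=0, M=1, PP=r/w
+//      }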
+
+
+skip_bat_not_601:
+
+//
+// Initialize the Processor Control Region (PCR).
+//
+
+ li r.11, PCR_MINOR_VERSION // set minor version number
+ sth r.11, PcMinorVersion(r.12)
+ li r.11, PCR_MAJOR_VERSION // set major version number
+ sth r.11, PcMajorVersion(r.12)
+
+#if DBG
+
+ lhz r.11, LpbIcacheMode(r.3) // get,set cache modes
+ stb r.11, PcIcacheMode(r.12)
+ rlwinm r.11, r.11, 0x18, 0x18, 0x1f
+ stb r.11, PcDcacheMode(r.12)
+
+#endif
+
+ lwz r.11, LpbPcrPage2(r.3) // Get PCR2 page number
+ slwi r.11, r.11, PAGE_SHIFT // convert to real byte address
+ stw r.11, PcPcrPage2(r.12) // Store in PCR
+
+//
+// Initialize the addresses of various data structures that are
+// referenced from the exception and interrupt handling code.
+//
+// N.B. The panic stack is a separate stack that is used when
+// the current kernel stack overflows.
+//
+// N.B. The interrupt stack is a separate stack and is used to
+// process all interrupts that run at IRQL 3 and above.
+//
+
+ lwz r.11, LpbPrcb(r.3) // set processor block address
+ stw r.11, PcPrcb(r.12)
+ lwz r.1, LpbKernelStack(r.3) // set initial stack address
+ stw r.1, PcInitialStack(r.12)
+ lwz r.11, LpbPanicStack(r.3) // set panic stack address
+ stw r.11, PcPanicStack(r.12)
+ lwz r.11, LpbInterruptStack(r.3) // set interrupt stack address
+ stw r.11, PcInterruptStack(r.12)
+ lwz r.11, LpbThread(r.3) // set current thread address
+ stw r.11, PcCurrentThread(r.12)
+
+//
+// Get the first level data and instruction cache values from the loader
+// parameter block and move them into the PCR.
+//
+
+ lwz r.23, LpbFirstLevelDcacheSize(r.3)
+ lwz r.24, LpbFirstLevelDcacheFillSize(r.3)
+ lwz r.25, LpbFirstLevelIcacheSize(r.3)
+ lwz r.26, LpbFirstLevelIcacheFillSize(r.3)
+
+ stw r.23, PcFirstLevelDcacheSize(r.12)
+ addi r.23, r.24, -1
+ stw r.24, PcFirstLevelDcacheFillSize(r.12)
+ stw r.24, PcDcacheFillSize(r.12)
+ stw r.23, PcDcacheAlignment(r.12)
+
+ addi r.23, r.26, -1
+ stw r.25, PcFirstLevelIcacheSize(r.12)
+ stw r.26, PcFirstLevelIcacheFillSize(r.12)
+ stw r.26, PcIcacheFillSize(r.12)
+ stw r.23, PcIcacheAlignment(r.12)
+
+//
+// Set the second level data and instruction cache fill size and size.
+//
+
+ lwz r.23, LpbSecondLevelDcacheSize(r.3)
+ lwz r.24, LpbSecondLevelDcacheFillSize(r.3)
+ lwz r.25, LpbSecondLevelIcacheSize(r.3)
+ lwz r.26, LpbSecondLevelIcacheFillSize(r.3)
+
+ stw r.23, PcSecondLevelDcacheSize(r.12)
+ stw r.24, PcSecondLevelDcacheFillSize(r.12)
+ stw r.25, PcSecondLevelIcacheSize(r.12)
+ stw r.26, PcSecondLevelIcacheFillSize(r.12)
+
+//
+// Set current IRQL to highest value
+//
+
+ li r.11, HIGH_LEVEL
+ stb r.11, PcCurrentIrql(r.12)
+
+//
+// Compute address of Loader Parameter Block into r.8 where it will
+// remain for the call to KiInitializeKernel.
+//
+
+ oris r.8, r.3, K_BASE // LoaderBlock |= KSEG0_BASE
+
+//
+// Get processor into mapped mode
+//
+
+ bl ki.virtual
+
+// **** PROCESSOR NOW IN VIRTUAL MODE ****
+
+// For the remainder of this module, register usage is compliant with the
+// standard linkage conventions for little-endian PowerPC.
+//
+
+//
+// Get virtual address of ntoskrnl's TOC
+//
+
+ oris r.toc, r.toc, K_BASE // TOC is in KSEG0
+
+//
+// Buy stack frame
+//
+
+ subi r.1, r.1, STK_MIN_FRAME+8
+ li r.13, 0 // zero back chain and friends
+ stw r.13, 0(r.1) // initialize teb
+ stw r.13, 4(r.1)
+ stw r.13, 8(r.1)
+
+//
+// Setup arguments and call kernel initialization procedure
+//
+// KiInitializeKernel(
+// IdleProcess,
+// IdleThread,
+// IdleStack,
+// Prcb,
+// CpuNumber,
+// LoaderParameterBlock
+// )
+//
+
+ lwz r.3, LpbProcess(r.8) // get idle process address
+ lwz r.4, LpbThread(r.8) // get idle thread address
+ lwz r.5, LpbKernelStack(r.8) // get idle thread stack address
+ lwz r.6, LpbPrcb(r.8) // get processor block address
+ lbz r.7, PbNumber(r.6) // get processor number
+
+//
+// Set segment register 15 to a unique vsid for this processor. This vsid
+// also has the SREG_INVAL bit set so a dmiss or dtlb miss to any address
+// will be shunted to the storage error path rather than filling the entry
+// from the "shared" NT page tables.
+//
+
+ lis r.0, SREG_INVAL|(SREG_INVAL>>1) // special marker for segment f
+ or r.0, r.0, r.7 // VSID = 0b11,procnum
+ oris r.0, r.0, 0x2000 // T=0 Ks=0 Kp=1
+ mtsr 15, r.0
+ isync
+
+ bl ..KiInitializeKernel
+
+ bl ..KiIdleLoop
+
+ DUMMY_EXIT (KiSystemBegin)
+
+//--------------------------------------------------------------------------
+//
+// ki.virtual switch kernel from unmapped instructions and data to
+// mapped.
+//
+// Kernel is loaded into memory at real address 0,
+// virtual address 0x80000000.
+//
+// On exit, MSR_IR and MSR_DR must be set and the return
+// address must have been adjusted such that execution
+// continues at the virtual address equivalent to the real
+// address in LR on entry.
+//
+// The change of state and transfer are accomplished atomically
+// by setting the target address and state for return from
+// interrupt then using rfi to put these changes into effect.
+//
+// Entry Requirements:
+// Processor executing in supervisor state.
+//
+// Returns to next instruction in caller.
+// MSR: Instruction Relocate ON
+// Data Relocate ON
+//
+//--------------------------------------------------------------------------
+
+ki.virtual:
+ mflr r.0 // save return address
+
+#if DBG
+
+//
+// This section of code determines the caching mode for the kernel.
+// Based on processor type and values in the PCR, either set HID0 to
+// turn off the caches (603/604), or do nothing (601).
+//
+ mfpvr r.11 // get processor type
+ lhz r.12, PcIcacheMode(r.12) // get I/D caching information
+ srwi r.11, r.11, 16 // extract processor version
+ cmpli cr.6, 0, r.11, PV601 // cr.6 -> is this a 601?
+ rlwinm r.4, r.12, 0x0, 0x18, 0x1f // r.4 -> i-cache mode
+ rlwinm r.5, r.12, 0x18, 0x18, 0x1f // r.5 -> d-cache mode
+ beq- cr.6, cache_done // branch if on 601, nothing to do
+
+//
+// Set cache bits for HID0 on 603/604
+//
+ cmpli cr.6, 0, r.4, 0 // cr.6 -> any bits set for icache?
+ addi r.6, r.0, 0 // r.6 -> 0
+ ori r.6, r.6, 0xc000 // r.6 -> 0xc000 (I/D caches on)
+ beq+ cr.6, cache_d // branch if no bits set for icache
+ xori r.6, r.6, 0x8000 // r.6 -> 0x4000 (I cache off)
+
+cache_d:
+ cmpli cr.6, 0, r.5, 0 // cr.6 -> any bits set for dcache?
+ beq+ cr.6, cache_done // branch if no bits set
+ xori r.6, r.6, 0x4000 // r.6 -> 0x[80]000 (turn off dcache)
+
+//
+// At this point r.6 has the bits to or into the register that we are
+// going to place in HID0. Possible values are:
+// 0xc000 : I cache ON, D cache ON
+// 0x8000 : I cache ON, D cache OFF
+// 0x4000 : I cache OFF, D cache ON
+// 0x0000 : I cache OFF, D cache OFF
+//
+// N.B. r.6 is NOT set for 601
+//
+cache_done:
+#endif
+
+//
+// Acquire the HPT lock to ensure that tlbie/tlbsync is done on
+// only one processor at a time.
+//
+
+#if !defined(NT_UP)
+ li r.10,HPT_LOCK // get hpt lock address
+kv.getlk:
+ lwarx r.11,0,r.10 // load and reserve lock word
+ cmpwi r.11,0 // is lock available?
+ mfsprg r.11,sprg.0 // get processor ctl region addr
+ bne- kv.getlk_spin // loop if lock is unavailable
+ stwcx. r.11,0,r.10 // store conditional to lock word
+ bne- kv.getlk_spin // loop if lost reserve
+ isync // context synchronize
+ b kv.getlk_got
+kv.getlk_spin:
+ lwz r.11,0(r.10)
+ cmpwi r.11,0
+ beq+ kv.getlk
+ b kv.getlk_spin
+kv.getlk_got:
+#endif // NT_UP
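+
+// The lwarx/stwcx. sequence above is the PowerPC form of a test-and-test-
+// and-set spin lock. A rough C equivalent using a GCC atomic builtin is
+// sketched below (AcquireHptLock is a hypothetical name; the builtin acts
+// as a full barrier where the assembly relies on isync):
+//
+//      void AcquireHptLock(volatile unsigned long *lock, unsigned long owner)
+//      {
+//          for (;;) {
+//              if (*lock == 0 &&
+//                  __sync_bool_compare_and_swap(lock, 0, owner))
+//                  return;                 // lock acquired
+//              while (*lock != 0)          // spin without attempting the
+//                  ;                       // atomic update
+//          }
+//      }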
+
+//
+// before switching, flush L1 cache just to be sure everything that was
+// loaded is really in memory. We flush the data cache and invalidate
+// the instruction cache. Currently the largest D cache is the unified
+// I/D cache on the 601 at 32KB. We flush the D-Cache by loading 256KB
+// worth of data. This is more than can be contained in the largest
+// anticipated cache.
+//
+// The HAL can't be used for this function yet because mapping isn't
+// enabled.
+//
+// N.B. We do the cache flushing/invalidation in blocks of 32 bytes
+// which is the smallest PowerPC cache block size.
+//
+
+ li r.11, 256*1024/32 // amount to load, in blocks
+ mtctr r.11
+ li r.10, -32 // start address - sizeof cache block
+
+//
+// Touch 256 K bytes
+//
+
+lcache:
+ lbzu r.11, 32(r.10)
+ bdnz lcache
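+
+// The load loop above simply touches one byte per cache block across a
+// range larger than any anticipated D-cache, displacing (and therefore
+// writing back) every dirty line. A rough C rendering, with the
+// hypothetical name TouchRange:
+//
+//      void TouchRange(volatile const unsigned char *base,
+//                      unsigned long bytes, unsigned long block)
+//      {
+//          unsigned long i;
+//          for (i = 0; i < bytes; i += block)
+//              (void)base[i];              // 256KB in 32-byte blocks above
+//      }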
+
+//
+// Invalidate the TLB. We just outright invalidate 256 congruence classes;
+// the largest known number is currently 128 (on the 601). 256 will
+// hopefully allow for expansion.
+//
+
+ li r.7,256 // default num congruence classes
+ mtctr r.7
+tlbi: tlbie r.7 // invalidate entry
+ addi r.7,r.7, 0x1000 // bump to next page
+ bdnz tlbi
+
+ sync // wait tlbie completion
+
+//
+// Depending on processor type, invalidate the instruction cache and
+// enable both instruction and data caches.
+//
+
+ mfpvr r.12 // check processor type
+ srwi r.12, r.12, 16 // extract processor version
+ cmpwi cr.1, r.12, PV601 // is this a 601?
+ cmpwi cr.6, r.12, PV603 // is this a 603?
+ cmpwi cr.7, r.12, PV603p // perhaps a 603+?
+ cmpwi cr.0, r.12, PV603pp // perhaps a 603++?
+ beq cr.1, go_virtual // branch if 601
+
+//
+// not a 601, on MP systems, wait for tlb propagation.
+//
+
+#if !defined(NT_UP)
+
+ tlbsync
+ sync
+
+#endif
+
+ beq cr.6, caches_603 // branch if 603
+ beq cr.7, caches_603 // branch if 603+
+ beq cr.0, caches_603 // branch if 603++
+ cmpwi cr.6, r.12, PV604 // is this a 604?
+ cmpwi cr.7, r.12, PV604p // is this a 604+?
+ beq cr.6, caches_604 // branch if 604
+ beq cr.7, caches_604 // branch if 604+
+ cmpwi cr.0, r.12, PV613 // is this a 613?
+ beq cr.0, caches_613
+ b caches_unknown // branch to handle unknown cases
+
+// THIS IS 603 SPECIFIC ...
+
+caches_603:
+
+ mfspr r.12, hid0 // get hid0
+ rlwinm r.12, r.12, 0, 0x0fff // clear ICE, DCE, ILOCK, DLOCK
+ ori r.11, r.12, 0xc00 // flash instruction and data caches
+ mtspr hid0, r.11 // set ICFI and DCFI
+ mtspr hid0, r.12 // clear ICFI and DCFI
+#if !DBG
+ ori r.12, r.12, 0xc000 // enable instruction and data caches
+#else
+ or r.12, r.12, r.6 // set established cache inhibit bits
+ sync
+ isync
+#endif
+ mtspr hid0, r.12
+ b go_virtual // switch modes
+
+// THIS IS 604/604+ SPECIFIC ...
+
+caches_604:
+
+ mfspr r.12,hid0 // get hid0
+#if DBG
+ rlwinm r.12,r.12,0x0,0x12,0xf // clear cache enables
+ ori r.12,r.12,h0_604_sse+h0_604_bhte+h0_604_icia+h0_604_dcia
+ or r.12,r.12,r.6 // or in cache modes
+ sync
+ isync
+#else
+ ori r.12,r.12,h0_604_prefered// enable all the things we want
+ ori r.12,r.12,h0_604_icia+h0_604_dcia // and invalidate both
+ // caches. 604 clears ICIA and DCIA
+ // automatically.
+#endif
+ mtspr hid0,r.12
+
+
+ b go_virtual // fall thru to switch modes
+
+// THIS IS 613 SPECIFIC ...
+
+caches_613:
+
+ mfspr r.12,hid0 // get hid0
+#if DBG
+ rlwinm r.12,r.12,0x0,0x12,0xf // clear cache enables
+ ori r.12,r.12,h0_613_sge+h0_613_btic+h0_613_bhte+h0_613_icia+h0_613_dcia
+ or r.12,r.12,r.6 // or in cache modes
+ sync
+ isync
+#else
+ ori r.12,r.12,h0_613_preferred // enable all the things we want
+ ori r.12,r.12,h0_613_icia+h0_613_dcia // and invalidate both
+ // caches. 613 clears ICIA and DCIA
+ // automatically.
+#endif
+ mtspr hid0,r.12
+
+
+ b go_virtual // fall thru to switch modes
+
+//
+// Ensure the interrupt/exception vectors are not stale in the I-Cache
+// by invalidating all cache lines in the region that was copied to low
+// memory.
+//
+// Note that as soon as we are able to do so, we should use the HAL to
+// do a full, machine dependent, I-Cache invalidate.
+//
+
+caches_unknown:
+
+ li r.11, 0 // target address
+ li r.10, (end_of_code_to_move-real0+31)/32 // num blocks to inval
+ mtctr r.10
+
+invalidate_icache:
+ icbi 0, r.11
+ addi r.11, r.11, 32
+ bdnz invalidate_icache
+
+//
+// Now invalidate all code in *this* module, some of which may have
+// been modified during initialization.
+//
+
+ bl invalidate_real0
+invalidate_real0:
+ mflr r.10 // address of invalidate_real0 to r.10
+ li r.12, (invalidate_real0-end_of_code_to_move)/32
+ mtctr r.12 // number of blocks to invalidate
+ subi r.10, r.10, invalidate_real0-end_of_code_to_move
+
+invalidate_real0_loop:
+ icbi 0, r.10
+ addi r.10, r.10, 32
+ bdnz invalidate_real0_loop
+
+go_virtual:
+
+#if !defined(NT_UP)
+ li r.10,0 // get a zero value
+ sync // ensure all previous stores done
+ stw r.10,HPT_LOCK(0) // store zero in hpt lock word
+#endif // NT_UP
+
+//
+// Done. Now get system into virtual mode. (return address is in r.0)
+//
+// N.B. rfi does not load the MSR ILE bit from SRR1, so we need to turn
+// ILE on explicitly before the rfi.
+//
+
+ mfmsr r.10 // get current MSR
+ rlwimi r.10, r.10, 0, MASK_SPR(MSR_ILE,1) // turn on ILE
+ mtmsr r.10 // set new MSR
+
+ LWI(r.10, FLIH_MSR) // initialize machine state
+ oris r.0, r.0, K_BASE // set top bit of address
+ mtsrr0 r.0 // set rfi target address
+ mtsrr1 r.10 // set rfi target machine state
+ rfi // return
+
+
+//--------------------------------------------------------------------------
+//
+// ki.real switch kernel from mapped instructions and data to
+// unmapped.
+//
+// On exit, MSR_IR and MSR_DR will be clear and the return
+// address adjusted such that execution continues at the
+// real address equivalent to the address in lr on entry.
+// Interrupts are disabled by clearing bit MSR_EE.
+//
+// Note: this depends on the return address being in the
+// range covered by KSEG0.
+//
+// The change of state and transfer are accomplished atomically
+// by setting the target address and state for return from
+// interrupt then using rfi to put these changes into effect.
+//
+// Entry Requirements:
+// Processor executing in supervisor state.
+//
+// Returns to next instruction in caller.
+// MSR: Instruction Relocate OFF
+// Data Relocate OFF
+// Interrupts Disabled
+// r.30 return address (in real mode)
+//
+//--------------------------------------------------------------------------
+
+ki.real:
+
+ mflr r.30
+ mfmsr r.8 // get current state
+ rlwinm r.8, r.8, 0, ~INT_ENA // clear interrupt enable
+ mtmsr r.8 // disable interrupts
+ rlwinm r.8, r.8, 0, ~(MASK_SPR(MSR_IR,1)|MASK_SPR(MSR_DR,1))
+ mtsrr1 r.8 // desired initial state
+ rlwinm r.30, r.30, 0, 0x7fffffff // physical return address
+ mtsrr0 r.30
+ rfi // return
+
+//--------------------------------------------------------------------------
+//
+// Address of toc and common_exception_entry, available to init code.
+//
+
+rm_toc_pointer:
+ .long .toc // address of kernel toc
+addr_common_exception_entry:
+ .long common_exception_entry
+addr_RtlpRestoreContextRfiJump:
+ .long ..RtlpRestoreContextRfiJump
+
+//--------------------------------------------------------------------------
+//
+// Remaining code in this module exists for the life of the system.
+//
+//--------------------------------------------------------------------------
+
+ .new_section .text,"rcx6" // force 64 byte alignment
+ // for text in this module.
+ .text
+
+ .align 2
+toc_pointer:
+ .long .toc // address of kernel toc
+
+//--------------------------------------------------------------------------
+//
+// common_exception_entry
+//
+// This is the common entry point into kernel for most exceptions/
+// interrupts. The processor is running with instruction and data
+// relocation enabled when control reaches here.
+//
+// on Entry:
+// MSR: External interrupts disabled
+// Instruction Relocate ON
+// Data Relocate ON
+// GP registers:
+// r.2: Constant identifying the exception type
+// r.3: Saved SRR0 (interrupt address)
+// r.4: Saved SRR1 (MSR value)
+// r.5: -available-
+// r.11: -available-
+// In the PCR:
+// PcGprSave[0]: Saved r.2
+// PcGprSave[1]: Saved r.3
+// PcGprSave[2]: Saved r.4
+// PcGprSave[3]: Saved r.5
+// PcGprSave[5]: Saved r.11
+//
+// All other registers still have their contents as of the time
+// of interrupt
+//
+// Our stack frame header must contain space for 16 words of arguments, the
+// maximum that can be specified on a system call. The stack frame header
+// struct defines space for only 8 such words.
+//
+// We'll build a structure on the stack like this:
+//
+// low addr | |
+// | |
+// / |--------------------| <-r.1 at point we call
+// | | Stack Frame Header | KiDispatchException
+// | | (back chain, misc. |
+// | | stuff, 16 words of |
+// | | parameter space) |
+// / |--------------------|
+// | Trap Frame |
+// STACK_DELTA | (volatile state) |
+// | <------ includes ExceptionRecord, imbedded within
+// \ |--------------------|
+// | | Exception Frame |
+// | | (non-volatile |
+// | | state) |
+// | | |
+// | |--------------------|
+// | | Slack space, |
+// | | skipped over to |
+// | | avoid stepping on |
+// | | data used by leaf |
+// | | routines |
+// \ |--------------------| <-r.1 at point of interrupt, if interrupted
+// | | kernel code, or base of kernel stack if
+// high addr | | interrupted user code
+
+//
+// This stack frame format is defined as KEXCEPTION_STACK_FRAME in ppc.h.
+//
+
+#define DeliverApcSaveTrap 0xc // Save trap frame address in reserved
+ // (offset 12) Stack Frame Header
+ // location for possible call out
+
+ .text // resume .text section
+
+//
+// An Exception Record is embedded within the Trap Frame
+//
+ .set ER_BASE, TF_BASE + TrExceptionRecord
+ .set DR_BASE, PbProcessorState + PsSpecialRegisters
+
+//--------------------------------------------------------------------------
+// The following is never executed; it is provided to allow virtual
+// unwind to restore register state prior to an exception occurring.
+// This is a common prologue for the various exception handlers.
+
+ FN_TABLE(KiCommonExceptionEntry,0,3)
+
+ DUMMY_ENTRY(KiCommonExceptionEntry)
+
+ stwu r.sp, -STACK_DELTA (r.sp)
+ stw r.0, TrGpr0 + TF_BASE (r.sp)
+ mflr r.0
+ stw r.0, TrLr + TF_BASE (r.sp)
+ mflr r.0
+ stw r.0, EfLr (r.sp)
+ mfcr r.0
+ stw r.0, EfCr (r.sp)
+
+ stw r.2, TrGpr2 + TF_BASE(r.sp)
+ stw r.3, TrGpr3 + TF_BASE(r.sp)
+ stw r.4, TrGpr4 + TF_BASE(r.sp)
+ stw r.5, TrGpr5 + TF_BASE(r.sp)
+ stw r.6, TrGpr6 + TF_BASE(r.sp)
+ stw r.7, TrGpr7 + TF_BASE(r.sp)
+ stw r.8, TrGpr8 + TF_BASE(r.sp)
+ stw r.9, TrGpr9 + TF_BASE(r.sp)
+ stw r.10, TrGpr10 + TF_BASE(r.sp)
+ stw r.11, TrGpr11 + TF_BASE(r.sp)
+ stw r.12, TrGpr12 + TF_BASE(r.sp)
+
+ mfctr r.6 // Fixed Point Exception
+ mfxer r.7 // registers
+
+ stfd f.0, TrFpr0 + TF_BASE(r.sp) // save volatile FPRs
+ lis r.12, K_BASE // base addr of KSEG0
+ stfd f.1, TrFpr1 + TF_BASE(r.sp)
+ lfd f.1, FpZero-real0(r.12) // get FP 0.0
+ stfd f.2, TrFpr2 + TF_BASE(r.sp)
+ mffs f.0 // get Floating Point Status
+ // and Control Register (FPSCR)
+ stfd f.3, TrFpr3 + TF_BASE(r.sp)
+ stfd f.4, TrFpr4 + TF_BASE(r.sp)
+ stfd f.5, TrFpr5 + TF_BASE(r.sp)
+ stfd f.6, TrFpr6 + TF_BASE(r.sp)
+ stfd f.7, TrFpr7 + TF_BASE(r.sp)
+ stfd f.8, TrFpr8 + TF_BASE(r.sp)
+ stfd f.9, TrFpr9 + TF_BASE(r.sp)
+ stfd f.10, TrFpr10 + TF_BASE(r.sp)
+ stfd f.11, TrFpr11 + TF_BASE(r.sp)
+ stfd f.12, TrFpr12 + TF_BASE(r.sp)
+ stfd f.13, TrFpr13 + TF_BASE(r.sp)
+
+ stw r.6, TrCtr + TF_BASE(r.sp) // save Count register
+ stw r.7, TrXer + TF_BASE(r.sp) // save Fixed Point Exception rg
+ stfd f.0, TrFpscr + TF_BASE(r.sp) // save FPSCR register.
+ mtfsf 0xff, f.1 // clear FPSCR
+
+// \PROLOGUE_END(KiCommonExceptionEntry)
+ .set KiCommonExceptionEntry.body, $+1 // so the debugger can see
+ // difference between this
+ // and normal prologues.
+
+ .align 6 // ensure the following is
+ // cache block aligned (for
+ // performance) (cache line
+ // for 601)
+common_exception_entry:
+ mfcr r.5 // save CR at time of interrupt
+ stw r.6, KiPcr+PCR_SAVE6(r.0) // save r.6 in PCR
+
+// Code here and below frequently needs to test whether the previous mode
+// was "kernel" or "user". We isolate the PR (problem state, i.e., user mode)
+// bit from the previous MSR into Condition Reg bit 19 (in cr.4), where it will
+// stay. Subsequently we can just branch-if-true (for user mode) or
+// branch-if-false (for kernel mode) on CR bit WAS_USER_MODE.
+
+ .set WAS_USER_MODE, 19 // CR bit number
+
+ rlwinm r.6, r.4, 32+MSR_PR-WAS_USER_MODE, MASK_SPR(WAS_USER_MODE,1)
+ mtcrf 0b00001000, r.6 // PR to cr.4 WAS_USER_MODE
+ lwz r.6, KiPcr+PcInitialStack(r.0) // kernel stack addr for thread
+ bt WAS_USER_MODE, cee.20 // branch if was in user state
+
+// Get stack lower bound.
+
+ lwz r.11, KiPcr+PcStackLimit(r.0) // get current stack limit
+
+// Processor was in supervisor state. We'll add our stack frame to the stack
+// whose address is still in r.1 from the point of interrupt. First, make sure
+// that the stack address is valid.
+
+ cmplw cr.1, r.sp, r.6 // test for underflow, into cr.1
+
+// Make sure that stack hasn't overflowed, underflowed, or become misaligned
+
+ subi r.6, r.sp, STACK_DELTA // allocate stack frame; ptr into r.6
+
+ cmplw cr.2, r.6, r.11 // test for overflow, into cr.2
+ andi. r.11, r.sp, 7 // test r.sp for 8-byte align into cr.0
+ bgt- cr.1, cee.10 // branch if stack has underflowed
+ bne- cr.0, cee.10 // branch if stack is misaligned
+ bge+ cr.2, cee.30 // branch if stack has not overflowed
+
+// stack overflow/underflow/misalign
+
+cee.10:
+// Allow for the possibility that we're actually changing from
+// one thread's stack to another. This is a two instruction
+// window during which interrupts are disabled, but it may be that
+// someone is single stepping through that code. The address of
+// the second instruction is 'global' for the benefit of this
+// test. If that is what happened, we must actually execute
+// the second instruction so that the correct stack is in use
+// because we would fail the stack check if any other exception
+// occurs while we are in this state and also because the old
+// stack may not be mapped any longer.
+//
+// That instruction is a
+//
+// ori r.sp, r.22, 0
+//
+// (we can't check this because the instruction has been replaced
+// by a breakpoint).
+//
+// We KNOW that r.22 contains what we should use as a stack pointer!!
+//
+// Available registers
+// r.6 contains what we had hoped would be the new stack address,
+// r.11
+
+ li r.6, KepSwappingContextAddr-real0// dereference pointer to
+ oris r.6, r.6, K_BASE // word containing addr of
+ lwz r.6, 0(r.6) // KepSwappingContext
+ cmplw r.6, r.3
+ bne cee.15 // jif that wasn't it.
+
+// Ok, that seems to be the problem, do the stack switch and try
+// to validate again. (can't branch up as if we fail, we'll be
+// in a loop).
+
+ lwz r.11, KiPcr+PcStackLimit(r.0) // get current stack limit
+ lwz r.6, KiPcr+PcInitialStack(r.0) // kernel stack addr for thread
+ cmplw cr.1, r.22, r.6 // test for underflow, into cr.1
+ subi r.6, r.22, STACK_DELTA // allocate stack frame; ptr into r.6
+ cmplw cr.2, r.6, r.11 // test for overflow, into cr.2
+ andi. r.11, r.22, 7 // test r.22 for 8-byte align into cr.0
+ bgt- cr.1, cee.15 // branch if stack has underflowed
+ bne- cr.0, cee.15 // branch if stack is misaligned
+ bge+ cr.2, cee.30 // branch if stack has not overflowed
+
+//
+// It really is a problem (underflow, overflow or misalignment).
+//
+cee.15:
+ lwz r.11, KiPcr+PcStackLimit(r.0) // refetch old stack limit
+ lwz r.6, KiPcr+PcPanicStack(r.0) // switch to panic stack
+ lwz r.2, KiPcr+PcInitialStack(r.0) // refetch old initial stack
+ stw r.11, TF_BASE-8-STACK_DELTA_NEWSTK(r.6) // save old StackLimit as
+ // if it was 15th parameter
+ subi r.11, r.6, KERNEL_STACK_SIZE // compute stack limit
+ stw r.6, KiPcr+PcInitialStack(r.0) // so we don't repeat ourselves
+ // ie, avoid overflowing because
+ // we went to the panic stack
+ stw r.11, KiPcr+PcStackLimit(r.0) // set stack limit
+ subi r.6, r.6, STACK_DELTA_NEWSTK // allocate stack frame
+ stw r.2, TF_BASE-4(r.6) // save old InitialStack as
+ // if it was 16th parameter
+ li r.2, CODE_PANIC // set exception cause to panic
+ b cee.30 // process exception
+
+// Previous state was user mode
+//
+// Segment registers 9, 10, 12 and 13 need to be setup for kernel mode.
+// In user mode they are set to zero as no access is allowed to these
+// segments, and there are no combinations of Ks Kp and PP that allow
+// kernel both read-only and read/write pages that are user no-access.
+
+cee.20:
+ mfsr r.6, 0 // get PID from SR0
+ ori r.6, r.6, 12 // T=0 Ks,Kp=0 VSID=pgdir,12
+ mtsr 12, r.6
+ li r.6, 9 // T=0 Ks,Kp=0 VSID=9
+ mtsr 9, r.6
+ li r.6, 10 // T=0 Ks,Kp=0 VSID=10
+ mtsr 10, r.6
+ li r.6, 13 // T=0 Ks,Kp=0 VSID=13
+ mtsr 13, r.6
+ isync // context synchronize
+
+ lbz r.6, KiPcr+PcDebugActive(r.0)
+ cmpwi cr.1, r.6, 0 // Hardware debug register set?
+
+ lwz r.6, KiPcr+PcInitialStack(r.0) // kernel stack addr for thread
+ subi r.6, r.6, STACK_DELTA_NEWSTK // allocate stack frame
+
+// Stack address in r.6
+ beq+ cr.1, cee.30 // jif no debug registers set
+
+// Yuck! There aren't any registers but this is the best place.
+// Save r.3 and reload it after debug register processing.
+ stw r.3, TrIar + TF_BASE (r.6) // we need some registers
+ stw r.4, TrMsr + TF_BASE (r.6) // save SRR1 (MSR)
+ stw r.5, TrCr + TF_BASE (r.6) // save Condition Register
+ stw r.7, TrGpr7 + TF_BASE (r.6)
+ stw r.8, TrGpr8 + TF_BASE (r.6)
+ li r.3, 0 // Initialize Dr7
+ lwz r.5, KiPcr+PcPrcb(r.0) // get processor block address
+ lwz r.4, DR_BASE + SrKernelDr7(r.5) // Kernel DR set?
+ rlwinm r.4, r.4, 0, 0xFF
+ cmpwi cr.7, r.4, 0
+ stw r.3, TrDr7 + TF_BASE(r.6) // No DRs set
+ lwz r.7, DR_BASE + SrKernelDr0(r.5) // Get kernel IABR
+ lwz r.8, DR_BASE + SrKernelDr1(r.5) // Get kernel DABR
+ ori r.7, r.7, 0x3 // Sanitize IABR (Dr0)
+ ori r.8, r.8, 0x4 // Sanitize DABR (Dr1)
+
+//
+// WARNING: Don't rearrange this branch table. The first branch is overlaid
+// with the correct branch instruction (modified) based on the processor
+// during system initialization. The correct order is 601, 603, 604, skip.
+//
+BranchDr1:
+ b cee.21 // 601
+ b cee.23 // 603
+ b cee.24 // 604/613 - common path with 601
+ b cee.27 // unknown
+
+cee.21: // 601 SPECIFIC
+ li r.3, 0x0080 // Normal run mode
+ rlwinm r.7, r.7, 0, 0xfffffffc // Sanitize IABR (Dr0)
+ rlwinm r.8, r.8, 0, 0xfffffff8 // Sanitize DABR (Dr1)
+ bne cr.7, cee.24 // Leave hid1 set for full cmp
+ mtspr hid1, r.3
+
+cee.24:
+ mfspr r.3, iabr // Load the IABR (Dr0)
+ rlwinm. r.3, r.3, 0, 0xfffffffc // IABR(DR0) set?
+ li r.4, 0 // Initialize Dr7
+ stw r.3, TrDr0 + TF_BASE(r.6)
+ mfspr r.3, dabr // Load the DABR (Dr1)
+ beq noiabr.1 // jiff Dr0 not set
+ li r.4, 0x1 // Set LE0 in Dr7
+
+noiabr.1:
+ rlwimi r.4, r.3, 19, 11, 11 // Interchange R/W1 bits
+ rlwimi r.4, r.3, 21, 10, 10 // and move to Dr7
+ rlwinm. r.3, r.3, 0, 0xfffffff8 // Sanitize Dr1
+ stw r.3, TrDr1 + TF_BASE(r.6) // Store Dr1 in trap frame
+ beq nodabr.1 // jiff Dr1 not set
+ ori r.4, r.4, 0x4 // Set LE1 in Dr7
+
+nodabr.1:
+ ori r.4, r.4, 0x100 // Set LE bit in Dr7
+ stw r.4, TrDr7 + TF_BASE(r.6)
+ li r.4, 0
+ beq cr.7, nokdr.1
+ lwz r.3, DR_BASE + SrKernelDr7(r.5)
+ rlwinm. r.4, r.3, 0, 0x0000000c // LE1/GE1 set?
+ beq nodr1.1 // jiff Dr1 not set
+ rlwimi r.8, r.3, 13, 30, 30 // Interchange R/W1 bits
+ rlwimi r.8, r.3, 11, 31, 31
+ mtspr dabr, r.8
+
+nodr1.1:
+ rlwinm. r.3, r.3, 0, 0x00000003 // LE0/GE0 set?
+ beq cee.27
+ mtspr iabr, r.7
+ isync
+ b cee.27
+
+cee.23: // 603 SPECIFIC
+ mfspr r.3, iabr // Load the IABR (Dr0)
+ rlwinm. r.3, r.3, 0, 0xfffffffc // Sanitize Dr0
+ li r.4, 0x101 // Initialize Dr7
+ stw r.3, TrDr0 + TF_BASE(r.6)
+ stw r.4, TrDr7 + TF_BASE(r.6)
+ li r.4, 0
+ beq cr.7, nokdr.2 // jif no kernel DR set
+ rlwinm r.7, r.7, 0, 0xfffffffc // Sanitize IABR
+ ori r.7, r.7, 0x2
+ mtspr iabr, r.7
+ b cee.27
+
+nokdr.2:
+ mtspr iabr, r.4
+ b cee.27
+
+nokdr.1:
+ mtspr dabr, r.4
+ mtspr iabr, r.4
+ isync
+
+cee.27:
+ lwz r.8, TrGpr8 + TF_BASE (r.6) // Reload all the registers
+ lwz r.7, TrGpr7 + TF_BASE (r.6) // the debug code clobbered
+ lwz r.5, TrCr + TF_BASE (r.6)
+ lwz r.4, TrMsr + TF_BASE (r.6)
+ lwz r.3, TrIar + TF_BASE (r.6)
+
+cee.30:
+
+// Save Trap Frame (volatile registers)
+
+ stw r.3, TrIar + TF_BASE (r.6) // save SRR0 (IAR) in Trap Frame
+ stw r.3, ErExceptionAddress + ER_BASE (r.6) // and in Exception Record
+ stw r.3, EfLr (r.6) // and for unwind
+ stw r.4, TrMsr + TF_BASE (r.6) // save SRR1 (MSR)
+ stw r.5, TrCr + TF_BASE (r.6) // save Condition Register
+ stw r.5, EfCr (r.6) // and for unwind
+
+ stw r.0, TrGpr0 + TF_BASE (r.6) // save volatile GPRs, fetching some
+ stw r.sp, TrGpr1 + TF_BASE (r.6) // of them from their temporary save
+ lwz r.0, KiPcr+PCR_SAVE2 (r.0) // area in the PCR
+ lwz r.5, KiPcr+PCR_SAVE3 (r.0)
+ stw r.0, TrGpr2 + TF_BASE (r.6)
+ stw r.5, TrGpr3 + TF_BASE (r.6)
+ lwz r.0, KiPcr+PCR_SAVE4 (r.0)
+ lwz r.5, KiPcr+PCR_SAVE5 (r.0)
+ stw r.0, TrGpr4 + TF_BASE (r.6)
+ stw r.5, TrGpr5 + TF_BASE (r.6)
+ li r.5, 0 // Init DR6 (DR status) to zero
+ lwz r.0, KiPcr+PCR_SAVE6 (r.0)
+ stw r.7, TrGpr7 + TF_BASE (r.6)
+ lbz r.7, KiPcr+PcCurrentIrql (r.0)
+ stw r.0, TrGpr6 + TF_BASE (r.6)
+ lwz r.0, KiPcr+PCR_SAVE11 (r.0)
+ stw r.8, TrGpr8 + TF_BASE (r.6)
+ stw r.9, TrGpr9 + TF_BASE (r.6)
+ stw r.10, TrGpr10 + TF_BASE (r.6)
+ stw r.0, TrGpr11 + TF_BASE (r.6)
+ stw r.12, TrGpr12 + TF_BASE (r.6)
+ stb r.7, TrOldIrql + TF_BASE (r.6) // save current Irql in tf
+ stw r.5, TrDr6 + TF_BASE (r.6)
+
+// We've pulled everything out of the PCR that needs saving.
+// Set up r.1 as our stack pointer so that we can take another interrupt now if need be.
+
+ stw r.sp, CrBackChain (r.6) // set chain to previous stack frame
+ ori r.sp, r.6, 0 // move new stack frame pointer to r.sp
+
+// Save rest of trap frame
+
+ cmpwi r2, CODE_DECREMENTER // does this interrupt save volatile FPRs?
+
+ mflr r.5 // get Link, Count, and
+ mfctr r.6 // Fixed Point Exception
+ mfxer r.7 // registers
+
+ ble skip_float // if le, don't save volatile FPRs
+ stfd f.0, TrFpr0 + TF_BASE (r.sp) // save volatile FPRs
+ lis r.12, K_BASE // base address of KSEG0
+ stfd f.1, TrFpr1 + TF_BASE (r.sp)
+ lfd f.1, FpZero-real0(r.12) // get FP 0.0
+ stfd f.2, TrFpr2 + TF_BASE (r.sp)
+ stfd f.3, TrFpr3 + TF_BASE (r.sp)
+ stfd f.4, TrFpr4 + TF_BASE (r.sp)
+ stfd f.5, TrFpr5 + TF_BASE (r.sp)
+ mffs f.0 // get Floating Point Status
+ // and Control Register (FPSCR)
+ stfd f.6, TrFpr6 + TF_BASE (r.sp)
+ stfd f.7, TrFpr7 + TF_BASE (r.sp)
+ stfd f.8, TrFpr8 + TF_BASE (r.sp)
+ stfd f.9, TrFpr9 + TF_BASE (r.sp)
+ stfd f.10, TrFpr10 + TF_BASE (r.sp)
+ stfd f.11, TrFpr11 + TF_BASE (r.sp)
+ stfd f.12, TrFpr12 + TF_BASE (r.sp)
+ stfd f.13, TrFpr13 + TF_BASE (r.sp)
+ stfd f.0, TrFpscr + TF_BASE (r.sp)
+ mtfsf 0xff, f.1
+
+skip_float:
+
+ stw r.5, TrLr + TF_BASE (r.sp) // save Link,
+ stw r.6, TrCtr + TF_BASE (r.sp) // Count,
+ stw r.7, TrXer + TF_BASE (r.sp) // Fixed Point Exception
+
+// End of save Trap Frame
+
+// Volatile registers (r.0 thru r.12) are now available for use.
+// r.2 holds code number identifying interrupt
+// r.3 holds interrupt SRR0
+// r.4 holds interrupt SRR1
+
+//-----------------------------------------------------------------
+//
+// Perform processing specific to the type of PowerPC interrupt,
+// including deciding just what Windows NT exception code ("STATUS_...")
+// value should be associated with this interrupt/exception.
+//
+// The internal code number in r.2 is an index into the following branch table.
+//
+
+ bl next
+next:
+ mflr r.12
+ la r.12, branch_table - next (r.12)
+
+// leave r.12 pointing to branch_table; used as base for data addressing
+// in code below
+
+ add r.2, r.12, r.2
+ mtlr r.2
+ lwz r.2, toc_pointer - branch_table (r.12) // load the kernel's TOC address
+ blr // thru branch table
+
+// Note that no "validation" of the code is done; this table must match
+// the set of CODE_... values defined above, or all bets are off.
+
+branch_table:
+ b process_machine_check // 0
+ b process_external // 4
+ b process_decrementer // 8
+ b process_storage_error // 12
+ b process_page_fault // 16
+ b process_alignment // 20
+ b process_program // 24
+ b process_fp_unavail // 28
+ b process_direct_store // 32
+ b process_system_call // 36
+ b process_trace // 40
+ b process_fp_assist // 44
+ b process_run_mode // 48
+ b process_panic // 52
+// Added for PPC603:
+ b process_system_management // 56
+// 601, 604 data address breakpoint
+ b process_data_breakpoint // 60
+// 604 Performance Monitor Interrupt
+ b process_pmi // 64
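+
+// The sequence above is the assembly form of an indexed jump table: the
+// CODE_* value in r.2 is a byte offset (a multiple of 4) into branch_table.
+// A rough C analogue, with hypothetical names, would be:
+//
+//      typedef void (*ExceptionHandler)(uint32_t srr0, uint32_t srr1);
+//
+//      void Dispatch(ExceptionHandler table[], uint32_t code,
+//                    uint32_t srr0, uint32_t srr1)
+//      {
+//          table[code >> 2](srr0, srr1);   // no range check, as noted above
+//      }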
+
+ DUMMY_EXIT(KiCommonExceptionEntry)
+
+// Register contents at this point are:
+// r.0 -scratch-
+// r.1 (r.sp) Our stack pointer
+// r.2 (r.toc) Kernel's TOC pointer
+// r.3 SRR0 (interrupt address)
+// r.4 SRR1 (MSR at time of interrupt)
+// r.5--r.11 -scratch-
+// r.12 Address of branch_table, above
+// r.13--r.31 Non-volatile state, STILL UNSAVED
+
+// This follows standard linkage conventions, with SRR0 and SRR1 as the
+// two parameters of a call.
+
+//-----------------------------------------------------------------
+//
+// Storage Error and Page Fault after DS and IS interrupts --
+//
+// The code immediately below is never executed. It is present to
+// allow for unwinding the stack through process_storage_error or
+// process_page_fault, for exception handling.
+
+ FN_TABLE(KiStorageFaultDispatch,0,0)
+
+ DUMMY_ENTRY (KiStorageFaultDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiStorageFaultDispatch)
+
+process_storage_error:
+ lwz r.4,KiPcr+PcBadVaddr(r.0) // get failing addr, st/l bit
+ rlwinm r.3,r.4,0,0x00000001 // isolate st/l indicator
+ stw r.3,ErExceptionInformation+ER_BASE(r.1) // st/l to er
+ stw r.4,ErExceptionInformation+4+ER_BASE(r.1) // fail to er
+ LWI (r.3,STATUS_ACCESS_VIOLATION) // access violation status
+ li r.4,2 // there are 2 er parms
+ b seter // goto setup rest of er
+
+process_page_fault:
+ mfmsr r.0
+ rlwinm r.5,r.4,18,0x00000001 // 3rd arg - processor mode
+ lwz r.4,KiPcr+PcBadVaddr(r.0) // 2nd arg - failing addr
+ rlwinm r.3,r.4,0,0x00000001 // 1st arg - st/l indicator
+ stw r.3,ErExceptionInformation+ER_BASE(r.1) // st/l to er
+ stw r.4,ErExceptionInformation+4+ER_BASE(r.1) // fail to er
+ ori r.0,r.0,INT_ENA
+ mtmsr r.0 // enable interrupts
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ bl ..MmAccessFault // call mem mgmt fault subr
+
+//
+// Check if working set watch is enabled.
+//
+
+ lwz r.5,[toc]PsWatchEnabled(r.2) // get &working set watch enable
+ cmpwi r.3,STATUS_SUCCESS // mem mgmt handled fault?
+ lbz r.5,0(r5) // get working set watch enable flag
+ blt xcptn // branch if fault not handled
+ cmpwi r.5,0 // watch enabled?
+ lwz r.5,ErExceptionInformation+4+ER_BASE(r.1)// set bad addr
+ beq owdbkp // jif watch disabled
+
+ lwz r.4,TrIar+TF_BASE(r.1) // set failing PC
+ bl ..PsWatchWorkingSet // record working set information
+
+//
+// Check if the debugger has any owed breakpoints.
+//
+
+owdbkp:
+ lwz r.4,[toc]KdpOweBreakpoint(r.2)
+ lbz r.4,0(r.4) // get owed breakpoint flag
+ cmpwi r.4,0
+ beq KiAlternateExit // jif no owed breakpoints
+
+ bl ..KdSetOwedBreakpoints // call insrt breakpts subr
+ b KiAlternateExit // goto resume thread
+
+xcptn: LWI (r.0,(STATUS_IN_PAGE_ERROR|0x10000000)) // was code for
+ cmplw r.3,r.0 // irql too high returned?
+ beq irqlhi // branch if yes
+ li r.4,2 // assume 2 er parms
+ LWI (r.0,STATUS_ACCESS_VIOLATION) // was it
+ cmplw r.3,r.0 // access violation?
+ beq seter // branch if yes
+ LWI (r.0,STATUS_GUARD_PAGE_VIOLATION) // was it
+ cmplw r.3,r.0 // guard page violation?
+ beq seter // branch if yes
+ LWI (r.0,STATUS_STACK_OVERFLOW) // was it
+ cmplw r.3,r.0 // stack overflow?
+ beq seter // branch if yes
+ stw r.3,ErExceptionInformation+8+ER_BASE(r.1) // stat to er
+ LWI (r.3,STATUS_IN_PAGE_ERROR) // use in page error status
+ li r.4,3 // now there are 3 er parms
+
+seter: stw r.3,ErExceptionCode+ER_BASE(r.1) // set er xcptn code
+ stw r.4,ErNumberParameters+ER_BASE(r.1) // set er num parms
+ li r.0,0 // zero
+ stw r.0,ErExceptionFlags+ER_BASE(r.1) // er flags
+ stw r.0,ErExceptionRecord+ER_BASE(r.1) // er record ptr
+ b exception_dispatch // goto dispatch exception
+
+irqlhi: li r.3,IRQL_NOT_LESS_OR_EQUAL // get irql too high code
+ lwz r.4,ErExceptionInformation+4+ER_BASE(r.1) // fail addr
+ lbz r.5,KiPcr+PcCurrentIrql(r.0) // get current irql from pcr
+ lwz r.6,ErExceptionInformation+ER_BASE(r.1) // st/l indic
+ lwz r.7,ErExceptionAddress+ER_BASE(r.1) // get int addr
+ bl ..KeBugCheckEx // call bug check subroutine
+ b $
+
+ DUMMY_EXIT(KiStorageFaultDispatch)
+
+//-----------------------------------------------------------------
+//
+// Alignment interrupt --
+// Must be for data. It's not possible to cause the machine
+// to branch to an address that isn't a multiple of 4, hence
+// there is no "misaligned instruction" exception
+
+// This array of bits, indexed by the 7-bit "index" value from
+// the DSISR, indicates whether the offending instruction is
+// a load or a store. "1" indicates store. See alignem.c for
+// an indication of how the 7-bit index value maps to the set
+// of load/store instructions.
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_alignment, for exception
+// handling.
+
+ FN_TABLE(KiAlignmentFaultDispatch,0,0)
+
+ DUMMY_ENTRY (KiAlignmentFaultDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiAlignmentFaultDispatch)
+
+ .align 2
+al_table:
+ .long 0x44624460, 0x40004000, 0x60440402, 0x44624460
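+
+// For illustration only, the lookup performed below can be written in C
+// roughly as follows, assuming the 7-bit index occupies DSISR bits 15-21
+// (big-endian bit numbering); IsStore is a hypothetical name.
+//
+//      int IsStore(const uint32_t al_table[4], uint32_t dsisr)
+//      {
+//          uint32_t index = (dsisr >> 10) & 0x7f;  // 7-bit opcode index
+//          uint32_t word  = al_table[index >> 5];  // one of the 4 table words
+//          uint32_t n     = index & 0x1f;          // bit number within word
+//          return (word >> ((32 - n) & 31)) & 1;   // 1 == store
+//      }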
+
+process_alignment:
+ lwz r.4, KiPcr+PcSavedV1 (r.0) // get DSISR (align status)
+ lwz r.0, KiPcr+PcSavedV0 (r.0) // get DAR (offending address)
+
+ rlwinm r.5, r.4, 19, 0b1100 // isolate table word number
+ la r.6, al_table - branch_table (r.12) // load address of table
+ lwzx r.6, r.6, r.5 // load word from table
+ rlwinm r.5, r.4, 22, 0x1f // isolate bit number within word
+ rlwnm r.6, r.6, r.5, 0x1 // isolate load/store bit
+ cmpwi r.0, 0 // test for user vs. system address
+
+// put load/store indicator, DAR, and DSISR in exception record
+
+ stw r.6, ErExceptionInformation + ER_BASE (r.sp)
+ stw r.0, ErExceptionInformation + 4 + ER_BASE (r.sp)
+ stw r.4, ErExceptionInformation + 8 + ER_BASE (r.sp)
+
+ crand 0, cr.0 + LT, WAS_USER_MODE // test for system addr AND user mode
+ LWI (r.0, STATUS_DATATYPE_MISALIGNMENT) // load most probable status value
+ bf+ 0, al_1 // branch if not an access violation
+ LWI (r.0, STATUS_ACCESS_VIOLATION) // load access viol. status value
+al_1:
+ stw r.0, ErExceptionCode + ER_BASE (r.sp) // store status value in excep. record
+ li r.0, 0
+ stw r.0, ErExceptionFlags + ER_BASE (r.sp)
+ stw r.0, ErExceptionRecord + ER_BASE (r.sp)
+ li r.0, 3
+ stw r.0, ErNumberParameters + ER_BASE (r.sp)
+ b exception_dispatch
+
+ DUMMY_EXIT (KiAlignmentFaultDispatch)
+
+//-----------------------------------------------------------------
+//
+// Program interrupt --
+// SRR0 contains the failing instruction address
+// SRR1 contains bits indicating the reason for interrupt
+// Floating-point enabled exception (SRR1 bit 11)
+// Illegal instruction (SRR1 bit 12)
+// Privileged instruction executed in problem state (SRR1 bit 13)
+// Trap instruction (SRR1 bit 14)
+// In addition, SRR1 bit 15 is set if this is an
+// IMPRECISE floating point interrupt
+//
+// On entry to this code, the interrupt-time SRR0 value is in
+// r.3, and the SRR1 value is in r.4
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_program, for exception
+// handling.
+
+ FN_TABLE(KiProgramFaultDispatch,0,0)
+
+ DUMMY_ENTRY (KiProgramFaultDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiProgramFaultDispatch)
+
+// This code is rather poorly scheduled, but that doesn't matter.
+// None of this is performance-critical.
+
+ .set PR_FPE, 0 // locations of the indicator bits (above)
+ .set PR_ILL, 1 // after moving them to CR field 0
+ .set PR_PRIV, 2
+ .set PR_TRAP, 3
+
+ .set TO_BKP, 0b11111 // trap BREAKPOINT
+ .set TO_DIV0, 0b00110 // trap Integer DIV by zero
+ .set TO_DIV0U, 0b00111 // trap unconditional DIV by 0
+
+process_program:
+ rlwinm r.0, r.4, 11, 0xF0000000 // move the 4 bits to high end
+ mtcrf 0x80, r.0 // insert them into bits 0..3 of CR
+ bt PR_ILL, pr_2 // branch if illegal instruction
+
+ li r.0, 0 // fill in common info in
+ stw r.0, ErExceptionFlags + ER_BASE (r.sp) // exception record
+ stw r.0, ErExceptionRecord + ER_BASE (r.sp)
+ lwz r.4, 0 (r.3) // pick up the instruction itself
+ li r.0, 1
+ stw r.0, ErNumberParameters + ER_BASE (r.sp) // show 1 parameter
+ stw r.4, ErExceptionInformation + ER_BASE (r.sp) // save instr as 1st "info" word
+
+ bt PR_FPE, pr_1 // branch if float exception
+ bt PR_PRIV, pr_3 // branch if privileged instruction
+ // fall thru if trap instruction
+
+// Trap instruction.
+// If the instruction has 0b11111 as the trap condition field,
+// then it's a "breakpoint". Otherwise it's an array bounds
+// violation.
+// The instruction itself is in r.4 at this point.
+
+ rlwinm r.5, r.4, 11, 0b11111 // isolate the "TO" field
+ cmpwi r.5, TO_BKP // breakpoint?
+ LWI (r.0, STATUS_BREAKPOINT) // assume breakpoint status
+ beq pr_0 // branch if correct
+
+//
+// Integer divide by zero is implemented as a trap. The TO equal bit is set
+// and the immediate field must be zero. To differentiate from other possible
+// uses of trap on zero, the logically less than bit is also set. (in a comp
+// against zero this will NEVER cause the trap so is useful just as a flag).
+// The compiler may also set the logically greater than bit if this is an
+// unconditional divide by zero. In the following check, "or" in the logically
+// greater than bit then check that both the TO field is TO_DIV0U AND the
+// immediate field is zero.
+//
+ ori r.5, r.5, TO_DIV0^TO_DIV0U // |= "logically greater than"
+ rlwimi r.5, r.4, 16, 0xffff0000 // |= immediate field ( << 16)
+ cmpwi r.5, TO_DIV0U
+ beq pr_0
+ LWI (r.0, STATUS_ARRAY_BOUNDS_EXCEEDED) // assume bounds check trap
+
+pr_0:
+ stw r.0, ErExceptionCode + ER_BASE (r.sp) // store proper status in excep. record
+ b exception_dispatch
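+
+// For illustration, the trap decode above corresponds roughly to the C
+// sketch below, where instr is the trapping twi/tw instruction word and
+// both function names are hypothetical:
+//
+//      int IsBreakpointTrap(uint32_t instr)
+//      {
+//          return ((instr >> 21) & 0x1f) == 0x1f;              // TO == 0b11111
+//      }
+//
+//      int IsIntegerDivideByZeroTrap(uint32_t instr)
+//      {
+//          uint32_t to = (instr >> 21) & 0x1f;                 // TO field
+//          return ((to | 1) == 0x07) && (instr & 0xffff) == 0; // TO_DIV0[U], SI == 0
+//      }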
+
+// Floating-point enabled exception.
+// Pass all thru under the code "FLOAT_STACK_CHECK" at this point;
+// subdivide them further later.
+
+pr_1:
+ LWI (r.0, STATUS_FLOAT_STACK_CHECK)
+ stw r.0, ErExceptionCode + ER_BASE (r.sp)
+ b exception_dispatch
+
+// Illegal instruction.
+
+pr_2:
+
+#if 0
+
+//
+// Save the contents of the HPT group for the failing address.
+//
+
+ mfsrin r.5,r.3 // get sreg of virtual addr arg
+ stw r.5, 28 + ErExceptionInformation + ER_BASE (r.sp)
+ rlwinm r.6,r.3,20,0x0000ffff // align arg vpi with vsid
+ xor r.6,r.5,r.6 // hash - exclusive or vsid with vpi
+ rlwimi r.5,r.3,3,0x7e000000 // insert api into reg with vsid
+ rlwinm r.5,r.5,7,0xffffffbf // align vsid,api as 1st word hpte
+ stw r.5, 32 + ErExceptionInformation + ER_BASE (r.sp)
+ mfsdr1 r.7 // get storage description reg
+ rlwinm r.8,r.7,10,0x0007fc00 // align hpt mask with upper hash
+ ori r.8,r.8,0x03ff // append lower one bits to mask
+ and r.6,r.8,r.6 // take hash modulo hpt size
+ rlwinm r.6,r.6,6,0x01ffffc0 // align hash as hpt group offset
+ rlwinm r.7,r.7,0,0xffff0000 // get real addr of hash page table
+ oris r.7,r.7,K_BASE // or with kernel virtual address
+ or r.6,r.7,r.6 // or with offset to get group addr
+ stw r.6, 36 + ErExceptionInformation + ER_BASE (r.sp)
+ li r7,0x37fc
+ oris r7,r7,K_BASE
+ subi r6,r6,4
+ li r8,16
+ mfctr r0
+ mtctr r8
+loadloop1:
+ lwzu r8,4(r6)
+ stwu r8,4(r7)
+ bdnz loadloop1
+ mtctr r0
+ subi r6,r6,60
+
+//
+// Turn the data cache off.
+//
+
+ mfspr r9, 1008
+ ori r7, r9, 0x4000
+ mtspr 1008, r7
+ sync
+
+//
+// Dump the HPT group again.
+//
+
+ li r7,0x383c
+ oris r7,r7,K_BASE
+ subi r6,r6,4
+ li r8,16
+ mfctr r0
+ mtctr r8
+loadloop2:
+ lwzu r8,4(r6)
+ stwu r8,4(r7)
+ bdnz loadloop2
+ mtctr r0
+ subi r6,r6,60
+
+//
+// Get the instruction word from memory.
+//
+
+ lwz r4, 0(r3)
+ stw r.4, 4 + ErExceptionInformation + ER_BASE (r.sp)
+
+//
+// Dump the HPT group again.
+//
+
+ li r7,0x387c
+ oris r7,r7,K_BASE
+ subi r6,r6,4
+ li r8,16
+ mfctr r0
+ mtctr r8
+loadloop3:
+ lwzu r8,4(r6)
+ stwu r8,4(r7)
+ bdnz loadloop3
+ mtctr r0
+ subi r6,r6,60
+
+//
+// Turn the data cache on.
+//
+
+ mtspr 1008, r9
+ sync
+
+#endif
+
+ li r.0, 0 // fill in common info in
+ stw r.0, ErExceptionFlags + ER_BASE (r.sp) // exception record
+ stw r.0, ErExceptionRecord + ER_BASE (r.sp)
+ lwz r.4, 0 (r.3) // pick up the instruction itself
+ li r.0, 1
+ stw r.0, ErNumberParameters + ER_BASE (r.sp) // show 1 parameter
+ stw r.4, ErExceptionInformation + ER_BASE (r.sp) // save instr as 1st "info" word
+
+ LWI (r.0, STATUS_ILLEGAL_INSTRUCTION)
+ stw r.0, ErExceptionCode + ER_BASE (r.sp)
+
+#if 0
+
+//
+// The following is normally left out, but it can be useful when
+// debugging coherency problems, so I've left the code around for future
+// use. plj
+//
+
+//
+// Dump the HPT group again.
+//
+
+ li r7,0x38bc
+ oris r7,r7,K_BASE
+ subi r6,r6,4
+ li r8,16
+ mfctr r0
+ mtctr r8
+loadloop4:
+ lwzu r8,4(r6)
+ stwu r8,4(r7)
+ bdnz loadloop4
+ mtctr r0
+ subi r6,r6,60
+
+//
+// Look for a matching entry (valid or invalid) in the HPT for the instruction address.
+//
+
+nexthpte:
+ lwz r.7,4(r.6) // get 1st(be) word of hpte
+ lwz r.8,0(r.6) // get 2nd(be) word of hpte
+ clrlwi r.11,r.7,1 // mask off valid bit
+ cmplw r.5,r.11 // does hpte match search arg?
+ beq found // break if eq
+ addi r.6,r.6,8 // increment to next hpte
+ andi. r.7,r.6,0x003f // tested all hptes in prim group?
+ bne nexthpte // loop if not
+ li r.6,0 // no match found
+ li r.7,0
+ li r.8,0
+found:
+ stw r.6, 16 + ErExceptionInformation + ER_BASE (r.sp)
+ stw r.7, 20 + ErExceptionInformation + ER_BASE (r.sp)
+ stw r.8, 24 + ErExceptionInformation + ER_BASE (r.sp)
+
+//
+// Go get the instruction from memory (the load we just did got it from
+// the data cache). Try to do this by "invalidating" the data cache block
+// containing the instruction.
+//
+
+ dcbf 0, r.3 // invalidate d cache block
+ sync
+ lwz r.4, 0(r.3) // refetch the instruction
+ stw r.4, 8 + ErExceptionInformation + ER_BASE (r.sp)
+
+//
+// Now be even harsher about getting the instruction from memory.
+//
+
+ dcbf 0, r.3 // invalidate d cache block
+ sync
+ tlbie r.3 // invalidate TLB entry
+ sync
+ lwz r.4, 0(r.3) // refetch the instruction
+ li r.0, 10 // bump parameter count
+ stw r.0, ErNumberParameters + ER_BASE (r.sp)
+ stw r.4, 12 + ErExceptionInformation + ER_BASE (r.sp)
+
+//
+// Write to the FirePower scratch pad register, in case they're watching
+// with a logic analyzer.
+//
+
+ LWI (r.4,0xb0900024)
+ stw r.3, 0(r.4)
+ sync
+
+ //twi 31,0,0x16
+
+#endif
+
+ b exception_dispatch
+
+// Privileged instruction executed in user mode (problem state).
+
+pr_3:
+ LWI (r.0, STATUS_PRIVILEGED_INSTRUCTION)
+ stw r.0, ErExceptionCode + ER_BASE (r.sp)
+ b exception_dispatch
+
+ DUMMY_EXIT (KiProgramFaultDispatch)
+
+ FN_TABLE(KiDataAddressBreakpointDispatch,0,0)
+
+ DUMMY_ENTRY(KiDataAddressBreakpointDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END(KiDataAddressBreakpointDispatch)
+process_data_breakpoint:
+ li r.4, 2 // Dr1 breakpoint register hit
+ LWI (r.0, STATUS_SINGLE_STEP) // single step status
+ stw r.0, ErExceptionCode + ER_BASE (r.sp)
+ stw r.4, TrDr6 + TF_BASE(r.sp) // record Dr1 hit in Dr6
+ li r.0, 1
+ stw r.0, ErNumberParameters + ER_BASE (r.sp) // show 1 parameter
+ li r.0, 0
+ stw r.0, ErExceptionInformation + ER_BASE (r.sp) // parm = 0
+ b exception_dispatch
+
+ DUMMY_EXIT(KiDataAddressBreakpointDispatch)
+
+//-----------------------------------------------------------------
+//
+// Floating Point Unavailable interrupt --
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_fp_unavail, for exception
+// handling.
+
+ FN_TABLE(KiFloatingPointUnavailableDispatch,0,0)
+
+ DUMMY_ENTRY (KiFloatingPointUnavailableDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiFloatingPointUnavailableDispatch)
+
+process_fp_unavail:
+ li r.3, TRAP_CAUSE_UNKNOWN
+ bl ..KeBugCheck
+ b $
+
+ DUMMY_EXIT (KiFloatingPointUnavailableDispatch)
+
+//-----------------------------------------------------------------
+//
+// Machine Check, Decrementer Interrupt and External Interrupt are bundled
+// into somewhat common code as all three are handled by the HAL.
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_machine_check, process_decrementer
+// and process_external for exception handling.
+
+ .struct 0
+ .space StackFrameHeaderLength
+IntTOC: .space 4 // saved TOC
+IntOIS: .space 4 // saved On Interrupt Stack indicator
+IntIRQL:.space 1 // saved IRQL
+ .align 3 // 8 byte align
+IntFrame:
+
+ FN_TABLE (KiInterruptException,0,0)
+
+ DUMMY_ENTRY (KiInterruptException)
+ b common_exception_entry // use common prologue for unwind
+ stwu r.sp, -IntFrame(r.sp)
+
+ PROLOGUE_END (KiInterruptException)
+
+//-----------------------------------------------------------------
+//
+// Machine Check --
+// Machine check is treated just like an interrupt; we let
+// the HAL handle it.
+// Load offset to PCR->InterruptRoutine[MACHINE_CHECK_VECTOR]
+// and branch to KiInterruptException to handle dispatch.
+//
+process_machine_check:
+ li r.3, PcInterruptRoutine + IrMachineCheckVector
+ b KiInterruptException10
+
+//-----------------------------------------------------------------
+//
+// Performance Monitor --
+// The 604 (and follow-ons) Performance Monitor interrupt is
+// handled like an external interrupt. Some PMI agent registers
+// to handle the interrupt. Load offset to PMI handler and
+// branch to KiInterruptException to handle.
+//
+// N.B. Some versions of the 604 do not turn off ENINT in MMCR0 when
+// signaling the PM interrupt. Therefore interrupts must not be
+// enabled before the spot in the (external) PM interrupt handler
+// where ENINT is turned off. This implies that one must not set
+// breakpoints or make calls to DbgPrint anywhere along the path
+// from here to the PM interrupt handler.
+//
+process_pmi:
+ li r.3, PcInterruptRoutine + IrPmiVector
+ b KiInterruptException10
+
+
+//-----------------------------------------------------------------
+//
+// Decrementer interrupt --
+// Load offset to PCR->InterruptRoutine[DECREMENT_VECTOR]
+// and branch to KiInterruptException to handle dispatch.
+//
+process_decrementer:
+ li r.3, PcInterruptRoutine + IrDecrementVector
+ b KiInterruptException10
+
+//-----------------------------------------------------------------
+//
+// External (I/O) interrupt --
+// Load offset to PCR->InterruptRoutine[EXTERNAL_VECTOR]
+// and fall thru to KiInterruptException to handle dispatch.
+//
+
+process_external:
+ li r.3, PcInterruptRoutine + IrDeviceVector
+// b KiInterruptException10
+
+//-----------------------------------------------------------------
+//
+// KiInterruptException
+//
+// This code switches to the interrupt stack (if not already
+// on the interrupt stack) and dispatches the appropriate handler
+// for the interrupt.
+//
+// On return from the handler, we switch back to the previous
+// stack and check for and run dpc interrupts if IRQL is below
+// DISPATCH_LEVEL.
+//
+// On entry r.3 contains the offset into the PCR of the address
+// of the handler.
+// r.sp current stack pointer
+// Interrupts are disabled.
+//
+// Calls the handler with
+// r.3 Address of Interrupt Object
+// r.4 Address of Service Context
+// r.5 Address of Trap Frame
+//
+// Exits to KiAlternateExit.
+
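+// A hedged C-like sketch of the dispatch below (names follow the
+// assembly; this is an illustration, not the exact implementation):
+//
+//     handler = *PCR->InterruptRoutine[vector];     // fn descriptor
+//     old     = PCR->OnInterruptStack;
+//     PCR->OnInterruptStack = sp;
+//     if (!old) {                                   // switch stacks
+//         save InitialStack/StackLimit and point them at the
+//         per-processor interrupt stack;
+//         if (CurrentIrql < DISPATCH_LEVEL)
+//             CurrentIrql = DISPATCH_LEVEL;         // no context switch
+//     }
+//     Prcb->InterruptCount++;
+//     handler(InterruptObject, ServiceContext, &TrapFrame);
+//     // on return: disable interrupts, switch back to the previous
+//     // stack, deliver pending DPCs/APCs, exit via KiPriorityExit
+//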
+KiInterruptException10:
+ lwz r.7, KiPcr(r.3) // get address of fn descr
+ lwz r.8, KiPcr+PcPrcb(r.0) // get processor block address
+ lwz r.12,KiPcr+PcOnInterruptStack(r.0) // get stack indicator
+ stw r.sp,KiPcr+PcOnInterruptStack(r.0) // set new stack indicator
+ addi r.5, r.sp, TF_BASE // set 3rd parm = &Trap Frame
+ lwz r.6, 0(r.7) // get addr of entry point
+ lwz r.9, PbInterruptCount(r.8) // get current interrupt count
+ cmpwi r.12,0 // check already on intrpt stk
+ subi r.3, r.7, InDispatchCode // compute addr of Intr Object
+ lwz r.4, InServiceContext(r.3) // get addr of Service Context
+ mtlr r.6 // service proc entry point.
+ li r.6, -IntFrame // size of stack frame
+ bne kie20 // jif already on interrupt stk
+ lwz r.10,KiPcr+PcInitialStack(r.0) // get old initial stack addr
+ lwz r.11,KiPcr+PcStackLimit(r.0) // get old stack limit
+ lbz r.0, KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ lwz r.6, KiPcr+PcInterruptStack(r.0) // get interrupt stack
+ stw r.10,KiPcr+PcSavedInitialStack(r.0) // save old initial stack addr
+ stw r.11,KiPcr+PcSavedStackLimit(r.0) // save old stack limit
+ subi r.11, r.6, KERNEL_STACK_SIZE // compute new stack limit
+ cmpwi r.0, DISPATCH_LEVEL // IRQL >= DISPATCH_LEVEL ?
+ stw r.6, KiPcr+PcInitialStack(r.0) // set new initial stack
+ stw r.11, KiPcr+PcStackLimit(r.0) // set new stack limit
+ subi r.6, r.6, IntFrame
+ stb r.0, IntIRQL(r.6) // save old IRQL (on new stack)
+ sub r.6, r.6, r.sp // diff needed for new sp
+ bge kie20 // jif IRQL >= DISPATCH_LEVEL
+
+//
+// IRQL is below DISPATCH_LEVEL, raise to DISPATCH_LEVEL to avoid context
+// switch while on the interrupt stack.
+//
+
+ li r.0, DISPATCH_LEVEL // raise IRQL
+ stb r.0, KiPcr+PcCurrentIrql(r.0)
+kie20: stwux r.sp, r.sp, r.6 // buy stack frame
+ stw r.toc, IntTOC(r.sp) // save our toc
+ stw r.12, IntOIS(r.sp) // save On Int Stk indicator
+ lwz r.toc, 4(r.7) // get callee's toc
+ addi r.9, r.9, 1 // increment interrupt count
+ stw r.9, PbInterruptCount(r.8)
+ blrl // call service proc
+
+//
+// Back from the service proc. If we switched stacks on the way in we
+// need to switch back.
+//
+
+ mfmsr r.7
+ lwz r.9, IntOIS(r.sp) // get saved stack indicator
+ lwz r.toc, IntTOC(r.sp) // restore our toc
+ cmpwi r.9, 0 // if eq, must switch stacks
+ rlwinm r.7,r.7,0,~INT_ENA
+ mtmsr r.7 // disable interrupts
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ lbz r.6, IntIRQL(r.sp) // get previous IRQL
+ stw r.9, KiPcr+PcOnInterruptStack(r.0) // restore stack indicator
+ lwz r.sp, 0(r.sp) // switch stacks back
+ la r.3, TF_BASE(r.sp) // compute trap frame address
+ bne KiPriorityExit // jif staying on interrupt stk
+
+ lwz r.8, KiPcr+PcSavedInitialStack(r.0) // get old initial stack
+ lwz r.9, KiPcr+PcSavedStackLimit(r.0) // get old stack limit
+ cmpwi cr.3, r.6, APC_LEVEL // check current IRQL
+ stw r.8, KiPcr+PcInitialStack(r.0) // restore thread initial stack
+ stw r.9, KiPcr+PcStackLimit(r.0) // restore thread stack limit
+
+//
+// If previous IRQL is below DISPATCH_LEVEL, restore current IRQL to its
+// correct value, check for pending DPC interrupts and deliver them now.
+//
+// N.B. We used cr.3 for the comparison against APC_LEVEL. This cr
+// is non-volatile but we know we are going to exit thru a path that
+// will restore it so we're bending the rules a little. We use it
+// so that it will be intact after calling KiDispatchSoftwareInterrupt.
+//
+
+ bgt cr.3, KiPriorityExit // exit interrupt state
+
+ blt cr.3, kie25 // below APC_LEVEL; no matter
+ // what, we need to take the
+ // interrupt enable path
+
+//
+// We are at APC level, if no DPC pending, get out the fast way without
+// enabling interrupts.
+//
+
+ lbz r.5, KiPcr+PcDispatchInterrupt(r.0) // is there a DPC int pending?
+ cmpwi r.5, 0 // s/w int pending?
+ bne kie25
+ stb r.6, KiPcr+PcCurrentIrql(r.0) // set correct IRQL
+ b KiPriorityExit
+
+//
+// The only reason to enable interrupts before exiting interrupt state
+// is to run pending DPCs and APCs. In the following, we enable interrupts
+// BEFORE resetting current IRQL to its correct value which is below
+// DISPATCH_LEVEL. This (and the following sync) should cause any pending
+// interrupts to be taken immediately with IRQL set to DISPATCH_LEVEL.
+// When exiting from the second (nested) interrupt we will not enable
+// interrupts early (because previous IRQL is DISPATCH_LEVEL) so we
+// will not run out of stack if we are being swamped with interrupts.
+// This guarantees that the maximum number of interrupt contexts on the
+// kernel stack at any point is 2 (the first taken below DISPATCH_LEVEL
+// and the second taken at DISPATCH_LEVEL).
+//
+// plj programming note: If the mtmsr/sync combo isn't enough to force
+// pending interrupts at precisely the right time, switch to an rfi
+// sequence. Currently all PowerPC implementations will take a pending
+// interrupt prior to execution of the first instruction pointed to
+// by srr0 if interrupts are enabled as a result of the rfi (processors
+// include the 601, 601+, 603, 603+, 604, 604+ and 620).
+//
+// Solution to overflowing the stack for devices that interrupt at a high
+// rate: close the window for dispatching software interrupts and don't
+// enable interrupts until we determine there is a pending software
+// interrupt. Return from dispatching software interrupts with interrupts
+// still disabled until we determine that we are resuming to user mode at
+// an IRQL < APC_LEVEL. The only time this should occur is when we are no
+// longer running a nested interrupt.
+
+
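+// A hedged outline of the decision made at kie25 (illustration only):
+//
+//     CurrentIrql = previous_irql;                  // restore IRQL
+//     if (software interrupt pending) {
+//         save volatile FP state into the trap frame;
+//         KiDispatchSoftwareIntDisabled(...);       // DPCs, maybe APCs
+//     }
+//     if (resuming user mode && previous_irql < APC_LEVEL &&
+//         UserApcPending) {
+//         save volatile FP state (if not already saved);
+//         enable interrupts and join ae.apc_deliver;
+//     } else {
+//         exit via KiPriorityExit / ae.restore;
+//     }
+//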
+kie25:
+ lhz r.5, KiPcr+PcSoftwareInterrupt(r.0) // is there a s/w int pending?
+ stb r.6, KiPcr+PcCurrentIrql(r.0) // restore previous IRQL
+ cmpwi cr.1, r.5, 0
+ bne cr.1, kie30 // if ne, s/w int pending
+
+ lwz r.8, TF_BASE+TrMsr(r.sp) // load saved MSR value
+ extrwi. r.8, r.8, 1, MSR_PR // see if resuming user mode
+ beq KiPriorityExit // jif kernel mode
+ beq cr.3, KiPriorityExit // skip user mode APC check
+ // if at APC level.
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get address of current thread
+ stb r.5, ThAlerted(r.6) // clear kernel mode alerted
+ lbz r.6, ThApcState+AsUserApcPending(r.6)
+ cmpwi r.6, 0 // user mode APC pending?
+ beq KiPriorityExit // if eq, none pending
+
+kie30:
+
+//
+// Either a software interrupt is pending, or the current thread has a
+// user mode APC pending. We need to save the volatile floating point
+// state before we can proceed.
+//
+
+ stfd f.0, TrFpr0 + TF_BASE(r.sp) // save volatile FPRs
+ stfd f.1, TrFpr1 + TF_BASE(r.sp)
+ stfd f.2, TrFpr2 + TF_BASE(r.sp)
+ stfd f.3, TrFpr3 + TF_BASE(r.sp)
+ stfd f.4, TrFpr4 + TF_BASE(r.sp)
+ stfd f.5, TrFpr5 + TF_BASE(r.sp)
+ mffs f.0 // get Floating Point Status
+ // and Control Register (FPSCR)
+ stfd f.6, TrFpr6 + TF_BASE(r.sp)
+ stfd f.7, TrFpr7 + TF_BASE(r.sp)
+ stfd f.8, TrFpr8 + TF_BASE(r.sp)
+ stfd f.9, TrFpr9 + TF_BASE(r.sp)
+ stfd f.10, TrFpr10 + TF_BASE(r.sp)
+ stfd f.11, TrFpr11 + TF_BASE(r.sp)
+ stfd f.12, TrFpr12 + TF_BASE(r.sp)
+ stfd f.13, TrFpr13 + TF_BASE(r.sp)
+ stfd f.0, TrFpscr + TF_BASE(r.sp)
+
+ beq cr.1, kie40 // if eq, no s/w int pending
+
+//
+// Software interrupt pending. Dispatch it.
+//
+
+ stwu r.sp, -IntFrame(r.sp) // buy stack frame
+ li r.3, 0 // Tell dispatch routines to
+ // not enable interrupts when
+ // returning to IRQL 0.
+ ori r.7,r.7,INT_ENA // Set up for interrupts to be enabled (r7 is
+ // input arg to KiDispatchSoftwareIntDisabled)
+ bl ..KiDispatchSoftwareIntDisabled // run pending DPCs and
+ // if applicable APCs.
+ addi r.sp, r.sp, IntFrame // return stack frame
+ la r.3, TF_BASE(r.sp) // compute trap frame address
+
+
+//
+// Having dispatched the software interrupt, we now need to check whether
+// the current thread has a user mode APC pending.
+//
+
+ lwz r.8, TF_BASE+TrMsr(r.sp) // load saved MSR value
+ extrwi. r.8, r.8, 1, MSR_PR // see if resuming user mode
+ beq ae.restore // jif kernel mode
+ beq cr.3, ae.restore // skip user mode APC check
+ // if at APC level.
+ li r.5, 0
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get address of current thread
+ stb r.5, ThAlerted(r.6) // clear kernel mode alerted
+ lbz r.6, ThApcState+AsUserApcPending(r.6)
+ cmpwi r.6, 0 // user mode APC pending?
+ beq ae.restore // if eq, none pending
+kie40:
+ mfmsr r.7
+ ori r.7,r.7,INT_ENA
+ mtmsr r.7 // enable interrupts
+ sync // flush pipeline
+
+//
+// A user mode APC is pending. Branch to common code to deliver it.
+//
+
+ b ae.apc_deliver // join common code
+
+ DUMMY_EXIT (KiInterruptException)
+
+//-----------------------------------------------------------------
+//
+// Direct-Store Error interrupt --
+// Treat this as a bus error.
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_direct_store, for exception
+// handling.
+
+ FN_TABLE(KiDirectStoreFaultDispatch,0,0)
+
+ DUMMY_ENTRY (KiDirectStoreFaultDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiDirectStoreFaultDispatch)
+
+process_direct_store:
+ li r.3, DATA_BUS_ERROR // should define a PPC specific
+ // code, for now borrow from MIPS.
+ bl ..KeBugCheck
+ b $
+
+ DUMMY_EXIT (KiDirectStoreFaultDispatch)
+
+//-----------------------------------------------------------------
+//
+// Trace exception --
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_trace, for exception handling.
+
+ FN_TABLE(KiTraceExceptionDispatch,0,0)
+
+ DUMMY_ENTRY (KiTraceExceptionDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiTraceExceptionDispatch)
+
+process_trace:
+ li r.3, TRAP_CAUSE_UNKNOWN // should define a PPC specific
+ // code, for now borrow from MIPS.
+ bl ..KeBugCheck
+ b $
+
+ DUMMY_EXIT (KiTraceExceptionDispatch)
+
+//-----------------------------------------------------------------
+//
+// Floating-Point Assist interrupt --
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_fp_assist, for exception
+// handling.
+
+ FN_TABLE(KiFpAssistExceptionDispatch,0,0)
+
+ DUMMY_ENTRY (KiFpAssistExceptionDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiFpAssistExceptionDispatch)
+
+process_fp_assist:
+ li r.3, TRAP_CAUSE_UNKNOWN // should define a PPC specific
+ // code, for now borrow from MIPS.
+ bl ..KeBugCheck
+ b $
+
+ DUMMY_EXIT (KiFpAssistExceptionDispatch)
+
+
+//-----------------------------------------------------------------
+//
+// System Management interrupt --
+// This is the power management handler for the PowerPC 603.
+// THIS CODE WILL CHANGE.
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_system_management, for exception
+// handling.
+
+ FN_TABLE(KiSystemManagementExceptionDispatch,0,0)
+
+ DUMMY_ENTRY (KiSystemManagementExceptionDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiSystemManagementExceptionDispatch)
+
+process_system_management:
+ li r.3, TRAP_CAUSE_UNKNOWN // will need PowerMgmt specific code
+ bl ..KeBugCheck
+ b $
+
+ DUMMY_EXIT (KiSystemManagementExceptionDispatch)
+
+
+
+//-----------------------------------------------------------------
+//
+// Run-Mode interrupt --
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_run_mode, for exception
+// handling.
+
+ FN_TABLE(KiRunModeExceptionDispatch,0,0)
+
+ DUMMY_ENTRY (KiRunModeExceptionDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiRunModeExceptionDispatch)
+
+process_run_mode:
+ LWI (r.0, STATUS_SINGLE_STEP) // single step status
+ stw r.0, ErExceptionCode + ER_BASE (r.sp)
+ li r.0, 1 // Dr0 breakpoint register hit
+ stw r.0, TrDr6 + TF_BASE(r.sp) // record Dr0 hit in Dr6
+ stw r.0, ErNumberParameters + ER_BASE (r.sp) // show 1 parameter
+ li r.0, 0
+ stw r.0, ErExceptionInformation + ER_BASE (r.sp) // parm = 0
+ b exception_dispatch
+
+ DUMMY_EXIT (KiRunModeExceptionDispatch)
+
+//-----------------------------------------------------------------
+//
+// We've panic'd, call KeBugCheck
+//-----------------------------------------------------------------
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_panic, for exception
+// handling.
+
+ FN_TABLE(KiStackOvflDispatch,0,0)
+
+ DUMMY_ENTRY (KiStackOvflDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiStackOvflDispatch)
+
+process_panic:
+ li r.3, PANIC_STACK_SWITCH
+ bl ..KeBugCheck
+ b $
+
+ DUMMY_EXIT (KiStackOvflDispatch)
+
+//-----------------------------------------------------------------
+//
+// System Call entry via common_exception_entry ??? Can't happen.
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through process_system_call, for exception
+// handling.
+
+ FN_TABLE(KiSystemCallEntryError,0,0)
+
+ DUMMY_ENTRY (KiSystemCallEntryError)
+
+ b common_exception_entry // use common prologue for unwind
+
+ PROLOGUE_END (KiSystemCallEntryError)
+
+process_system_call:
+ li r.3, TRAP_CAUSE_UNKNOWN // should define a PPC specific
+ // code, for now borrow from MIPS.
+ bl ..KeBugCheck
+ b $
+
+ DUMMY_EXIT (KiSystemCallEntryError)
+
+//-----------------------------------------------------------------
+//
+// We are about to call KiDispatchException, as follows:
+//
+// KiDispatchException (& Exception Record, [r.3]
+// & Exception Frame, [r.4]
+// & Trap Frame, [r.5]
+// Previous Mode (0=kernel, 1=user), [r.6]
+// First Chance flag (1)); [r.7]
+//
+// The code immediately below is never executed. It is present to allow
+// for unwinding the stack through exception_dispatch, for exception
+// handling.
+
+ FN_TABLE(KiExceptionDispatch,0,0)
+
+ DUMMY_ENTRY (KiExceptionDispatch)
+
+ b common_exception_entry // use common prologue for unwind
+
+ stw r.13, ExGpr13 + EF_BASE(r.sp) // save non-volatile GPRs
+ stw r.14, ExGpr14 + EF_BASE(r.sp)
+ stw r.15, ExGpr15 + EF_BASE(r.sp)
+ stw r.16, ExGpr16 + EF_BASE(r.sp)
+ stw r.17, ExGpr17 + EF_BASE(r.sp)
+ stw r.18, ExGpr18 + EF_BASE(r.sp)
+ stw r.19, ExGpr19 + EF_BASE(r.sp)
+ stw r.20, ExGpr20 + EF_BASE(r.sp)
+ stw r.21, ExGpr21 + EF_BASE(r.sp)
+ stw r.22, ExGpr22 + EF_BASE(r.sp)
+ stw r.23, ExGpr23 + EF_BASE(r.sp)
+ stw r.24, ExGpr24 + EF_BASE(r.sp)
+ stw r.25, ExGpr25 + EF_BASE(r.sp)
+ stw r.26, ExGpr26 + EF_BASE(r.sp)
+ stw r.27, ExGpr27 + EF_BASE(r.sp)
+ stw r.28, ExGpr28 + EF_BASE(r.sp)
+ stw r.29, ExGpr29 + EF_BASE(r.sp)
+ stw r.30, ExGpr30 + EF_BASE(r.sp)
+ stw r.31, ExGpr31 + EF_BASE(r.sp)
+
+ stfd f.14, ExFpr14 + EF_BASE(r.sp) // save non-volatile FPRs
+ stfd f.15, ExFpr15 + EF_BASE(r.sp)
+ stfd f.16, ExFpr16 + EF_BASE(r.sp)
+ stfd f.17, ExFpr17 + EF_BASE(r.sp)
+ stfd f.18, ExFpr18 + EF_BASE(r.sp)
+ stfd f.19, ExFpr19 + EF_BASE(r.sp)
+ stfd f.20, ExFpr20 + EF_BASE(r.sp)
+ stfd f.21, ExFpr21 + EF_BASE(r.sp)
+ stfd f.22, ExFpr22 + EF_BASE(r.sp)
+ stfd f.23, ExFpr23 + EF_BASE(r.sp)
+ stfd f.24, ExFpr24 + EF_BASE(r.sp)
+ stfd f.25, ExFpr25 + EF_BASE(r.sp)
+ stfd f.26, ExFpr26 + EF_BASE(r.sp)
+ stfd f.27, ExFpr27 + EF_BASE(r.sp)
+ stfd f.28, ExFpr28 + EF_BASE(r.sp)
+ stfd f.29, ExFpr29 + EF_BASE(r.sp)
+ stfd f.30, ExFpr30 + EF_BASE(r.sp)
+ stfd f.31, ExFpr31 + EF_BASE(r.sp)
+
+ PROLOGUE_END (KiExceptionDispatch)
+
+exception_dispatch:
+ mfmsr r.0
+ ori r.0,r.0,INT_ENA
+ mtmsr r.0 // enable interrupts
+ sync
+
+
+// The first argument (r.3) to KiDispatchException is the address
+// of the Exception Record.
+
+ la r.3, ER_BASE (r.sp)
+
+// The second argument (r.4) is the address of the Exception Frame.
+
+//-------------------------------------------------------------------
+//
+// Generate Exception Frame (save the non-volatile state)
+
+ la r.4, EF_BASE (r.sp) // address of Exception Frame
+
+ stw r.13, ExGpr13 (r.4) // save non-volatile GPRs
+ stw r.14, ExGpr14 (r.4)
+ stw r.15, ExGpr15 (r.4)
+ stw r.16, ExGpr16 (r.4)
+ stw r.17, ExGpr17 (r.4)
+ stw r.18, ExGpr18 (r.4)
+ stw r.19, ExGpr19 (r.4)
+ stw r.20, ExGpr20 (r.4)
+ stw r.21, ExGpr21 (r.4)
+ stw r.22, ExGpr22 (r.4)
+ stw r.23, ExGpr23 (r.4)
+ stw r.24, ExGpr24 (r.4)
+ stw r.25, ExGpr25 (r.4)
+ stw r.26, ExGpr26 (r.4)
+ stw r.27, ExGpr27 (r.4)
+ stw r.28, ExGpr28 (r.4)
+ stw r.29, ExGpr29 (r.4)
+ stw r.30, ExGpr30 (r.4)
+ stw r.31, ExGpr31 (r.4)
+
+ stfd f.14, ExFpr14 (r.4) // save non-volatile FPRs
+ stfd f.15, ExFpr15 (r.4)
+ stfd f.16, ExFpr16 (r.4)
+ stfd f.17, ExFpr17 (r.4)
+ stfd f.18, ExFpr18 (r.4)
+ stfd f.19, ExFpr19 (r.4)
+ stfd f.20, ExFpr20 (r.4)
+ stfd f.21, ExFpr21 (r.4)
+ stfd f.22, ExFpr22 (r.4)
+ stfd f.23, ExFpr23 (r.4)
+ stfd f.24, ExFpr24 (r.4)
+ stfd f.25, ExFpr25 (r.4)
+ stfd f.26, ExFpr26 (r.4)
+ stfd f.27, ExFpr27 (r.4)
+ stfd f.28, ExFpr28 (r.4)
+ stfd f.29, ExFpr29 (r.4)
+ stfd f.30, ExFpr30 (r.4)
+ stfd f.31, ExFpr31 (r.4)
+
+// End of Exception Frame
+//
+//-------------------------------------------------------------------
+
+// The third argument (r.5) to KiDispatch Exception is the address
+// of the Trap Frame.
+
+ la r.5, TF_BASE (r.sp)
+
+// The fourth argument (r.6) is the previous mode: 0 for kernel mode,
+// 1 for user mode. We have this value in bit WAS_USER_MODE of the CR.
+
+ mfcr r.6
+ rlwinm r.6, r.6, 32+WAS_USER_MODE-31, 1
+
+// The fifth argument (r.7) is the "first chance" flag.
+
+ li r.7, 1 // First Chance = TRUE
+
+// Call KiDispatchException(
+// &ExceptionRecord,
+// &Exception Frame,
+// &Trap Frame,
+// Previous Mode,
+// First Chance = TRUE)
+//
+ bl ..KiDispatchException
+
+// Load registers required by KiExceptionExit:
+// r.3 points to Exception Frame
+// r.4 points to Trap Frame
+
+ la r.3, EF_BASE (r.sp)
+ la r.4, TF_BASE (r.sp)
+
+ b ..KiExceptionExit
+
+// Fall thru ...
+
+ DUMMY_EXIT (KiExceptionDispatch)
+
+//--------------------------------------------------------------------------
+//
+// KiCommonFakeMillicode() -- This code is never executed. It is provided
+// to allow virtual unwind to restore register state
+// prior to an exception.
+//
+// This is fake register save millicode "called" during a fake prologue.
+// The reverse execution of the fake prologue establishes r.12 to point
+// to the Exception Frame.
+//
+// Arguments:
+//
+// r.12 -- address of Exception Frame to restore from.
+//
+// Return values:
+//
+// Only non-volatile registers are restored by the virtual unwinder.
+//
+//--------------------------------------------------------------------------
+
+ FN_TABLE(KiCommonFakeMillicode, 0, 1)
+
+ DUMMY_ENTRY(KiCommonFakeMillicode)
+
+ PROLOGUE_END(KiCommonFakeMillicode)
+
+ stfd f.14, ExFpr14 (r.12) // restore non-volatile FPRs
+ stfd f.15, ExFpr15 (r.12)
+ stfd f.16, ExFpr16 (r.12)
+ stfd f.17, ExFpr17 (r.12)
+ stfd f.18, ExFpr18 (r.12)
+ stfd f.19, ExFpr19 (r.12)
+ stfd f.20, ExFpr20 (r.12)
+ stfd f.21, ExFpr21 (r.12)
+ stfd f.22, ExFpr22 (r.12)
+ stfd f.23, ExFpr23 (r.12)
+ stfd f.24, ExFpr24 (r.12)
+ stfd f.25, ExFpr25 (r.12)
+ stfd f.26, ExFpr26 (r.12)
+ stfd f.27, ExFpr27 (r.12)
+ stfd f.28, ExFpr28 (r.12)
+ stfd f.29, ExFpr29 (r.12)
+ stfd f.30, ExFpr30 (r.12)
+ stfd f.31, ExFpr31 (r.12)
+
+ stw r.14, ExGpr14 (r.12) // restore non-volatile GPRs
+ stw r.15, ExGpr15 (r.12)
+ stw r.16, ExGpr16 (r.12)
+ stw r.17, ExGpr17 (r.12)
+ stw r.18, ExGpr18 (r.12)
+ stw r.19, ExGpr19 (r.12)
+ stw r.20, ExGpr20 (r.12)
+ stw r.21, ExGpr21 (r.12)
+ stw r.22, ExGpr22 (r.12)
+ stw r.23, ExGpr23 (r.12)
+ stw r.24, ExGpr24 (r.12)
+ stw r.25, ExGpr25 (r.12)
+ stw r.26, ExGpr26 (r.12)
+ stw r.27, ExGpr27 (r.12)
+ stw r.28, ExGpr28 (r.12)
+ stw r.29, ExGpr29 (r.12)
+ stw r.30, ExGpr30 (r.12)
+ stw r.31, ExGpr31 (r.12)
+ blr
+
+ DUMMY_EXIT(KiCommonFakeMillicode)
+
+//--------------------------------------------------------------------------
+//
+// KiExceptionExit() -- Control is transferred to this routine to exit
+// from an exception. The state contained in the
+// specified Trap Frame and Exception Frame is
+// reloaded, and execution is resumed.
+//
+// Note: This transfer of control occurs from
+//
+// 1. a fall thru from the above code
+// 2. an exit from the continue system service
+// 3. an exit from the raise exception system service
+// 4. an exit into user mode from thread startup.
+//
+// Arguments:
+//
+// r.1 -- a valid stack pointer
+// r.2 -- kernel's TOC pointer
+// r.3 -- address of Exception Frame
+// r.4 -- address of Trap Frame
+//
+// Return values:
+//
+// There is no return from this routine.
+//
+//--------------------------------------------------------------------------
+
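+// A hedged sketch of the APC check performed at KiExceptionExit before
+// the saved frames are restored (illustration only):
+//
+//     while (CurrentIrql < APC_LEVEL &&
+//            (PCR->ApcInterrupt ||
+//             (resuming user mode && Thread->ApcState.UserApcPending))) {
+//         PCR->ApcInterrupt = 0;                    // if that was the cause
+//         CurrentIrql = APC_LEVEL;
+//         KiDeliverApc(PreviousMode, &ExceptionFrame, &TrapFrame);
+//         CurrentIrql = 0;                          // then recheck
+//     }
+//     restore non-volatile state from the Exception Frame and fall into
+//     ae.restore2 to restore the Trap Frame and resume;
+//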
+ FN_TABLE(KiExceptionExit_, 0, 0)
+
+ DUMMY_ENTRY(KiExceptionExit_)
+
+// The following is never executed; it is provided to allow virtual
+// unwind to restore register state prior to an exception occurring.
+
+ rfi // tell unwinder to update establisher
+ // frame address using sp
+
+ stw r.sp, TrGpr1 (r.sp) // Load r.1
+ stw r.12, TrGpr12 (r.sp) // Load r.12
+ bl ..KiCommonFakeMillicode // Restore the Exception Frame
+
+ stw r.0, TrGpr0 (r.sp)
+ mflr r.0 // Sets only Lr
+ stw r.0, TrLr (r.sp)
+ mflr r.0 // Sets Iar and Lr
+ stw r.0, TrIar (r.sp)
+ mfcr r.0
+ stw r.0, TrCr (r.sp)
+
+ stw r.2, TrGpr2 (r.sp)
+ stw r.3, TrGpr3 (r.sp)
+ stw r.4, TrGpr4 (r.sp)
+ stw r.5, TrGpr5 (r.sp)
+ stw r.6, TrGpr6 (r.sp)
+ stw r.7, TrGpr7 (r.sp)
+ stw r.8, TrGpr8 (r.sp)
+ stw r.9, TrGpr9 (r.sp)
+ stw r.10, TrGpr10 (r.sp)
+ stw r.11, TrGpr11 (r.sp)
+
+ mfctr r.6 // Fixed Point Exception
+ mfxer r.7 // registers
+
+ stfd f.0, TrFpr0 (r.sp) // save volatile FPRs
+ stfd f.1, TrFpr1 (r.sp)
+ stfd f.2, TrFpr2 (r.sp)
+ stfd f.3, TrFpr3 (r.sp)
+ stfd f.4, TrFpr4 (r.sp)
+ stfd f.5, TrFpr5 (r.sp)
+ stfd f.6, TrFpr6 (r.sp)
+ stfd f.7, TrFpr7 (r.sp)
+ stfd f.8, TrFpr8 (r.sp)
+ stfd f.9, TrFpr9 (r.sp)
+ stfd f.10, TrFpr10 (r.sp)
+ stfd f.11, TrFpr11 (r.sp)
+ stfd f.12, TrFpr12 (r.sp)
+ stfd f.13, TrFpr13 (r.sp)
+ mffs f.0 // get Floating Point Status
+ // and Control Register (FPSCR)
+
+ stw r.6, TrCtr (r.sp) // Count,
+ stw r.7, TrXer (r.sp) // Fixed Point Exception,
+ stfd f.0, TrFpscr (r.sp) // and FPSCR registers.
+
+ stw r.sp, 12 (r.sp) // Load the Trap Frame
+ stw r.12, 4 (r.sp) // Load the Exception Frame
+
+ PROLOGUE_END(KiExceptionExit_)
+
+ .align 6 // cache line align
+
+ ALTERNATE_ENTRY(KiExceptionExit)
+
+ stw r.3, 4(r.sp) // store r.3 + r.4 for use
+ stw r.4, 12(r.sp) // by virtual unwinder
+
+ lbz r.8, KiPcr+PcCurrentIrql(r.0) // check if an APC could be
+ cmplwi r.8, APC_LEVEL // delivered now.
+ bge ee.apc_skip
+ee.apc_recheck:
+ lwz r.15, TrMsr(r.4) // load saved MSR value
+ lbz r.7, KiPcr+PcApcInterrupt(r.0)
+ extrwi r.15, r.15, 1, MSR_PR // extract problem state bit
+ or. r.6, r.7, r.15 // user mode || intr pending
+ beq ee.apc_skip // jif neither
+ addic. r.7, r.7, -1
+ beq ee.apc_intr // apc interrupt
+
+// no interrupt pending but going to user mode, check for user mode apc
+// pending.
+
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get address of current thread
+ li r.7, 0
+ stb r.7, ThAlerted(r.6) // clear kernel mode alerted
+ lbz r.6, ThApcState+AsUserApcPending(r.6)
+ cmplwi r.6, 0
+ beq ee.apc_skip // jif none pending
+ b ee.apc_deliver
+
+ee.apc_intr:
+ stb r.7, KiPcr+PcApcInterrupt(r.0)
+ee.apc_deliver:
+ lwz r.6, KiPcr+PcPrcb(r.0) // get address of PRCB
+ ori r.16, r.3, 0 // save incoming Exception
+ ori r.14, r.4, 0 // and Trap Frame addresses
+ lwz r.7, PbApcBypassCount(r.6) // get APC bypass count
+
+ li r.5, APC_LEVEL // raise Irql to APC_LEVEL
+ stb r.5, KiPcr+PcCurrentIrql(r.0)
+
+// Call to KiDeliverApc requires three parameters:
+// r.3 Previous Mode
+// r.4 addr of Exception Frame
+// r.5 addr of Trap Frame
+
+ addi r.7, r.7, 1 // increment APC bypass count
+ ori r.5, r.4, 0 // trap frame addr to r.5
+ ori r.4, r.3, 0 // exception frame addr to r.4
+ ori r.3, r.15, 0 // previous mode
+ stw r.7, PbApcBypassCount(r.6) // store new APC bypass count
+ bl ..KiDeliverApc // process pending apc
+
+ li r.5, 0 // lower Irql to < APC_LEVEL
+ stb r.5, KiPcr+PcCurrentIrql(r.0)
+ ori r.3, r.16, 0 // restore saved frame
+ ori r.4, r.14, 0 // pointers
+ b ee.apc_recheck // check again
+
+ee.apc_skip:
+
+// Restore state from Exception Frame
+
+ lfd f.14, ExFpr14 (r.3) // restore non-volatile FPRs
+ lfd f.15, ExFpr15 (r.3)
+ lfd f.16, ExFpr16 (r.3)
+ lfd f.17, ExFpr17 (r.3)
+ lfd f.18, ExFpr18 (r.3)
+ lfd f.19, ExFpr19 (r.3)
+ lfd f.20, ExFpr20 (r.3)
+ lfd f.21, ExFpr21 (r.3)
+ lfd f.22, ExFpr22 (r.3)
+ lfd f.23, ExFpr23 (r.3)
+ lfd f.24, ExFpr24 (r.3)
+ lfd f.25, ExFpr25 (r.3)
+ lfd f.26, ExFpr26 (r.3)
+ lfd f.27, ExFpr27 (r.3)
+ lfd f.28, ExFpr28 (r.3)
+ lfd f.29, ExFpr29 (r.3)
+ lfd f.30, ExFpr30 (r.3)
+ lfd f.31, ExFpr31 (r.3)
+
+ lwz r.14, ExGpr14 (r.3) // restore non-volatile GPRs
+ lwz r.15, ExGpr15 (r.3)
+ lwz r.16, ExGpr16 (r.3)
+ lwz r.17, ExGpr17 (r.3)
+ lwz r.18, ExGpr18 (r.3)
+ lwz r.19, ExGpr19 (r.3)
+ lwz r.20, ExGpr20 (r.3)
+ lwz r.21, ExGpr21 (r.3)
+ lwz r.22, ExGpr22 (r.3)
+ lwz r.23, ExGpr23 (r.3)
+ lwz r.24, ExGpr24 (r.3)
+ lwz r.25, ExGpr25 (r.3)
+ lwz r.26, ExGpr26 (r.3)
+ lwz r.27, ExGpr27 (r.3)
+ lwz r.28, ExGpr28 (r.3)
+ lwz r.29, ExGpr29 (r.3)
+ lwz r.30, ExGpr30 (r.3)
+ lwz r.31, ExGpr31 (r.3)
+
+ ori r.3, r.4, 0 // now r.3 points to Trap Frame
+
+ b ae.restore2 // we already checked for apcs
+
+ DUMMY_EXIT(KiExceptionExit_)
+
+// ----------------------------------------------------------------------
+//
+// Entry here is only from other routines in real0.s
+// On entry, r.1 (r.sp) points to stack frame containing Trap Frame
+// and space for Exception Frame.
+// Non-volatile state is in regs, not in Exception Frame.
+// Trap Frame and Exception Frame are addressed via stack pointer.
+//
+
+ FN_TABLE(KiAlternateExit_, 0, 0)
+
+ DUMMY_ENTRY(KiAlternateExit_)
+
+// The following is never executed; it is provided to allow virtual
+// unwind to restore register state prior to an exception occurring.
+
+ rfi // tell unwinder to update establisher
+ // frame address using sp
+
+ stw r.sp, TrGpr1 + TF_BASE (r.sp) // Load r.1
+ stw r.0, TrGpr0 + TF_BASE (r.sp)
+ mflr r.0 // Sets only Lr
+ stw r.0, TrLr + TF_BASE (r.sp)
+ mflr r.0 // Sets Iar and Lr
+ stw r.0, TrIar + TF_BASE (r.sp)
+ mfcr r.0
+ stw r.0, TrCr + TF_BASE (r.sp)
+
+ stw r.2, TrGpr2 + TF_BASE (r.sp)
+ stw r.3, TrGpr3 + TF_BASE (r.sp)
+ stw r.4, TrGpr4 + TF_BASE (r.sp)
+ stw r.5, TrGpr5 + TF_BASE (r.sp)
+ stw r.6, TrGpr6 + TF_BASE (r.sp)
+ stw r.7, TrGpr7 + TF_BASE (r.sp)
+ stw r.8, TrGpr8 + TF_BASE (r.sp)
+ stw r.9, TrGpr9 + TF_BASE (r.sp)
+ stw r.10, TrGpr10 + TF_BASE (r.sp)
+ stw r.11, TrGpr11 + TF_BASE (r.sp)
+ stw r.12, TrGpr12 + TF_BASE (r.sp)
+
+ mfctr r.6 // Fixed Point Exception
+ mfxer r.7 // registers
+
+ stfd f.0, TrFpr0 + TF_BASE (r.sp) // save volatile FPRs
+ stfd f.1, TrFpr1 + TF_BASE (r.sp)
+ stfd f.2, TrFpr2 + TF_BASE (r.sp)
+ stfd f.3, TrFpr3 + TF_BASE (r.sp)
+ stfd f.4, TrFpr4 + TF_BASE (r.sp)
+ stfd f.5, TrFpr5 + TF_BASE (r.sp)
+ stfd f.6, TrFpr6 + TF_BASE (r.sp)
+ stfd f.7, TrFpr7 + TF_BASE (r.sp)
+ stfd f.8, TrFpr8 + TF_BASE (r.sp)
+ stfd f.9, TrFpr9 + TF_BASE (r.sp)
+ stfd f.10, TrFpr10 + TF_BASE (r.sp)
+ stfd f.11, TrFpr11 + TF_BASE (r.sp)
+ stfd f.12, TrFpr12 + TF_BASE (r.sp)
+ stfd f.13, TrFpr13 + TF_BASE (r.sp)
+ mffs f.0 // get Floating Point Status
+ // and Control Register (FPSCR)
+
+ stw r.6, TrCtr + TF_BASE (r.sp) // Count,
+ stw r.7, TrXer + TF_BASE (r.sp) // Fixed Point Exception,
+ stfd f.0, TrFpscr + TF_BASE (r.sp) // and FPSCR registers.
+
+ PROLOGUE_END(KiAlternateExit_)
+
+ .align 6 // cache line align
+
+KiAlternateExit:
+
+ lbz r.8, KiPcr+PcCurrentIrql(r.0) // check if an APC could be
+ cmplwi r.8, APC_LEVEL // delivered now.
+ bge ae.restore
+ lwz r.8, TF_BASE+TrMsr(r.sp) // load saved MSR value
+ lbz r.7, KiPcr+PcApcInterrupt(r.0)
+ extrwi r.8, r.8, 1, MSR_PR // extract problem state bit
+ or. r.6, r.7, r.8 // user mode || intr pending
+ beq ae.restore // jif neither
+ addic. r.7, r.7, -1
+ beq ae.apc_intr // apc interrupt
+
+// no interrupt pending but going to user mode, check for user mode apc
+// pending.
+
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get address of current thread
+ li r.7, 0
+ stb r.7, ThAlerted(r.6) // clear kernel mode alerted
+ lbz r.6, ThApcState+AsUserApcPending(r.6)
+ cmplwi r.6, 0
+ beq ae.restore // jif none pending
+ b ae.apc_deliver
+
+ae.apc_intr:
+ cmplwi r.8, 0 // check previous mode
+ stb r.7, KiPcr+PcApcInterrupt(r.0) // clear pending intr flag
+ beq ae.apc_kernel // if previous mode == kernel
+ae.apc_deliver:
+
+// Call KiDeliverApc() for pending APC, previous mode == user. Before doing
+// so, we must store the non-volatile state into the Exception Frame, for
+// KiDeliverApc() takes Trap Frame and Exception Frame as input.
+
+ la r.4, EF_BASE (r.sp) // addr of Exception Frame
+ stw r.13, ExGpr13 (r.4) // store non-volatile GPRs
+ stw r.14, ExGpr14 (r.4)
+ stw r.15, ExGpr15 (r.4)
+ stw r.16, ExGpr16 (r.4)
+ stw r.17, ExGpr17 (r.4)
+ stw r.18, ExGpr18 (r.4)
+ stw r.19, ExGpr19 (r.4)
+ stw r.20, ExGpr20 (r.4)
+ stw r.21, ExGpr21 (r.4)
+ stw r.22, ExGpr22 (r.4)
+ stw r.23, ExGpr23 (r.4)
+ stw r.24, ExGpr24 (r.4)
+ stw r.25, ExGpr25 (r.4)
+ stw r.26, ExGpr26 (r.4)
+ stw r.27, ExGpr27 (r.4)
+ stw r.28, ExGpr28 (r.4)
+ stw r.29, ExGpr29 (r.4)
+ stw r.30, ExGpr30 (r.4)
+ stw r.31, ExGpr31 (r.4)
+
+ stfd f.14, ExFpr14 (r.4) // save non-volatile FPRs
+ stfd f.15, ExFpr15 (r.4)
+ stfd f.16, ExFpr16 (r.4)
+ stfd f.17, ExFpr17 (r.4)
+ stfd f.18, ExFpr18 (r.4)
+ stfd f.19, ExFpr19 (r.4)
+ stfd f.20, ExFpr20 (r.4)
+ stfd f.21, ExFpr21 (r.4)
+ stfd f.22, ExFpr22 (r.4)
+ stfd f.23, ExFpr23 (r.4)
+ stfd f.24, ExFpr24 (r.4)
+ stfd f.25, ExFpr25 (r.4)
+ stfd f.26, ExFpr26 (r.4)
+ stfd f.27, ExFpr27 (r.4)
+ stfd f.28, ExFpr28 (r.4)
+ stfd f.29, ExFpr29 (r.4)
+ stfd f.30, ExFpr30 (r.4)
+ stfd f.31, ExFpr31 (r.4)
+
+// The call to KiDeliverApc requires three parameters:
+// r.3 Previous Mode
+// r.4 addr of Exception Frame
+// r.5 addr of Trap Frame
+
+ae.apc_kernel:
+ li r.3, APC_LEVEL // raise Irql
+ stb r.3, KiPcr+PcCurrentIrql(r.0)
+ae.apc_again:
+ lwz r.6, KiPcr+PcPrcb(r.0) // get address of PRCB
+ la r.5, TF_BASE (r.sp) // r.5 = addr of trap frame
+ lwz r.3, TrMsr(r.5) // load saved MSR value
+ la r.4, EF_BASE (r.sp) // r.4 = addr of except. frame
+ lwz r.7, PbApcBypassCount(r.6) // get APC bypass count
+ extrwi r.3, r.3, 1, MSR_PR // r.3 = previous mode
+ addi r.7, r.7, 1 // increment APC bypass count
+ stw r.7, PbApcBypassCount(r.6) // store new APC bypass count
+ bl ..KiDeliverApc // process pending apc
+
+ lbz r.7, KiPcr+PcApcInterrupt(r.0)
+ addic. r.7, r.7, -1
+ bne ae.apc_done // none pending, continue
+ stb r.7, KiPcr+PcApcInterrupt(r.0)
+ b ae.apc_again
+
+ae.apc_done:
+ li r.3, 0 // lower Irql < APC_LEVEL
+ stb r.3, KiPcr+PcCurrentIrql(r.0)
+
+ae.restore:
+ la r.3, TF_BASE (r.sp) // addr of Trap Frame
+
+ae.restore2:
+
+// Restore state from Trap Frame and control information
+
+ DUMMY_EXIT(KiAlternateExit_)
+
+ lfd f.13, TrFpscr (r.3) // get FP status
+ lfd f.0, TrFpr0 (r.3) // restore volatile FPRs
+ lfd f.1, TrFpr1 (r.3)
+ lfd f.2, TrFpr2 (r.3)
+ lfd f.3, TrFpr3 (r.3)
+ lfd f.4, TrFpr4 (r.3)
+ lfd f.5, TrFpr5 (r.3)
+ lfd f.6, TrFpr6 (r.3)
+ lfd f.7, TrFpr7 (r.3)
+ lfd f.8, TrFpr8 (r.3)
+ lfd f.9, TrFpr9 (r.3)
+ lfd f.10, TrFpr10 (r.3)
+ mtfsf 0xff, f.13 // move FP status to FPSCR
+ lfd f.11, TrFpr11 (r.3)
+ lfd f.12, TrFpr12 (r.3)
+ lfd f.13, TrFpr13 (r.3) // restore f.13
+
+KiPriorityExit:
+
+ mfmsr r.10 // get current MSR
+ lwz r.9, TrCtr (r.3) // get XER, LR, CTR
+ lwz r.8, TrLr (r.3)
+ lwz r.7, TrXer (r.3)
+ lwz r.5, TrMsr (r.3) // get resume MSR
+ rlwinm r.10, r.10, 0, ~INT_ENA // turn off EE bit
+ lwz r.4, TrGpr4 (r.3) // get GPRs 4, 5 and 6 and
+ lwz r.0, TrGpr5 (r.3) // save them in the PCR
+ lwz r.6, TrGpr6 (r.3)
+ li r.11, TrMsr // offset to MSR in TF
+ lwz r.2, TrGpr2 (r.3)
+ mtctr r.9 // restore XER, LR, CTR
+ mtlr r.8
+ lwz r.9, TrGpr9 (r.3)
+ mtxer r.7
+ lwz r.8, TrGpr8 (r.3)
+ lwz r.7, TrGpr7 (r.3)
+
+//
+// WARNING: Cannot tolerate page fault or interrupt. TLB/HPT miss ok.
+//
+
+ mtmsr r.10 // disable interrupts
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ stwcx. r.5, r.11, r.3 // clear outstanding reserves.
+ lwz r.1, TrGpr1 (r.3) // restore stack pointer
+ extrwi. r.11, r.5, 1, MSR_PR // test resume PR bit
+
+ stw r.4, KiPcr+PCR_SAVE4 (r.0) // save r.4, 5, 6 in PCR
+ stw r.0, KiPcr+PCR_SAVE5 (r.0)
+ stw r.6, KiPcr+PCR_SAVE6 (r.0)
+ lwz r.6, TrCr (r.3) // get resume CR
+ lwz r.4, TrIar (r.3) // get resume IAR
+
+ lwz r.12, TrGpr12 (r.3)
+
+//
+// This should not be needed,.... but it is.
+// Why? plj 6/1/95.
+//
+
+ lwz r.13, KiPcr+PcTeb (r.0)
+
+ bne kee.usermode // jif resuming user mode
+
+ lwz r.11, TrGpr11 (r.3)
+ lwz r.0, TrGpr0 (r.3)
+ mtcrf 0xff, r.6 // restore CR
+
+ lwz r.10, TrGpr10 (r.3)
+ lwz r.3, TrGpr3 (r.3)
+
+//
+// WARNING: Cannot tolerate TLB/HPT miss from mtsrr0 thru rfi. On an MP
+// system, the TLB/HPT could be flushed by another processor, so
+// we use the BAT0 address of the PCR.
+//
+
+ mfsprg r.6, sprg.1 // get BAT0 addr of PCR
+KiPriorityExitRfiJump1:
+ b $+(KiPriorityExitRfi-Kseg0CodeStart)
+
+kee.usermode:
+
+ lwz r.4, KiPcr+PcCurrentThread(r.0) // get address of current thread
+ lbz r.0, ThDebugActive(r.4) // Debug only but has to be here
+
+ cmpwi cr.1, r.0, 0 // Hardware debug register set?
+ bne- cr.1, ke.debug // jif debug registers set
+
+ke.09:
+ lwz r.4, TrIar (r.3) // get resume IAR (again)
+ lwz r.10, TrGpr10 (r.3)
+ lwz r.11, TrGpr11 (r.3)
+ lwz r.0, TrGpr0 (r.3)
+ mtcrf 0xff,r.6 // restore CR
+
+ lwz r.3, TrGpr3 (r.3)
+
+//
+// WARNING: The following removes access to the system paged pool
+// address space. The kernel stack is no longer addressable.
+//
+
+ lis r.6, SREG_INVAL // invalidate segment registers
+
+ mtsr 9, r.6 // 9, 10, 12, 13
+ mtsr 10, r.6
+ mtsr 12, r.6
+ mtsr 13, r.6
+
+//
+// WARNING: Cannot tolerate TLB/HPT miss from mtsrr0 thru rfi. On an MP
+// system, the TLB/HPT could be flushed by another processor, so
+// we use the BAT0 address of the PCR.
+//
+
+ mfsprg r.6, sprg.1 // get BAT0 addr of PCR
+KiPriorityExitRfiJump2:
+ b $+(KiPriorityExitRfi-Kseg0CodeStart)
+
+//
+// The following code is out of line for efficiency. It is only
+// executed when we are resuming to user mode in a thread that has
+// h/w breakpoints set.
+//
+// Registers 0, 4, 10 and 11 are available.
+//
+
+ke.debug:
+
+ lwz r.4, TrDr1 (r.3) // Get kernel DABR
+ lwz r.11, TrDr7 (r.3)
+ lwz r.10, TrDr0 (r.3) // Get kernel IABR
+ rlwinm r.4, r.4, 0, 0xfffffff8 // Sanitize DABR (Dr1)
+ ori r.10, r.10, 0x3 // Sanitize IABR (Dr0) 604
+ ori r.4, r.4, 0x4 // Sanitize DABR 604
+//
+// WARNING: Don't rearrange this branch table. The first branch is overlaid
+// (patched) with the correct branch instruction for the processor during
+// system initialization. The correct order is 601, 603, 604, skip.
+//
+BranchDr2:
+ b ke.601 // 601
+ b ke.603 // 603
+ b ke.604 // 604/613
+ b ke.09 // unknown
+
+ke.601: // 601 SPECIFIC
+ lis r.0, 0x6080 // Full cmp., trace mode except.
+ rlwinm r.10, r.10, 0, 0xfffffffc // Sanitize IABR (Dr0)
+ rlwinm r.4, r.4, 0, 0xfffffff8 // Sanitize DABR (Dr1), undo 604
+ mtspr hid1, r.0
+
+ke.604:
+ rlwinm. r.0, r.11, 0, 0x0000000c // LE1/GE1 set?
+ beq kedr1.1 // jif Dr1 not set
+ rlwimi r.4, r.11, 13, 30, 30 // Interchange R/W1 bits
+ rlwimi r.4, r.11, 11, 31, 31
+ mtspr dabr, r.4
+
+kedr1.1:
+ rlwinm. r.11, r.11, 0, 0x00000003 // LE0/GE0 set?
+ beq ke.09
+ mtspr iabr, r.10
+ isync
+ b ke.09
+
+ke.603: // 603 SPECIFIC
+ rlwinm r.10, r.10, 0, 0xfffffffc // Sanitize IABR
+ ori r.10, r.10, 0x2
+ mtspr iabr, r.10
+ b ke.09
+
+//-----------------------------------------------------------------
+//
+// System Call interrupt -- system_service_dispatch
+//
+// This is the kernel entry point for System Service calls.
+//
+// The processor is running with instruction and data relocation
+// enabled when control reaches here.
+//
+// Invocation of a "system service" routine
+//
+// Calls to the system service "stubs" (Zw<whatever>, Nt<whatever>) are
+// always call-thru-function-descriptor, like this:
+//
+// Calling procedure:
+// get addr of descriptor
+// save TOC pointer, if not already saved
+// load entry point addr from TOC, (gets addr of ..ZwSysCallInstr)
+// move to LR
+// load callee's TOC addr from TOC (gets system service code)
+// branch-and-link via LR
+//
+// ..ZwSysCallInstr:
+// <system call> instr
+// return
+//
+// The function descriptors for the system services are specially built.
+// All of them point to the same entry point in the first word,
+// ..ZwSysCallInstr. Instead of a TOC address, the system call code is
+// in the second word.
+//
+//
+// on Entry:
+// MSR: External interrupts disabled
+// Instruction Relocate ON
+// Data Relocate ON
+// GP registers:
+// r.0: Address of entry point
+// r.2: System Service number
+// r.3 thru r.10: system call parameters
+// r.12: Previous mode (saved srr1)
+//
+// cr.0 eq set if previous mode was kernel
+//
+// Available registers:
+// r.0
+//
+// All other registers still have their contents as of the time
+// of interrupt
+//
+// Our stack frame header must contain space for 16 words of arguments, the
+// maximum that can be specified on a system call. The stack frame header
+// struct only defines space for 8 such words.
+//
+// We'll build a structure on the stack like this:
+//
+// low addr | |
+// | |
+// / |--------------------| <-r.1 at point we call
+// | | Stack Frame Header | KiDispatchException
+// | | (back chain, misc. |
+// | | stuff, 16 words of |
+// | | parameter space) |
+// / |--------------------|
+// | Trap Frame |
+// STACK_DELTA | (volatile state) |
+// | <------ includes ExceptionRecord, imbedded within
+// \ |--------------------|
+// | | Exception Frame | Exception frame only if previous
+// | | (non-volatile | mode == User mode
+// | | state) |
+// | | |
+// \ |--------------------| <-r.1 at point of interrupt, if interrupted
+// | | kernel code, or base of kernel stack if
+// high addr | | interrupted user code
+
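+// As noted above, the Zw/Nt stub descriptors are special. An illustrative
+// (not source-accurate) layout of such a descriptor would be:
+//
+//     struct FuncDesc {
+//         void *Entry;          // always ..ZwSysCallInstr
+//         ULONG TocOrService;   // system service number, not a TOC addr
+//     };
+//
+// so the standard call-through-descriptor sequence leaves the service
+// number in r.2 (where the callee's TOC would normally go) when the
+// system call instruction is executed.
+//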
+//
+//--------------------------------------------------------------------------
+// The following is never executed; it is provided to allow virtual
+// unwind to restore register state prior to an exception occurring.
+// This is a common prologue for the various exception handlers.
+
+ FN_TABLE(KiSystemServiceDispatch,0,0)
+
+ DUMMY_ENTRY(KiSystemServiceDispatch)
+
+ stwu r.sp, -STACK_DELTA (r.sp)
+ mflr r.0
+ stw r.0, TrLr + TF_BASE (r.sp)
+
+ PROLOGUE_END(KiSystemServiceDispatch)
+
+ .align 6 // ensure the following is
+ // cache block aligned (for
+ // performance) (cache line
+ // for 601)
+system_service_dispatch:
+
+//
+// We need another register; trash r.13, which contains the Teb pointer,
+// and reload it later. (r.13 might not be the Teb if previous mode was
+// kernel.)
+//
+
+ lwz r.11, KiPcr+PcInitialStack(r.0) // kernel stack addr for thread
+ lwz r.13, KiPcr+PcCurrentThread(r.0)// get current thread addr
+ beq ssd.20 // branch if was in kernel state
+
+//
+// Previous state was user mode
+//
+// Segment registers 9, 10, 12, and 13 need to be set up for kernel mode.
+// In user mode they are set to zero, as no access is allowed to these
+// segments: there is no combination of Ks, Kp and PP that gives the
+// kernel both read-only and read/write pages which are user no-access.
+//
+
+ mfsr r.0, 0 // get PID from SR0
+
+ ori r.0, r.0, 12 // T=0 Ks,Kp=0 VSID=pgdir,12
+ mtsr 12, r.0
+ li r.0, 9 // T=0 Ks,Kp=0 VSID=9
+ mtsr 9, r.0
+ li r.0, 10 // T=0 Ks,Kp=0 VSID=10
+ mtsr 10, r.0
+ li r.0, 13 // T=0 Ks,Kp=0 VSID=13
+ mtsr 13, r.0
+ isync // context synchronize
+
+//
+// Allocate stack frame and save old stack pointer and other volatile
+// registers in trap frame (needed if user mode APC needs to be run
+// on exit).
+//
+
+ lbz r.0, ThDebugActive(r.13) // get h/w debug flag
+ stw r.sp, TrGpr1 + TF_BASE - USER_SYS_CALL_FRAME(r.11)
+ stw r.sp, CrBackChain - USER_SYS_CALL_FRAME(r.11)
+ stw r.2, TrGpr2 + TF_BASE - USER_SYS_CALL_FRAME(r.11)
+ subi r.sp, r.11, USER_SYS_CALL_FRAME
+ stw r.3, TrGpr3 + TF_BASE - USER_SYS_CALL_FRAME(r.11)
+ stw r.4, TrGpr4 + TF_BASE(r.sp)
+ stw r.5, TrGpr5 + TF_BASE(r.sp)
+ stw r.6, TrGpr6 + TF_BASE(r.sp)
+ cmpwi cr.1, r.0, 0 // Hardware debug register set?
+ stw r.7, TrGpr7 + TF_BASE(r.sp)
+ stw r.8, TrGpr8 + TF_BASE(r.sp)
+ stw r.9, TrGpr9 + TF_BASE(r.sp)
+ stw r.10, TrGpr10 + TF_BASE(r.sp)
+
+ bne- cr.1, ssd.dbg_regs // jif debug registers set
+ b ssd.30 // join common code
+
+//
+// Processor was in supervisor state. We'll add our stack frame to the stack
+// whose address is still in r.1 from the point of interrupt.
+//
+// Test stack (r.sp) for 8 byte alignment, overflow or underflow.
+//
+
+ssd.20:
+
+ andi. r.0, r.sp, 7 // 8-byte align into cr.0
+ lwz r.0, KiPcr+PcStackLimit(r.0) // get current stack limit
+ cmplw cr.1, r.sp, r.11 // underflow, into cr.1
+ subi r.11, r.sp, KERN_SYS_CALL_FRAME // allocate stack frame; ptr into r.11
+ cmplw cr.5, r.11, r.0 // test for overflow, into cr.5
+ bgt- cr.1, ssd.stk_err // jif stack has underflowed
+ bne- cr.0, ssd.stk_err // jif stack is misaligned
+ blt- cr.5, ssd.stk_err // jif stack has overflowed
+
+//
+// Stack looks ok, use it. First, save the old stack pointer in the
+// back chain. Also, we need an additional scratch register, save r.10
+// now (already done in user mode case).
+//
+
+ stw r.sp, CrBackChain(r.11) // save old stack pointer
+ stw r.10,TrGpr10+TF_BASE(r.11) // save r.10
+ ori r.sp, r.11, 0 // set new stack pointer
+
+//
+// The following code is common to both user mode and kernel mode entry.
+// Stack address in r.sp, Save Trap Frame volatile registers.
+//
+
+ssd.30:
+
+ mflr r.0 // get return address
+ mffs f.0 // get FPSCR
+ lbz r.11, KiPcr+PcCurrentIrql(r.0) // get old (current) irql
+ stw r.12, TrMsr + TF_BASE(r.sp) // save SRR1 (MSR)
+ stw r.0, TrIar + TF_BASE(r.sp) // save return addr in TrapFrame
+ stw r.0, TrLr + TF_BASE(r.sp) // save Link register
+ stfd f.0, TrFpscr + TF_BASE(r.sp) // save FPSCR
+ stb r.11, TrOldIrql + TF_BASE(r.sp) // save current Irql in tf
+
+//
+// Use the service code as an index into the thread's service table, and call
+// the routine indicated.
+//
+// At this point-
+//
+// r.0 (scratch)
+// r.1 contains the current stack pointer.
+// r.2 contains the service code (still).
+// r.3 - r.10 are untouched (r.10 saved in trap frame)
+// r.11 is available
+// r.12 contains the old MSR value
+// r.13 contains the current thread address
+// r.14 - r.31 are untouched
+//
+// Warning, don't enable interrupts until you (at least) don't care what's
+// in r.13
+//
+
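+// A hedged C-like sketch of the dispatch performed from here through
+// SystemServiceRepeat and ssd.150 (names follow the assembly; this is
+// an illustration only):
+//
+//     Thread->PreviousMode = MSR_PR bit of the saved MSR;
+//     TrapFrame.TrapFrame  = Thread->TrapFrame;    // save old, set new
+//     Thread->TrapFrame    = &TrapFrame;
+//     table   = Thread->ServiceTable + table_offset(ServiceCode);
+//     index   = ServiceCode & SERVICE_NUMBER_MASK;
+//     if (index >= table->Limit)   goto ssd.convert_to_gui;
+//     routine = table->Base[index];
+//     if (GUI service && Teb->GdiBatchCount)   KeGdiFlushUserBatch();
+//     if (routine & 1)   copy the caller's in-memory arguments;
+//     status  = routine(args in r.3 .. r.10, plus in-memory args);
+//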
+ lbz r.0, ThPreviousMode(r.13) // get old previous mode
+ lwz r.11, ThTrapFrame(r.13) // get current trap frame address
+ extrwi r.12, r.12, 1, MSR_PR // extract user mode bit
+ stb r.12, ThPreviousMode(r.13) // set new previous mode
+ stb r.0, TrPreviousMode+TF_BASE(r.sp) // save old previous mode
+ stw r.11, TrTrapFrame+TF_BASE(r.sp) // save current trap frame address
+
+ la r.0, TF_BASE(r.sp) // get trap frame address
+ lwz r.12, ThServiceTable(r.13) // get service descriptor table address
+ stw r.0, ThTrapFrame(r.13) // store trap frame address
+
+#if DBG
+ lbz r.0, ThKernelApcDisable(r.13) // get current APC disable count
+ stb r.0, TrSavedKernelApcDisable+TF_BASE(r.sp) // save APC disable count
+ lbz r.13, ThApcStateIndex(r.13) // get current APC state index
+ stb r.13, TrSavedApcStateIndex+TF_BASE(r.sp) // save APC state index
+#endif
+
+ mfmsr r.0 // fetch the current MSR
+ lwz r.13, KiPcr+PcTeb(r.0) // restore Teb (ok enable ints)
+ ori r.0, r.0, INT_ENA // enable interrupts
+ mtmsr r.0 // external interrupts enabled
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+
+SystemServiceRepeat:
+
+ rlwinm r.10, r.2, 32-SERVICE_TABLE_SHIFT, SERVICE_TABLE_MASK
+ add r.12, r.12, r.10 // compute service descriptor address
+ cmpwi cr.7, r.10, SERVICE_TABLE_TEST // is this a GUI service?
+ lwz r.11, SdLimit(r.12) // get service number limit
+ rlwinm r.0, r.2, 0, SERVICE_NUMBER_MASK // isolate service table offset
+ cmplw r.0, r.11 // check service number against limit
+ bge ssd.convert_to_gui // jump if service number too high
+
+ slwi r.0, r.0, 2 // compute system service offset value
+
+#if DBG
+ lwz r.11, SdCount(r.12) // get service count table address
+ cmpwi r.11, 0 // does table exist?
+ beq ssd.100 // if not, skip update
+ lwzux r.2, r.11, r.0 // get count and addr of count
+ addi r.2, r.2, 1 // increment service count
+ stw r.2, 0(r.11) // store new count
+ssd.100:
+#endif
+
+ lwz r.11, SdBase(r.12) // get service table address
+ lwz r.2, -4(r.11) // get toc for this service table
+ lwzx r.11, r.11, r.0 // get address of service routine
+
+//
+// If the system service is a GUI service and the GDI user batch queue is
+// not empty, then call the appropriate service to flush the user batch.
+//
+
+ bne cr.7, ssd.115 // if ne, not GUI system service
+
+ lwz r.10, TeGdiBatchCount(r.13) // get number of batched GDI calls
+ stw r.11, TrGpr11+TF_BASE(r.sp) // save service routine address
+ cmpwi r.10, 0 // check number of batched GDI calls
+
+ stw r.3, TrGpr3+TF_BASE(r.sp) // save arguments (r10 already saved)
+ beq ssd.115 // if eq, no batched calls
+ bl ssd.113 // get a base address to use to load toc
+ssd.113:
+ stw r.2, TrGpr2+TF_BASE(r.sp) // save service table TOC
+ mflr r.2 // get &ssd.113
+ stw r.4, TrGpr4+TF_BASE(r.sp)
+ lwz r.2, toc_pointer-ssd.113(r.2) // load toc address
+ stw r.5, TrGpr5+TF_BASE(r.sp)
+ lwz r.2, [toc]KeGdiFlushUserBatch(r.2) // get address of flush routine
+ stw r.6, TrGpr6+TF_BASE(r.sp)
+ lwz r.2, 0(r.2) // get address of descriptor
+ stw r.7, TrGpr7+TF_BASE(r.sp)
+ lwz r.3, 0(r.2) // get address of flush routine
+ stw r.8, TrGpr8+TF_BASE(r.sp)
+ lwz r.2, 4(r.2) // get TOC for flush routine
+ mtctr r.3
+ stw r.9, TrGpr9+TF_BASE(r.sp)
+ stw r.0, TrGpr0+TF_BASE(r.sp) // save locals in r0 and r12
+ stw r.12, TrGpr12+TF_BASE(r.sp)
+ bctrl // call GDI user batch flush routine
+
+ lwz r.11,TrGpr11+TF_BASE(r.sp) // restore service routine address
+
+ lwz r.3,TrGpr3+TF_BASE(r.sp) // restore arguments (except r10)
+ lwz r.4,TrGpr4+TF_BASE(r.sp)
+ lwz r.5,TrGpr5+TF_BASE(r.sp)
+ lwz r.6,TrGpr6+TF_BASE(r.sp)
+ lwz r.7,TrGpr7+TF_BASE(r.sp)
+ lwz r.8,TrGpr8+TF_BASE(r.sp)
+ lwz r.9,TrGpr9+TF_BASE(r.sp)
+
+ lwz r.0,TrGpr0+TF_BASE(r.sp) // restore locals in r0 and r12
+ lwz r.12,TrGpr12+TF_BASE(r.sp)
+
+ lwz r.2,TrGpr2+TF_BASE(r.sp) // restore service table TOC
+
+ssd.115:
+
+//
+// Low-order bit of service table entry indicates presence of in-memory
+// arguments. Up to 8 args are passed in GPRs; any additional are passed
+// in memory in the caller's stack frame.
+//
+// note: we put the entry in the link register anyway, the bottom two bits
+// are ignored as a branch address.
+//
+
+ mtlr r.11 // set service routine address
+ andi. r.11, r.11, 1 // low-order bit set?
+ lwz r.10,TrGpr10+TF_BASE(r.sp) // restore r10
+ beq+ ssd.150 // jif no in-memory arguments
+
+//
+// Capture arguments passed in memory in caller's stack frame, to ensure that
+// caller does not modify them after they are probed and, in kernel mode,
+// because a trap frame has been allocated on the stack.
+//
+// For PowerPC, space for all the passed arguments is allocated in the caller's
+// stack frame. The first 8 words are actually passed in GPRs, but space is
+// allocated for them in case the called routine needs to take the address of
+// any of the arguments. Arguments past the 8th word are passed in storage.
+//
+// The "in-memory arguments" flag means that at least 9 words of parameters
+// are passed on this call, the first 8 being in GPRs 3 through 10. Since
+// we will call the system call target using our own stack frame, we must
+// copy our caller's in-memory arguments into our frame so that our callee
+// can find them.
+//
+// It is thought that the loop below, using 0-cycle branch-on-count, will be
+// faster on average than a straight branch-free copy of 8 words, but only
+// time will tell.
+
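+// Roughly, as a hedged sketch (the indexing shown is illustrative):
+//
+//     nbytes = ArgumentTable[service];         // from SdNumber, bytes to copy
+//     for (i = 0; i < nbytes / 4; i++)         // copy the in-memory words
+//         my_frame.Parameter[8 + i] = caller_frame.Parameter[8 + i];
+//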
+ srwi r.0, r.0, 2 // compute argument count offset value
+ lwz r.11, SdNumber(r.12) // get pointer to argument count table
+ lwz r.12, 0(r.sp) // load caller's stack pointer
+ lbzx r.0, r.11, r.0 // load count of bytes to copy
+ addi r.11, r.sp, CrParameter7 // point to target, less 1 word
+ srwi r.0, r.0, 2 // compute count of words to copy
+ mtctr r.0 // move count to CTR
+ addi r.12, r.12, CrParameter7 // point to source, less 1 word
+ssd.120:
+ lwzu r.0, 4(r.12) // copy one word from source to
+ stwu r.0, 4(r.11) // target, updating pointers
+ bdnz ssd.120 // decrement count, jif non-zero
+
+//
+// Call the system service.
+//
+// Note that the non-volatile state is still in GPRs 13..31 and FPRs 14..31;
+// it is the called service routine's responsibility to preserve it.
+
+ssd.150:
+
+// In keeping with the assembler code in mips/x4trap.s, we put the
+// Trap Frame address in r.12 so that NtContinue() can find it, just
+// in case it happens to be the target.
+
+ la r.12, TF_BASE(r.sp) // mips code passes this in s8
+
+ blrl // call the target
+
+//----------------------------------------------------------
+//
+// Exit point for system calls
+//
+//----------------------------------------------------------
+
+ ALTERNATE_ENTRY (KiSystemServiceExit)
+
+//
+// Increment count of system calls and see if an APC interrupt should be
+// generated now.
+//
+// Restore old trap frame address from the current trap frame.
+//
+
+ lwz r.4, KiPcr+PcPrcb(r.0) // get processor block address
+ la r.12, TF_BASE(r.sp) // get trap frame address
+ lwz r.6, KiPcr+PcCurrentThread(r.0) // get current thread address
+ lbz r.8, KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ lwz r.5, PbSystemCalls(r.4) // get count of system calls
+ lwz r.9, TrTrapFrame(r.12) // get old trap frame address
+ lwz r.10, TrMsr(r.12) // load saved MSR value
+ addi r.5, r.5, 1 // bump count of system calls
+ stw r.9, ThTrapFrame(r.6) // restore old trap frame address
+ stw r.5, PbSystemCalls(r.4) // store new count of system calls
+
+//
+// KiServiceExit is an alternate entry referenced by KiCallUserMode in callout.s.
+// On entry:
+// r6 -- current thread
+// r8 -- contains current IRQL
+// r10 -- contains saved MSR
+// r12 -- points to trap frame
+//
+// NOTE: r.sp CANNOT BE USED FROM THIS POINT ON, except on paths that cannot
+// have come from KiCallUserMode. This is because the stack pointer is
+// different when this is a normal system service than when this is a user
+// mode callout.
+//
+
+ ALTERNATE_ENTRY (KiServiceExit)
+
+#if DBG
+ lbz r.5, ThKernelApcDisable(r.6) // get current APC disable count
+ lbz r.7, ThApcStateIndex(r.6) // get current APC state index
+ lbz r.4, TrSavedKernelApcDisable(r.12) // get previous APC disable count
+ lbz r.9, TrSavedApcStateIndex(r.12) // get previous APC state index
+ xor r.4, r.4, r.5 // compare APC disable count
+ xor r.7, r.9, r.7 // compare APC state index
+ or. r.7, r.7, r.4 // merge comparison value
+ bne ssd.badapc // if ne, invalid state or count
+#endif
+
+ cmplwi r.8, APC_LEVEL // APC deliverable?
+ bge ssd.190 // if IRQL >= APC_LEVEL, skip APC delivery
+ lbz r.7, KiPcr+PcApcInterrupt(r.0)
+ extrwi r.8, r.10, 1, MSR_PR // extract problem state bit
+ or. r.0, r.7, r.8 // user mode || intr pending
+ beq+ ssd.190 // jif neither
+ addic. r.7, r.7, -1
+ beq ssd.160 // apc interrupt
+
+// no interrupt pending but going to user mode, check for user mode apc
+// pending.
+
+ li r.7, 0
+ stb r.7, ThAlerted(r.6) // clear kernel mode alerted
+ lbz r.6, ThApcState+AsUserApcPending(r.6)
+ cmplwi r.6, 0
+ beq+ ssd.190 // none pending, continue
+
+ la r.4, EF_BASE - TF_BASE (r.12) // addr of Exception Frame
+ b ssd.170
+
+ssd.170ep:
+ la r.4, EF_BASE (r.sp) // addr of Exception Frame
+ la r.12, TF_BASE (r.sp) // addr of Trap Frame
+ b ssd.170
+
+ssd.160:
+ cmplwi r.8, 0 // check previous mode
+ stb r.7, KiPcr+PcApcInterrupt(r.0) // clear pending intr flag
+ la r.4, EF_BASE - TF_BASE (r.12) // addr of Exception Frame
+ beq ssd.180 // if previous mode == kernel
+ssd.170:
+
+// Call KiDeliverApc() for pending APC, previous mode == user. Before doing
+// so, we must store the non-volatile state into the Exception Frame, for
+// KiDeliverApc() takes Trap Frame and Exception Frame as input.
+
+ stw r.13, ExGpr13 (r.4) // store non-volatile GPRs
+ stw r.14, ExGpr14 (r.4)
+ stw r.15, ExGpr15 (r.4)
+ stw r.16, ExGpr16 (r.4)
+ stw r.17, ExGpr17 (r.4)
+ stw r.18, ExGpr18 (r.4)
+ stw r.19, ExGpr19 (r.4)
+ stw r.20, ExGpr20 (r.4)
+ stw r.21, ExGpr21 (r.4)
+ stw r.22, ExGpr22 (r.4)
+ stw r.23, ExGpr23 (r.4)
+ stw r.24, ExGpr24 (r.4)
+ stw r.25, ExGpr25 (r.4)
+ stw r.26, ExGpr26 (r.4)
+ stw r.27, ExGpr27 (r.4)
+ stw r.28, ExGpr28 (r.4)
+ stw r.29, ExGpr29 (r.4)
+ stw r.30, ExGpr30 (r.4)
+ stw r.31, ExGpr31 (r.4)
+
+ stfd f.14, ExFpr14 (r.4) // save non-volatile FPRs
+ stfd f.15, ExFpr15 (r.4)
+ stfd f.16, ExFpr16 (r.4)
+ stfd f.17, ExFpr17 (r.4)
+ stfd f.18, ExFpr18 (r.4)
+ stfd f.19, ExFpr19 (r.4)
+ stfd f.20, ExFpr20 (r.4)
+ stfd f.21, ExFpr21 (r.4)
+ stfd f.22, ExFpr22 (r.4)
+ stfd f.23, ExFpr23 (r.4)
+ stfd f.24, ExFpr24 (r.4)
+ stfd f.25, ExFpr25 (r.4)
+ stfd f.26, ExFpr26 (r.4)
+ stfd f.27, ExFpr27 (r.4)
+ stfd f.28, ExFpr28 (r.4)
+ stfd f.29, ExFpr29 (r.4)
+ stfd f.30, ExFpr30 (r.4)
+ stfd f.31, ExFpr31 (r.4)
+
+// Also, clear volatile state within the trap frame that will be restored
+// by NtContinue when the APC completes and that has not already been set to
+// reasonable values (i.e., what wasn't saved on entry).
+
+ li r.0, 0
+ stw r.0, TrGpr11(r.12)
+ stw r.0, TrGpr12(r.12)
+ stw r.0, TrGpr0 (r.12)
+ stw r.0, TrXer (r.12)
+
+// Call to KiDeliverApc requires three parameters:
+// r.3 Previous Mode
+// r.4 addr of Exception Frame
+// r.5 addr of Trap Frame
+
+ssd.180:
+ bl ssd.185 // get a base address to
+ssd.185: // use to load kernel toc
+ lwz r.6, KiPcr+PcPrcb(r.0) // get address of PRCB
+ li r.5, APC_LEVEL // raise Irql
+ stw r.3, TrGpr3(r.12) // save sys call return value
+ lwz r.0, TrMsr (r.12) // load user's MSR value from trap frame
+ stb r.5, KiPcr+PcCurrentIrql(r.0)
+ ori r.5, r.12, 0 // r.5 <- trap frame addr
+ lwz r.7, PbApcBypassCount(r.6) // get APC bypass count
+ mflr r.2 // get &ssd.185
+ extrwi r.3, r.0, 1, MSR_PR // r.3 <- prev. state
+ addi r.7, r.7, 1 // increment APC bypass count
+
+ stw r.12, DeliverApcSaveTrap(r.sp) // save trap frame addr
+ lwz r.2, toc_pointer-ssd.185(r.2) // load toc address
+ stw r.7, PbApcBypassCount(r.6) // store new APC bypass count
+ bl ..KiDeliverApc // process pending apc
+ lwz r.12, DeliverApcSaveTrap(r.sp) // restore trap frame addr
+
+ li r.8, 0
+ stb r.8, KiPcr+PcCurrentIrql(r.0) // restore old IRQL
+ lwz r.10, TrMsr(r.12) // get caller's MSR value
+ lwz r.3, TrGpr3(r.12) // restore sys call result
+ssd.190:
+
+//
+// Return to the caller, in the proper mode.
+//
+// As this is like an ordinary call, we need not restore the volatile
+// registers. We must preserve r.3, the possible return value from
+// the system service routine.
+//
+// If we are returning to user state, we zero the rest of the volatile
+// state to prevent unauthorized viewing of left-over information.
+//
+// The non-volatile state has been preserved by our callee, as for all calls.
+//
+// We get the caller's stack frame pointer out of the back chain field
+// in our stack frame header, not by incrementing our stack pointer, because
+// the caller's stack may be user while ours is known to be kernel.
+//
+// We must reload these:
+// caller's TOC pointer (r.2)
+// caller's stack pointer (r.1)
+// caller's instruction address
+//
+// We already have
+// caller's MSR value (r.10)
+//
+// We can use (and must clear) these:
+// r.0
+// r.4 - r.9, r.11, r.12
+// f.0 - f.13
+// XER
+// CR
+
+ lfd f.0, TrFpscr(r.12) // get saved FPSCR
+ lwz r.7, KiPcr+PcCurrentThread(r.0) // get current thread address
+ extrwi. r.0, r.10, 1, MSR_PR // see if resuming user mode
+ lbz r.6, TrPreviousMode(r.12) // get old previous mode
+ mfmsr r.8 // fetch the current MSR value
+ lwz r.11, TrIar(r.12) // get caller's resume address
+ rlwinm r.8, r.8, 0, ~(INT_ENA) // clear int enable
+ stb r.6, ThPreviousMode(r.7) // restore old previous mode
+ mtfsf 0xff, f.0 // restore FPSCR
+ mtmsr r.8 // disable interrupts
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ bne ssd.200 // branch if resuming user mode
+
+// Resuming kernel mode -- don't bother to clear the volatile state
+
+
+ lwz r.sp, 0(r.sp) // reload caller's stack pointer
+
+//
+// WARNING: Cannot tolerate a TLB/HPT miss from here thru rfi.
+//
+
+KiServiceExitKernelRfiJump:
+ b $+(KiServiceExitKernelRfi-Kseg0CodeStart)
+
+// Resuming user mode -- clear the volatile state
+
+ssd.200:
+ lbz r.6, ThDebugActive(r.7)
+ lis r.5, K_BASE // base address of KSEG0
+ lfd f.0, FpZero-real0(r.5) // load FP 0.0
+ li r.0, 0 // clear a GP reg
+ lwz r.toc, TrGpr2(r.12) // reload caller's TOC pointer
+ cmpwi cr.1, r.6, 0 // Hardware debug register set?
+ lwz r.8, TrLr(r.12) // get saved LR
+ mtxer r.0 // clear the XER
+ bne cr.1, ssd.220 // jif debug registers set
+
+ssd.210:
+ fmr f.1, f.0 // clear remaining volatile FPRs
+ lwz r.5, TrGpr5(r.12)
+ mtctr r.0 // clear the CTR
+ fmr f.2, f.0
+ lwz r.6, TrGpr6(r.12)
+ lis r.9, SREG_INVAL // invalid segment reg value
+ fmr f.3, f.0
+ lwz r.4, TrGpr4(r.12)
+ fmr f.4, f.0
+ lwz r.sp, TrGpr1(r.12) // reload caller's stack pointer
+ fmr f.5, f.0
+ li r.7, 0
+ fmr f.6, f.0
+ fmr f.7, f.0
+ fmr f.8, f.0
+ mtlr r.8 // restore saved LR
+ fmr f.9, f.0
+ li r.8, 0
+ fmr f.10, f.0
+ mtsr 9, r.9 // invalidate sregs 9, 10, 12, 13
+ fmr f.11, f.0
+ mtsr 10, r.9
+ fmr f.12, f.0
+ mtsr 12, r.9
+ fmr f.13, f.0
+ mtsr 13, r.9
+
+//
+// WARNING: Cannot tolerate a TLB/HPT miss from here thru rfi.
+//
+
+KiServiceExitUserRfiJump:
+ b $+(KiServiceExitUserRfi-Kseg0CodeStart)
+
+
+
+
+
+
+//
+// We get here from above if we are going to user mode and the h/w debug
+// registers are being used. This is where we re-enable them for this
+// processor. The code is out of line on the theory that it isn't used
+// all that often.
+//
+
+ssd.220:
+ lwz r.4, TrDr1 (r.12) // Get kernel DABR
+ lwz r.5, TrDr7 (r.12)
+ lwz r.6, TrDr0 (r.12) // Get kernel IABR
+ rlwinm r.4, r.4, 0, 0xfffffff8 // Sanitize DABR (Dr1)
+ ori r.6, r.6, 0x3 // Sanitize IABR (Dr0) 604
+ ori r.4, r.4, 0x4 // Sanitize DABR 604
+//
+// WARNING: Don't rearrange this branch table. The first branch is overlayed
+// with the correct branch instruction (modified) based on the processor
+// during system initialization. The correct order is 601, 603, 604, skip.
+//
+BranchDr4:
+ b ssd.201 // 601
+ b ssd.203 // 603
+ b ssd.204 // 604/613
+ b ssd.210 // unknown
+
+ssd.201: // 601 SPECIFIC
+ lis r.9, 0x6080 // Full cmp., trace mode except.
+ rlwinm r.4, r.4, 0, 0xfffffff8 // Sanitize DABR (Dr1)
+ rlwinm r.6, r.6, 0, 0xfffffffc // Sanitize IABR (Dr0)
+ mtspr hid1, r.9
+
+ssd.204:
+ rlwinm. r.9, r.5, 0, 0x0000000c // LE1/GE1 set?
+ beq ssddr11.1 // jiff Dr1 not set
+ rlwimi r.4, r.5, 13, 30, 30 // Interchange R/W1 bits
+ rlwimi r.4, r.5, 11, 31, 31
+ mtspr dabr, r.4
+
+ssddr11.1:
+ rlwinm. r.5, r.5, 0, 0x00000003 // LE0/GE0 set?
+ beq ssd.210
+ mtspr iabr, r.6
+ isync
+ b ssd.210
+
+ssd.203: // 603 SPECIFIC
+ rlwinm r.6, r.6, 0, 0xfffffffc // Sanitize IABR
+ ori r.6, r.6, 0x2
+ mtspr iabr, r.6
+ b ssd.210
+
+
+
+
+
+ .align 5
+
+ssd.convert_to_gui:
+
+//
+// The specified system service number is not within range. Attempt to
+// convert the thread to a GUI thread if the specified system service is
+// not a base service and the thread has not already been converted to
+// a GUI thread.
+//
+// N.B. The argument registers r3-r10 and the system service number in r2
+// must be preserved if an attempt is made to convert the thread to
+// a GUI thread.
+//
+// At this point:
+//
+// r.0 contains masked service number (scratch)
+// r.1 contains the current stack pointer
+// r.2 contains the service code
+// r.3 - r.9 are untouched
+// r.10 contains the offset into the service descriptor table
+// r.11 contains the service number limit for the r.12 table (scratch)
+// r.12 contains the service descriptor address
+// r.13 contains the current TEB address
+// r.14 - r.31 are untouched
+// cr.7 is the result of comparing r.10 with 0 (if eq, service is a base service)
+//
+// On return to SystemServiceRepeat:
+//
+// r.0 is undefined
+// r.1 contains the current stack pointer
+// r.2 contains the service code
+// r.3 - r.9 are untouched
+// r.10 is undefined
+// r.11 is undefined
+// r.12 contains the service descriptor table address (ThWin32Thread)
+// r.13 contains the current TEB address
+// r.14 - r.31 are untouched
+//
+
+ bne cr.7, ssd.inv_service // if ne, not GUI system service
+
+ stw r.2,TrFpr0+TF_BASE(r.sp) // save system service number
+ stw r.3,TrGpr3+TF_BASE(r.sp) // save argument registers (except r10)
+ stw r.4,TrGpr4+TF_BASE(r.sp)
+ stw r.5,TrGpr5+TF_BASE(r.sp)
+ stw r.6,TrGpr6+TF_BASE(r.sp)
+ stw r.7,TrGpr7+TF_BASE(r.sp)
+ stw r.8,TrGpr8+TF_BASE(r.sp)
+ stw r.9,TrGpr9+TF_BASE(r.sp)
+ bl ssd.221 // get a base address to
+ssd.221: // use to load toc
+ mflr r.2 // get &ssd.221
+ lwz r.2, toc_pointer-ssd.221(r.2) // load toc address
+ bl ..PsConvertToGuiThread // attempt to convert to GUI thread
+ ori r.11,r.3, 0 // save completion status
+
+ la r.0, TF_BASE(r.sp) // get trap frame address
+ stw r.0, ThTrapFrame(r.13) // store trap frame address
+
+ lwz r.2,TrFpr0+TF_BASE(r.sp) // restore system service number
+ lwz r.3,TrGpr3+TF_BASE(r.sp) // restore argument registers (except r10)
+ lwz r.4,TrGpr4+TF_BASE(r.sp)
+ lwz r.5,TrGpr5+TF_BASE(r.sp)
+ lwz r.6,TrGpr6+TF_BASE(r.sp)
+ lwz r.7,TrGpr7+TF_BASE(r.sp)
+ lwz r.8,TrGpr8+TF_BASE(r.sp)
+ lwz r.9,TrGpr9+TF_BASE(r.sp)
+
+ lwz r.12,KiPcr+PcCurrentThread(r.0) // get current thread address
+ lwz r.12,ThServiceTable(r.12) // get service dispatcher table address
+ cmpwi r.11,0 // did conversion work?
+ beq SystemServiceRepeat // if yes, retry
+
+//
+// Invalid system service code number found in r.2
+//
+
+ssd.inv_service:
+ LWI (r.3, STATUS_INVALID_SYSTEM_SERVICE)
+ b ..KiSystemServiceExit
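+
+// For illustration, a C-level sketch of the retry protocol above. The names
+// ending in "Sketch" are stand-ins; the real code preserves the argument
+// registers and the service number across the call and re-enters the
+// dispatcher at SystemServiceRepeat on success.
+
+extern long PsConvertToGuiThreadSketch(void);   // models ..PsConvertToGuiThread,
+                                                // returns 0 (STATUS_SUCCESS) on success
+
+static int
+SketchRepairOutOfRangeService (
+    int ServiceIsBaseService                    // nonzero if the number indexed the base table
+    )
+{
+    //
+    // Only a Win32 (non-base) service number can be repaired by converting
+    // the thread to a GUI thread; an out-of-range base service number is
+    // simply invalid.
+    //
+
+    if (ServiceIsBaseService) {
+        return 0;                               // caller returns STATUS_INVALID_SYSTEM_SERVICE
+    }
+
+    //
+    // On success the thread's ThServiceTable now refers to the larger
+    // Win32 table, so the lookup is retried with the original arguments.
+    //
+
+    if (PsConvertToGuiThreadSketch() == 0) {
+        return 1;                               // retry at SystemServiceRepeat
+    }
+
+    return 0;                                   // conversion failed: invalid service
+}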
+
+#if DBG
+
+ssd.badapc:
+
+//
+//
+// An attempt is being made to exit a system service while kernel APCs are
+// disabled, or while attached to another process and the previous mode is
+// not kernel.
+//
+// r5 - Supplies the APC disable count.
+// r6 - Supplies the APC state index.
+//
+
+ mflr r.0 // save LR
+ bl ssd.badapc.1 // get a base address to
+ssd.badapc.1: // use to load kernel toc
+ mflr r.2 // get &ssd.badapc.1
+ lwz r.2, toc_pointer-ssd.badapc.1(r.2) // load toc address
+ mtlr r.0 // restore LR
+ li r.3, SYSTEM_EXIT_OWNED_MUTEX // set bug check code
+ li r.4, 0 // mutex levels have been removed
+ bl ..KeBugCheckEx // call bug check routine
+ b $
+
+#endif
+
+//
+// stack overflow/underflow/misalign on system call
+//
+// We need to convert this system call into a code panic trap, saving
+// state information such that common_exception_entry's handler can
+// deal with it. We have already trashed a certain amount of info
+// but what still exists we will set up in the manner common_exception_
+// entry's handler expects.
+
+ssd.stk_err:
+ stw r.2, KiPcr+PCR_SAVE2(r.0) // save service code
+ stw r.3, KiPcr+PCR_SAVE3(r.0) // save gprs 3 thru 6
+ stw r.4, KiPcr+PCR_SAVE4(r.0)
+ stw r.5, KiPcr+PCR_SAVE5(r.0)
+ stw r.6, KiPcr+PCR_SAVE6(r.0)
+ mfcr r.5 // preserved CR
+ mflr r.3 // fake up srr0
+ ori r.4, r.12, 0 // srr1
+ lis r.12, 0xdead // mark those we already
+ stw r.12, KiPcr+PCR_SAVE11(r.0) // lost
+ ori r.13, r.12, 0
+
+ lwz r.6, KiPcr+PcPanicStack(r.0) // switch to panic stack
+ li r.2, CODE_PANIC // set exception cause to panic
+ subi r.11, r.6, KERNEL_STACK_SIZE // compute stack limit
+ stw r.6, KiPcr+PcInitialStack(r.0) // so we don't repeat ourselves
+ // ie, avoid overflowing because
+ // we went to the panic stack.
+ stw r.11, KiPcr+PcStackLimit(r.0) // set stack limit
+ subi r.6, r.6, STACK_DELTA_NEWSTK // allocate stack frame
+ b cee.30 // process exception
+
+//
+// The following code is used to clear the hardware debug registers in
+// the event that we have just come from user mode and they are set.
+//
+// This code is out of line because it is expected to be executed
+// infrequently.
+//
+
+ssd.dbg_regs:
+
+ li r.3, 0 // Initialize DR7
+ lwz r.5, KiPcr+PcPrcb(r.0) // get processor block address
+ lwz r.4, DR_BASE + SrKernelDr7(r.5) // Kernel DR set?
+ rlwinm r.4, r.4, 0, 0xFF
+ cmpwi cr.7, r.4, 0
+ stw r.3, TrDr7 + TF_BASE(r.sp) // No DRs set
+ stw r.3, TrDr6 + TF_BASE(r.sp) // Not a DR breakpoint
+ lwz r.7, DR_BASE + SrKernelDr0(r.5) // Get kernel IABR
+ lwz r.8, DR_BASE + SrKernelDr1(r.5) // Get kernel DABR
+ ori r.7, r.7, 0x3 // Sanitize IABR (Dr0)
+ ori r.8, r.8, 0x4 // Sanitize DABR (Dr1)
+
+//
+// WARNING: Don't rearrange this branch table. The first branch is overlayed
+// with the correct branch instruction (modified) based on the processor
+// during system initialization. The correct order is 601, 603, 604, skip.
+//
+BranchDr3:
+ b ssd.dbg_10 // 601
+ b ssd.dbg_30 // 603
+ b ssd.dbg_20 // 604/613
+ b ssd.30 // unknown - back into mainline
+
+ssd.dbg_10: // 601 SPECIFIC
+ li r.3, 0x0080 // Normal run mode
+ rlwinm r.7, r.7, 0, 0xfffffffc // Sanitize IABR (Dr0)
+ rlwinm r.8, r.8, 0, 0xfffffff8 // Sanitize DABR (Dr1)
+ bne cr.7, ssd.dbg_20 // Leave hid1 set for full cmp
+ mtspr hid1, r.3
+
+ssd.dbg_20: // 601/604 SPECIFIC
+ mfspr r.3, iabr // Load the IABR (Dr0)
+ rlwinm. r.3, r.3, 0, 0xfffffffc // IABR(DR0) set?
+ li r.4, 0 // Initialize Dr7
+ stw r.3, TrDr0 + TF_BASE(r.sp)
+ mfspr r.3, dabr // Load the DABR (Dr1)
+ beq ssiabr.1 // jiff Dr0 not set
+ li r.4, 0x1 // Set LE0 in Dr7
+
+ssiabr.1:
+ rlwimi r.4, r.3, 19, 11, 11 // Interchange R/W1 bits
+ rlwimi r.4, r.3, 21, 10, 10 // and move to Dr7
+ rlwinm. r.3, r.3, 0, 0xfffffff8 // Sanitize Dr1
+ stw r.3, TrDr1 + TF_BASE(r.sp) // Store Dr1 in trap frame
+ beq ssdabr.1 // jiff Dr1 not set
+ ori r.4, r.4, 0x4 // Set LE1 in Dr7
+
+ssdabr.1:
+ ori r.4, r.4, 0x100 // Set LE bit in Dr7
+ stw r.4, TrDr7 + TF_BASE(r.sp)
+ li r.4, 0
+ beq cr.7, sskdr.1 // jif no kernel DR set
+ lwz r.3, DR_BASE + SrKernelDr7(r.5)
+ rlwinm. r.4, r.3, 0, 0x0000000c // LE1/GE1 set?
+ beq ssdr1.1 // jiff Dr1 not set
+ rlwimi r.8, r.3, 13, 30, 30 // Interchange R/W1 bits
+ rlwimi r.8, r.3, 11, 31, 31
+ mtspr dabr, r.8
+
+ssdr1.1:
+ rlwinm. r.3, r.3, 0, 0x00000003 // LE0/GE0 set?
+ beq ssd.dbg_90
+ mtspr iabr, r.7
+ isync
+ b ssd.dbg_90
+
+ssd.dbg_30: // 603 SPECIFIC
+ mfspr r.3, iabr // Load the IABR (Dr0)
+ rlwinm. r.3, r.3, 0, 0xfffffffc // Sanitize Dr0
+ li r.4, 0x101 // Initialize Dr7
+ stw r.3, TrDr0 + TF_BASE(r.sp)
+ stw r.4, TrDr7 + TF_BASE(r.sp)
+ li r.4, 0
+ beq cr.7, sskdr.2 // jif no kernel DR set
+ rlwinm r.7, r.7, 0, 0xfffffffc // Sanitize IABR
+ ori r.7, r.7, 0x2
+ mtspr iabr, r.7
+ b ssd.dbg_90
+
+sskdr.2:
+ mtspr iabr, r.4
+ b ssd.dbg_90
+
+sskdr.1:
+ mtspr dabr, r.4
+ mtspr iabr, r.4
+ isync
+
+ssd.dbg_90:
+ lwz r.8, TrGpr8 + TF_BASE(r.sp) // reload registers that
+ lwz r.7, TrGpr7 + TF_BASE(r.sp) // we clobbered
+ lwz r.5, TrGpr5 + TF_BASE (r.sp) // including cr.0
+ lwz r.4, TrGpr4 + TF_BASE (r.sp) // set cr.0 again
+ lwz r.3, TrGpr3 + TF_BASE (r.sp)
+ cmpwi r.2, 0
+ b ssd.30 // go back to main line
+
+ DUMMY_EXIT(KiSystemServiceDispatch)
+
+//
+// Define the size of a cache block. This is 32 bytes on all currently
+// supported processors; it is 64 bytes on some of the newer processors,
+// including the 620.
+//
+
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define BLOCK_SZ 32
+
+
+//++
+//
+// VOID
+// KeZeroPage(
+// ULONG PageFrame
+// );
+//
+// Routine Description:
+//
+// Zero a page of memory using the fastest means possible.
+//
+// Arguments:
+//
+// PageFrame Page Number (in physical memory) of the page to
+// be zeroed.
+//
+// Return Value:
+//
+// None.
+//
+// BUGBUG
+//
+// There are a number of 603 errata related to the dcbz instruction.
+// It turns out that using dcbz on 603 class processors is also slow
+// (601/604 are considerably faster). So, rather than applying h/w
+// workarounds to the 603 problems, we use a simple loop.
+//
+//
+//--
+
+ .align 5
+ LEAF_ENTRY(KeZeroPage)
+
+ mfmsr r.9 // get current MSR value
+ li r.6, PAGE_SIZE/BLOCK_SZ // number of blocks to zero
+ li r.7, 0 // starting offset
+ mtctr r.6 // set iteration count
+ rlwinm r.8, r.9, 0, 0xffff7fff // disable interrupts
+ rlwinm r.8, r.8, 0, 0xffffffef // disable data translation
+ mtmsr r.8 // disable ints and data xlate
+ cror 0,0,0 // N.B. 603e/ev Errata 15
+ slwi r.3, r.3, PAGE_SHIFT // chg page number to phys address
+
+//
+// If the processor is NOT a member of the 603 family (603, 603e, 603ev)
+// the following instruction will have been replaced at init time with an
+// ISYNC instruction which is required to stop the processor from looking
+// ahead and doing address translation while we're turning it off.
+//
+// N.B. We use a bdnz because the 603 code does one less iteration (see
+// below).
+//
+
+kzp.repl:
+ bdnz+ KeZeroPage603 // use 603 code unless this inst
+ // has been replaced with an isync.
+
+zero_block:
+ dcbz r.3, r.7
+ addi r.7, r.7, BLOCK_SZ
+ bdnz zero_block
+
+ mtmsr r.9 // restore old interrupt and xlate
+ // settings
+ isync
+
+ ALTERNATE_EXIT(KeZeroPage)
+
+//
+// The following code is used for 603 class machines. The loop is
+// unrolled but part of the first and last blocks are done separately
+// in order to init f.1 which is used for the actual code.
+//
+// N.B. On a 601 or 604, the loop is faster if it is NOT unrolled.
+
+KeZeroPage603:
+ isync // allow no look ahead
+
+ stw r.7, 0(r.3) // zero first 8 bytes and reload
+ stw r.7, 4(r.3) // into an FP reg for rest of loop.
+ li r.5, 32 // size of cache block
+ lfd f.1, 0(r.3)
+
+zero_block603:
+ dcbtst r.5, r.3 // touch 32 bytes ahead
+ stfd f.1, 8(r.3)
+ stfd f.1,16(r.3)
+ stfd f.1,24(r.3)
+ stfdu f.1,32(r.3)
+ bdnz zero_block603
+
+//
+// Three 8 byte blocks to go.
+//
+
+ stfdu f.1, 8(r.3)
+ stfdu f.1, 8(r.3)
+ stfdu f.1, 8(r.3)
+
+ mtmsr r.9 // restore old interrupt and xlate
+ // settings
+ isync
+
+ LEAF_EXIT(KeZeroPage)
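+
+// For illustration, the net effect of both paths above in C (the Sketch
+// names and PAGE_SHIFT value are assumptions for the sketch; the real
+// routine also turns off interrupts and data translation so it can store
+// to the physical address directly):
+
+#define SKETCH_PAGE_SHIFT 12                    // 4KB pages
+#define SKETCH_PAGE_SIZE  (1 << SKETCH_PAGE_SHIFT)
+
+static void
+SketchZeroPage (
+    unsigned long PageFrame                     // physical page number
+    )
+{
+    //
+    // The 601/604 path zeroes a 32-byte cache block per dcbz; the 603 path
+    // stores an FP zero 8 bytes at a time. Either way the page ends up:
+    //
+
+    unsigned long *Block = (unsigned long *)(PageFrame << SKETCH_PAGE_SHIFT);
+    unsigned long Index;
+
+    for (Index = 0; Index < SKETCH_PAGE_SIZE / sizeof(unsigned long); Index += 1) {
+        Block[Index] = 0;
+    }
+}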
+
+//
+// The code for this routine really exists at label FlushSingleTb in this module.
+//
+
+ LEAF_ENTRY(KiFlushSingleTb)
+FlushSingleTbJump:
+ b $+(FlushSingleTb-Kseg0CodeStart)
+ LEAF_EXIT(KiFlushSingleTb)
+
+//
+// The code for this routine really exists at label FillEntryTb in this module.
+//
+
+ LEAF_ENTRY(KeFillEntryTb)
+FillEntryTbJump:
+ b $+(FillEntryTb-Kseg0CodeStart)
+ LEAF_EXIT(KeFillEntryTb)
+
+//
+// The code for this routine really exists at label FlushCurrentTb in this module.
+//
+
+ LEAF_ENTRY(KeFlushCurrentTb)
+FlushCurrentTbJump:
+ b $+(FlushCurrentTb-Kseg0CodeStart)
+ LEAF_EXIT(KeFlushCurrentTb)
+
diff --git a/private/ntos/ke/ppc/services.stb b/private/ntos/ke/ppc/services.stb
new file mode 100644
index 000000000..7598ffea9
--- /dev/null
+++ b/private/ntos/ke/ppc/services.stb
@@ -0,0 +1,81 @@
+//++
+//
+// Copyright (c) 1993 IBM Corporation and Microsoft Corporation
+//
+// Module Name:
+//
+// sysstubs.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch stub procedures.
+//
+// Author:
+//
+// Rick Simpson 16-Sep-1993
+//
+// Based on MIPS version by David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// User or kernel mode.
+//
+// Revision History:
+//
+//--
+
+#include <kxppc.h>
+
+ .text
+ .align 5
+
+ SPECIAL_ENTRY(_SysCallGlue)
+ stw r.31, 4(r.sp)
+ mflr r.31
+ stw r.2, 8(r.sp)
+ PROLOGUE_END(_SysCallGlue)
+ ori r.2, r.0, 0
+ bl ..ZwSysCallInstr
+ mtlr r.31
+ lwz r.31, 4(r.sp)
+ lwz r.2, 8(r.sp)
+ SPECIAL_EXIT(_SysCallGlue)
+
+#define STUBS_BEGIN1( t ) .text
+#define STUBS_BEGIN2( t ) .align 2
+#define STUBS_BEGIN3( t ) .globl ..ZwSysCallInstr
+#define STUBS_BEGIN4( t ) ..ZwSysCallInstr:
+#define STUBS_BEGIN5( t ) sc
+#define STUBS_BEGIN6( t ) blr
+#define STUBS_BEGIN7( t ) .reldata
+#define STUBS_BEGIN8( t ) .align 2
+
+#define STUBS_END
+
+#define SYSSTUBS_ENTRY1( ServiceNumber, Name, NumArgs ) .globl Zw##Name, ..Zw##Name
+#define SYSSTUBS_ENTRY2( ServiceNumber, Name, NumArgs ) .reldata
+#define SYSSTUBS_ENTRY3( ServiceNumber, Name, NumArgs ) Zw##Name: .long ..ZwSysCallInstr
+#define SYSSTUBS_ENTRY4( ServiceNumber, Name, NumArgs ) .long ServiceNumber
+#define SYSSTUBS_ENTRY5( ServiceNumber, Name, NumArgs ) .text
+#define SYSSTUBS_ENTRY6( ServiceNumber, Name, NumArgs ) ..Zw##Name:
+#define SYSSTUBS_ENTRY7( ServiceNumber, Name, NumArgs ) li r.2, ServiceNumber
+#define SYSSTUBS_ENTRY8( ServiceNumber, Name, NumArgs ) sc
+
+#define USRSTUBS_ENTRY1( ServiceNumber, Name, NumArgs) .reldata
+#define USRSTUBS_ENTRY2( ServiceNumber, Name, NumArgs) .globl Zw##Name, Nt##Name, ..Nt##Name, ..Zw##Name
+#define USRSTUBS_ENTRY3( ServiceNumber, Name, NumArgs) Zw##Name:
+#define USRSTUBS_ENTRY4( ServiceNumber, Name, NumArgs) Nt##Name: .long ..ZwSysCallInstr, ServiceNumber
+#define USRSTUBS_ENTRY5( ServiceNumber, Name, NumArgs) .text
+#define USRSTUBS_ENTRY6( ServiceNumber, Name, NumArgs) ..Zw##Name:
+#define USRSTUBS_ENTRY7( ServiceNumber, Name, NumArgs) ..Nt##Name: li r.0, ServiceNumber
+#define USRSTUBS_ENTRY8( ServiceNumber, Name, NumArgs) b .._SysCallGlue
+
+
+ STUBS_BEGIN1( "System Service Stub Procedures" )
+ STUBS_BEGIN2( "System Service Stub Procedures" )
+ STUBS_BEGIN3( "System Service Stub Procedures" )
+ STUBS_BEGIN4( "System Service Stub Procedures" )
+ STUBS_BEGIN5( "System Service Stub Procedures" )
+ STUBS_BEGIN6( "System Service Stub Procedures" )
+ STUBS_BEGIN7( "System Service Stub Procedures" )
+ STUBS_BEGIN8( "System Service Stub Procedures" )
diff --git a/private/ntos/ke/ppc/sources b/private/ntos/ke/ppc/sources
new file mode 100644
index 000000000..b6b5dc9c6
--- /dev/null
+++ b/private/ntos/ke/ppc/sources
@@ -0,0 +1,32 @@
+
+PPC_SOURCES=..\ppc\alignem.c \
+ ..\ppc\allproc.c \
+ ..\ppc\apcuser.c \
+ ..\ppc\callback.c \
+ ..\ppc\callout.s \
+ ..\ppc\clock.s \
+ ..\ppc\ctxswap.s \
+ ..\ppc\dmpstate.c \
+ ..\ppc\exceptn.c \
+ ..\ppc\flush.c \
+ ..\ppc\flushtb.c \
+ ..\ppc\getsetrg.c \
+ ..\ppc\initkr.c \
+ ..\ppc\intobj.c \
+ ..\ppc\intsup.s \
+ ..\ppc\ipi.c \
+ ..\ppc\irql.s \
+ ..\ppc\miscasm.s \
+ ..\ppc\mpipi.s \
+ ..\ppc\pcr.s \
+ ..\ppc\procstat.s \
+ ..\ppc\real0.s \
+ ..\ppc\spinlock.s \
+ ..\ppc\sysstubs.s \
+ ..\ppc\systable.s \
+ ..\ppc\threadbg.s \
+ ..\ppc\thredini.c \
+ ..\ppc\timindex.s \
+ ..\ppc\vdm.c
+
+
diff --git a/private/ntos/ke/ppc/spinlock.s b/private/ntos/ke/ppc/spinlock.s
new file mode 100644
index 000000000..1ef8d6889
--- /dev/null
+++ b/private/ntos/ke/ppc/spinlock.s
@@ -0,0 +1,598 @@
+// TITLE("Spin Locks")
+//++
+//
+// Copyright (c) 1993 IBM Corporation
+//
+// Module Name:
+//
+// spinlock.s
+//
+// Abstract:
+//
+// This module implements the routines for acquiring and releasing
+// spin locks.
+//
+// Author:
+//
+// Peter L. Johnston (plj@vnet.ibm.com) 28-Jun-93
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+ .extern ..KxReleaseSpinLock
+
+
+ SBTTL("Initialize Executive Spin Lock")
+//++
+//
+// VOID
+// KeInitializeSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function initializes an executive spin lock.
+//
+// Arguments:
+//
+// SpinLock (r3) - Supplies a pointer to an executive spinlock.
+//
+// Return Value:
+//
+// None.
+//
+// Remarks:
+//
+// Equivalent C code for body of function.
+// {
+// *spinlock = 0;
+// }
+//
+// Why is this in assembly code? I suspect simply so it is bundled
+// with the rest of the spin lock code.
+//--
+
+ LEAF_ENTRY(KeInitializeSpinLock)
+
+ li r.0, 0 // clear spin lock value by storing
+ stw r.0, 0(r.3) // zero at address from parameter.
+
+ LEAF_EXIT(KeInitializeSpinLock) // return
+
+ SBTTL("Acquire Executive Spin Lock")
+//++
+//
+// VOID
+// KeAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and acquires
+// the specified executive spinlock.
+//
+// Arguments:
+//
+// SpinLock (r3) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (r4) - Supplies a pointer to a variable that receives the
+// previous IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+// Remarks:
+//
+// In the UniProcessor version, we just raise IRQL, the lock address
+// is never touched.
+//
+// In the MultiProcessor version the spinlock is taken after raising
+// IRQL.
+//
+// N.B. The old IRQL must be stored AFTER the lock is acquired.
+//
+//--
+
+ LEAF_ENTRY_S(KeAcquireSpinLock,_TEXT$01)
+
+#if !defined(NT_UP)
+
+ DISABLE_INTERRUPTS(r.5, r.6)
+
+ lwz r.12, KiPcr+PcCurrentThread(r.0) // addr of current thread
+
+ ACQUIRE_SPIN_LOCK(r.3, r.12, r.11, kas.10, kas.15)
+
+#endif
+
+ li r.0, DISPATCH_LEVEL // new IRQL = DISPATCH_LEVEL
+ lbz r.10,KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ stb r.0, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+#if !defined(NT_UP)
+ ENABLE_INTERRUPTS(r.5)
+#endif
+
+ stb r.10, 0(r.4) // return old IRQL
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.3, r.11, kas.10, kas.15, kas.17, r.5, r.6)
+#endif
+
+ DUMMY_EXIT(KeAcquireSpinLock)
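+
+// For illustration, the MP acquire protocol above in C. The Sketch types
+// and helpers are stand-ins for the MSR and PCR accesses performed by the
+// DISABLE_INTERRUPTS / ACQUIRE_SPIN_LOCK macros.
+
+typedef unsigned char SKETCH_KIRQL;
+typedef volatile long SKETCH_KSPIN_LOCK;
+#define SKETCH_DISPATCH_LEVEL 2
+
+extern void SketchDisableInterrupts(void);          // clear MSR[EE]
+extern void SketchEnableInterrupts(void);           // restore MSR[EE]
+extern SKETCH_KIRQL SketchReadIrql(void);           // KiPcr->CurrentIrql
+extern void SketchWriteIrql(SKETCH_KIRQL NewIrql);
+extern int SketchTryLock(SKETCH_KSPIN_LOCK *Lock);  // one lwarx/stwcx. attempt
+
+static void
+SketchAcquireSpinLock (
+    SKETCH_KSPIN_LOCK *SpinLock,
+    SKETCH_KIRQL *OldIrql
+    )
+{
+    SKETCH_KIRQL Previous;
+
+    SketchDisableInterrupts();
+
+    while (!SketchTryLock(SpinLock)) {
+        //
+        // Spin with interrupts enabled until the lock looks free, then
+        // retry the atomic acquire with interrupts disabled again.
+        //
+        SketchEnableInterrupts();
+        while (*SpinLock != 0) {
+        }
+        SketchDisableInterrupts();
+    }
+
+    Previous = SketchReadIrql();
+    SketchWriteIrql(SKETCH_DISPATCH_LEVEL);         // raise to DISPATCH_LEVEL
+    SketchEnableInterrupts();
+
+    *OldIrql = Previous;                            // N.B. stored AFTER the lock is held
+}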
+
+ SBTTL("Acquire SpinLock and Raise to DPC")
+//++
+//
+// KIRQL
+// KeAcquireSpinLockRaiseToDpc (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to dispatcher level and acquires
+// the specified spinlock.
+//
+// Arguments:
+//
+// SpinLock (r3) - Supplies a pointer to the spinlock that is to be
+// acquired.
+//
+// Return Value:
+//
+// The previous IRQL is returned at the function value.
+//
+// Remarks:
+//
+// In the UniProcessor version, we just raise IRQL, the lock address
+// is never touched.
+//
+// In the MultiProcessor version the spinlock is taken after raising
+// IRQL.
+//
+// N.B. The old IRQL must be stored AFTER the lock is acquired.
+//
+//--
+
+ LEAF_ENTRY_S(KeAcquireSpinLockRaiseToDpc,_TEXT$01)
+
+//
+// On PPC, synchronization level is the same as dispatch level.
+//
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockRaiseToSynch)
+
+#if !defined(NT_UP)
+
+ DISABLE_INTERRUPTS(r.5, r.6)
+
+ lwz r.12, KiPcr+PcCurrentThread(r.0) // addr of current thread
+
+ ACQUIRE_SPIN_LOCK(r.3, r.12, r.11, kasrtd.10, kasrtd.15)
+
+#endif
+
+ li r.0, DISPATCH_LEVEL // new IRQL = DISPATCH_LEVEL
+ lbz r.10,KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ stb r.0, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+#if !defined(NT_UP)
+ ENABLE_INTERRUPTS(r.5)
+#endif
+
+ ori r.3, r.10, 0 // return old IRQL
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK_ENABLED(r.3, r.11, kasrtd.10, kasrtd.15, kasrtd.20, r.5, r.6)
+#endif
+
+ DUMMY_EXIT(KeAcquireSpinLockRaiseToDpc)
+
+ SBTTL("Release Executive Spin Lock")
+//++
+//
+// VOID
+// KeReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// IN KIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function releases an executive spin lock and lowers the IRQL
+// to its previous value.
+//
+// N.B. This routine is entered at DISPATCH_LEVEL.
+//
+// Arguments:
+//
+// SpinLock (r.3) - Supplies a pointer to an executive spin lock.
+//
+// OldIrql (r.4) - Supplies the previous IRQL value.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY_S(KeReleaseSpinLock, _TEXT$01)
+
+ cmpwi r.4, DISPATCH_LEVEL // check if new IRQL < DISPATCH
+
+#if !defined(NT_UP)
+ li r.0, 0
+ RELEASE_SPIN_LOCK(r.3, r.0)
+#endif
+
+ bgelr // if target IRQL >= DISPATCH
+ // just return
+ DISABLE_INTERRUPTS(r.7,r.10)
+ lhz r.10, KiPcr+PcSoftwareInterrupt(r.0) // s/w interrupt pending?
+ stb r.4, KiPcr+PcCurrentIrql(r.0) // set target IRQL
+ cmpw cr.6, r.10, r.4
+ srwi. r.5, r.10, 8 // isolate DPC pending
+ cmpwi cr.7, r.4, APC_LEVEL // compare IRQL to APC_LEVEL
+
+//
+// Possible values for SoftwareInterrupt (r.10) are
+//
+// 0x0101 DPC and APC interrupt pending
+// 0x0100 DPC interrupt pending
+// 0x0001 APC interrupt pending
+// 0x0000 No software interrupt pending (unlikely but possible)
+//
+// Possible values for current IRQL are zero or one. By comparing
+// SoftwareInterrupt against the current IRQL (above) we can quickly see
+// if any software interrupts are valid at this time.
+//
+// Calculate correct IRQL for the interrupt we are processing. If DPC
+// then we need to be at DISPATCH_LEVEL which is one greater than APC_
+// LEVEL. r.5 contains one if we are going to run a DPC, so we add
+// APC_LEVEL to r.5 to get the desired IRQL.
+//
+
+ addi r.4, r.5, APC_LEVEL // calculate new IRQL
+
+ ble cr.6,Enable // jif no valid interrupt
+
+//
+// A software interrupt is pending and the new IRQL allows it to be
+// taken at this time. Branch directly to KxReleaseSpinLock (ctxswap.s)
+// to dispatch the interrupt. KxReleaseSpinLock returns directly to
+// KeReleaseSpinLock's caller.
+//
+// WARNING: KxReleaseSpinLock is dependent on the values in r.4, r.7, r.12
+// and condition register fields cr.0 and cr.7.
+//
+
+ b ..KxReleaseSpinLock
+
+Enable: ENABLE_INTERRUPTS(r.7)
+
+ LEAF_EXIT(KeReleaseSpinLock) // return
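+
+// For illustration, the exit logic above in C. The Sketch helpers are
+// stand-ins; SketchDispatchSoftwareInterrupt models the branch to
+// KxReleaseSpinLock, which delivers the pending DPC/APC and returns to the
+// original caller. (The real code also disables external interrupts around
+// this window.)
+
+#define SKETCH_APC_LEVEL      1
+#define SKETCH_DISPATCH_LEVEL 2
+
+extern unsigned short SketchReadSoftwareInterrupt(void);        // KiPcr->SoftwareInterrupt
+extern void SketchSetIrql(unsigned char NewIrql);               // KiPcr->CurrentIrql
+extern void SketchDispatchSoftwareInterrupt(unsigned char RunAtIrql);
+
+static void
+SketchReleaseSpinLock (
+    volatile long *SpinLock,
+    unsigned char OldIrql
+    )
+{
+    unsigned short Pending;
+
+    *SpinLock = 0;                              // release the lock (MP only)
+
+    if (OldIrql >= SKETCH_DISPATCH_LEVEL) {
+        return;                                 // nothing can be delivered yet
+    }
+
+    Pending = SketchReadSoftwareInterrupt();    // 0x0101, 0x0100, 0x0001 or 0
+    SketchSetIrql(OldIrql);                     // lower to the target IRQL
+
+    //
+    // Because the target IRQL is 0 or 1, a single compare of the packed
+    // halfword against it tells whether any pending software interrupt is
+    // deliverable, exactly as the assembly does with cr.6.
+    //
+
+    if (Pending > OldIrql) {
+        unsigned char RunAt = (Pending >> 8) ? SKETCH_DISPATCH_LEVEL
+                                             : SKETCH_APC_LEVEL;
+        SketchDispatchSoftwareInterrupt(RunAt);
+    }
+}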
+
+ SBTTL("Try To Acquire Executive Spin Lock")
+//++
+//
+// BOOLEAN
+// KeTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// OUT PKIRQL OldIrql
+// )
+//
+// Routine Description:
+//
+// This function raises the current IRQL to DISPATCH_LEVEL and attempts
+// to acquire the specified executive spinlock. If the spinlock can be
+// acquired, then TRUE is returned. Otherwise, the IRQL is restored to
+// its previous value and FALSE is returned.
+//
+// Arguments:
+//
+// SpinLock (r.3) - Supplies a pointer to an executive spinlock.
+//
+// OldIrql (r.4) - Supplies a pointer to a variable that receives the
+// previous IRQL value.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned. UP systems always succeed.
+// On MP systems we test if the lock is taken with interrupts disabled,
+// so that we don't have to check for any DPCs/APCs that could be queued
+// while we have priority at dispatch level.
+//
+// N.B. The old IRQL must be stored AFTER the lock is acquired.
+//
+//--
+
+ LEAF_ENTRY_S(KeTryToAcquireSpinLock,_TEXT$01)
+
+#if !defined(NT_UP)
+
+//
+// Try to acquire the specified spinlock.
+//
+
+ DISABLE_INTERRUPTS(r.9, r.8)
+
+ lwz r.12, KiPcr+PcCurrentThread(r.0) // addr of current thread
+
+ TRY_TO_ACQUIRE_SPIN_LOCK(r.3, r.12, r.11, ktas.10, ktas.20)
+
+#endif
+
+//
+// Raise IRQL and indicate success.
+//
+
+ lbz r.10,KiPcr+PcCurrentIrql(r.0) // get current IRQL
+ li r.0, DISPATCH_LEVEL // new IRQL = DISPATCH_LEVEL
+ li r.3, TRUE // set return value (success)
+ stb r.10, 0(r.4) // return old IRQL
+ stb r.0, KiPcr+PcCurrentIrql(r.0) // set new IRQL
+
+#if !defined(NT_UP)
+
+ ENABLE_INTERRUPTS(r.9)
+
+ blr // return
+
+//
+// The attempt to acquire the specified spin lock failed. Indicate failure.
+//
+
+ktas.20:
+ ENABLE_INTERRUPTS(r.9)
+
+ li r.3, FALSE // set return value (failure)
+
+#endif
+
+ LEAF_EXIT(KeTryToAcquireSpinLock)
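+
+// For illustration, the try-acquire variant in C, reusing the Sketch types
+// and helpers declared after KeAcquireSpinLock above:
+
+static int
+SketchTryToAcquireSpinLock (
+    SKETCH_KSPIN_LOCK *SpinLock,
+    SKETCH_KIRQL *OldIrql
+    )
+{
+    SKETCH_KIRQL Previous;
+
+    SketchDisableInterrupts();
+
+    if (!SketchTryLock(SpinLock)) {             // a single attempt, no spinning
+        SketchEnableInterrupts();
+        return 0;                               // FALSE: IRQL is left unchanged
+    }
+
+    Previous = SketchReadIrql();
+    SketchWriteIrql(SKETCH_DISPATCH_LEVEL);
+    *OldIrql = Previous;                        // stored after the lock is held
+    SketchEnableInterrupts();
+
+    return 1;                                   // TRUE: lock held at DISPATCH_LEVEL
+}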
+
+ SBTTL("Acquire Kernel Spin Lock")
+//++
+//
+// VOID
+// KiAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function acquires a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (r.3) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY_S(KiAcquireSpinLock,_TEXT$01)
+
+ ALTERNATE_ENTRY(KeAcquireSpinLockAtDpcLevel)
+
+#if !defined(NT_UP)
+ lwz r.12, KiPcr+PcCurrentThread(r.0) // addr of current thread
+
+ ACQUIRE_SPIN_LOCK(r.3, r.12, r.11, kas.20, kas.30)
+#endif
+
+ blr
+
+#if !defined(NT_UP)
+ SPIN_ON_SPIN_LOCK(r.3, r.11, kas.20, kas.30)
+#endif
+
+ DUMMY_EXIT(KiAcquireSpinLock)
+
+ SBTTL("Release Kernel Spin Lock")
+//++
+//
+// VOID
+// KiReleaseSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function releases a kernel spin lock.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (r.3) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ LEAF_ENTRY_S(KiReleaseSpinLock,_TEXT$01)
+
+ ALTERNATE_ENTRY(KeReleaseSpinLockFromDpcLevel)
+
+#if !defined(NT_UP)
+ li r.0, 0
+ RELEASE_SPIN_LOCK(r.3, r.0)
+#endif
+
+ LEAF_EXIT(KiReleaseSpinLock) // return
+
+ SBTTL("Try To Acquire Kernel Spin Lock")
+//++
+//
+// BOOLEAN
+// KiTryToAcquireSpinLock (
+// IN PKSPIN_LOCK SpinLock
+// )
+//
+// Routine Description:
+//
+// This function attempts to acquire the specified kernel spinlock. If
+// the spinlock can be acquired, then TRUE is returned. Otherwise, FALSE
+// is returned.
+//
+// N.B. This function assumes that the current IRQL is set properly.
+//
+// Arguments:
+//
+// SpinLock (r.3) - Supplies a pointer to a kernel spin lock.
+//
+// Return Value:
+//
+// If the spin lock is acquired, then a value of TRUE is returned.
+// Otherwise, a value of FALSE is returned.
+//
+//--
+
+ LEAF_ENTRY_S(KiTryToAcquireSpinLock,_TEXT$01)
+
+#if defined(NT_UP)
+
+ li r.3, TRUE // set return value (success)
+
+#else
+
+ lwz r.12, KiPcr+PcCurrentThread(r.0) // addr of current thread
+
+ TRY_TO_ACQUIRE_SPIN_LOCK(r.3, r.12, r.11, kitas.10, kitas.20)
+ li r.3, TRUE // set return value (success)
+
+ blr // return
+
+kitas.20:
+ li r.3, FALSE // set return value (failure)
+#endif
+ LEAF_EXIT(KiTryToAcquireSpinLock)
+
+
+#if !defined(NT_UP)
+#if SPINDBG
+ .struct 0
+ .space StackFrameHeaderLength
+kasdR5: .space 4 // saved R5
+kasdR6: .space 4 // saved R6
+kasdR7: .space 4 // saved R7
+kasdR8: .space 4 // saved R8
+ .align 3 // 8 byte align
+kasdFrameLength:
+
+ SPECIAL_ENTRY_S(KiAcquireSpinLockDbg,_TEXT$01)
+ stwu sp,-kasdFrameLength(sp)
+ stw r5,kasdR5(sp) // save r5
+ stw r6,kasdR6(sp) // save r6
+ stw r7,kasdR7(sp) // save r7
+ stw r8,kasdR8(sp) // save r8
+ PROLOGUE_END(KiAcquireSpinLockDbg)
+ lwz r5,[toc]KiSpinLockLimit(r.toc)
+ lwz r5,0(r5)
+ mflr r7
+ CHKBRK(r6,kasd.05)
+ CHKLOCK(r6,r3,kasd.10)
+ DBGSTORE_IRR(r6,r8,0x7711,r4,r7)
+kasd.10:
+ lwarx r6,0,r3 // load word at (r.3) and reserve
+ cmpwi r6,0 // check if locked
+ bne- kasd.20 // jif locked (predict NOT taken)
+ stwcx. r4,0,r3 // set lock owned
+ bne- kasd.20 // try again if store failed
+ isync // allow no readahead
+ CHKLOCK(r6,r3,kasd.11)
+ DBGSTORE_IRR(r6,r8,0x7712,r4,r7)
+kasd.11:
+ lwz r8,kasdR8(sp) // restore r8
+ lwz r7,kasdR7(sp) // restore r7
+ lwz r6,kasdR6(sp) // restore r6
+ lwz r5,kasdR5(sp) // restore r5
+ addi sp,sp,kasdFrameLength // deallocate stack frame
+ blr // return
+kasd.20:
+ subi r5,r5,1
+ cmpwi r5,0
+ beq- kasd.30
+kasd.25:
+ lwz r6,0(r3)
+ cmpwi r6,0
+ beq+ kasd.10
+ b kasd.20
+kasd.30:
+ //DBGSTORE_IRRR(r6,r8,0x7713,r3,r4,r7)
+ twi 31,0,0x16
+ lwz r5,[toc]KiSpinLockLimit(r.toc)
+ lwz r5,0(r5)
+ b kasd.25
+ DUMMY_EXIT(KiAcquireSpinLockDbg)
+
+ SPECIAL_ENTRY_S(KiTryToAcquireSpinLockDbg,_TEXT$01)
+ stwu sp,-kasdFrameLength(sp)
+ stw r5,kasdR5(sp) // save r5
+ stw r6,kasdR6(sp) // save r6
+ stw r7,kasdR7(sp) // save r7
+ stw r8,kasdR8(sp) // save r8
+ PROLOGUE_END(KiTryToAcquireSpinLockDbg)
+ lwz r5,[toc]KiSpinLockLimit(r.toc)
+ lwz r5,0(r5)
+ ori r6,r3,0
+ mflr r7
+ CHKBRK(r3,ktasd.05)
+ CHKLOCK(r3,r6,ktasd.10)
+ DBGSTORE_IRR(r3,r8,0x7721,r4,r7)
+ktasd.10:
+ lwarx r3,0,r6 // load word at (r.3) and reserve
+ cmpwi r3,0 // check if locked
+ bne- ktasd.40 // jif locked (predict NOT taken)
+ stwcx. r4,0,r6 // set lock owned
+ bne- ktasd.20 // try again if store failed
+ isync // allow no readahead
+ li r3,TRUE
+ktasd.15:
+ CHKLOCK(r8,r6,ktasd.16)
+ DBGSTORE_IRR(r6,r8,0x7722,r4,r7)
+ktasd.16:
+ lwz r8,kasdR8(sp) // restore r8
+ lwz r7,kasdR7(sp) // restore r7
+ lwz r6,kasdR6(sp) // restore r6
+ lwz r5,kasdR5(sp) // restore r5
+ addi sp,sp,kasdFrameLength // deallocate stack frame
+ blr // return
+ktasd.20:
+ subi r5,r5,1
+ cmpwi r5,0
+ bne+ ktasd.10
+ktasd.30:
+ //DBGSTORE_IRRR(r3,r8,0x7723,r3,r4,r7)
+ twi 31,0,0x16
+ lwz r5,[toc]KiSpinLockLimit(r.toc)
+ lwz r5,0(r5)
+ b ktasd.10
+ktasd.40:
+ li r3,FALSE
+ b ktasd.15
+ DUMMY_EXIT(KiTryToAcquireSpinLockDbg)
+#endif
+#endif
diff --git a/private/ntos/ke/ppc/table.stb b/private/ntos/ke/ppc/table.stb
new file mode 100644
index 000000000..971b68767
--- /dev/null
+++ b/private/ntos/ke/ppc/table.stb
@@ -0,0 +1,72 @@
+8 // This is the number of in register arguments
+//++
+//
+// Copyright (c) 1993 IBM Corporation and Microsoft Corporation
+//
+// Module Name:
+//
+// systable.s
+//
+// Abstract:
+//
+// This module implements the system service dispatch table.
+//
+// Author:
+//
+// Rick Simpson 16-Sep-1993
+//
+// based on MIPS version by David N. Cutler (davec) 29-Apr-1989
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+//--
+
+//
+// To add a system service simply add the name of the service to the below
+// table. If the system service has in memory arguments, then immediately
+// follow the name of the service with a comma and, following that, the number
+// of bytes of in memory arguments, e.g. CreateObject,40.
+//
+
+#define TABLE_BEGIN1( t ) .rdata
+#define TABLE_BEGIN2( t ) .align 4
+#define TABLE_BEGIN3( t ) .globl KiServiceTable
+#define TABLE_BEGIN4( t ) .long .toc
+#define TABLE_BEGIN5( t ) KiServiceTable:
+#define TABLE_BEGIN6( t )
+#define TABLE_BEGIN7( t )
+#define TABLE_BEGIN8( t )
+
+#define TABLE_ENTRY(l,bias,numargs) \
+ .extern ..Nt##l ;\
+ .word ..Nt##l+bias
+
+#define TABLE_END( n ) \
+ .data ;\
+ .globl KiServiceLimit ;\
+KiServiceLimit: ;\
+ .word n + 1
+
+#define ARGTBL_BEGIN \
+ .rdata ;\
+ .align 4 ;\
+ .globl KiArgumentTable ;\
+KiArgumentTable:
+
+#define ARGTBL_ENTRY(e0,e1,e2,e3,e4,e5,e6,e7) .byte e0,e1,e2,e3,e4,e5,e6,e7
+
+#define ARGTBL_END
+
+
+ TABLE_BEGIN1( "System Service Dispatch Table" )
+ TABLE_BEGIN2( "System Service Dispatch Table" )
+ TABLE_BEGIN3( "System Service Dispatch Table" )
+ TABLE_BEGIN4( "System Service Dispatch Table" )
+ TABLE_BEGIN5( "System Service Dispatch Table" )
+ TABLE_BEGIN6( "System Service Dispatch Table" )
+ TABLE_BEGIN7( "System Service Dispatch Table" )
+ TABLE_BEGIN8( "System Service Dispatch Table" )
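+
+// For illustration, how the dispatcher consumes these tables, in C. The
+// Sketch declarations and the exact bias encoding are assumptions for the
+// sketch; the dispatch code only documents that the low-order bit of an
+// entry flags in-memory arguments and that the bottom bits are ignored as
+// a branch address.
+
+extern unsigned long SketchServiceTable[];      // KiServiceTable: entry address + bias
+extern unsigned long SketchServiceLimit;        // KiServiceLimit: first out-of-range number
+extern unsigned char SketchArgumentTable[];     // KiArgumentTable: in-memory bytes per service
+
+typedef struct _SKETCH_SERVICE {
+    void *EntryPoint;                           // address to branch to
+    unsigned long InMemoryBytes;                // bytes to copy from the caller's frame
+} SKETCH_SERVICE;
+
+static int
+SketchLookupService (
+    unsigned long ServiceNumber,
+    SKETCH_SERVICE *Service
+    )
+{
+    unsigned long Entry;
+
+    if (ServiceNumber >= SketchServiceLimit) {
+        return 0;                               // out of range: GUI-convert or fail
+    }
+
+    Entry = SketchServiceTable[ServiceNumber];
+    Service->EntryPoint = (void *)(Entry & ~3UL);       // bottom two bits are flags
+    Service->InMemoryBytes = (Entry & 1) ? SketchArgumentTable[ServiceNumber] : 0;
+
+    return 1;
+}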
diff --git a/private/ntos/ke/ppc/threadbg.s b/private/ntos/ke/ppc/threadbg.s
new file mode 100644
index 000000000..d6843000a
--- /dev/null
+++ b/private/ntos/ke/ppc/threadbg.s
@@ -0,0 +1,219 @@
+// TITLE("Thread Startup")
+//++
+//
+// Copyright (c) 1990 Microsoft Corporation
+//
+// Module Name:
+//
+// threadbg.s
+//
+// Abstract:
+//
+// This module implements the PowerPC machine dependent code necessary to
+// startup a thread in kernel mode.
+//
+// Author:
+//
+// Peter L. Johnston (plj@vnet.ibm.com) 20-Sep-1993
+// Based on code by David N. Cutler (davec) 28-Mar-1990
+//
+// Environment:
+//
+// Kernel mode only, IRQL APC_LEVEL.
+//
+// Revision History:
+//
+//--
+
+#include "ksppc.h"
+
+ .extern ..KeBugCheck
+ .extern ..KiExceptionExit
+ .extern __imp_KeLowerIrql
+
+ SBTTL("Thread Startup")
+//++
+//
+// RoutineDescription:
+//
+// The following code is never executed. Its purpose is to allow the
+// kernel debugger to walk call frames backwards through thread startup
+// and to support get/set user context.
+//
+//--
+
+ .text // resume .text section
+
+
+ FN_TABLE(KiThreadDispatch,0,0)
+
+ DUMMY_ENTRY(KiThreadDispatch)
+
+ stwu r.sp, -STACK_DELTA (r.sp)
+ stw r.0, TrGpr0 + TF_BASE (r.sp)
+ mflr r.0
+ stw r.0, TrLr + TF_BASE (r.sp)
+ mflr r.0
+ stw r.0, EfLr (r.sp)
+ mfcr r.0
+ stw r.0, EfCr (r.sp)
+
+ stw r.2, TrGpr2 + TF_BASE(r.sp)
+ stw r.3, TrGpr3 + TF_BASE(r.sp)
+ stw r.4, TrGpr4 + TF_BASE(r.sp)
+ stw r.5, TrGpr5 + TF_BASE(r.sp)
+ stw r.6, TrGpr6 + TF_BASE(r.sp)
+ stw r.7, TrGpr7 + TF_BASE(r.sp)
+ stw r.8, TrGpr8 + TF_BASE(r.sp)
+ stw r.9, TrGpr9 + TF_BASE(r.sp)
+ stw r.10, TrGpr10 + TF_BASE(r.sp)
+ stw r.11, TrGpr11 + TF_BASE(r.sp)
+ stw r.12, TrGpr12 + TF_BASE(r.sp)
+
+ mfctr r.6 // Fixed Point Exception
+ mfxer r.7 // registers
+
+ stfd f.0, TrFpr0 + TF_BASE(r.sp) // save volatile FPRs
+ stfd f.1, TrFpr1 + TF_BASE(r.sp)
+ stfd f.2, TrFpr2 + TF_BASE(r.sp)
+ stfd f.3, TrFpr3 + TF_BASE(r.sp)
+ stfd f.4, TrFpr4 + TF_BASE(r.sp)
+ stfd f.5, TrFpr5 + TF_BASE(r.sp)
+ stfd f.6, TrFpr6 + TF_BASE(r.sp)
+ stfd f.7, TrFpr7 + TF_BASE(r.sp)
+ stfd f.8, TrFpr8 + TF_BASE(r.sp)
+ stfd f.9, TrFpr9 + TF_BASE(r.sp)
+ stfd f.10, TrFpr10 + TF_BASE(r.sp)
+ stfd f.11, TrFpr11 + TF_BASE(r.sp)
+ stfd f.12, TrFpr12 + TF_BASE(r.sp)
+ stfd f.13, TrFpr13 + TF_BASE(r.sp)
+ mffs f.0 // get Floating Point Status
+ // and Control Register (FPSCR)
+
+ stw r.6, TrCtr + TF_BASE(r.sp) // Count,
+ stw r.7, TrXer + TF_BASE(r.sp) // Fixed Point Exception,
+ stfd f.0, TrFpscr + TF_BASE(r.sp) // and FPSCR registers.
+
+ stw r.13, ExGpr13 + EF_BASE(r.sp) // save non-volatile GPRs
+ stw r.14, ExGpr14 + EF_BASE(r.sp)
+ stw r.15, ExGpr15 + EF_BASE(r.sp)
+ stw r.16, ExGpr16 + EF_BASE(r.sp)
+ stw r.17, ExGpr17 + EF_BASE(r.sp)
+ stw r.18, ExGpr18 + EF_BASE(r.sp)
+ stw r.19, ExGpr19 + EF_BASE(r.sp)
+ stw r.20, ExGpr20 + EF_BASE(r.sp)
+ stw r.21, ExGpr21 + EF_BASE(r.sp)
+ stw r.22, ExGpr22 + EF_BASE(r.sp)
+ stw r.23, ExGpr23 + EF_BASE(r.sp)
+ stw r.24, ExGpr24 + EF_BASE(r.sp)
+ stw r.25, ExGpr25 + EF_BASE(r.sp)
+ stw r.26, ExGpr26 + EF_BASE(r.sp)
+ stw r.27, ExGpr27 + EF_BASE(r.sp)
+ stw r.28, ExGpr28 + EF_BASE(r.sp)
+ stw r.29, ExGpr29 + EF_BASE(r.sp)
+ stw r.30, ExGpr30 + EF_BASE(r.sp)
+ stw r.31, ExGpr31 + EF_BASE(r.sp)
+
+ stfd f.14, ExFpr14 + EF_BASE(r.sp) // save non-volatile FPRs
+ stfd f.15, ExFpr15 + EF_BASE(r.sp)
+ stfd f.16, ExFpr16 + EF_BASE(r.sp)
+ stfd f.17, ExFpr17 + EF_BASE(r.sp)
+ stfd f.18, ExFpr18 + EF_BASE(r.sp)
+ stfd f.19, ExFpr19 + EF_BASE(r.sp)
+ stfd f.20, ExFpr20 + EF_BASE(r.sp)
+ stfd f.21, ExFpr21 + EF_BASE(r.sp)
+ stfd f.22, ExFpr22 + EF_BASE(r.sp)
+ stfd f.23, ExFpr23 + EF_BASE(r.sp)
+ stfd f.24, ExFpr24 + EF_BASE(r.sp)
+ stfd f.25, ExFpr25 + EF_BASE(r.sp)
+ stfd f.26, ExFpr26 + EF_BASE(r.sp)
+ stfd f.27, ExFpr27 + EF_BASE(r.sp)
+ stfd f.28, ExFpr28 + EF_BASE(r.sp)
+ stfd f.29, ExFpr29 + EF_BASE(r.sp)
+ stfd f.30, ExFpr30 + EF_BASE(r.sp)
+ stfd f.31, ExFpr31 + EF_BASE(r.sp)
+
+ PROLOGUE_END(KiThreadDispatch)
+
+//++
+//
+// Routine Description:
+//
+// This routine is called at thread startup. Its function is to call the
+// initial thread procedure. If control returns from the initial thread
+// procedure and a user mode context was established when the thread
+// was initialized, then the user mode context is restored and control
+// is transferred to user mode. Otherwise a bug check will occur.
+//
+// When this thread was created, a stack frame for this routine was
+// pushed onto the top of the thread's stack. Then a stack frame
+// for SwapContext was pushed onto the stack and initialized such
+// that SwapContext will return to the first instruction of this routine
+// when this thread is first switched to.
+//
+// Arguments:
+//
+// r.16 A boolean value that specifies whether a user mode thread
+// context was established when the thread was initialized.
+//
+// r.17 Starting context parameter for the initial thread.
+//
+// r.18 Starting address of the initial thread routine.
+//
+// r.19 Starting address of the initial system routine.
+//
+// r.20 Address of the (user mode) Trap Frame.
+//
+// Return Value:
+//
+// None.
+//
+//--
+
+ ALTERNATE_ENTRY(KiThreadStartup)
+
+ lwz r.sp, 0(r.sp) // unlink SwapContext's frame
+
+//
+// Pick up arguments - as this routine wasn't actually called by anything
+// we can use the non-volatile registers as we please.
+//
+
+ ori r.31, r.toc, 0 // save our TOC
+ li r.3, APC_LEVEL // lower IRQL to APC level
+ lwz r.4, [toc]__imp_KeLowerIrql(r.toc) // &&function descriptor
+ lwz r.4, 0(r.4) // &function descriptor
+ lwz r.5, 0(r.4) // &KeLowerIrql
+ lwz r.toc, 4(r.4) // HAL's TOC
+ mtctr r.5
+ bctrl
+ ori r.toc, r.31, 0 // restore our TOC
+
+ mtctr r.19 // set address of system routine
+ ori r.3, r.18, 0 // set address of thread routine
+ ori r.4, r.17, 0 // set startup context parameter
+ bctrl // call system startup routine
+ cmpwi r.16, 0 // check if user context
+ beq kts10 // jif none
+
+//
+// Finish in common exception exit code which will restore the nonvolatile
+// registers and exit to user mode.
+//
+
+ ori r.4, r.20, 0 // set trap frame address
+ addi r.3, r.20, TrapFrameLength // deduce exception frame addr
+
+ b ..KiExceptionExit // finish in exception exit code
+
+//
+// An attempt was made to enter user mode for a thread that has no user mode
+// context. Generate a bug check.
+//
+kts10:
+ li r.3,NO_USER_MODE_CONTEXT // set bug check code
+ bl ..KeBugCheck // call bug check routine
+
+KiThreadDispatch.end:
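+
+// For illustration, the startup sequence above in C-like form. The Sketch
+// types and helpers are stand-ins; the real routine receives its
+// "arguments" in non-volatile registers that SwapContext loaded from the
+// frame built by KiInitializeContextThread.
+
+typedef void (*SKETCH_START_ROUTINE)(void *StartContext);
+typedef void (*SKETCH_SYSTEM_ROUTINE)(SKETCH_START_ROUTINE StartRoutine, void *StartContext);
+
+extern void SketchLowerIrqlToApcLevel(void);        // KeLowerIrql(APC_LEVEL) through the HAL
+extern void SketchExceptionExit(void *TrapFrame);   // models ..KiExceptionExit
+extern void SketchBugCheck(unsigned long Code);     // models ..KeBugCheck
+
+#define SKETCH_NO_USER_MODE_CONTEXT 0x0E            // NO_USER_MODE_CONTEXT bug check code
+
+static void
+SketchThreadStartup (
+    void *UserContext,                              // r.16: non-zero if a user context exists
+    void *StartContext,                             // r.17
+    SKETCH_START_ROUTINE StartRoutine,              // r.18
+    SKETCH_SYSTEM_ROUTINE SystemRoutine,            // r.19
+    void *TrapFrame                                 // r.20
+    )
+{
+    SketchLowerIrqlToApcLevel();                    // drop from the context-switch IRQL
+
+    SystemRoutine(StartRoutine, StartContext);      // normally does not return for system threads
+
+    if (UserContext != 0) {
+        SketchExceptionExit(TrapFrame);             // restore state and continue in user mode
+    } else {
+        SketchBugCheck(SKETCH_NO_USER_MODE_CONTEXT);
+    }
+}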
+
+
diff --git a/private/ntos/ke/ppc/thredini.c b/private/ntos/ke/ppc/thredini.c
new file mode 100644
index 000000000..fc23c88a7
--- /dev/null
+++ b/private/ntos/ke/ppc/thredini.c
@@ -0,0 +1,342 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ thredini.c
+
+Abstract:
+
+ This module implements the machine dependent functions to set the initial
+ context and data alignment handling mode of a thread object.
+
+Author:
+
+ David N. Cutler (davec) 1-Apr-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+ Sep 19, 1993 plj Conversion to IBM PowerPC
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macros are used to check that an input object is
+// really the proper type.
+//
+
+#define ASSERT_PROCESS(E) { \
+ ASSERT((E)->Header.Type == ProcessObject); \
+}
+
+#define ASSERT_THREAD(E) { \
+ ASSERT((E)->Header.Type == ThreadObject); \
+}
+
+
+VOID
+KiInitializeContextThread (
+ IN PKTHREAD Thread,
+ IN PKSYSTEM_ROUTINE SystemRoutine,
+ IN PKSTART_ROUTINE StartRoutine OPTIONAL,
+ IN PVOID StartContext OPTIONAL,
+ IN PCONTEXT ContextRecord OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes the machine dependent context of a thread object.
+
+ Actually, what it does is to lay out the stack for the thread so that
+ it contains a stack frame that will be picked up by SwapContext and
+ returned thru, resulting in a transfer of control to KiThreadStartup.
+ In other words, we lay out a stack with a stack frame that looks as if
+ SwapContext had been called just before the first instruction in
+ KiThreadStartup.
+
+ N.B. This function does not check the accessibility of the context record.
+ It is assumed that the caller of this routine is either prepared to
+ handle access violations or has probed and copied the context record
+ as appropriate.
+
+ N.B. Arguments to the new thread are passed in the Swap Frame gprs
+ 16 thru 20 which will be loaded into the thread's gprs when
+ execution begins.
+
+ WARNING: If the thread has a user mode context the Top of stack MUST
+ be laid out identically to the top of stack laid out when an
+ interrupt or system call from user mode occurs.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ SystemRoutine - Supplies a pointer to the system function that is to be
+ called when the thread is first scheduled for execution.
+
+ StartRoutine - Supplies an optional pointer to a function that is to be
+ called after the system has finished initializing the thread. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ StartContext - Supplies an optional pointer to an arbitrary data structure
+ which will be passed to the StartRoutine as a parameter. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ ContextRecord - Supplies an optional pointer to a context frame which contains
+ the initial user mode state of the thread. This parameter is specified
+ if the thread is a user thread and will execute in user mode.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKEXCEPTION_FRAME ExFrame;
+ ULONG InitialStack;
+ PKTRAP_FRAME TrFrame;
+ PKSWAP_FRAME SwFrame;
+ PSTACK_FRAME_HEADER StkFrame, ResumeFrame;
+
+ //
+ // If the initial stack address is in KSEG0, then the stack is not mapped
+ // and the kernel stack PTEs are set to zero. Otherwise, capture the PTEs
+ // that map the kernel stack.
+ //
+
+ InitialStack = (LONG)Thread->InitialStack;
+
+ //
+ // If a context frame is specified, then initialize a trap frame and
+ // and an exception frame with the specified user mode context.
+ //
+
+ if (ARGUMENT_PRESENT(ContextRecord)) {
+ ExFrame = (PKEXCEPTION_FRAME)(InitialStack - 8 -
+ sizeof(KEXCEPTION_FRAME));
+ TrFrame = (PKTRAP_FRAME)((ULONG)ExFrame - sizeof(KTRAP_FRAME));
+ StkFrame = (PSTACK_FRAME_HEADER)((ULONG)TrFrame - (8 * sizeof(ULONG)) -
+ sizeof(STACK_FRAME_HEADER));
+
+ //
+ // Zero the exception and trap frames and copy information from the
+ // specified context frame to the trap and exception frames.
+ //
+
+ RtlZeroMemory((PVOID)ExFrame, sizeof(KEXCEPTION_FRAME));
+ RtlZeroMemory((PVOID)TrFrame, sizeof(KTRAP_FRAME));
+ KeContextToKframes(TrFrame, ExFrame,
+ ContextRecord,
+ ContextRecord->ContextFlags | CONTEXT_CONTROL,
+ UserMode);
+
+ //
+ // Set the saved previous processor mode in the trap frame and the
+ // previous processor mode in the thread object to user mode.
+ //
+
+ TrFrame->PreviousMode = UserMode;
+ Thread->PreviousMode = UserMode;
+
+ } else {
+
+ StkFrame = (PSTACK_FRAME_HEADER)((InitialStack -
+ sizeof(STACK_FRAME_HEADER)) & 0xfffffff0);
+ ExFrame = NULL;
+ TrFrame = NULL;
+
+ //
+ // Set the previous mode in thread object to kernel.
+ //
+
+ Thread->PreviousMode = KernelMode;
+ }
+
+ StkFrame->BackChain = (ULONG)0;
+
+ //
+ // Initialize the Swap Frame that swap context will use to
+ // initiate execution of this thread.
+ //
+
+ SwFrame = (PKSWAP_FRAME)(((ULONG)StkFrame -
+ sizeof(KSWAP_FRAME)) & 0xfffffff0);
+
+ //
+ // Initialize stack frame and set thread start up parameters.
+ //
+
+ if (ExFrame == NULL) {
+ SwFrame->ExceptionFrame.Gpr20 = (ULONG)ExFrame;
+ } else {
+ SwFrame->ExceptionFrame.Gpr20 = (ULONG)TrFrame;
+ }
+
+ SwFrame->ExceptionFrame.Gpr16 = (ULONG)ContextRecord;
+ SwFrame->ExceptionFrame.Gpr17 = (ULONG)StartContext;
+ SwFrame->ExceptionFrame.Gpr18 = (ULONG)StartRoutine;
+
+ //
+ // Pass the actual entry point addresses rather than the function
+ // descriptor's address.
+ //
+
+ SwFrame->ExceptionFrame.Gpr19 = *(ULONG *)SystemRoutine;
+
+ SwFrame->SwapReturn = *(ULONG *)KiThreadStartup;
+ SwFrame->ConditionRegister = 0;
+
+ //
+ // Buy a stack frame so we have the stack frame that will be
+ // current when SwapContext is running when we resume this
+ // thread.
+
+
+ ResumeFrame = ((PSTACK_FRAME_HEADER)SwFrame) - 1;
+
+ ResumeFrame->BackChain = (ULONG)StkFrame;
+
+ //
+ // Set the initial kernel stack pointer.
+ //
+
+ Thread->KernelStack = (PVOID)ResumeFrame;
+ ASSERT(!((ULONG)ResumeFrame & 0x7)); // ensure the stack is 8-byte aligned
+ return;
+}
+
+BOOLEAN
+KeSetAutoAlignmentProcess (
+ IN PKPROCESS Process,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ process and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the process. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_PROCESS(Process);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Process->AutoAlignment;
+ Process->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
+
+BOOLEAN
+KeSetAutoAlignmentThread (
+ IN PKTHREAD Thread,
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the data alignment handling mode for the specified
+ thread and returns the previous data alignment handling mode.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Enable - Supplies a boolean value that determines the handling of data
+ alignment exceptions for the thread. A value of TRUE causes all
+ data alignment exceptions to be automatically handled by the kernel.
+ A value of FALSE causes all data alignment exceptions to be actually
+ raised as exceptions.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions were
+ previously automatically handled by the kernel. Otherwise, a value
+ of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ BOOLEAN Previous;
+
+ ASSERT_THREAD( Thread );
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the previous data alignment handling mode and set the
+ // specified data alignment mode.
+ //
+
+ Previous = Thread->AutoAlignment;
+ Thread->AutoAlignment = Enable;
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous data alignment mode.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Previous;
+}
diff --git a/private/ntos/ke/ppc/timindex.s b/private/ntos/ke/ppc/timindex.s
new file mode 100644
index 000000000..9f329b523
--- /dev/null
+++ b/private/ntos/ke/ppc/timindex.s
@@ -0,0 +1,175 @@
+// TITLE("Compute Timer Table Index")
+//++
+//
+// Copyright (c) 1993 Microsoft Corporation
+//
+// Module Name:
+//
+// timindex.s
+//
+// Abstract:
+//
+// This module implements the code necessary to compute the timer table
+// index for a timer.
+//
+// Author:
+//
+// David N. Cutler (davec) 17-May-1993
+//
+// Environment:
+//
+// Kernel mode only.
+//
+// Revision History:
+//
+// Sep 19, 1993 plj Conversion to IBM PowerPC
+//
+//--
+
+#include "ksppc.h"
+
+//
+// Define external variables that can be addressed via the TOC.
+//
+
+ .extern KiTimeIncrementReciprocal
+ .extern KiTimeIncrementShiftCount
+
+ SBTTL("Compute Timer Table Index")
+//++
+//
+// ULONG
+// KiComputeTimerTableIndex (
+// IN LARGE_INTEGER Interval,
+// IN LARGE_INTEGER CurrentTime,
+// IN PKTIMER Timer
+// )
+//
+// Routine Description:
+//
+// This function computes the timer table index for the specified timer
+// object and stores the due time in the timer object.
+//
+// N.B. The interval parameter is guaranteed to be negative since it is
+// expressed as relative time.
+//
+// The formula for due time calculation is:
+//
+// Due Time = Current time - Interval
+//
+// The formula for the index calculation is:
+//
+// Index = (Due Time / Maximum Time) & (Table Size - 1)
+//
+// The due time division is performed using reciprocal multiplication.
+//
+// Arguments:
+//
+// Interval (r.3, r.4) - Supplies the relative time at which the timer is
+// to expire.
+//
+// CurrentTime (r.5, r.6) - Supplies the current interrupt time.
+//
+// Timer (r.7) - Supplies a pointer to a dispatch object of type timer.
+//
+// Return Value:
+//
+// The time table index is returned as the function value and the due
+// time is stored in the timer object.
+//
+//--
+
+ LEAF_ENTRY(KiComputeTimerTableIndex)
+
+// Get addresses of KiTimeIncrementReciprocal and KiTimeIncrementShiftCount
+
+ lwz r.11, [toc]KiTimeIncrementReciprocal(r.toc)
+ lwz r.10, [toc]KiTimeIncrementShiftCount(r.toc)
+
+ lwz r.12, 4(r.11) // get high part of magic divisor
+ lwz r.11, 0(r.11) // get low part of magic divisor
+
+// Calculate DueTime = CurrentTime - Interval, result in {r.3,r.4}
+
+ subfc r.3, r.3, r.5 // subtract low parts (with carry)
+ subfe r.4, r.4, r.6 // subtract high parts (using carry)
+
+ lbz r.10, 0(r.10) // get shift count
+
+ stw r.3, TiDueTime(r.7) // set due time of timer object
+ stw r.4, TiDueTime+4(r.7)
+
+//
+// Compute low 32-bits of dividend times low 32-bits of divisor.
+//
+ mulhwu r.0, r.3, r.11
+
+//
+// Compute low 32-bits of dividend times high 32-bits of divisor.
+//
+ mullw r.6, r.3, r.12 // low 32-bits to r.6
+ mulhwu r.7, r.3, r.12 // high 32-bits to r.7
+
+//
+// Compute high 32-bits of dividend times low 32-bits of divisor.
+//
+ mullw r.8, r.4, r.11 // low 32-bits to r.8
+ mulhwu r.9, r.4, r.11 // high 32-bits to r.9
+
+//
+// Compute high 32-bits of dividend times high 32-bits of divisor.
+//
+ mullw r.3, r.4, r.12 // low 32-bits to r.3
+ mulhwu r.4, r.4, r.12 // high 32-bits to r.4
+
+//
+// On the grounds that I can't do more than double precision arithmetic
+// without visual aids, I will attempt to draw a picture of what is
+// going on here ... my apologies to those who don't need the picture.
+//
+//
+// _________________________________________________________________
+// | | | | |
+// | | | Due Time Low * Recip Low |
+// | | | r.0 - |
+// |---------------------------------------------------------------|
+// | | Due Time Low * Recip High | |
+// | | r.7 r.6 | |
+// |------------------------------------------------ |
+// | | Due Time High * Recip Low | |
+// | | r.9 r.8 | |
+// |------------------------------------------------ |
+// | Due Time High * Recip High | | |
+// | r.4 r.3 | | |
+// |---------------------------------------------------------------|
+// | | | | |
+// | x | y | z | |
+// |----------------------------------------------------------------
+//
+
+//
+// Add partial results to form high 64-bits of result.
+//
+// (add 3 low parts together (r.0, r.6 and r.8) generating carries.
+// the carries are added to the sum of the high parts (r.7, r.9 and r.3),
+// the sum of the low parts is discarded).
+//
+ addc r.0, r.6, r.0 // z = r.0 + r.6
+ adde r.7, r.9, r.7 // y = r.7 + r.9 + carry from z
+ addze r.4, r.4 // x += carry from y
+ addc r.0, r.8, r.0 // z += r.8
+ adde r.3, r.3, r.7 // y += r.3 + carry from z
+ addze r.4, r.4 // x += carry from y
+
+//
+// Combine low part of x with high part of y
+//
+// N.B. It is assumed that the shift count is less than 32-bits and not zero.
+//
+ subfic r.11, r.10, 32 // compute left shift count
+ srw r.3, r.3, r.10 // get high part of low part
+ slw r.4, r.4, r.11 // get low part of high word
+ or r.3, r.4, r.3 // combine upper low and lower high
+ rlwinm r.3, r.3, 0, TIMER_TABLE_SIZE - 1
+
+ LEAF_EXIT(KiComputeTimerTableIndex)
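+
+// For reference, a plain C sketch of the computation above (illustrative
+// only, not part of the build).  It performs the due time division with an
+// ordinary 64-bit divide; the assembly instead multiplies by the reciprocal
+// in KiTimeIncrementReciprocal and shifts by KiTimeIncrementShiftCount.
+// "MaximumTime", the time span covered by one timer table slot, is an
+// assumed parameter of the sketch.
+//
+//     ULONG
+//     SketchComputeTimerTableIndex (
+//         IN LARGE_INTEGER Interval,      // negative relative time
+//         IN LARGE_INTEGER CurrentTime,   // current interrupt time
+//         IN ULONGLONG MaximumTime,       // 100ns units per table slot
+//         IN PKTIMER Timer
+//         )
+//     {
+//         ULONGLONG DueTime;
+//
+//         DueTime = (ULONGLONG)CurrentTime.QuadPart -
+//                       (ULONGLONG)Interval.QuadPart;
+//         Timer->DueTime.QuadPart = DueTime;
+//         return (ULONG)((DueTime / MaximumTime) & (TIMER_TABLE_SIZE - 1));
+//     }
+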
diff --git a/private/ntos/ke/ppc/vdm.c b/private/ntos/ke/ppc/vdm.c
new file mode 100644
index 000000000..cc36cc6e0
--- /dev/null
+++ b/private/ntos/ke/ppc/vdm.c
@@ -0,0 +1,54 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ VDM.C
+
+Abstract:
+
+    This module contains stubs for the x86-only APIs NtInitializeVDM and
+    NtVdmStartExecution.
+
+Author:
+
+ Dave Hastings (daveh) 2 Apr 1991
+
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+
+
+NTSTATUS
+NtInitializeVDM(
+ VOID
+ )
+{
+ return STATUS_SUCCESS;
+}
+
+NTSTATUS
+NtVdmStartExecution (
+    VOID
+    )
+
+/*++
+
+Routine Description:
+
+    This routine returns STATUS_NOT_IMPLEMENTED.
+
+Arguments:
+
+    None.
+
+Return Value:
+
+    STATUS_NOT_IMPLEMENTED
+
+--*/
+{
+
+ return STATUS_NOT_IMPLEMENTED;
+
+}
diff --git a/private/ntos/ke/procobj.c b/private/ntos/ke/procobj.c
new file mode 100644
index 000000000..850918b1b
--- /dev/null
+++ b/private/ntos/ke/procobj.c
@@ -0,0 +1,858 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ procobj.c
+
+Abstract:
+
+ This module implements the machine independent functions to manipulate
+    the kernel process object. Functions are provided to initialize, attach,
+ detach, exclude, include, and set the base priority of process objects.
+
+Author:
+
+ David N. Cutler (davec) 7-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGE, KeInitializeProcess)
+#endif
+
+
+//
+// Define forward referenced function prototypes.
+//
+
+VOID
+KiAttachProcess (
+ IN PKPROCESS Process,
+ IN KIRQL OldIrql
+ );
+
+VOID
+KiMoveApcState (
+ IN PKAPC_STATE Source,
+ OUT PKAPC_STATE Destination
+ );
+
+//
+// The following assert macro is used to check that an input process is
+// really a kprocess and not something else, like deallocated pool.
+//
+
+#define ASSERT_PROCESS(E) { \
+ ASSERT((E)->Header.Type == ProcessObject); \
+}
+
+
+VOID
+KeInitializeProcess (
+ IN PRKPROCESS Process,
+ IN KPRIORITY BasePriority,
+ IN KAFFINITY Affinity,
+ IN ULONG DirectoryTableBase[2],
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel process object. The base priority,
+ affinity, and page frame numbers for the process page table directory
+ and hyper space are stored in the process object.
+
+ N.B. It is assumed that the process object is zeroed.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ BasePriority - Supplies the base priority of the process.
+
+ Affinity - Supplies the set of processors on which children threads
+ of the process can execute.
+
+    DirectoryTableBase - Supplies a pointer to an array whose first element
+ is the value that is to be loaded into the Directory Table Base
+ register when a child thread is dispatched for execution and whose
+ second element contains the page table entry that maps hyper space.
+
+ Enable - Supplies a boolean value that determines the default
+ handling of data alignment exceptions for child threads. A value
+ of TRUE causes all data alignment exceptions to be automatically
+ handled by the kernel. A value of FALSE causes all data alignment
+ exceptions to be actually raised as exceptions.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize the standard dispatcher object header and set the initial
+ // signal state of the process object.
+ //
+
+ Process->Header.Type = ProcessObject;
+ Process->Header.Size = sizeof(KPROCESS) / sizeof(LONG);
+ InitializeListHead(&Process->Header.WaitListHead);
+
+ //
+ // Initialize the base priority, affinity, directory table base values,
+ // autoalignment, and stack count.
+ //
+ // N.B. The distinguished value MAXSHORT is used to signify that no
+ // threads have been created for the process.
+ //
+
+ Process->BasePriority = (SCHAR)BasePriority;
+ Process->Affinity = Affinity;
+ Process->AutoAlignment = Enable;
+ Process->DirectoryTableBase[0] = DirectoryTableBase[0];
+ Process->DirectoryTableBase[1] = DirectoryTableBase[1];
+ Process->StackCount = MAXSHORT;
+
+ //
+ // Initialize the stack count, profile listhead, ready queue list head,
+ // accumulated runtime, process quantum, thread quantum, and thread list
+ // head.
+ //
+
+ InitializeListHead(&Process->ProfileListHead);
+ InitializeListHead(&Process->ReadyListHead);
+ InitializeListHead(&Process->ThreadListHead);
+ Process->ThreadQuantum = THREAD_QUANTUM;
+
+ //
+ // Initialize the process state and set the thread processor selection
+ // seed.
+ //
+
+ Process->State = ProcessInMemory;
+ Process->ThreadSeed = (UCHAR)KiQueryLowTickCount();
+
+ //
+    // Initialize i386 specific fields of the process object.
+ //
+
+#ifdef i386
+
+ //
+ // Initialize IopmBase and Iopl flag for this process (i386 only)
+ //
+
+ Process->IopmOffset = KiComputeIopmOffset(IO_ACCESS_MAP_NONE);
+
+#endif
+
+ return;
+}
+
+VOID
+KeAttachProcess (
+ IN PRKPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This function attaches a thread to a target process' address space.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+
+ ASSERT_PROCESS(Process);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Attach target process.
+ //
+
+ KiAttachProcess(Process, OldIrql);
+ return;
+}
+
+BOOLEAN
+KeTryToAttachProcess (
+ IN PRKPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This function tries to attach a thread to a target process' address
+ space. If the target process is in memory or out of memory, then the
+ target process is attached. Otherwise, it is not attached.
+
+ N.B. If the target process state is out of memory, then the caller
+ must have all pages for the process in memory. This function is
+ intended for use by the memory management system.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+Return Value:
+
+    If the target process state is not in transition, then the target
+    process is attached and a value of TRUE is returned. Otherwise, a
+ value of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+
+ ASSERT_PROCESS(Process);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the target process state is not in transition, then set the
+ // target process state to in memory, attach the process, and return
+ // a value of TRUE. Otherwise, unlock the dispatcher database and
+ // return a value of FALSE.
+ //
+
+ if (Process->State != ProcessInTransition) {
+ Process->State = ProcessInMemory;
+ KiAttachProcess(Process, OldIrql);
+ return TRUE;
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ return FALSE;
+ }
+}
+
+VOID
+KeDetachProcess (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function detaches a thread from another process' address space.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ Thread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+    // If the current thread is attached to another process' address space,
+    // then detach it.
+ //
+
+ if (Thread->ApcStateIndex != 0) {
+
+ //
+ // Check if a kernel APC is in progress, the kernel APC queue is
+ // not empty, or the user APC queue is not empty. If any of these
+ // conditions are true, then call bug check.
+ //
+
+#if DBG
+
+ if ((Thread->ApcState.KernelApcInProgress) ||
+ (IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]) == FALSE) ||
+ (IsListEmpty(&Thread->ApcState.ApcListHead[UserMode]) == FALSE)) {
+ KeBugCheck(INVALID_PROCESS_DETACH_ATTEMPT);
+ }
+
+#endif
+
+ //
+ // Unbias current process stack count and check if the process should
+ // be swapped out of memory.
+ //
+
+ Process = Thread->ApcState.Process;
+ Process->StackCount -= 1;
+ if (Process->StackCount == 0) {
+ Process->State = ProcessInTransition;
+ InsertTailList(&KiProcessOutSwapListHead, &Process->SwapListEntry);
+ KiSwapEvent.Header.SignalState = 1;
+ if (IsListEmpty(&KiSwapEvent.Header.WaitListHead) == FALSE) {
+ KiWaitTest(&KiSwapEvent, BALANCE_INCREMENT);
+ }
+ }
+
+ //
+ // Restore APC state and check whether the kernel APC queue contains
+ // an entry. If the kernel APC queue contains an entry then set kernel
+ // APC pending and request a software interrupt at APC_LEVEL.
+ //
+
+ KiMoveApcState(&Thread->SavedApcState, &Thread->ApcState);
+ Thread->SavedApcState.Process = (PKPROCESS)NULL;
+ Thread->ApcStatePointer[0] = &Thread->ApcState;
+ Thread->ApcStatePointer[1] = &Thread->SavedApcState;
+ Thread->ApcStateIndex = 0;
+ if (IsListEmpty(&Thread->ApcState.ApcListHead[KernelMode]) == FALSE) {
+ Thread->ApcState.KernelApcPending = TRUE;
+ KiRequestSoftwareInterrupt(APC_LEVEL);
+ }
+
+ //
+ // Swap the address space back to the parent process.
+ //
+
+ KiSwapProcess(Thread->ApcState.Process, Process);
+
+ }
+
+ //
+ // Lower IRQL to its previous value and return.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
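+
+// A minimal usage sketch (illustrative only): the attach/detach pairing that
+// KeAttachProcess and KeDetachProcess are designed for.  "TargetProcess" is
+// assumed to be a valid, referenced process pointer obtained elsewhere.
+//
+//     VOID
+//     SketchTouchTargetAddressSpace (
+//         IN PRKPROCESS TargetProcess
+//         )
+//     {
+//         KeAttachProcess(TargetProcess);
+//
+//         // Access virtual addresses that are valid in TargetProcess here.
+//
+//         KeDetachProcess();
+//     }
+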
+
+LONG
+KeReadStateProcess (
+ IN PRKPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This function reads the current signal state of a process object.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+Return Value:
+
+ The current signal state of the process object.
+
+--*/
+
+{
+
+ ASSERT_PROCESS(Process);
+
+ //
+ // Return current signal state of process object.
+ //
+
+ return Process->Header.SignalState;
+}
+
+LONG
+KeSetProcess (
+ IN PRKPROCESS Process,
+ IN KPRIORITY Increment,
+ IN BOOLEAN Wait
+ )
+
+/*++
+
+Routine Description:
+
+    This function sets the signal state of a process object to Signaled
+ and attempts to satisfy as many Waits as possible. The previous
+ signal state of the process object is returned as the function value.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ Increment - Supplies the priority increment that is to be applied
+ if setting the process causes a Wait to be satisfied.
+
+ Wait - Supplies a boolean value that signifies whether the call to
+ KeSetProcess will be immediately followed by a call to one of the
+ kernel Wait functions.
+
+Return Value:
+
+ The previous signal state of the process object.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG OldState;
+ PRKTHREAD Thread;
+
+ ASSERT_PROCESS(Process);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the previous state of the process object is Not-Signaled and
+ // the wait queue is not empty, then satisfy as many Waits as
+ // possible.
+ //
+
+ OldState = Process->Header.SignalState;
+ Process->Header.SignalState = 1;
+ if ((OldState == 0) && (!IsListEmpty(&Process->Header.WaitListHead))) {
+ KiWaitTest(Process, Increment);
+ }
+
+ //
+ // If the value of the Wait argument is TRUE, then return to the
+ // caller with IRQL raised and the dispatcher database locked. Else
+ // release the dispatcher database lock and lower IRQL to its
+ // previous value.
+ //
+
+ if (Wait) {
+ Thread = KeGetCurrentThread();
+ Thread->WaitNext = Wait;
+ Thread->WaitIrql = OldIrql;
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+
+ //
+ // Return previous signal state of process object.
+ //
+
+ return OldState;
+}
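+
+// A minimal usage sketch (illustrative only): the Wait parameter lets a
+// caller signal the process object and then wait on another object as one
+// atomic sequence, without dropping the dispatcher database lock in between.
+// "Process" and "WaitObject" are assumed to be supplied by the caller.
+//
+//     KeSetProcess(Process, 0, TRUE);
+//     KeWaitForSingleObject(WaitObject, Executive, KernelMode, FALSE, NULL);
+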
+
+KPRIORITY
+KeSetPriorityProcess (
+ IN PKPROCESS Process,
+ IN KPRIORITY NewBase
+ )
+
+/*++
+
+Routine Description:
+
+    This function sets the base priority of a process to a new value
+ and adjusts the priority and base priority of all child threads
+ as appropriate.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ NewBase - Supplies the new base priority of the process.
+
+Return Value:
+
+ The previous base priority of the process.
+
+--*/
+
+{
+
+ KPRIORITY Adjustment;
+ PLIST_ENTRY NextEntry;
+ KPRIORITY NewPriority;
+ KIRQL OldIrql;
+ KPRIORITY OldBase;
+ PKTHREAD Thread;
+
+ ASSERT_PROCESS(Process);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Save the current process base priority, set the new process base
+ // priority, compute the adjustment value, and adjust the priority
+ // and base priority of all child threads as appropriate.
+ //
+
+ OldBase = Process->BasePriority;
+ Process->BasePriority = (SCHAR)NewBase;
+ Adjustment = NewBase - OldBase;
+ NextEntry = Process->ThreadListHead.Flink;
+ if (NewBase >= LOW_REALTIME_PRIORITY) {
+ while (NextEntry != &Process->ThreadListHead) {
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, ThreadListEntry);
+
+ //
+ // Compute the new base priority of the thread.
+ //
+
+ NewPriority = Thread->BasePriority + Adjustment;
+
+ //
+ // If the new base priority is outside the realtime class,
+ // then limit the change to the realtime class.
+ //
+
+ if (NewPriority < LOW_REALTIME_PRIORITY) {
+ NewPriority = LOW_REALTIME_PRIORITY;
+
+ } else if (NewPriority > HIGH_PRIORITY) {
+ NewPriority = HIGH_PRIORITY;
+ }
+
+ //
+ // Set the base priority and the current priority of the
+ // thread to the computed value and reset the thread quantum.
+ //
+            // N.B. If priority saturation occurred the last time the thread
+ // base priority was set and the new process base priority
+ // is not crossing from variable to realtime, then the thread
+ // priority is not changed.
+ //
+
+ if ((Thread->Saturation == FALSE) || (OldBase < LOW_REALTIME_PRIORITY)) {
+ Thread->BasePriority = (SCHAR)NewPriority;
+ Thread->Quantum = Process->ThreadQuantum;
+ Thread->DecrementCount = 0;
+ Thread->PriorityDecrement = 0;
+ Thread->Saturation = FALSE;
+ KiSetPriorityThread(Thread, NewPriority);
+ }
+
+ NextEntry = NextEntry->Flink;
+ }
+
+ } else {
+ while (NextEntry != &Process->ThreadListHead) {
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, ThreadListEntry);
+
+ //
+ // Compute the new base priority of the thread.
+ //
+
+ NewPriority = Thread->BasePriority + Adjustment;
+
+ //
+ // If the new base priority is outside the variable class,
+ // then limit the change to the variable class.
+ //
+
+ if (NewPriority >= LOW_REALTIME_PRIORITY) {
+ NewPriority = LOW_REALTIME_PRIORITY - 1;
+
+ } else if (NewPriority <= LOW_PRIORITY) {
+ NewPriority = 1;
+ }
+
+ //
+ // Set the base priority and the current priority of the
+ // thread to the computed value and reset the thread quantum.
+ //
+            // N.B. If priority saturation occurred the last time the thread
+ // base priority was set and the new process base priority
+ // is not crossing from realtime to variable, then the thread
+ // priority is not changed.
+ //
+
+ if ((Thread->Saturation == FALSE) || (OldBase >= LOW_REALTIME_PRIORITY)) {
+ Thread->BasePriority = (SCHAR)NewPriority;
+ Thread->Quantum = Process->ThreadQuantum;
+ Thread->DecrementCount = 0;
+ Thread->PriorityDecrement = 0;
+ Thread->Saturation = FALSE;
+ KiSetPriorityThread(Thread, NewPriority);
+ }
+
+ NextEntry = NextEntry->Flink;
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return previous process base priority
+ //
+
+ return OldBase;
+}
+
+VOID
+KiAttachProcess (
+ IN PKPROCESS Process,
+ IN KIRQL OldIrql
+ )
+
+/*++
+
+Routine Description:
+
+ This function attaches a thread to a target process' address space.
+
+ N.B. The dispatcher database lock must be held when this routine is
+ called.
+
+Arguments:
+
+ Process - Supplies a pointer to a dispatcher object of type process.
+
+ OldIrql - Supplies the previous IRQL.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRKTHREAD Thread;
+ KAFFINITY Processor;
+
+ //
+ // Get the address of the current thread object.
+ //
+
+ Thread = KeGetCurrentThread();
+
+ //
+ // Check whether there is already a process address space attached or
+ // the thread is executing a DPC. If either condition is true, then call
+ // bug check.
+ //
+
+ if (Process != Thread->ApcState.Process) {
+ if ((Thread->ApcStateIndex != 0) ||
+ (KeIsExecutingDpc() != FALSE)) {
+ KeBugCheckEx(
+ INVALID_PROCESS_ATTACH_ATTEMPT,
+ (ULONG)Process,
+ (ULONG)Thread->ApcState.Process,
+ (ULONG)Thread->ApcStateIndex,
+ (ULONG)KeIsExecutingDpc()
+ );
+ }
+ }
+
+ //
+ // If the target process is the same as the current process, then
+ // there is no need to attach the address space. Otherwise, attach
+ // the current thread to the target thread address space.
+ //
+
+ if (Process == Thread->ApcState.Process) {
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ } else {
+
+ //
+ // Bias the stack count of the target process to signify that a
+ // thread exists in that process with a stack that is resident.
+ //
+
+ Process->StackCount += 1;
+
+ //
+        // Save the current APC state and initialize a new APC state.
+ //
+
+ KiMoveApcState(&Thread->ApcState, &Thread->SavedApcState);
+ InitializeListHead(&Thread->ApcState.ApcListHead[KernelMode]);
+ InitializeListHead(&Thread->ApcState.ApcListHead[UserMode]);
+ Thread->ApcState.Process = Process;
+ Thread->ApcState.KernelApcInProgress = FALSE;
+ Thread->ApcState.KernelApcPending = FALSE;
+ Thread->ApcState.UserApcPending = FALSE;
+ Thread->ApcStatePointer[0] = &Thread->SavedApcState;
+ Thread->ApcStatePointer[1] = &Thread->ApcState;
+ Thread->ApcStateIndex = 1;
+
+ //
+ // If the target process is in memory, then immediately enter the
+ // new address space by loading a new Directory Table Base. Otherwise,
+ // insert the current thread in the target process ready list, inswap
+        // the target process if necessary, select a new thread to run on
+        // the current processor, and context switch to the new thread.
+ //
+
+ if (Process->State == ProcessInMemory) {
+ KiSwapProcess(Process, Thread->SavedApcState.Process);
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ } else {
+ Thread->State = Ready;
+ Thread->ProcessReadyQueue = TRUE;
+ InsertTailList(&Process->ReadyListHead, &Thread->WaitListEntry);
+ if (Process->State == ProcessOutOfMemory) {
+ Process->State = ProcessInTransition;
+ InsertTailList(&KiProcessInSwapListHead, &Process->SwapListEntry);
+ KiSwapEvent.Header.SignalState = 1;
+ if (IsListEmpty(&KiSwapEvent.Header.WaitListHead) == FALSE) {
+ KiWaitTest(&KiSwapEvent, BALANCE_INCREMENT);
+ }
+ }
+
+ //
+ // Clear the active processor bit in the previous process and
+ // set active processor bit in the process being attached to.
+ //
+
+#if !defined(NT_UP)
+
+ Processor = KeGetCurrentPrcb()->SetMember;
+ Thread->SavedApcState.Process->ActiveProcessors &= ~Processor;
+ Process->ActiveProcessors |= Processor;
+
+#endif
+
+ Thread->WaitIrql = OldIrql;
+
+ KiSwapThread();
+
+ }
+ }
+
+ return;
+}
+
+VOID
+KiMoveApcState (
+ IN PKAPC_STATE Source,
+ OUT PKAPC_STATE Destination
+ )
+
+/*++
+
+Routine Description:
+
+ This function moves the APC state from the source structure to the
+ destination structure and reinitializes list headers as appropriate.
+
+Arguments:
+
+ Source - Supplies a pointer to the source APC state structure.
+
+ Destination - Supplies a pointer to the destination APC state structure.
+
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PLIST_ENTRY First;
+ PLIST_ENTRY Last;
+
+ //
+ // Copy the APC state from the source to the destination.
+ //
+
+#if defined(_M_PPC) && (_MSC_VER >= 1000)
+ KIRQL OldIrql;
+ KeRaiseIrql(HIGH_LEVEL, &OldIrql);
+#endif
+
+ *Destination = *Source;
+
+#if defined(_M_PPC) && (_MSC_VER >= 1000)
+ KeLowerIrql(OldIrql);
+#endif
+
+ if (IsListEmpty(&Source->ApcListHead[KernelMode]) != FALSE) {
+ InitializeListHead(&Destination->ApcListHead[KernelMode]);
+
+ } else {
+ First = Source->ApcListHead[KernelMode].Flink;
+ Last = Source->ApcListHead[KernelMode].Blink;
+ Destination->ApcListHead[KernelMode].Flink = First;
+ Destination->ApcListHead[KernelMode].Blink = Last;
+ First->Blink = &Destination->ApcListHead[KernelMode];
+ Last->Flink = &Destination->ApcListHead[KernelMode];
+ }
+
+ if (IsListEmpty(&Source->ApcListHead[UserMode]) != FALSE) {
+ InitializeListHead(&Destination->ApcListHead[UserMode]);
+
+ } else {
+ First = Source->ApcListHead[UserMode].Flink;
+ Last = Source->ApcListHead[UserMode].Blink;
+ Destination->ApcListHead[UserMode].Flink = First;
+ Destination->ApcListHead[UserMode].Blink = Last;
+ First->Blink = &Destination->ApcListHead[UserMode];
+ Last->Flink = &Destination->ApcListHead[UserMode];
+ }
+
+ return;
+}
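+
+// For reference, a sketch (illustrative only) of why the list heads must be
+// re-homed after the structure copy above: an empty LIST_ENTRY head points
+// at itself, and a non-empty head is pointed back at by its first and last
+// entries, so after "*Destination = *Source" those links still reference the
+// source structure.  With "Mode" standing for KernelMode or UserMode, the
+// per-list fix-up reduces to:
+//
+//     if (IsListEmpty(&Source->ApcListHead[Mode])) {
+//         InitializeListHead(&Destination->ApcListHead[Mode]);
+//
+//     } else {
+//         Destination->ApcListHead[Mode].Flink->Blink =
+//             &Destination->ApcListHead[Mode];
+//         Destination->ApcListHead[Mode].Blink->Flink =
+//             &Destination->ApcListHead[Mode];
+//     }
+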
diff --git a/private/ntos/ke/profobj.c b/private/ntos/ke/profobj.c
new file mode 100644
index 000000000..dba09aacc
--- /dev/null
+++ b/private/ntos/ke/profobj.c
@@ -0,0 +1,807 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ profobj.c
+
+Abstract:
+
+ This module implements the kernel Profile Object. Functions are
+ provided to initialize, start, and stop profile objects and to set
+ and query the profile interval.
+
+Author:
+
+ Bryan M. Willman (bryanwi) 19-Sep-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input profile object is
+// really a kprofile and not something else, like deallocated pool.
+//
+
+#define ASSERT_PROFILE(E) { \
+ ASSERT((E)->Type == ProfileObject); \
+}
+
+//
+// Structure representing an active profile source
+//
+typedef struct _KACTIVE_PROFILE_SOURCE {
+ LIST_ENTRY ListEntry;
+ KPROFILE_SOURCE Source;
+ KAFFINITY Affinity;
+ ULONG ProcessorCount[1]; // variable-sized, one per processor
+} KACTIVE_PROFILE_SOURCE, *PKACTIVE_PROFILE_SOURCE;
+
+//
+// Prototypes for IPI target functions
+//
+VOID
+KiStartProfileInterrupt (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+VOID
+KiStopProfileInterrupt (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ );
+
+
+VOID
+KeInitializeProfile (
+ IN PKPROFILE Profile,
+ IN PKPROCESS Process OPTIONAL,
+ IN PVOID RangeBase,
+ IN ULONG RangeSize,
+ IN ULONG BucketSize,
+ IN ULONG Segment,
+ IN KPROFILE_SOURCE ProfileSource,
+ IN KAFFINITY ProfileAffinity
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel profile object. The process,
+ address range, bucket size, and buffer are set. The profile is
+ set to the stopped state.
+
+Arguments:
+
+ Profile - Supplies a pointer to control object of type profile.
+
+ Process - Supplies an optional pointer to a process object that
+ describes the address space to profile. If not specified,
+ then all address spaces are included in the profile.
+
+ RangeBase - Supplies the address of the first byte of the address
+ range for which profiling information is to be collected.
+
+ RangeSize - Supplies the size of the address range for which profiling
+ information is to be collected. The RangeBase and RangeSize
+ parameters are interpreted such that RangeBase <= address <
+ RangeBase + RangeSize generates a profile hit.
+
+ BucketSize - Supplies the log base 2 of the size of a profiling bucket.
+ Thus, BucketSize = 2 yields 4-byte buckets, BucketSize = 7 yields
+ 128-byte buckets.
+
+ Segment - Supplies the non-Flat code segment to profile. If this
+        is zero, then flat profiling is done. This will only
+ be non-zero on an x86 machine.
+
+ ProfileSource - Supplies the profile interrupt source.
+
+    ProfileAffinity - Supplies the set of processors to count hits for.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+#if !defined(i386)
+
+ ASSERT(Segment == 0);
+
+#endif
+
+ //
+ // Initialize the standard control object header.
+ //
+
+ Profile->Type = ProfileObject;
+ Profile->Size = sizeof(KPROFILE);
+
+ //
+ // Initialize the process address space, range base, range limit,
+ // bucket shift count, and set started FALSE.
+ //
+
+ if (ARGUMENT_PRESENT(Process)) {
+ Profile->Process = Process;
+
+ } else {
+ Profile->Process = NULL;
+ }
+
+ Profile->RangeBase = RangeBase;
+ Profile->RangeLimit = (PUCHAR)RangeBase + RangeSize;
+ Profile->BucketShift = BucketSize - 2;
+ Profile->Started = FALSE;
+ Profile->Segment = Segment;
+ Profile->Source = ProfileSource;
+ if (ProfileAffinity != 0) {
+ Profile->Affinity = ProfileAffinity & KeActiveProcessors;
+ } else {
+ Profile->Affinity = KeActiveProcessors;
+ }
+ return;
+}
+
+ULONG
+KeQueryIntervalProfile (
+ IN KPROFILE_SOURCE ProfileSource
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the profile sample interval the system is
+ currently using.
+
+Arguments:
+
+ ProfileSource - Supplies the profile source to be queried.
+
+Return Value:
+
+ Sample interval in units of 100ns.
+
+--*/
+
+{
+
+ HAL_PROFILE_SOURCE_INFORMATION ProfileSourceInfo;
+ ULONG ReturnedLength;
+ NTSTATUS Status;
+
+ if (ProfileSource == ProfileTime) {
+
+ //
+ // Return the current sampling interval in 100ns units.
+ //
+
+ return KiProfileInterval;
+
+ } else if (ProfileSource == ProfileAlignmentFixup) {
+ return(KiProfileAlignmentFixupInterval);
+
+ } else {
+
+ //
+ // The HAL is responsible for tracking this profile interval.
+ //
+
+ ProfileSourceInfo.Source = ProfileSource;
+ Status = HalQuerySystemInformation(HalProfileSourceInformation,
+ sizeof(HAL_PROFILE_SOURCE_INFORMATION),
+ &ProfileSourceInfo,
+ &ReturnedLength);
+ if (NT_SUCCESS(Status) && ProfileSourceInfo.Supported) {
+ return(ProfileSourceInfo.Interval);
+
+ } else {
+ return 0;
+ }
+ }
+}
+
+VOID
+KeSetIntervalProfile (
+ IN ULONG Interval,
+ IN KPROFILE_SOURCE Source
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the profile sampling interval. The interval is in
+ 100ns units. The interval will actually be set to some value in a set
+ of preset values (at least on pc based hardware), using the one closest
+ to what the user asked for.
+
+Arguments:
+
+    Interval - Supplies the length of the sampling interval in 100ns units.
+
+    Source - Supplies the profile source whose interval is to be set.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ HAL_PROFILE_SOURCE_INTERVAL ProfileSourceInterval;
+
+ if (Source == ProfileTime) {
+
+ //
+ // If the specified sampling interval is less than the minimum
+ // sampling interval, then set the sampling interval to the minimum
+ // sampling interval.
+ //
+
+ if (Interval < MINIMUM_PROFILE_INTERVAL) {
+ Interval = MINIMUM_PROFILE_INTERVAL;
+ }
+
+ //
+ // Set the sampling interval.
+ //
+
+ KiProfileInterval = KiIpiGenericCall(HalSetProfileInterval, Interval);
+
+ } else if (Source == ProfileAlignmentFixup) {
+ KiProfileAlignmentFixupInterval = Interval;
+
+ } else {
+
+ //
+ // The HAL is responsible for setting this profile interval.
+ //
+
+ ProfileSourceInterval.Source = Source;
+ ProfileSourceInterval.Interval = Interval;
+ HalSetSystemInformation(HalProfileSourceInterval,
+ sizeof(HAL_PROFILE_SOURCE_INTERVAL),
+ &ProfileSourceInterval);
+ }
+
+ return;
+}
+
+BOOLEAN
+KeStartProfile (
+ IN PKPROFILE Profile,
+ IN PULONG Buffer
+ )
+
+/*++
+
+Routine Description:
+
+ This function starts profile data gathering on the specified profile
+ object. The profile object is marked started, and is registered with
+ the profile interrupt procedure.
+
+ If the number of active profile objects was previously zero, then the
+ profile interrupt is enabled.
+
+ N.B. For the current implementation, an arbitrary number of profile
+ objects may be active at once. This can present a large system
+ overhead. It is assumed that the caller appropriately limits the
+    number of active profiles.
+
+Arguments:
+
+ Profile - Supplies a pointer to a control object of type profile.
+
+ Buffer - Supplies a pointer to an array of counters, which record
+ the number of hits in the corresponding bucket.
+
+Return Value:
+
+ A value of TRUE is returned if profiling was previously stopped for
+ the specified profile object. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql, OldIrql2;
+ PKPROCESS Process;
+ BOOLEAN Started;
+ KAFFINITY TargetProcessors;
+ PKPRCB Prcb;
+ PKACTIVE_PROFILE_SOURCE ActiveSource = NULL;
+ PKACTIVE_PROFILE_SOURCE CurrentActiveSource;
+ PKACTIVE_PROFILE_SOURCE AllocatedPool;
+ PLIST_ENTRY ListEntry;
+ ULONG SourceSize;
+ KAFFINITY AffinitySet;
+ PULONG Reference;
+
+ ASSERT_PROFILE(Profile);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Allocate pool that may be required before raising to PROFILE_LEVEL.
+ //
+
+ SourceSize = sizeof(KACTIVE_PROFILE_SOURCE) + sizeof(ULONG) *
+ (KeNumberProcessors - 1);
+ AllocatedPool = ExAllocatePool(NonPagedPool, SourceSize);
+ if (AllocatedPool == NULL) {
+ return(TRUE);
+ }
+
+ //
+ // Raise to dispatch level
+ //
+
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ Prcb = KeGetCurrentPrcb();
+
+ //
+ // Raise IRQL to PROFILE_LEVEL and acquire the profile lock.
+ //
+
+ KeRaiseIrql(KiProfileIrql, &OldIrql2);
+ KiAcquireSpinLock(&KiProfileLock);
+
+ //
+ // Assume object already started
+ //
+
+ Started = FALSE;
+ AffinitySet = 0L;
+ TargetProcessors = 0L;
+
+ //
+ // If the specified profile object is not started, set started to TRUE,
+ // set the address of the profile buffer, set the profile object to started,
+ // insert the profile object in the appropriate profile list, and start
+ // profile interrupts if the number of active profile objects was previously zero.
+ //
+
+ if (Profile->Started == FALSE) {
+
+ Started = TRUE;
+ Profile->Buffer = Buffer;
+ Profile->Started = TRUE;
+ Process = Profile->Process;
+ if (Process != NULL) {
+ InsertTailList(&Process->ProfileListHead, &Profile->ProfileListEntry);
+
+ } else {
+ InsertTailList(&KiProfileListHead, &Profile->ProfileListEntry);
+ }
+
+ //
+ // Check the profile source list to see if this profile source is
+ // already started. If so, update the reference counts. If not,
+ // allocate a profile source object, initialize the reference
+ // counts, and add it to the list.
+ //
+
+ ListEntry = KiProfileSourceListHead.Flink;
+ while (ListEntry != &KiProfileSourceListHead) {
+ CurrentActiveSource = CONTAINING_RECORD(ListEntry,
+ KACTIVE_PROFILE_SOURCE,
+ ListEntry);
+
+ if (CurrentActiveSource->Source == Profile->Source) {
+ ActiveSource = CurrentActiveSource;
+ break;
+ }
+ ListEntry = ListEntry->Flink;
+ }
+
+ if (ActiveSource == NULL) {
+
+ //
+ // This source was not found, allocate and initialize a new entry and add
+ // it to the head of the list.
+ //
+
+ ActiveSource = AllocatedPool;
+ AllocatedPool = NULL;
+ RtlZeroMemory(ActiveSource, SourceSize);
+ ActiveSource->Source = Profile->Source;
+ InsertHeadList(&KiProfileSourceListHead, &ActiveSource->ListEntry);
+ if (Profile->Source == ProfileAlignmentFixup) {
+ KiProfileAlignmentFixup = TRUE;
+ }
+ }
+
+ //
+ // Increment the reference counts for each processor in the
+ // affinity set.
+ //
+
+ AffinitySet = Profile->Affinity;
+ Reference = &ActiveSource->ProcessorCount[0];
+ while (AffinitySet != 0) {
+ if (AffinitySet & 1) {
+ *Reference = *Reference + 1;
+ }
+
+ AffinitySet = AffinitySet >> 1;
+ Reference = Reference + 1;
+ }
+
+ //
+        // Compute the processors on which the profile interrupt is
+        // required but not already started.
+ //
+
+ AffinitySet = Profile->Affinity & ~ActiveSource->Affinity;
+ TargetProcessors = AffinitySet & ~Prcb->SetMember;
+
+ //
+ // Update set of processors on which this source is active.
+ //
+
+ ActiveSource->Affinity |= Profile->Affinity;
+ }
+
+ //
+ // Release the profile lock, lower IRQL to its previous value, and
+ // return whether profiling was started.
+ //
+
+ KiReleaseSpinLock(&KiProfileLock);
+ KeLowerIrql(OldIrql2);
+
+ //
+ // Start profile interrupt on pending processors
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiStartProfileInterrupt,
+ (PVOID)Profile->Source,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ if (AffinitySet & Prcb->SetMember) {
+ HalStartProfileInterrupt(Profile->Source);
+ }
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Lower to original IRQL
+ //
+
+ KeLowerIrql(OldIrql);
+
+ //
+ // If the allocated pool was not used, free it now.
+ //
+
+ if (AllocatedPool != NULL) {
+ ExFreePool(AllocatedPool);
+ }
+
+ return Started;
+}
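+
+// A minimal life-cycle sketch (illustrative only): initialize, start, and
+// stop a profile object.  The range, bucket size, and buffer sizing below
+// are hypothetical, as is "CodeBase"; the buffer must provide one ULONG
+// counter per bucket of the profiled range (here 16KB / 16-byte buckets =
+// 1024 counters).
+//
+//     KPROFILE Profile;
+//     ULONG Counters[1024];
+//
+//     KeInitializeProfile(&Profile,
+//                         NULL,          // profile all address spaces
+//                         CodeBase,      // assumed start of profiled range
+//                         16 * 1024,     // range size in bytes
+//                         4,             // log2 bucket size (16-byte buckets)
+//                         0,             // flat (non-segmented) profiling
+//                         ProfileTime,
+//                         0);            // zero selects all active processors
+//
+//     if (KeStartProfile(&Profile, Counters) != FALSE) {
+//
+//         // Let the system run, then harvest the counters.
+//
+//         KeStopProfile(&Profile);
+//     }
+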
+
+BOOLEAN
+KeStopProfile (
+ IN PKPROFILE Profile
+ )
+
+/*++
+
+Routine Description:
+
+ This function stops profile data gathering on the specified profile
+ object. The object is marked stopped, and is removed from the active
+ profile list.
+
+ If the number of active profile objects goes to zero, then the profile
+ interrupt is disabled.
+
+Arguments:
+
+ Profile - Supplies a pointer to a control object of type profile.
+
+Return Value:
+
+ A value of TRUE is returned if profiling was previously started for
+ the specified profile object. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql, OldIrql2;
+ BOOLEAN Stopped;
+ KAFFINITY TargetProcessors;
+ PKPRCB Prcb;
+ BOOLEAN StopInterrupt = TRUE;
+ PLIST_ENTRY ListEntry;
+ PKACTIVE_PROFILE_SOURCE ActiveSource;
+ PKACTIVE_PROFILE_SOURCE PoolToFree=NULL;
+ KAFFINITY AffinitySet = 0;
+ KAFFINITY CurrentProcessor;
+ PULONG Reference;
+
+ ASSERT_PROFILE(Profile);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+    // Raise to dispatch level
+ //
+
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ Prcb = KeGetCurrentPrcb();
+
+ //
+ // Raise IRQL to PROFILE_LEVEL and acquire the profile lock.
+ //
+
+ KeRaiseIrql(KiProfileIrql, &OldIrql2);
+ KiAcquireSpinLock(&KiProfileLock);
+
+ //
+ // Assume object already stopped
+ //
+
+ Stopped = FALSE;
+ AffinitySet = 0L;
+ TargetProcessors = 0L;
+
+ //
+ // If the specified profile object is not stopped, set stopped to TRUE, set
+    // the profile object to stopped, remove the profile object from the
+    // appropriate profile list, and stop profile interrupts if the number of
+ // active profile objects is zero.
+ //
+
+ if (Profile->Started != FALSE) {
+
+ Stopped = TRUE;
+ Profile->Started = FALSE;
+ RemoveEntryList(&Profile->ProfileListEntry);
+
+ //
+ // Search the profile source list to find the entry for this
+ // profile source.
+ //
+
+ ListEntry = KiProfileSourceListHead.Flink;
+ do {
+ ASSERT(ListEntry != &KiProfileSourceListHead);
+ ActiveSource = CONTAINING_RECORD(ListEntry,
+ KACTIVE_PROFILE_SOURCE,
+ ListEntry);
+ ListEntry = ListEntry->Flink;
+ } while ( ActiveSource->Source != Profile->Source );
+
+ //
+ // Decrement the reference counts for each processor in the
+ // affinity set and build up a mask of the processors that
+ // now have a reference count of zero.
+ //
+
+ CurrentProcessor = 1;
+ TargetProcessors = 0;
+ AffinitySet = Profile->Affinity;
+ Reference = &ActiveSource->ProcessorCount[0];
+ while (AffinitySet != 0) {
+ if (AffinitySet & 1) {
+ *Reference = *Reference - 1;
+ if (*Reference == 0) {
+ TargetProcessors = TargetProcessors | CurrentProcessor;
+ }
+ }
+
+ AffinitySet = AffinitySet >> 1;
+ Reference = Reference + 1;
+ CurrentProcessor = CurrentProcessor << 1;
+ }
+
+ //
+ // Compute the processors whose profile interrupt reference
+ // count has dropped to zero.
+ //
+
+ AffinitySet = TargetProcessors;
+ TargetProcessors = AffinitySet & ~Prcb->SetMember;
+
+ //
+ // Update set of processors on which this source is active.
+ //
+
+ ActiveSource->Affinity &= ~AffinitySet;
+
+ //
+ // Determine whether this profile source is stopped on all
+ // processors. If so, remove it from the list and free it.
+ //
+
+ if (ActiveSource->Affinity == 0) {
+ RemoveEntryList(&ActiveSource->ListEntry);
+ PoolToFree = ActiveSource;
+ if (Profile->Source == ProfileAlignmentFixup) {
+ KiProfileAlignmentFixup = FALSE;
+ }
+ }
+ }
+
+ //
+ // Release the profile lock, lower IRQL to its previous value, and
+ // return whether profiling was stopped.
+ //
+
+ KiReleaseSpinLock(&KiProfileLock);
+ KeLowerIrql(OldIrql2);
+
+ //
+ // Stop profile interrupt on pending processors
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiStopProfileInterrupt,
+ (PVOID)Profile->Source,
+ NULL,
+ NULL);
+ }
+
+#endif
+
+ if (AffinitySet & Prcb->SetMember) {
+ HalStopProfileInterrupt(Profile->Source);
+ }
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Lower to original IRQL
+ //
+
+ KeLowerIrql (OldIrql);
+
+ //
+ // Now that IRQL has been lowered, free the profile source if
+ // necessary.
+ //
+
+ if (PoolToFree != NULL) {
+ ExFreePool(PoolToFree);
+ }
+
+ return Stopped;
+}
+
+#if !defined(NT_UP)
+
+
+VOID
+KiStopProfileInterrupt (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This is the target function for stopping the profile interrupt on target
+ processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed
+
+ Parameter1 - Supplies the profile source
+
+ Parameter2 - Parameter3 - not used
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KPROFILE_SOURCE ProfileSource;
+
+ //
+ // Stop the profile interrupt on the current processor and clear the
+ // data cache packet address to signal the source to continue.
+ //
+
+ ProfileSource = (KPROFILE_SOURCE)Parameter1;
+ HalStopProfileInterrupt(ProfileSource);
+ KiIpiSignalPacketDone(SignalDone);
+ return;
+}
+
+VOID
+KiStartProfileInterrupt (
+ IN PKIPI_CONTEXT SignalDone,
+ IN PVOID Parameter1,
+ IN PVOID Parameter2,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+    This is the target function for starting the profile interrupt on target
+ processors.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed
+
+ Parameter1 - Supplies the profile source
+
+ Parameter2 - Parameter3 - not used
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KPROFILE_SOURCE ProfileSource;
+
+ //
+ // Start the profile interrupt on the current processor and clear the
+ // data cache packet address to signal the source to continue.
+ //
+
+ ProfileSource = (KPROFILE_SOURCE)Parameter1;
+ HalStartProfileInterrupt(ProfileSource);
+ KiIpiSignalPacketDone(SignalDone);
+ return;
+}
+
+#endif
diff --git a/private/ntos/ke/queueobj.c b/private/ntos/ke/queueobj.c
new file mode 100644
index 000000000..bb0300bd9
--- /dev/null
+++ b/private/ntos/ke/queueobj.c
@@ -0,0 +1,814 @@
+/*++
+
+Copyright (c) 1993 Microsoft Corporation
+
+Module Name:
+
+ queueobj.c
+
+Abstract:
+
+ This module implements the kernel queue object. Functions are
+ provided to initialize, read, insert, and remove queue objects.
+
+Author:
+
+ David N. Cutler (davec) 31-Dec-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input queue is
+// really a kernel queue and not something else, like deallocated pool.
+//
+
+#define ASSERT_QUEUE(Q) ASSERT((Q)->Header.Type == QueueObject);
+
+VOID
+KeInitializeQueue (
+ IN PRKQUEUE Queue,
+ IN ULONG Count OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel queue object.
+
+Arguments:
+
+    Queue - Supplies a pointer to a dispatcher object of type queue.
+
+ Count - Supplies the target maximum number of threads that should
+ be concurrently active. If this parameter is not specified,
+ then the number of processors is used.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize standard dispatcher object header and set initial
+ // state of queue object.
+ //
+
+ Queue->Header.Type = QueueObject;
+ Queue->Header.Size = sizeof(KQUEUE) / sizeof(LONG);
+ Queue->Header.SignalState = 0;
+ InitializeListHead(&Queue->Header.WaitListHead);
+
+ //
+ // Initialize queue listhead, the thread list head, the current number
+ // of threads, and the target maximum number of threads.
+ //
+
+ InitializeListHead(&Queue->EntryListHead);
+ InitializeListHead(&Queue->ThreadListHead);
+ Queue->CurrentCount = 0;
+ if (ARGUMENT_PRESENT(Count)) {
+ Queue->MaximumCount = Count;
+
+ } else {
+ Queue->MaximumCount = KeNumberProcessors;
+ }
+
+ return;
+}
+
+LONG
+KeReadStateQueue (
+ IN PRKQUEUE Queue
+ )
+
+/*++
+
+Routine Description:
+
+ This function reads the current signal state of a Queue object.
+
+Arguments:
+
+ Queue - Supplies a pointer to a dispatcher object of type Queue.
+
+Return Value:
+
+ The current signal state of the Queue object.
+
+--*/
+
+{
+
+ ASSERT_QUEUE(Queue);
+
+ //
+ // Return current signal state of Queue object.
+ //
+
+ return Queue->Header.SignalState;
+}
+
+LONG
+KeInsertQueue (
+ IN PRKQUEUE Queue,
+ IN PLIST_ENTRY Entry
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts the specified entry in the queue object entry
+ list and attempts to satisfy the wait of a single waiter.
+
+ N.B. The wait discipline for Queue object is LIFO.
+
+Arguments:
+
+ Queue - Supplies a pointer to a dispatcher object of type Queue.
+
+ Entry - Supplies a pointer to a list entry that is inserted in the
+ queue object entry list.
+
+Return Value:
+
+ The previous signal state of the Queue object.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG OldState;
+
+ ASSERT_QUEUE(Queue);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Insert the specified entry in the queue object entry list.
+ //
+
+ OldState = KiInsertQueue(Queue, Entry, FALSE);
+
+ //
+    // Unlock the dispatcher database, lower IRQL to the previous level, and
+    // return the previous signal state of the Queue object.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return OldState;
+}
+
+LONG
+KeInsertHeadQueue (
+ IN PRKQUEUE Queue,
+ IN PLIST_ENTRY Entry
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts the specified entry in the queue object entry
+ list and attempts to satisfy the wait of a single waiter.
+
+ N.B. The wait discipline for Queue object is LIFO.
+
+Arguments:
+
+ Queue - Supplies a pointer to a dispatcher object of type Queue.
+
+ Entry - Supplies a pointer to a list entry that is inserted in the
+ queue object entry list.
+
+Return Value:
+
+ The previous signal state of the Queue object.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ LONG OldState;
+
+ ASSERT_QUEUE(Queue);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Insert the specified entry in the queue object entry list.
+ //
+
+ OldState = KiInsertQueue(Queue, Entry, TRUE);
+
+ //
+    // Unlock the dispatcher database, lower IRQL to the previous level, and
+    // return the previous signal state of the Queue object.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return OldState;
+}
+
+PLIST_ENTRY
+KeRemoveQueue (
+ IN PRKQUEUE Queue,
+ IN KPROCESSOR_MODE WaitMode,
+ IN PLARGE_INTEGER Timeout OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function removes the next entry from the Queue object entry
+ list. If no list entry is available, then the calling thread is
+ put in a wait state.
+
+    N.B. The wait discipline for the Queue object is LIFO.
+
+Arguments:
+
+ Queue - Supplies a pointer to a dispatcher object of type Queue.
+
+ WaitMode - Supplies the processor mode in which the wait is to occur.
+
+    Timeout - Supplies a pointer to an optional absolute or relative time over
+ which the wait is to occur.
+
+Return Value:
+
+ The address of the entry removed from the Queue object entry list or
+ STATUS_TIMEOUT.
+
+ N.B. These values can easily be distinguished by the fact that all
+ addresses in kernel mode have the high order bit set.
+
+--*/
+
+{
+
+ LARGE_INTEGER NewTime;
+ PLIST_ENTRY Entry;
+ PRKTHREAD NextThread;
+ KIRQL OldIrql;
+ PRKQUEUE OldQueue;
+ PLARGE_INTEGER OriginalTime;
+ PRKTHREAD Thread;
+ PRKTIMER Timer;
+ PRKWAIT_BLOCK WaitBlock;
+ NTSTATUS WaitStatus;
+
+ ASSERT_QUEUE(Queue);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // If the dispatcher database lock is not already held, then set the wait
+ // IRQL and lock the dispatcher database. Else set boolean wait variable
+ // to FALSE.
+ //
+
+ Thread = KeGetCurrentThread();
+ if (Thread->WaitNext) {
+ Thread->WaitNext = FALSE;
+
+ } else {
+ KiLockDispatcherDatabase(&OldIrql);
+ Thread->WaitIrql = OldIrql;
+ }
+
+ //
+ // Check if the thread is currently processing a queue entry and whether
+ // the new queue is the same as the old queue.
+ //
+
+ OldQueue = Thread->Queue;
+ Thread->Queue = Queue;
+ if (Queue != OldQueue) {
+
+ //
+ // If the thread was previously associated with a queue, then remove
+ // the thread from the old queue object thread list and attempt to
+ // activate another thread.
+ //
+
+ Entry = &Thread->QueueListEntry;
+ if (OldQueue != NULL) {
+ RemoveEntryList(Entry);
+ KiActivateWaiterQueue(OldQueue);
+ }
+
+ //
+ // Insert thread in the thread list of the new queue that the thread
+        // will be associated with.
+ //
+
+ InsertTailList(&Queue->ThreadListHead, Entry);
+
+ } else {
+
+ //
+ // The previous and current queue are the same queue - decrement the
+ // current number of threads.
+ //
+
+ Queue->CurrentCount -= 1;
+ }
+
+ //
+ //
+ // Start of wait loop.
+ //
+ //
+ // Note this loop is repeated if a kernel APC is delivered in the
+ // middle of the wait or a kernel APC is pending on the first attempt
+ // through the loop.
+ //
+ // If the Queue object entry list is not empty, then remove the next
+ // entry from the Queue object entry list. Otherwise, wait for an entry
+ // to be inserted in the queue.
+ //
+
+ OriginalTime = Timeout;
+ do {
+
+ //
+ // Check if there is a queue entry available and the current
+ // number of active threads is less than target maximum number
+ // of threads.
+ //
+
+ Entry = Queue->EntryListHead.Flink;
+ if ((Entry != &Queue->EntryListHead) &&
+ (Queue->CurrentCount < Queue->MaximumCount)) {
+
+ //
+            // Decrement the number of entries in the Queue object entry list,
+            // increment the number of active threads, remove the next entry
+            // from the list, and set the forward link to NULL.
+ //
+
+ Queue->Header.SignalState -= 1;
+ Queue->CurrentCount += 1;
+ if ((Entry->Flink == NULL) || (Entry->Blink == NULL)) {
+ KeBugCheckEx(INVALID_WORK_QUEUE_ITEM,
+ (ULONG)Entry,
+ (ULONG)Queue,
+ (ULONG)&ExWorkerQueue[0],
+ (ULONG)((PWORK_QUEUE_ITEM)Entry)->WorkerRoutine);
+ }
+
+ RemoveEntryList(Entry);
+ Entry->Flink = NULL;
+ break;
+
+ } else {
+
+ //
+ // Set address of wait block list in thread object.
+ //
+
+ Thread->WaitBlockList = &Thread->WaitBlock[0];
+
+ //
+ // Test to determine if a kernel APC is pending.
+ //
+ // If a kernel APC is pending and the previous IRQL was less than
+ // APC_LEVEL, then a kernel APC was queued by another processor
+ // just after IRQL was raised to DISPATCH_LEVEL, but before the
+ // dispatcher database was locked.
+ //
+            // N.B. This can only happen in a multiprocessor system.
+ //
+
+ if (Thread->ApcState.KernelApcPending && (Thread->WaitIrql < APC_LEVEL)) {
+
+ //
+ // Increment the current thread count, unlock the dispatcher
+ // database, and lower IRQL to previous value. An APC interrupt
+ // will immediately occur which will result in the delivery of
+ // the kernel APC if possible.
+ //
+
+ Queue->CurrentCount += 1;
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+
+ } else {
+
+ //
+ // Test if a user APC is pending.
+ //
+
+ if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending)) {
+ Entry = (PLIST_ENTRY)STATUS_USER_APC;
+ Queue->CurrentCount += 1;
+ break;
+ }
+
+ //
+ // Construct a wait block and check to determine if the wait
+ // is already satisfied. If the wait is satisfied, then perform
+ // wait completion and return. Else put current thread in a
+ // wait state if an explicit timeout value of zero is not
+ // specified.
+ //
+
+ Thread->WaitStatus = (NTSTATUS)0;
+ WaitBlock = &Thread->WaitBlock[0];
+ WaitBlock->Object = (PVOID)Queue;
+ WaitBlock->WaitKey = (CSHORT)(STATUS_SUCCESS);
+ WaitBlock->WaitType = WaitAny;
+ WaitBlock->Thread = Thread;
+
+ //
+ // Check to determine if a timeout value is specified.
+ //
+
+ if (ARGUMENT_PRESENT(Timeout)) {
+
+ //
+ // If the timeout value is zero, then return immediately
+ // without waiting.
+ //
+
+ if (!(Timeout->LowPart | Timeout->HighPart)) {
+ Entry = (PLIST_ENTRY)STATUS_TIMEOUT;
+ Queue->CurrentCount += 1;
+ break;
+ }
+
+ //
+ // Initialize a wait block for the thread specific timer,
+ // insert wait block in timer wait list, insert the timer
+ // in the timer tree.
+ //
+
+ Timer = &Thread->Timer;
+ WaitBlock->NextWaitBlock = &Thread->WaitBlock[1];
+ WaitBlock = &Thread->WaitBlock[1];
+ WaitBlock->Object = (PVOID)Timer;
+ WaitBlock->WaitKey = (CSHORT)(STATUS_TIMEOUT);
+ WaitBlock->WaitType = WaitAny;
+ WaitBlock->Thread = Thread;
+ Timer->Header.WaitListHead.Flink = &WaitBlock->WaitListEntry;
+ Timer->Header.WaitListHead.Blink = &WaitBlock->WaitListEntry;
+ WaitBlock->WaitListEntry.Flink = &Timer->Header.WaitListHead;
+ WaitBlock->WaitListEntry.Blink = &Timer->Header.WaitListHead;
+ if (KiInsertTreeTimer(Timer, *Timeout) == FALSE) {
+ Entry = (PLIST_ENTRY)STATUS_TIMEOUT;
+ Queue->CurrentCount += 1;
+ break;
+ }
+ }
+
+ //
+ // Close up the circular list of wait control blocks.
+ //
+
+ WaitBlock->NextWaitBlock = &Thread->WaitBlock[0];
+
+ //
+ // Insert wait block in object wait list.
+ //
+
+ WaitBlock = &Thread->WaitBlock[0];
+ InsertTailList(&Queue->Header.WaitListHead, &WaitBlock->WaitListEntry);
+
+ //
+ // Set the thread wait parameters, set the thread dispatcher
+ // state to Waiting, and insert the thread in the wait list.
+ //
+
+ Thread->Alertable = FALSE;
+ Thread->WaitMode = WaitMode;
+ Thread->WaitReason = WrQueue;
+ Thread->WaitTime = KiQueryLowTickCount();
+ Thread->State = Waiting;
+ KiInsertWaitList(WaitMode, Thread);
+
+ //
+ // Switch context to selected thread.
+ //
+ // Control is returned at the original IRQL.
+ //
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+ ASSERT(Thread->WaitIrql <= DISPATCH_LEVEL);
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return wait status.
+ //
+
+ Thread->WaitReason = 0;
+ if (WaitStatus != STATUS_KERNEL_APC) {
+ return (PLIST_ENTRY)WaitStatus;
+ }
+
+ if (ARGUMENT_PRESENT(Timeout)) {
+
+ //
+ // Reduce the amount of time remaining before timeout occurs.
+ //
+
+ Timeout = KiComputeWaitInterval(Timer, OriginalTime, &NewTime);
+ }
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL, lock the dispatcher database,
+ // and decrement the count of active threads.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ Thread->WaitIrql = OldIrql;
+ Queue->CurrentCount -= 1;
+ }
+
+ } while (TRUE);
+
+ //
+ // Unlock the dispatcher database and return the list entry address or a
+ // status of timeout.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ return Entry;
+}
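+
+// A minimal usage sketch (illustrative only) of the queue object interface:
+// a producer inserts work packets with KeInsertQueue and a worker thread
+// drains them with KeRemoveQueue.  WORK_PACKET, the queue, and the packets
+// are hypothetical; the status comparison illustrates the return contract
+// described above (an entry address versus an encoded status value).
+//
+//     typedef struct _WORK_PACKET {
+//         LIST_ENTRY ListEntry;
+//         ULONG Payload;
+//     } WORK_PACKET, *PWORK_PACKET;
+//
+//     // Producer side: KeInsertQueue(Queue, &Packet->ListEntry);
+//
+//     PLIST_ENTRY Entry;
+//     PWORK_PACKET Packet;
+//
+//     for (;;) {
+//         Entry = KeRemoveQueue(Queue, KernelMode, NULL);
+//         if ((Entry == (PLIST_ENTRY)STATUS_TIMEOUT) ||
+//             (Entry == (PLIST_ENTRY)STATUS_USER_APC)) {
+//             continue;
+//         }
+//
+//         Packet = CONTAINING_RECORD(Entry, WORK_PACKET, ListEntry);
+//
+//         // Process Packet->Payload, then free or recycle the packet.
+//     }
+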
+
+PLIST_ENTRY
+KeRundownQueue (
+ IN PRKQUEUE Queue
+ )
+
+/*++
+
+Routine Description:
+
+ This function runs down the specified queue by removing the listhead
+ from the queue list, removing any associated threads from the thread
+ list, and returning the address of the first entry.
+
+
+Arguments:
+
+ Queue - Supplies a pointer to a dispatcher object of type Queue.
+
+Return Value:
+
+ If the queue list is not empty, then the address of the first entry in
+ the queue is returned as the function value. Otherwise, a value of NULL
+ is returned.
+
+--*/
+
+{
+
+ PLIST_ENTRY Entry;
+ PLIST_ENTRY FirstEntry;
+ KIRQL OldIrql;
+ PKTHREAD Thread;
+
+ ASSERT_QUEUE(Queue);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatch level and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Get the address of the first entry in the queue and check if the
+ // list is empty or contains entries that should be flushed. If there
+ // are no entries in the list, then set the return value to NULL.
+ // Otherwise, set the return value to the address of the first list
+ // entry and remove the listhead from the list.
+ //
+
+ FirstEntry = Queue->EntryListHead.Flink;
+ if (FirstEntry == &Queue->EntryListHead) {
+ FirstEntry = NULL;
+
+ } else {
+ RemoveEntryList(&Queue->EntryListHead);
+ }
+
+ //
+ // Remove all associated threads from the thread list of the queue.
+ //
+
+ while (Queue->ThreadListHead.Flink != &Queue->ThreadListHead) {
+ Entry = Queue->ThreadListHead.Flink;
+ Thread = CONTAINING_RECORD(Entry, KTHREAD, QueueListEntry);
+ Thread->Queue = NULL;
+ RemoveEntryList(Entry);
+ }
+
+ //
+ // Unlock the dispatcher database, lower IRQL to its previous level,
+ // and return the function value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return FirstEntry;
+}
+
+VOID
+FASTCALL
+KiActivateWaiterQueue (
+ IN PRKQUEUE Queue
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called when the current thread is about to enter a
+ wait state and is currently processing a queue entry. The current
+ number of threads processing entries for the queue is decremented and
+ an attempt is made to activate another thread if the current count
+ is less than the maximum count, there is a waiting thread, and the
+ queue is not empty.
+
+Arguments:
+
+ Queue - Supplies a pointer to a dispatcher object of type Queue.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRLIST_ENTRY Entry;
+ PRKTHREAD Thread;
+ PRKWAIT_BLOCK WaitBlock;
+ PRLIST_ENTRY WaitEntry;
+
+ //
+ // Decrement the current count of active threads and check if another
+ // thread can be activated. If the current number of active threads is
+ // less than the target maximum number of threads, there is an entry
+ // in the queue, and a thread is waiting, then remove the entry from the
+ // queue, decrement the number of entries in the queue, and unwait the
+ // respective thread.
+ //
+
+ Queue->CurrentCount -= 1;
+ if (Queue->CurrentCount < Queue->MaximumCount) {
+ Entry = Queue->EntryListHead.Flink;
+ WaitEntry = Queue->Header.WaitListHead.Blink;
+ if ((Entry != &Queue->EntryListHead) &&
+ (WaitEntry != &Queue->Header.WaitListHead)) {
+ RemoveEntryList(Entry);
+ Entry->Flink = NULL;
+ Queue->Header.SignalState -= 1;
+ WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
+ Thread = WaitBlock->Thread;
+ KiUnwaitThread(Thread, (NTSTATUS)Entry, 0);
+ }
+ }
+
+ return;
+}
+
+LONG
+FASTCALL
+KiInsertQueue (
+ IN PRKQUEUE Queue,
+ IN PLIST_ENTRY Entry,
+ IN BOOLEAN Head
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts the specified entry in the queue object entry
+ list and attempts to satisfy the wait of a single waiter.
+
+ N.B. The wait discipline for Queue objects is LIFO.
+
+Arguments:
+
+ Queue - Supplies a pointer to a dispatcher object of type Queue.
+
+ Entry - Supplies a pointer to a list entry that is inserted in the
+ queue object entry list.
+
+ Head - Supplies a boolean value that determines whether the queue
+ entry is inserted at the head or tail of the queue if it cannot
+ be immediately dispatched.
+
+Return Value:
+
+ The previous signal state of the Queue object.
+
+--*/
+
+{
+
+ LONG OldState;
+ PRKTHREAD Thread;
+ PKTIMER Timer;
+ PKWAIT_BLOCK WaitBlock;
+ PLIST_ENTRY WaitEntry;
+
+ ASSERT_QUEUE(Queue);
+
+ //
+ // Capture the current signal state of queue object and check if there
+ // is a thread waiting on the queue object, the current number of active
+ // threads is less than the target number of threads, and the wait reason
+ // of the current thread is not queue wait or the wait queue is not the
+ // same queue as the insertion queue. If these conditions are satisfied,
+ // then satisfy the thread wait and pass the thread the address of the
+ // queue entry as the wait status. Otherwise, set the state of the queue
+ // object to signaled and insert the specified entry in the queue object
+ // entry list.
+ //
+
+ OldState = Queue->Header.SignalState;
+ Thread = KeGetCurrentThread();
+ WaitEntry = Queue->Header.WaitListHead.Blink;
+ if ((WaitEntry != &Queue->Header.WaitListHead) &&
+ (Queue->CurrentCount < Queue->MaximumCount) &&
+ ((Thread->Queue != Queue) ||
+ (Thread->WaitReason != WrQueue))) {
+
+ //
+ // Remove the last wait block from the wait list and get the address
+ // of the waiting thread object.
+ //
+
+ RemoveEntryList(WaitEntry);
+ WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
+ Thread = WaitBlock->Thread;
+
+ //
+ // Set the wait completion status, remove the thread from its wait
+ // list, increment the number of active threads, and clear the wait
+ // reason.
+ //
+
+ Thread->WaitStatus = (NTSTATUS)Entry;
+ RemoveEntryList(&Thread->WaitListEntry);
+ Queue->CurrentCount += 1;
+ Thread->WaitReason = 0;
+
+ //
+ // If thread timer is still active, then cancel thread timer.
+ //
+
+ Timer = &Thread->Timer;
+ if (Timer->Header.Inserted == TRUE) {
+ KiRemoveTreeTimer(Timer);
+ }
+
+ //
+ // Ready the thread for execution.
+ //
+
+ KiReadyThread(Thread);
+
+ } else {
+ Queue->Header.SignalState += 1;
+ if (Head != FALSE) {
+ InsertHeadList(&Queue->EntryListHead, Entry);
+
+ } else {
+ InsertTailList(&Queue->EntryListHead, Entry);
+ }
+ }
+
+ return OldState;
+}
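+
+//
+// Illustrative usage sketch (not part of this module): a producer/consumer
+// pair built on the queue object. KeInsertQueue and KeRemoveQueue are
+// assumed to be the exported interfaces to the internal routines above;
+// the WORK_PACKET type and the two routines below are hypothetical.
+//
+
+typedef struct _WORK_PACKET {
+    LIST_ENTRY ListEntry;               // links the packet into the queue
+    ULONG Operation;                    // hypothetical payload
+} WORK_PACKET, *PWORK_PACKET;
+
+VOID
+QueueWorkPacket (
+    IN PKQUEUE WorkQueue,
+    IN PWORK_PACKET Packet
+    )
+{
+    //
+    // Either satisfies the wait of an idle worker thread or leaves the
+    // entry queued until one becomes available.
+    //
+
+    KeInsertQueue(WorkQueue, &Packet->ListEntry);
+}
+
+VOID
+WorkerThreadRoutine (
+    IN PVOID Context
+    )
+{
+    PKQUEUE WorkQueue = (PKQUEUE)Context;
+    PLIST_ENTRY Entry;
+    PWORK_PACKET Packet;
+
+    for (;;) {
+
+        //
+        // Block until an entry is available; the queue activates at most
+        // the number of concurrent workers specified to KeInitializeQueue.
+        //
+
+        Entry = KeRemoveQueue(WorkQueue, KernelMode, NULL);
+        Packet = CONTAINING_RECORD(Entry, WORK_PACKET, ListEntry);
+
+        //
+        // ... process Packet here ...
+        //
+    }
+}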
diff --git a/private/ntos/ke/raisexcp.c b/private/ntos/ke/raisexcp.c
new file mode 100644
index 000000000..1d9716ebe
--- /dev/null
+++ b/private/ntos/ke/raisexcp.c
@@ -0,0 +1,264 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ raisexcp.c
+
+Abstract:
+
+ This module implements the internal kernel code to continue execution
+ and raise an exception.
+
+Author:
+
+ David N. Cutler (davec) 8-Aug-1990
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+KiContinue (
+ IN PCONTEXT ContextRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to copy the specified context frame to the
+ specified exception and trap frames for the continue system service.
+
+Arguments:
+
+ ContextRecord - Supplies a pointer to a context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+Return Value:
+
+ STATUS_ACCESS_VIOLATION is returned if the context record is not readable
+ from user mode.
+
+ STATUS_DATATYPE_MISALIGNMENT is returned if the context record is not
+ properly aligned.
+
+ STATUS_SUCCESS is returned if the context frame is copied successfully
+ to the specified exception and trap frames.
+
+--*/
+
+{
+
+ CONTEXT ContextRecord2;
+ KPROCESSOR_MODE PreviousMode;
+ NTSTATUS Status;
+ KIRQL OldIrql;
+ BOOLEAN IrqlChanged = FALSE;
+
+ //
+ // Synchronize with other context operations.
+ //
+
+ Status = STATUS_SUCCESS;
+ if (KeGetCurrentIrql() < APC_LEVEL) {
+
+ //
+ // To support try-except and ExRaiseStatus in device driver code we
+ // need to check if we are already at raised level.
+ //
+
+ IrqlChanged = TRUE;
+ KeRaiseIrql(APC_LEVEL, &OldIrql);
+ }
+
+ //
+ // Establish an exception handler and probe and capture the specified
+ // context record if the previous mode is user. If the probe or copy
+ // fails, then return the exception code as the function value. Else
+ // copy the context record to the specified exception and trap frames,
+ // and return success as the function value.
+ //
+
+ try {
+
+ //
+ // Get the previous processor mode. If the previous processor mode is
+ // user, then probe and copy the specified context record.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ ProbeForRead(ContextRecord, sizeof(CONTEXT), CONTEXT_ALIGN);
+ RtlMoveMemory(&ContextRecord2, ContextRecord, sizeof(CONTEXT));
+ ContextRecord = &ContextRecord2;
+ }
+
+ //
+ // Move information from the context record to the exception and
+ // trap frames.
+ //
+
+ KeContextToKframes(TrapFrame,
+ ExceptionFrame,
+ ContextRecord,
+ ContextRecord->ContextFlags,
+ PreviousMode);
+
+ //
+ // If an exception occurs during the probe or copy of the context
+ // record, then always handle the exception and return the exception
+ // code as the status value.
+ //
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ Status = GetExceptionCode();
+ }
+
+ if (IrqlChanged) {
+ KeLowerIrql (OldIrql);
+ }
+
+ return Status;
+}
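+
+//
+// Illustrative sketch (not part of this module) of the driver-level
+// try/except usage that the IRQL check above accommodates. The routine
+// name CheckedCopy and the MAXIMUM_TRANSFER limit are hypothetical;
+// ExRaiseStatus raises a status that is dispatched back to the local
+// exception handler.
+//
+
+#define MAXIMUM_TRANSFER 0x10000
+
+NTSTATUS
+CheckedCopy (
+    OUT PVOID Destination,
+    IN PVOID Source,
+    IN ULONG Length
+    )
+{
+    NTSTATUS Status = STATUS_SUCCESS;
+
+    try {
+
+        //
+        // Raising a status here unwinds through the kernel exception
+        // dispatcher to the handler below.
+        //
+
+        if (Length > MAXIMUM_TRANSFER) {
+            ExRaiseStatus(STATUS_INVALID_PARAMETER);
+        }
+
+        RtlMoveMemory(Destination, Source, Length);
+
+    } except(EXCEPTION_EXECUTE_HANDLER) {
+        Status = GetExceptionCode();
+    }
+
+    return Status;
+}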
+
+NTSTATUS
+KiRaiseException (
+ IN PEXCEPTION_RECORD ExceptionRecord,
+ IN PCONTEXT ContextRecord,
+ IN PKEXCEPTION_FRAME ExceptionFrame,
+ IN PKTRAP_FRAME TrapFrame,
+ IN BOOLEAN FirstChance
+ )
+
+/*++
+
+Routine Description:
+
+ This function is called to raise an exception. The exception can be
+ raised as a first or second chance exception.
+
+Arguments:
+
+ ExceptionRecord - Supplies a pointer to an exception record.
+
+ ContextRecord - Supplies a pointer to a context record.
+
+ ExceptionFrame - Supplies a pointer to an exception frame.
+
+ TrapFrame - Supplies a pointer to a trap frame.
+
+ FirstChance - Supplies a boolean value that specifies whether this is
+ the first (TRUE) or second (FALSE) chance for the exception.
+
+Return Value:
+
+ STATUS_ACCESS_VIOLATION is returned if either the exception or the context
+ record is not readable from user mode.
+
+ STATUS_DATATYPE_MISALIGNMENT is returned if the exception record or the
+ context record are not properly aligned.
+
+ STATUS_INVALID_PARAMETER is returned if the number of exception parameters
+ is greater than the maximum allowable number of exception parameters.
+
+ STATUS_SUCCESS is returned if the exception is dispatched and handled.
+
+--*/
+
+{
+
+ CONTEXT ContextRecord2;
+ EXCEPTION_RECORD ExceptionRecord2;
+ LONG Length;
+ KPROCESSOR_MODE PreviousMode;
+
+ //
+ // Establish an exception handler and probe the specified exception and
+ // context records for read accessibility. If the probe fails, then
+ // return the exception code as the service status. Else call the exception
+ // dispatcher to dispatch the exception.
+ //
+
+ try {
+
+ //
+ // Get the previous processor mode. If the previous processor mode
+ // is user, then probe and copy the specified exception and context
+ // records.
+ //
+
+ PreviousMode = KeGetPreviousMode();
+ if (PreviousMode != KernelMode) {
+ ProbeForRead(ContextRecord, sizeof(CONTEXT), CONTEXT_ALIGN);
+ Length = ExceptionRecord->NumberParameters;
+ if (Length > EXCEPTION_MAXIMUM_PARAMETERS) {
+ return STATUS_INVALID_PARAMETER;
+ }
+
+ Length = (sizeof(EXCEPTION_RECORD) +
+ ((Length - EXCEPTION_MAXIMUM_PARAMETERS) * sizeof(ULONG)));
+
+ ProbeForRead(ExceptionRecord, Length, sizeof(ULONG));
+
+ //
+ // Copy the exception and context record to local storage so an
+ // access violation cannot occur during exception dispatching.
+ //
+
+ RtlMoveMemory(&ContextRecord2, ContextRecord, sizeof(CONTEXT));
+ RtlMoveMemory(&ExceptionRecord2, ExceptionRecord, Length);
+ ContextRecord = &ContextRecord2;
+ ExceptionRecord = &ExceptionRecord2;
+ }
+
+ //
+ // If an exception occurs during the probe of the exception or context
+ // record, then always handle the exception and return the exception code
+ // as the status value.
+ //
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ return GetExceptionCode();
+ }
+
+ //
+ // Move information from the context record to the exception and
+ // trap frames.
+ //
+
+ KeContextToKframes(TrapFrame,
+ ExceptionFrame,
+ ContextRecord,
+ ContextRecord->ContextFlags,
+ PreviousMode);
+
+ //
+ // Make sure the reserved bit is clear in the exception code and
+ // perform exception dispatching.
+ //
+ // N.B. The reserved bit is used to differentiate internally generated
+ // codes from codes generated by application programs.
+ //
+
+ ExceptionRecord->ExceptionCode &= 0xefffffff;
+ KiDispatchException(ExceptionRecord,
+ ExceptionFrame,
+ TrapFrame,
+ PreviousMode,
+ FirstChance);
+
+ return STATUS_SUCCESS;
+}
diff --git a/private/ntos/ke/semphobj.c b/private/ntos/ke/semphobj.c
new file mode 100644
index 000000000..4def47c0e
--- /dev/null
+++ b/private/ntos/ke/semphobj.c
@@ -0,0 +1,222 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ semphobj.c
+
+Abstract:
+
+ This module implements the kernel semaphore object. Functions
+ are provided to initialize, read, and release semaphore objects.
+
+Author:
+
+ David N. Cutler (davec) 28-Feb-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input semaphore is
+// really a ksemaphore and not something else, like deallocated pool.
+//
+
+#define ASSERT_SEMAPHORE(E) { \
+ ASSERT((E)->Header.Type == SemaphoreObject); \
+}
+
+
+VOID
+KeInitializeSemaphore (
+ IN PRKSEMAPHORE Semaphore,
+ IN LONG Count,
+ IN LONG Limit
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel semaphore object. The initial
+ count and limit of the object are set to the specified values.
+
+Arguments:
+
+ Semaphore - Supplies a pointer to a dispatcher object of type
+ semaphore.
+
+ Count - Supplies the initial count value to be assigned to the
+ semaphore.
+
+ Limit - Supplies the maximum count value that the semaphore
+ can attain.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize standard dispatcher object header and set initial
+ // count and maximum count values.
+ //
+
+ Semaphore->Header.Type = SemaphoreObject;
+ Semaphore->Header.Size = sizeof(KSEMAPHORE) / sizeof(LONG);
+ Semaphore->Header.SignalState = Count;
+ InitializeListHead(&Semaphore->Header.WaitListHead);
+ Semaphore->Limit = Limit;
+ return;
+}
+
+LONG
+KeReadStateSemaphore (
+ IN PRKSEMAPHORE Semaphore
+ )
+
+/*++
+
+Routine Description:
+
+ This function reads the current signal state of a semaphore object.
+
+Arguments:
+
+ Semaphore - Supplies a pointer to a dispatcher object of type
+ semaphore.
+
+Return Value:
+
+ The current signal state of the semaphore object.
+
+--*/
+
+{
+
+ ASSERT_SEMAPHORE( Semaphore );
+
+ //
+ // Return current signal state of semaphore object.
+ //
+
+ return Semaphore->Header.SignalState;
+}
+
+LONG
+KeReleaseSemaphore (
+ IN PRKSEMAPHORE Semaphore,
+ IN KPRIORITY Increment,
+ IN LONG Adjustment,
+ IN BOOLEAN Wait
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases a semaphore by adding the specified adjustment
+ value to the current semaphore count and attempts to satisfy as many
+ Waits as possible. The previous signal state of the semaphore object
+ is returned as the function value.
+
+Arguments:
+
+ Semaphore - Supplies a pointer to a dispatcher object of type
+ semaphore.
+
+ Increment - Supplies the priority increment that is to be applied
+ if releasing the semaphore causes a Wait to be satisfied.
+
+ Adjustment - Supplies the value that is to be added to the current
+ semaphore count.
+
+ Wait - Supplies a boolean value that signifies whether the call to
+ KeReleaseSemaphore will be immediately followed by a call to one
+ of the kernel Wait functions.
+
+Return Value:
+
+ The previous signal state of the semaphore object.
+
+--*/
+
+{
+
+ LONG NewState;
+ KIRQL OldIrql;
+ LONG OldState;
+ PRKTHREAD Thread;
+
+ ASSERT_SEMAPHORE( Semaphore );
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current signal state of the semaphore object and
+ // compute the new count value.
+ //
+
+ OldState = Semaphore->Header.SignalState;
+ NewState = OldState + Adjustment;
+
+ //
+ // If the new state value is greater than the limit or a carry occurs,
+ // then unlock the dispatcher database, and raise an exception.
+ //
+
+ if ((NewState > Semaphore->Limit) || (NewState < OldState)) {
+ KiUnlockDispatcherDatabase(OldIrql);
+ ExRaiseStatus(STATUS_SEMAPHORE_LIMIT_EXCEEDED);
+ }
+
+ //
+ // Set the new signal state of the semaphore object and set the wait
+ // next value. If the previous signal state was Not-Signaled (i.e.
+ // the count was zero), and the wait queue is not empty, then attempt
+ // to satisfy as many Waits as possible.
+ //
+
+ Semaphore->Header.SignalState = NewState;
+ if ((OldState == 0) && (IsListEmpty(&Semaphore->Header.WaitListHead) == FALSE)) {
+ KiWaitTest(Semaphore, Increment);
+ }
+
+ //
+ // If the value of the Wait argument is TRUE, then return to the
+ // caller with IRQL raised and the dispatcher database locked. Else
+ // release the dispatcher database lock and lower IRQL to its
+ // previous value.
+ //
+
+ if (Wait != FALSE) {
+ Thread = KeGetCurrentThread();
+ Thread->WaitNext = Wait;
+ Thread->WaitIrql = OldIrql;
+
+ } else {
+ KiUnlockDispatcherDatabase(OldIrql);
+ }
+
+ //
+ // Return previous signal state of semaphore object.
+ //
+
+ return OldState;
+}
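+
+//
+// Illustrative usage sketch (not part of this module): a counted resource
+// guarded by a semaphore. The SlotSemaphore variable and the three routines
+// below are hypothetical; KeWaitForSingleObject is the normal way a thread
+// consumes one unit of the count that KeReleaseSemaphore adds back.
+//
+
+KSEMAPHORE SlotSemaphore;
+
+VOID
+InitializeSlots (
+    IN LONG SlotCount
+    )
+{
+    //
+    // Start with SlotCount free slots; the count can never exceed SlotCount.
+    //
+
+    KeInitializeSemaphore(&SlotSemaphore, SlotCount, SlotCount);
+}
+
+VOID
+AcquireSlot (
+    VOID
+    )
+{
+    //
+    // Block until the semaphore count is nonzero, then decrement it.
+    //
+
+    KeWaitForSingleObject(&SlotSemaphore,
+                          Executive,
+                          KernelMode,
+                          FALSE,
+                          NULL);
+}
+
+VOID
+ReleaseSlot (
+    VOID
+    )
+{
+    //
+    // Add one unit back to the count, waking a waiter if one is present.
+    //
+
+    KeReleaseSemaphore(&SlotSemaphore, 0, 1, FALSE);
+}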
diff --git a/private/ntos/ke/services.tab b/private/ntos/ke/services.tab
new file mode 100644
index 000000000..a560c16a4
--- /dev/null
+++ b/private/ntos/ke/services.tab
@@ -0,0 +1,212 @@
+AcceptConnectPort,6
+AccessCheck,8
+AccessCheckAndAuditAlarm,11
+AddAtom,2
+AdjustGroupsToken,6
+AdjustPrivilegesToken,6
+AlertResumeThread,2
+AlertThread,1
+AllocateLocallyUniqueId,1
+AllocateUuids,3
+AllocateVirtualMemory,6
+CallbackReturn,3
+CancelIoFile,2
+CancelTimer,2
+ClearEvent,1
+Close,1
+CloseObjectAuditAlarm,3
+CompleteConnectPort,1
+ConnectPort,8
+Continue,2
+CreateDirectoryObject,3
+CreateEvent,5
+CreateEventPair,3
+CreateFile,11
+CreateIoCompletion,4
+CreateKey,7
+CreateMailslotFile,8
+CreateMutant,4
+CreateNamedPipeFile,14
+CreatePagingFile,4
+CreatePort,5
+CreateProcess,8
+CreateProfile,9
+CreateSection,7
+CreateSemaphore,5
+CreateSymbolicLinkObject,4
+CreateThread,8
+CreateTimer,4
+CreateToken,13
+DelayExecution,2
+DeleteAtom,1
+DeleteFile,1
+DeleteKey,1
+DeleteObjectAuditAlarm,3
+DeleteValueKey,2
+DeviceIoControlFile,10
+DisplayString,1
+DuplicateObject,7
+DuplicateToken,6
+EnumerateKey,6
+EnumerateValueKey,6
+ExtendSection,2
+FindAtom,2
+FlushBuffersFile,2
+FlushInstructionCache,3
+FlushKey,1
+FlushVirtualMemory,4
+FlushWriteBuffer,0
+FreeVirtualMemory,4
+FsControlFile,10
+GetContextThread,2
+GetPlugPlayEvent,4
+GetTickCount,0
+ImpersonateClientOfPort,2
+ImpersonateThread,3
+InitializeRegistry,1
+ListenPort,2
+LoadDriver,1
+LoadKey,2
+LoadKey2,3
+LockFile,10
+LockVirtualMemory,4
+MakeTemporaryObject,1
+MapViewOfSection,10
+NotifyChangeDirectoryFile,9
+NotifyChangeKey,10
+OpenDirectoryObject,3
+OpenEvent,3
+OpenEventPair,3
+OpenFile,6
+OpenIoCompletion,3
+OpenKey,3
+OpenMutant,3
+OpenObjectAuditAlarm,12
+OpenProcess,4
+OpenProcessToken,3
+OpenSection,3
+OpenSemaphore,3
+OpenSymbolicLinkObject,3
+OpenThread,4
+OpenThreadToken,4
+OpenTimer,3
+PlugPlayControl,4
+PrivilegeCheck,3
+PrivilegedServiceAuditAlarm,5
+PrivilegeObjectAuditAlarm,6
+ProtectVirtualMemory,5
+PulseEvent,2
+QueryInformationAtom,5
+QueryAttributesFile,2
+QueryDefaultLocale,2
+QueryDirectoryFile,11
+QueryDirectoryObject,7
+QueryEaFile,9
+QueryEvent,5
+QueryFullAttributesFile,2
+QueryInformationFile,5
+QueryIoCompletion,5
+QueryInformationPort,5
+QueryInformationProcess,5
+QueryInformationThread,5
+QueryInformationToken,5
+QueryIntervalProfile,2
+QueryKey,5
+QueryMultipleValueKey,6
+QueryMutant,5
+QueryObject,5
+QueryOleDirectoryFile,11
+QueryPerformanceCounter,2
+QuerySection,5
+QuerySecurityObject,5
+QuerySemaphore,5
+QuerySymbolicLinkObject,3
+QuerySystemEnvironmentValue,4
+QuerySystemInformation,4
+QuerySystemTime,1
+QueryTimer,5
+QueryTimerResolution,3
+QueryValueKey,6
+QueryVirtualMemory,6
+QueryVolumeInformationFile,5
+QueueApcThread,5
+RaiseException,3
+RaiseHardError,6
+ReadFile,9
+ReadFileScatter,9
+ReadRequestData,6
+ReadVirtualMemory,5
+RegisterThreadTerminatePort,1
+ReleaseMutant,2
+ReleaseSemaphore,3
+RemoveIoCompletion,5
+ReplaceKey,3
+ReplyPort,2
+ReplyWaitReceivePort,4
+ReplyWaitReplyPort,2
+RequestPort,2
+RequestWaitReplyPort,3
+ResetEvent,2
+RestoreKey,3
+ResumeThread,2
+SaveKey,2
+SetIoCompletion,5
+SetContextThread,2
+SetDefaultHardErrorPort,1
+SetDefaultLocale,2
+SetEaFile,4
+SetEvent,2
+SetHighEventPair,1
+SetHighWaitLowEventPair,1
+SetHighWaitLowThread,0
+SetInformationFile,5
+SetInformationKey,4
+SetInformationObject,4
+SetInformationProcess,4
+SetInformationThread,4
+SetInformationToken,4
+SetIntervalProfile,2
+SetLdtEntries,6
+SetLowEventPair,1
+SetLowWaitHighEventPair,1
+SetLowWaitHighThread,0
+SetSecurityObject,3
+SetSystemEnvironmentValue,2
+SetSystemInformation,3
+SetSystemPowerState,3
+SetSystemTime,2
+SetTimer,7
+SetTimerResolution,3
+SetValueKey,6
+SetVolumeInformationFile,5
+ShutdownSystem,1
+SignalAndWaitForSingleObject,4
+StartProfile,1
+StopProfile,1
+SuspendThread,2
+SystemDebugControl,6
+TerminateProcess,2
+TerminateThread,2
+TestAlert,0
+UnloadDriver,1
+UnloadKey,1
+UnlockFile,5
+UnlockVirtualMemory,4
+UnmapViewOfSection,2
+VdmControl,2
+WaitForMultipleObjects,5
+WaitForSingleObject,3
+WaitHighEventPair,1
+WaitLowEventPair,1
+WriteFile,9
+WriteFileGather,9
+WriteRequestData,6
+WriteVirtualMemory,5
+W32Call,5
+CreateChannel,2
+ListenChannel,2
+OpenChannel,2
+ReplyWaitSendChannel,3
+SendWaitReplyChannel,4
+SetContextChannel,1
+YieldExecution,0
diff --git a/private/ntos/ke/sources.inc b/private/ntos/ke/sources.inc
new file mode 100644
index 000000000..65a4f260f
--- /dev/null
+++ b/private/ntos/ke/sources.inc
@@ -0,0 +1,76 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+MAJORCOMP=ntos
+MINORCOMP=ke
+
+TARGETNAME=ke
+TARGETTYPE=LIBRARY
+
+INCLUDES=..;..\..\inc
+
+C_DEFINES=$(C_DEFINES) -D_NTSYSTEM_
+
+SOURCES=..\apcobj.c \
+ ..\apcsup.c \
+ ..\balmgr.c \
+ ..\bugcheck.c \
+ ..\channel.c \
+ ..\config.c \
+ ..\debug.c \
+ ..\devquobj.c \
+ ..\dpcobj.c \
+ ..\dpcsup.c \
+ ..\eventobj.c \
+ ..\kernldat.c \
+ ..\kiinit.c \
+ ..\miscc.c \
+ ..\mutntobj.c \
+ ..\procobj.c \
+ ..\profobj.c \
+ ..\queueobj.c \
+ ..\raisexcp.c \
+ ..\semphobj.c \
+ ..\suspend.c \
+ ..\thredobj.c \
+ ..\thredsup.c \
+ ..\timerobj.c \
+ ..\timersup.c \
+ ..\wait.c \
+ ..\waitsup.c \
+ ..\xipi.c \
+ ..\yield.c
+
+NTTEST=
+UMTYPE=console
+UMLIBS=$(BASEDIR)\public\sdk\lib\*\user32.lib
+UMTEST=
+
+NTTARGETFILES=
+
+PRECOMPILED_INCLUDE=..\ki.h
+PRECOMPILED_PCH=ki.pch
+PRECOMPILED_OBJ=ki.obj
+
+SOURCES_USED=..\sources.inc
diff --git a/private/ntos/ke/suspend.c b/private/ntos/ke/suspend.c
new file mode 100644
index 000000000..851b91c2d
--- /dev/null
+++ b/private/ntos/ke/suspend.c
@@ -0,0 +1,312 @@
+/*++
+
+Copyright (c) 1989-1993 Microsoft Corporation
+Copyright (c) 1994 International Business Machines Corporation
+
+Module Name:
+
+ suspend.c
+
+Abstract:
+
+ Suspend processor
+
+Author:
+
+ Ken Reneris (kenr) 19-July-1994
+
+Environment:
+
+ Kernel mode
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+#ifdef _PNP_POWER_
+
+VOID
+KiHibernateTargetProcessor (
+ IN PKDPC Dpc,
+ IN PVOID DeferredContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text(PAGEPO, KeSuspendHibernateSystem)
+#pragma alloc_text(PAGEPO, KiHibernateTargetProcessor)
+#endif
+
+//
+// KiSuspendFlag state
+//
+
+#define NormalOperation 0
+#define HaltingProcessors 1
+#define GoToHighLevel 2
+#define HibernateProcessors 3
+#define UnHibernateProcessors 4
+#define ThawProcessors 5
+
+
+NTSTATUS
+KeSuspendHibernateSystem (
+ IN PTIME_FIELDS ResumeTime OPTIONAL,
+ IN PVOID SystemCallback
+ )
+{
+ KIRQL OldIrql, OldIrql2;
+ NTSTATUS Status;
+ volatile KAFFINITY Targets;
+ KAFFINITY HoldActiveProcessors;
+ KAFFINITY Waiting, Affinity;
+ KSPIN_LOCK SpinLock;
+ KDPC Dpc;
+ ULONG i;
+ PKPRCB Prcb;
+
+ //
+ // No spinlocks can be held when this call is made
+ //
+
+ ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL);
+
+ KeInitializeSpinLock (&SpinLock);
+ KeInitializeDpc (&Dpc, KiHibernateTargetProcessor, NULL);
+
+ //
+ // Set system affinity to processor 0.
+ //
+
+ KeSetSystemAffinityThread(1);
+
+ //
+ // Raise to DISPATCH_LEVEL level to avoid getting any DPCs
+ //
+
+ KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
+ KiSuspendState = HaltingProcessors;
+
+ //
+ // Stop all other processors
+ //
+
+ Targets = KeActiveProcessors & (~1);
+ while (Targets) {
+
+ //
+ // Find processor in Targets
+ //
+
+ Waiting = Targets;
+ Affinity = Targets;
+ for (i=0; (Affinity & 1) == 0; Affinity >>=1, i++) ;
+
+ //
+ // Queue the DPC on the target processor's DPC queue
+ //
+
+ KeRaiseIrql (HIGH_LEVEL, &OldIrql2);
+ Prcb = KiProcessorBlock[i];
+ KiAcquireSpinLock (&Prcb->DpcLock);
+
+ Dpc.SystemArgument1 = (PVOID) &Targets;
+ Dpc.SystemArgument2 = (PVOID) &SpinLock;
+ InsertTailList(&Prcb->DpcListHead, &Dpc.DpcListEntry);
+ Prcb->DpcCount += 1;
+ Dpc.Lock = &Prcb->DpcLock;
+
+ KiReleaseSpinLock (&Prcb->DpcLock);
+ KeLowerIrql (OldIrql2);
+ KiRequestDispatchInterrupt(i);
+
+ //
+ // Wait for DPC to be processed. (The processor
+ // which runs it will clear its bit).
+ //
+
+ while (Waiting == Targets) ;
+ }
+
+ //
+ // Send all processors to HIGH_LEVEL
+ //
+
+ Targets = 0;
+ KiSuspendState = GoToHighLevel;
+ while ((UCHAR) Targets != KeNumberProcessors - 1);
+ KeRaiseIrql (HIGH_LEVEL, &OldIrql2);
+
+ //
+ // Adjust KeActiveProcessors to allow the kernel debugger to
+ // work without the other "suspended" processors, then tell
+ // the other processors to hibernate.
+ //
+
+ HoldActiveProcessors = KeActiveProcessors;
+ (volatile) KeActiveProcessors = 1;
+
+ //
+ // Hibernate all other processors
+ //
+
+ Targets = 0;
+ KiSuspendState = HibernateProcessors;
+ while ((UCHAR) Targets != KeNumberProcessors - 1);
+
+ //
+ // Ask HAL to suspend/hibernate system
+ //
+
+ Status = HalSuspendHibernateSystem (
+ ResumeTime,
+ (PHIBERNATE_CALLBACK) SystemCallback
+ );
+
+ //
+ // Wait for all other processors to return from Hibernation
+ //
+
+ Targets = 0;
+ KiSuspendState = UnHibernateProcessors;
+ while ((UCHAR) Targets != KeNumberProcessors - 1);
+
+ //
+ // If the suspend/hibernate was successful, notify the Power Manager of
+ // the resume which just occurred.
+ //
+
+ if (NT_SUCCESS(Status)) {
+ PoSystemResume ();
+ }
+
+ //
+ // Restore KeActiveProcessors, and let other processors continue
+ //
+
+ (volatile) KeActiveProcessors = HoldActiveProcessors;
+
+ Targets = 0;
+ KiSuspendState = ThawProcessors;
+ KeLowerIrql (OldIrql2);
+ while ((UCHAR) Targets != KeNumberProcessors - 1);
+
+ //
+ // Continue with normal operations
+ //
+
+ Targets = 0;
+ KiSuspendState = NormalOperation;
+ while ((UCHAR) Targets != KeNumberProcessors - 1);
+
+ KeLowerIrql (OldIrql);
+
+ //
+ // Set system affinity to previous value.
+ //
+
+ KeRevertToUserAffinityThread();
+ return Status;
+}
+
+
+VOID
+KiHibernateTargetProcessor (
+ IN PKDPC Dpc,
+ IN PVOID DeferredContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+{
+ PKPRCB Prcb;
+ KIRQL junkIrql;
+ PKAFFINITY Targets;
+ PULONG TargetCount;
+ ULONG CurrentState;
+
+
+ Prcb = KeGetCurrentPrcb ();
+ Targets = (PKAFFINITY) SystemArgument1;
+ TargetCount = (PULONG) SystemArgument1;
+ CurrentState = KiSuspendState;
+ ASSERT (CurrentState == HaltingProcessors);
+ ASSERT (KeGetCurrentIrql() == DISPATCH_LEVEL);
+
+ //
+ // Remove our bit from the target processors
+ //
+
+ *Targets &= ~Prcb->SetMember;
+ while (CurrentState != NormalOperation) {
+
+ //
+ // Wait for state to change
+ //
+
+ while (CurrentState == KiSuspendState);
+
+ //
+ // Enter new state
+ //
+
+ CurrentState = KiSuspendState;
+ switch (CurrentState) {
+ case GoToHighLevel:
+ //
+ // Raise to HIGH_LEVEL, and then signal complete
+ //
+
+ KeRaiseIrql (HIGH_LEVEL, &junkIrql);
+ InterlockedIncrement (TargetCount);
+ break;
+
+ case HibernateProcessors:
+ //
+ // Signal about to hibernate, then do it
+ //
+
+ InterlockedIncrement (TargetCount);
+ HalHibernateProcessor ();
+ break;
+
+ case UnHibernateProcessors:
+ //
+ // Signal processor has returned from Hibernation
+ //
+
+ InterlockedIncrement (TargetCount);
+ break;
+
+ case ThawProcessors:
+ //
+ // Lower to DPC level, and signal when complete
+ //
+
+ KeLowerIrql (DISPATCH_LEVEL);
+ InterlockedIncrement (TargetCount);
+ break;
+
+ case NormalOperation:
+ //
+ // Signal that processor is being released
+ //
+
+ InterlockedIncrement (TargetCount);
+ break;
+
+ default:
+#if DBG
+ HalDisplayString ("KiHibernateTargetProcessor: bug\n");
+#endif
+ break;
+ }
+
+ }
+
+ ASSERT (KeGetCurrentIrql() == DISPATCH_LEVEL);
+}
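+
+//
+// Distilled sketch (illustrative only) of the rendezvous protocol used
+// between KeSuspendHibernateSystem and KiHibernateTargetProcessor above.
+// The SharedState and Acknowledged variables and the two routines below
+// are hypothetical: the controller publishes a new state and spins until
+// every target processor has acknowledged it, while each target spins
+// until the state changes, acts on it, and acknowledges with an
+// interlocked increment.
+//
+
+volatile LONG SharedState;
+volatile LONG Acknowledged;
+
+VOID
+ControllerStep (
+    IN LONG NewState,
+    IN LONG TargetProcessorCount
+    )
+{
+    //
+    // Reset the acknowledgement count before exposing the new state.
+    //
+
+    Acknowledged = 0;
+    SharedState = NewState;
+    while (Acknowledged != TargetProcessorCount) ;
+}
+
+VOID
+TargetStep (
+    IN LONG PreviousState
+    )
+{
+    //
+    // Spin until the controller publishes a new state, act on it, and
+    // then acknowledge.
+    //
+
+    while (SharedState == PreviousState) ;
+
+    // ... perform the action for SharedState here ...
+
+    InterlockedIncrement((PLONG)&Acknowledged);
+}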
+
+#endif // _PNP_POWER_
diff --git a/private/ntos/ke/tests/mipsflt/flpt.c b/private/ntos/ke/tests/mipsflt/flpt.c
new file mode 100644
index 000000000..036411d74
--- /dev/null
+++ b/private/ntos/ke/tests/mipsflt/flpt.c
@@ -0,0 +1,8333 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ flpt.c
+
+Abstract:
+
+ This module implements user mode IEEE floating point tests.
+
+Author:
+
+ David N. Cutler (davec) 20-Jun-1991
+
+Environment:
+
+ User mode only.
+
+Revision History:
+
+--*/
+
+#include "flpt.h"
+
+VOID
+main(
+ int argc,
+ char *argv[]
+ )
+
+{
+ //
+ // Announce start of floating point test.
+ //
+
+ printf("\nStart of floating point test\n");
+ Test1();
+ Test2();
+ Test3();
+ Test4();
+ Test5();
+ Test6();
+ Test7();
+ Test8();
+ Test9();
+ Test10();
+ Test11();
+ Test12();
+ Test13();
+ Test14();
+ Test15();
+ Test16();
+ Test17();
+ Test18();
+ Test19();
+ Test20();
+ Test21();
+ Test22();
+ Test23();
+ Test24();
+ Test25();
+ Test26();
+
+ //
+ // Announce end of floating point test.
+ //
+
+ printf("End of floating point test\n");
+ return;
+}
+
+VOID
+Test1 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 1 - Add single denormalized test.
+ //
+
+ Subtest = 0;
+ printf(" Test 1 - add/subtract single denormalized ...");
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x3ff,
+ 0x1,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x400)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1,
+ 0x7fff,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x8000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x440000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x40000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ 0x440000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x40000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x800000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x800000,
+ 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0xffffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AddSingle(EI | ROUND_TO_NEAREST,
+ 0x800000,
+ 0x3f800000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AddSingle(EO | ROUND_TO_NEAREST,
+ 0x7f000000,
+ 0x7f000000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AddSingle(EI | ROUND_TO_NEAREST,
+ 0x7f000000,
+ 0x7f000000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AddSingle(EI | EO | ROUND_TO_NEAREST,
+ 0x7f000000,
+ 0x7f000000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ 0x3ff,
+ SIGN | 0x1,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x400)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ 0x1,
+ SIGN | 0x7fff,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x8000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x440000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x40000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ SIGN | 0x440000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x40000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x800000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SubtractSingle(ROUND_TO_NEAREST,
+ 0x800000,
+ SIGN | 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0xffffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractSingle(EI | ROUND_TO_NEAREST,
+ 0x800000,
+ SIGN | 0x3f800000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractSingle(EO | ROUND_TO_NEAREST,
+ 0x7f000000,
+ SIGN | 0x7f000000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractSingle(EI | ROUND_TO_NEAREST,
+ 0x7f000000,
+ SIGN | 0x7f000000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractSingle(EI | EO | ROUND_TO_NEAREST,
+ 0x7f000000,
+ SIGN | 0x7f000000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 1.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 1 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test2 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 2 - Add single round to nearest test.
+ //
+
+ Subtest = 0;
+ printf(" Test 2 - add single round to nearest ...");
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7ffff8,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7ffff9,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7ffffa,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7ffffb,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7ffffc,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7ffffd,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7ffffe,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x1a00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x1a00000)) {
+ goto TestFailed;
+ }
+
+ Count = 0;
+ try {
+ Subtest += 1;
+ Fsr.Data = AddSingle(EI | ROUND_TO_NEAREST,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 2.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 2 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test3 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 3 - Add single round to zero test.
+ //
+
+ Subtest = 0;
+ printf(" Test 3 - add single round to zero ...");
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7ffff8,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7ffff9,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7ffffa,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7ffffb,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7ffffc,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7ffffd,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7ffffe,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Count = 0;
+ try {
+ Subtest += 1;
+ Fsr.Data = AddSingle(EI | ROUND_TO_ZERO,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 3.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 3 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test4 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 4 - Add single round to positive infinity test.
+ //
+
+ Subtest = 0;
+ printf(" Test 4 - add single round to positive infinity ...");
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7ffff8,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7ffff9,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7ffffa,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7ffffb,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7ffffc,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7ffffd,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != 0x1a00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7ffffe,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != 0x1a00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != 0x1a00000)) {
+ goto TestFailed;
+ }
+
+ Count = 0;
+ try {
+ Subtest += 1;
+ Fsr.Data = AddSingle(EI | ROUND_TO_PLUS_INFINITY,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffff8,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (SingleResult != (SIGN | 0x19ffffe))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffff9,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19ffffe))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffa,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19ffffe))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffb,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19ffffe))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffc,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffd,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffe,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 4.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 4 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test5 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 5 - Add single round to negative infinity test.
+ //
+
+ Subtest = 0;
+ printf(" Test 5 - add single round to negative infinity ...");
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffff8,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (SingleResult != (SIGN | 0x19ffffe))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffff9,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffa,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffb,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffc,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (SingleResult != (SIGN | 0x19fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffd,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x1a00000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7ffffe,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x1a00000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | 0x1800000,
+ SIGN | 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != (SIGN | 0x1a00000))) {
+ goto TestFailed;
+ }
+
+ Count = 0;
+ try {
+ Subtest += 1;
+ Fsr.Data = AddSingle(EI | ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7ffff8,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7ffff9,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7ffffa,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7ffffb,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != 0x19ffffe)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7ffffc,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7ffffd,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7ffffe,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_MINUS_INFINITY,
+ 0x1800000,
+ 0x7fffff,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != 0x19fffff)) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 5.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 5 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test6 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 6 - Add single infinity and NaN test.
+ //
+
+ Subtest = 0;
+ printf(" Test 6 - add single infinity and NaN ...");
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(FS | ROUND_TO_NEAREST,
+ 0x200000,
+ 0x200000,
+ &SingleResult);
+
+ if ((Fsr.Data != (FS | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SINGLE_SIGNAL_NAN_PREFIX,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN_PREFIX,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AddSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ try {
+ Fsr.Data = AddSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto TestFailed;
+ }
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AddSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AddSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AddSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 6.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 6 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test7 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 7 - Multiply test.
+ //
+
+ Subtest = 0;
+ printf(" Test 7 - multiply single ...");
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x0,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x0,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EV | ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ 0x0,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EV | ROUND_TO_NEAREST,
+ 0x0,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EV | ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ 0x0,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EV | ROUND_TO_NEAREST,
+ 0x0,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ try {
+ Fsr.Data = MultiplySingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto TestFailed;
+ }
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
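+    //
+    // 0x00400000 encodes the denormal value 2**-127 and 0x7f000000 encodes
+    // 2**127, so the following products are exactly +/-1.0 and no status
+    // bits should be set.
+    //
+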
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x7f000000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x3f800000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x7f000000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x3f800000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x7f000000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x3f800000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x7f000000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x3f800000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x400004,
+ 0x7f000001,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x3f800009)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x400004,
+ SIGN | 0x7f000001,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != (SIGN | 0x3f800009))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EI | ROUND_TO_NEAREST,
+ 0x400004,
+ 0x7f000001,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EI | ROUND_TO_NEAREST,
+ 0x400004,
+ SIGN | 0x7f000001,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
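+    //
+    // The product of two denormals (2**-127 each) underflows; round to
+    // nearest delivers a signed zero and the underflow (SU) and inexact
+    // (SI) bits should be set.
+    //
+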
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SU | SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SU | SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EI | ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EI | ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EU | ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EU | ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EU | EI | ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EU | EI | ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
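+    //
+    // Overflowed products are rounded according to the rounding mode:
+    // round to nearest yields infinity, round to zero yields the maximum
+    // finite value, and the directed modes yield infinity only when
+    // rounding away from zero.
+    //
+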
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_NEAREST,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_ZERO,
+ SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (SingleResult != SINGLE_MAXIMUM_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_ZERO,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (SingleResult != (SIGN | SINGLE_MAXIMUM_VALUE))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_ZERO,
+ SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (SingleResult != (SIGN | SINGLE_MAXIMUM_VALUE))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_ZERO,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (SingleResult != SINGLE_MAXIMUM_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_PLUS_INFINITY,
+ SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | SINGLE_MAXIMUM_VALUE))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_PLUS_INFINITY,
+ SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != (SIGN | SINGLE_MAXIMUM_VALUE))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_PLUS_INFINITY,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_MINUS_INFINITY,
+ SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != SINGLE_MAXIMUM_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_MINUS_INFINITY,
+ SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MultiplySingle(ROUND_TO_MINUS_INFINITY,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (SingleResult != SINGLE_MAXIMUM_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EI | ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EI | ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EO | ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EO | ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EO | EI | ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplySingle(EO | EI | ROUND_TO_NEAREST,
+ SINGLE_MAXIMUM_VALUE,
+ SIGN | SINGLE_MAXIMUM_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 7.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 7 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test8 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+    // Test 8 - Divide single test.
+ //
+
+ Subtest = 0;
+ printf(" Test 8 - divide single ...");
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x0,
+ 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x0,
+ SIGN | 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SIGN | 0x0,
+ SIGN | 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SIGN | 0x0,
+ 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
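+    //
+    // Infinity divided by zero is an exact infinity, whereas a finite
+    // nonzero value divided by zero sets the divide by zero (SZ, XZ) bits.
+    //
+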
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != (SZ | XZ | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ 0x0,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+    Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+                            SIGN | 0x3f800000,
+                            0x0,
+                            &SingleResult);
+
+ if ((Fsr.Data != (SZ | XZ | ROUND_TO_NEAREST)) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = DivideSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = DivideSingle(EV | ROUND_TO_NEAREST,
+ 0x0,
+ 0x0,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = DivideSingle(EZ | ROUND_TO_NEAREST,
+ 0x3f800000,
+ 0x0,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_DIVIDE_BY_ZERO) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ 0x3f800000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ try {
+ Fsr.Data = DivideSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto TestFailed;
+ }
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = DivideSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = DivideSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
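+    //
+    // A finite value divided by infinity yields an exact zero whose sign
+    // is the exclusive or of the operand signs.
+    //
+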
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x3f800000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x3f800000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x3f800000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x3f800000)) {
+ goto TestFailed;
+ }
+
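+    //
+    // Dividing by the denormal 2**-127 scales the dividend by 2**127, so
+    // 1.0 / 2**-127 is exactly 2**127 (0x7f000000) and 2.0 / 2**-127
+    // overflows.
+    //
+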
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x3f800000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x7f000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x40000000,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SO | SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = DivideSingle(ROUND_TO_NEAREST,
+ 0x3fffffff,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x7f7fffff)) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 8.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 8 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test9 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 9 - Compare single test.
+ //
+
+ Subtest = 0;
+ printf(" Test 9 - compare single ...");
+
+// ****** //
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ 0x0,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SIGN | 0x0,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x0);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+    Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+                               0x400000,
+                               SIGN | 0x0);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+// ****** //
+
+ Subtest += 1;
+ Fsr.Data = CompareEqSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareEqSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareEqSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareEqSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ MINUS_SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SIGN | 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x410000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ 0x200000,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SIGN | 0x410000,
+ SIGN | 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ SIGN | 0x200000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareFSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareUnSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
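+    //
+    // A quiet NaN operand makes the relation unordered; the unordered
+    // predicates (Un, Ueq, Ult) report true in that case, Eq and Olt
+    // report false, and none of them should signal an invalid operation
+    // for a quiet NaN.
+    //
+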
+ Subtest += 1;
+ Fsr.Data = CompareUnSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ 0x400000);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareEqSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareEqSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareUeqSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareUeqSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareOltSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SINGLE_QUIET_NAN);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareOltSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareUltSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareUltSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareOleSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareOleSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
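+    //
+    // The Sf, Ngle, Seq, Ngl, Lt, Nge, Le, and Ngt predicates signal an
+    // invalid operation (SV, XV) whenever an operand is a NaN, even a
+    // quiet one.
+    //
+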
+ Subtest += 1;
+ Fsr.Data = CompareSfSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareSfSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_QUIET_NAN);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNgleSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNgleSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareSeqSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareSeqSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNglSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNglSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLtSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNgeSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNgeSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareLeSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNgtSingle(ROUND_TO_NEAREST,
+ 0x410000,
+ 0x400000);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = CompareNgtSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareSfSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ SINGLE_QUIET_NAN);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNgleSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareSeqSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNglSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareLtSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNgeSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareLeSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNgtSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = CompareEqSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ SINGLE_INFINITY_VALUE);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 9.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 9 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx\n", Subtest, Fsr.Data);
+ return;
+}
+
+VOID
+Test10 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+ ULONG SingleResult;
+
+ //
+ // Test 10 - Absolute, move, and negate single test.
+ //
+
+ Subtest = 0;
+ printf(" Test 10 - absolute, move, and negate single ...");
+ Subtest += 1;
+ Fsr.Data = AbsoluteSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x400000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AbsoluteSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x400000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AbsoluteSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = AbsoluteSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = AbsoluteSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
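+    //
+    // Move copies the operand without interpreting it, so even a signaling
+    // NaN passes through unchanged and no exception is raised; absolute
+    // value and negate treat a signaling NaN as an invalid operation.
+    //
+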
+ Subtest += 1;
+ Fsr.Data = MoveSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x400000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MoveSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x400000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MoveSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MoveSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_SIGNAL_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = MoveSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_SIGNAL_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = NegateSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (SIGN | 0x400000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = NegateSingle(ROUND_TO_NEAREST,
+ SIGN | 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x400000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = NegateSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = NegateSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = NegateSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 10.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 10 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
+
+VOID
+Test11 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ ULARGE_INTEGER DoubleResult;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+    // Test 11 - Add/subtract double denormalized test.
+ //
+
+ Subtest = 0;
+ printf(" Test 11 - add/subtract double denormalized ...");
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x3ff;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = 0x1;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x400) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x1;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x7fff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x8000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x84000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x4000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x84000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x4000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x100000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x100000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x1fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x1600000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x40000000) ||
+ (DoubleResult.HighPart != 0x1600000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x2600000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x4000) ||
+ (DoubleResult.HighPart != 0x2600000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3f000000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x3f000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x100000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x7fe00000;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EO | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x7fe00000;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x7fe00000;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EI | EO | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x3ff;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = 0x1;
+ DoubleOperand2.HighPart = SIGN;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x400) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x1;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x7fff;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x8000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x84000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x4000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x84000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x4000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x100000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x100000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x1fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x1600000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x40000000) ||
+ (DoubleResult.HighPart != 0x1600000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x2600000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x4000) ||
+ (DoubleResult.HighPart != 0x2600000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x3f000000;
+ Fsr.Data = SubtractDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x3f000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x3ff00000;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x7fe00000;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractDouble(EO | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x7fe00000;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x7fe00000;
+ Count = 0;
+ try {
+ Fsr.Data = SubtractDouble(EI | EO | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 11.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 11 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test12 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ ULARGE_INTEGER DoubleResult;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 12 - Add double round to nearest test.
+ //
+
+ Subtest = 0;
+ printf(" Test 12 - add double round to nearest ...");
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff8;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff9;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffa;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffb;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffc;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffd;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffe;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x340000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x340000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = 0xfffff;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 12.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 12 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test13 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ ULARGE_INTEGER DoubleResult;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 13 - Add double round to zero test.
+ //
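+    // Same operand pattern as test 12, but rounding toward zero: bits
+    // discarded during alignment truncate the magnitude rather than
+    // rounding it up, and the inexact sticky bit (SI) is still expected
+    // whenever the result is inexact.  The last subtest enables the
+    // inexact trap (EI) and expects STATUS_FLOAT_INEXACT_RESULT.
+    //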
+
+ Subtest = 0;
+ printf(" Test 13 - add double round to zero ...");
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff8;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff9;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffa;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffb;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffc;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffd;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffe;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = 0xfffff;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EI | ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 13.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 13 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test14 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+    ULARGE_INTEGER DoubleResult;
+    FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 14 - Add double round to positive infinity test.
+ //
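+    // Same operand pattern again, now rounding toward plus infinity: bits
+    // discarded from a positive sum round the magnitude up, and the
+    // second group of subtests negates both operands to verify that a
+    // negative sum truncates toward zero instead.  One subtest enables
+    // the inexact trap (EI) and expects STATUS_FLOAT_INEXACT_RESULT.
+    //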
+
+ Subtest = 0;
+ printf(" Test 14 - add double round to positive infinity ...");
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff8;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff9;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffa;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffb;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffc;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffd;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x340000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffe;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x340000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x340000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = 0xfffff;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EI | ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff8;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff9;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffa;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffb;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffc;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_PLUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffd;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffe;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 14.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 14 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test15 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ ULARGE_INTEGER DoubleResult;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 15 - Add double round to negative infinity test.
+ //
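+    // Mirror image of test 14: rounding toward minus infinity rounds the
+    // magnitude of a negative sum up when bits are discarded, while the
+    // positive sums in the second group of subtests are expected to
+    // truncate.  One subtest enables the inexact trap (EI) and expects
+    // STATUS_FLOAT_INEXACT_RESULT.
+    //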
+
+ Subtest = 0;
+ printf(" Test 15 - add double round to negative infinity ...");
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff8;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff9;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffa;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffb;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffc;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != (SIGN | 0x33ffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffd;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x340000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffe;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x340000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x340000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = SIGN | 0xfffff;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EI | ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff8;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffff9;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffa;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffb;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xfffffffe) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffc;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_MINUS_INFINITY) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffd;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xfffffffe;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x300000;
+ DoubleOperand2.LowPart = 0xffffffff;
+ DoubleOperand2.HighPart = 0xfffff;
+ Fsr.Data = AddDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x33ffff)) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 15.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 15 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test16 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ ULARGE_INTEGER DoubleResult;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 16 - Add double infinity and NaN test.
+ //
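+    // These subtests cover the special cases of double addition: with the
+    // FS bit set a denormalized sum is expected to be flushed to zero;
+    // signaling NaN operands and the sum of opposite-signed infinities
+    // record an invalid operation and produce the default quiet NaN; an
+    // infinity added to a finite value (or to an infinity of the same
+    // sign) returns that infinity; and quiet NaN operands propagate
+    // unchanged.  The trap-enabled subtests check that EV raises
+    // STATUS_FLOAT_INVALID_OPERATION for the signaling NaN and
+    // infinity-minus-infinity cases, but not for quiet NaN inputs.
+    //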
+
+ Subtest = 0;
+ printf(" Test 16 - add double infinity and NaN ...");
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x40000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x40000;
+ Fsr.Data = AddDouble(FS | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (FS | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN_PREFIX;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN_PREFIX;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ try {
+ Fsr.Data = AddDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto TestFailed;
+ }
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Count = 0;
+ try {
+ Fsr.Data = AddDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 16.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 16 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test17 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ ULARGE_INTEGER DoubleResult;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 17 - Multiply double test.
+ //
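+    // These subtests cover multiplication of the special values and the
+    // extremes of the double format: products of infinities carry the
+    // sign of the product; infinity times zero is an invalid operation
+    // that yields the default quiet NaN; NaN operands behave as in test
+    // 16; the product of two denormals underflows to zero; and the
+    // product of two maximum-magnitude values overflows, with the
+    // delivered result depending on the rounding mode.
+    //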
+
+ Subtest = 0;
+ printf(" Test 17 - multiply double ...");
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ try {
+ Fsr.Data = MultiplyDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto TestFailed;
+ }
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
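+    //
+    // The next several subtests multiply a value near 2**1023 by a
+    // denormalized value near 2**-1023, so the products land near 1.0;
+    // this checks that a denormalized significand is normalized correctly
+    // during multiplication and that the sign of the product is computed
+    // properly.  Two variants enable the inexact trap (EI) with a product
+    // that cannot be represented exactly and expect
+    // STATUS_FLOAT_INEXACT_RESULT.
+    //
+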
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x3ff00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x7fe00000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x3ff00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x7fe00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x3ff00000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x7fe00000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x3ff00000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80008;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x7fe00001;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x10000) ||
+ (DoubleResult.HighPart != 0x3ff00011)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80008;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x7fe00001;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x10000) ||
+ (DoubleResult.HighPart != (SIGN | 0x3ff00011))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80008;
+ DoubleOperand2.LowPart = 0x1;
+ DoubleOperand2.HighPart = 0x7fe00001;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80008;
+ DoubleOperand2.LowPart = 0x1;
+ DoubleOperand2.HighPart = SIGN | 0x7fe00001;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
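+    //
+    // The product of two denormalized operands (each about 2**-1023) is
+    // far below the smallest representable denormal, so it is expected to
+    // round to a signed zero with the underflow and inexact sticky bits
+    // (SU | SI) set.  The trap-enabled variants that follow expect
+    // STATUS_FLOAT_INEXACT_RESULT when only EI is enabled and
+    // STATUS_FLOAT_UNDERFLOW when EU is enabled.
+    //
+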
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SU | SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SU | SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EU | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EU | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EU | EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EU | EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
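+    //
+    // The following subtests multiply two maximum-magnitude finite values
+    // so the product overflows.  The delivered result depends on the
+    // rounding mode: round to nearest produces an infinity of the proper
+    // sign, round to zero produces the maximum finite value, and the
+    // directed modes produce infinity when rounding away from zero and
+    // the maximum finite value when rounding toward it.  The overflow and
+    // inexact sticky and cause bits (SO | SI | XO | XI) are expected in
+    // every case.
+    //
+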
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_MAXIMUM_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != (SIGN | DOUBLE_MAXIMUM_VALUE_HIGH))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != (SIGN | DOUBLE_MAXIMUM_VALUE_HIGH))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_ZERO,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_ZERO)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_MAXIMUM_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != (SIGN | DOUBLE_MAXIMUM_VALUE_HIGH))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != (SIGN | DOUBLE_MAXIMUM_VALUE_HIGH))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_PLUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_PLUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_MAXIMUM_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Fsr.Data = MultiplyDouble(ROUND_TO_MINUS_INFINITY,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | XO | XI | ROUND_TO_MINUS_INFINITY)) ||
+ (DoubleResult.LowPart != DOUBLE_MAXIMUM_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_MAXIMUM_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
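+    //
+    // The remaining subtests enable the inexact and overflow traps and
+    // verify that multiplying maximum magnitude doubles raises the
+    // corresponding exception, with overflow taking precedence when
+    // both traps are enabled.
+    //
+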
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EO | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EO | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EO | EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_MAXIMUM_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_MAXIMUM_VALUE_LOW;
+ DoubleOperand2.HighPart = SIGN | DOUBLE_MAXIMUM_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = MultiplyDouble(EO | EI | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 17.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 17 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test18 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ ULARGE_INTEGER DoubleResult;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 18 - Divide double test.
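+    //
+    // The subtests below exercise the special cases of double division:
+    // infinity/infinity and 0/0 raise invalid operation and deliver a
+    // quiet NaN, a finite nonzero dividend divided by zero raises
+    // divide by zero and delivers a signed infinity, quiet NaN operands
+    // propagate without a trap, signaling NaN operands raise invalid
+    // operation, and quotients that overflow set the overflow and
+    // inexact flags.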
+ //
+
+ Subtest = 0;
+ printf(" Test 18 - divide double ...");
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x0;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x0;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SZ | XZ | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x3ff00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SZ | XZ | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
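+    //
+    // The next three subtests enable the corresponding trap bits and
+    // verify that invalid operation and divide by zero are delivered as
+    // exceptions rather than recorded as sticky flags.
+    //
+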
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = DivideDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Count = 0;
+ try {
+ Fsr.Data = DivideDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Count = 0;
+ try {
+ Fsr.Data = DivideDouble(EZ | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_DIVIDE_BY_ZERO) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x3ff00000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ try {
+ Fsr.Data = DivideDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ goto TestFailed;
+ }
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_SIGNAL_NAN;
+ Count = 0;
+ try {
+ Fsr.Data = DivideDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Count = 0;
+ try {
+ Fsr.Data = DivideDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count != 1) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x3ff00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x3ff00000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x3ff00000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x3ff00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x3ff00000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x7fe00000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x40000000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SO | SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0xffffffff;
+ DoubleOperand1.HighPart = 0x3fffffff;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = DivideDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0xffffffff) ||
+ (DoubleResult.HighPart != 0x7fefffff)) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 18.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 18 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test19 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ ULARGE_INTEGER DoubleOperand1;
+ ULARGE_INTEGER DoubleOperand2;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+
+ //
+ // Test 19 - Compare double test.
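+    //
+    // The subtests below exercise the compare predicates.  The condition
+    // bit is set when the relation holds.  The quiet predicates (F, UN,
+    // EQ, UEQ, OLT, ULT, and OLE) treat quiet NaN operands as unordered
+    // without raising a flag, while the signaling predicates (SF, NGLE,
+    // SEQ, NGL, LT, NGE, LE, and NGT) raise invalid operation whenever
+    // either operand is a NaN.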
+ //
+
+ Subtest = 0;
+ printf(" Test 19 - compare double ...");
+
+// ****** //
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x0;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x0;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x0;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x0;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+// ****** //
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareEqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareEqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareEqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = CompareEqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x1000;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x81000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x40000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x1000;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = SIGN | 0x40000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = SIGN | 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareFDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareUnDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareUnDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareEqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareEqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareUeqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareUeqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = CompareOltDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareOltDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x80000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareUltDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareUltDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareOleDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareOleDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareSfDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = CompareSfDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareNgleDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareNgleDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareSeqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareSeqDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareNglDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareNglDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareLtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareNgeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareNgeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareLeDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = 0x0;
+ DoubleOperand1.HighPart = 0x81000;
+ DoubleOperand2.LowPart = 0x0;
+ DoubleOperand2.HighPart = 0x80000;
+ Fsr.Data = CompareNgtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != ROUND_TO_NEAREST) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = CompareNgtDouble(ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ if (Fsr.Data != (CC | SV | XV | ROUND_TO_NEAREST)) {
+ goto TestFailed;
+ }
+
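+    //
+    // The remaining subtests enable the invalid operation trap and
+    // verify that each signaling predicate raises
+    // STATUS_FLOAT_INVALID_OPERATION for a quiet NaN operand, and that
+    // EQ raises it for a signaling NaN operand.
+    //
+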
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand1.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ DoubleOperand2.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand2.HighPart = DOUBLE_QUIET_NAN;
+ Count = 0;
+ try {
+ Fsr.Data = CompareSfDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNgleDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareSeqDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNglDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareLtDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNgeDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareLeDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_QUIET_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareNgtDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand1.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand1.HighPart = DOUBLE_SIGNAL_NAN;
+ DoubleOperand2.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand2.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Count = 0;
+ try {
+ Fsr.Data = CompareEqDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand1,
+ &DoubleOperand2);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 19.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 19 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx\n", Subtest, Fsr.Data);
+ return;
+}
diff --git a/private/ntos/ke/tests/mipsflt/flpt.cmd b/private/ntos/ke/tests/mipsflt/flpt.cmd
new file mode 100644
index 000000000..e98e9fc17
--- /dev/null
+++ b/private/ntos/ke/tests/mipsflt/flpt.cmd
@@ -0,0 +1 @@
+link32 -link -out:obj\mips\flpt.exe @flpt.rsp
diff --git a/private/ntos/ke/tests/mipsflt/flpt.h b/private/ntos/ke/tests/mipsflt/flpt.h
new file mode 100644
index 000000000..251cacdb2
--- /dev/null
+++ b/private/ntos/ke/tests/mipsflt/flpt.h
@@ -0,0 +1,631 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ flpt.h
+
+Abstract:
+
+ This module is the header file for the user mode IEEE floating point
+ tests.
+
+Author:
+
+ David N. Cutler (davec) 1-Jul-1991
+
+Environment:
+
+ User mode only.
+
+Revision History:
+
+--*/
+
+#include "stdio.h"
+#include "string.h"
+#include "ntos.h"
+
+//
+// Floating status register bits.
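+//
+// These appear to mirror the MIPS floating status register layout: the
+// S* values are the sticky flag bits, the E* values are the exception
+// enable bits, the X* values are the cause bits recorded by the last
+// operation, CC is the compare condition bit, and FS is the flush to
+// zero control.  (Assumed from the bit positions; the authoritative
+// definition is the FSR type included through ntos.h.)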
+//
+
+#define SI (1 << 2)
+#define SU (1 << 3)
+#define SO (1 << 4)
+#define SZ (1 << 5)
+#define SV (1 << 6)
+
+#define EI (1 << 7)
+#define EU (1 << 8)
+#define EO (1 << 9)
+#define EZ (1 << 10)
+#define EV (1 << 11)
+
+#define XI (1 << 12)
+#define XU (1 << 13)
+#define XO (1 << 14)
+#define XZ (1 << 15)
+#define XV (1 << 16)
+
+#define CC (1 << 23)
+
+#define FS (1 << 24)
+
+//
+// Define negative infinity.
+//
+
+#define MINUS_DOUBLE_INFINITY_VALUE (DOUBLE_INFINITY_VALUE_HIGH | (1 << 31))
+#define MINUS_SINGLE_INFINITY_VALUE (SINGLE_INFINITY_VALUE | (1 << 31))
+
+//
+// Define signaling NaN prefix values.
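+//
+// (These values appear to reflect the MIPS legacy NaN convention, in
+// which a NaN with the most significant fraction bit set is signaling,
+// the reverse of the convention used on most other architectures.)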
+//
+
+#define DOUBLE_SIGNAL_NAN_PREFIX 0x7ff80000
+#define SINGLE_SIGNAL_NAN_PREFIX 0x7fc00000
+
+//
+// Define sign bit.
+//
+
+#define SIGN (1 << 31)
+
+//
+// Define floating status union.
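+//
+// The union overlays the FSR bit-field view with a raw ULONG so the
+// tests can compare the whole register against the flag masks defined
+// above.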
+//
+
+typedef union _FLOATING_STATUS {
+ FSR Status;
+ ULONG Data;
+} FLOATING_STATUS, *PFLOATING_STATUS;
+
+//
+// Define procedure prototypes.
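+//
+// Each arithmetic, conversion, and compare routine takes the rounding
+// mode (optionally combined with exception enable bits) as its first
+// argument and returns the resulting floating status register value;
+// routines that produce a value also write it through their output
+// parameter.  The implementations are presumably supplied by the
+// module linked as flptx.obj in flpt.rsp.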
+//
+
+ULONG
+AddDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Addend1,
+ IN PULARGE_INTEGER Addend2,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+DivideDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Dividend,
+ IN PULARGE_INTEGER Divisor,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+MultiplyDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Multiplicand,
+ IN PULARGE_INTEGER Multiplier,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+SubtractDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Subtrahend,
+ IN PULARGE_INTEGER Minuend,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+AddSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Addend1,
+ IN ULONG Addend2,
+ OUT PULONG Result
+ );
+
+ULONG
+DivideSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Dividend,
+ IN ULONG Divisor,
+ OUT PULONG Result
+ );
+
+ULONG
+MultiplySingle (
+ IN ULONG RoundingMode,
+ IN ULONG Multiplicand,
+ IN ULONG Multiplier,
+ OUT PULONG Result
+ );
+
+ULONG
+SubtractSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Subtrahend,
+ IN ULONG Minuend,
+ OUT PULONG Result
+ );
+
+ULONG
+AbsoluteDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Operand,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+CeilToLongwordFromDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Source,
+ OUT PULONG Result
+ );
+
+ULONG
+CeilToLongwordFromSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Source,
+ OUT PULONG Result
+ );
+
+ULONG
+ConvertToDoubleFromSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Source,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+ConvertToLongwordFromDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Source,
+ OUT PULONG Result
+ );
+
+ULONG
+ConvertToLongwordFromSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Source,
+ OUT PULONG Result
+ );
+
+ULONG
+ConvertToSingleFromDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Source,
+ OUT PULONG Result
+ );
+
+ULONG
+CompareFDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareUnDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareEqDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareUeqDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareOltDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareUltDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareOleDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareUleDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareSfDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareNgleDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareSeqDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareNglDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareLtDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareNgeDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareLeDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareNgtDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Comparand1,
+ IN PULARGE_INTEGER Comparand2
+ );
+
+ULONG
+CompareFSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareUnSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareEqSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareUeqSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareOltSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareUltSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareOleSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareUleSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareSfSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareNgleSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareSeqSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareNglSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareLtSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareNgeSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareLeSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+CompareNgtSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Comparand1,
+ IN ULONG Comparand2
+ );
+
+ULONG
+FloorToLongwordFromDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Source,
+ OUT PULONG Result
+ );
+
+ULONG
+FloorToLongwordFromSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Source,
+ OUT PULONG Result
+ );
+
+ULONG
+MoveDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Operand,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+NegateDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Operand,
+ OUT PULARGE_INTEGER Result
+ );
+
+ULONG
+AbsoluteSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Operand,
+ OUT PULONG Result
+ );
+
+ULONG
+MoveSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Operand,
+ OUT PULONG Result
+ );
+
+ULONG
+NegateSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Operand,
+ OUT PULONG Result
+ );
+
+ULONG
+RoundToLongwordFromDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Source,
+ OUT PULONG Result
+ );
+
+ULONG
+RoundToLongwordFromSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Source,
+ OUT PULONG Result
+ );
+
+ULONG
+TruncateToLongwordFromDouble (
+ IN ULONG RoundingMode,
+ IN PULARGE_INTEGER Source,
+ OUT PULONG Result
+ );
+
+ULONG
+TruncateToLongwordFromSingle (
+ IN ULONG RoundingMode,
+ IN ULONG Source,
+ OUT PULONG Result
+ );
+
+VOID
+Test1 (
+ VOID
+ );
+
+VOID
+Test2 (
+ VOID
+ );
+
+VOID
+Test3 (
+ VOID
+ );
+
+VOID
+Test4 (
+ VOID
+ );
+
+VOID
+Test5 (
+ VOID
+ );
+
+VOID
+Test6 (
+ VOID
+ );
+
+VOID
+Test7 (
+ VOID
+ );
+
+VOID
+Test8 (
+ VOID
+ );
+
+VOID
+Test9 (
+ VOID
+ );
+
+VOID
+Test10 (
+ VOID
+ );
+
+VOID
+Test11 (
+ VOID
+ );
+
+VOID
+Test12 (
+ VOID
+ );
+
+VOID
+Test13 (
+ VOID
+ );
+
+VOID
+Test14 (
+ VOID
+ );
+
+VOID
+Test15 (
+ VOID
+ );
+
+VOID
+Test16 (
+ VOID
+ );
+
+VOID
+Test17 (
+ VOID
+ );
+
+VOID
+Test18 (
+ VOID
+ );
+
+VOID
+Test19 (
+ VOID
+ );
+
+VOID
+Test20 (
+ VOID
+ );
+
+VOID
+Test21 (
+ VOID
+ );
+
+VOID
+Test22 (
+ VOID
+ );
+
+VOID
+Test23 (
+ VOID
+ );
+
+VOID
+Test24 (
+ VOID
+ );
+
+VOID
+Test25 (
+ VOID
+ );
+
+VOID
+Test26 (
+ VOID
+ );
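+
+//
+// The test entry points declared above are presumably invoked from a main
+// routine in the companion flpt module (the response file names flpt.obj and
+// the entry point mainCRTStartup); the sketch below is illustrative only and
+// simply calls the tests in order.
+//
+//     #include "flpt.h"
+//
+//     int
+//     main (
+//         void
+//         )
+//     {
+//         Test1();
+//         Test2();
+//         ...
+//         Test26();
+//         return 0;
+//     }
+//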
diff --git a/private/ntos/ke/tests/mipsflt/flpt.rsp b/private/ntos/ke/tests/mipsflt/flpt.rsp
new file mode 100644
index 000000000..7fa1ba0b4
--- /dev/null
+++ b/private/ntos/ke/tests/mipsflt/flpt.rsp
@@ -0,0 +1,14 @@
+-machine:mips
+-gpsize:32
+-base:@f:\nt\public\sdk\lib\coffbase.txt,usermode
+-subsystem:console
+-entry:mainCRTStartup
+obj\mips\flpt.obj
+obj\mips\flpt2.obj
+obj\mips\flptx.obj
+f:\nt\public\sdk\lib\mips\small.lib
+f:\nt\public\sdk\lib\mips\user32.lib
+f:\nt\public\sdk\lib\mips\kernel32.lib
+f:\nt\public\sdk\lib\mips\advapi32.lib
+f:\nt\public\sdk\lib\mips\libc.lib
+f:\nt\public\sdk\lib\mips\ntdll.lib
diff --git a/private/ntos/ke/tests/mipsflt/flpt2.c b/private/ntos/ke/tests/mipsflt/flpt2.c
new file mode 100644
index 000000000..7bda5ce2f
--- /dev/null
+++ b/private/ntos/ke/tests/mipsflt/flpt2.c
@@ -0,0 +1,2167 @@
+/*++
+
+Copyright (c) 1991 Microsoft Corporation
+
+Module Name:
+
+ flpt2.c
+
+Abstract:
+
+ This module implements user mode IEEE floating point tests.
+
+Author:
+
+ David N. Cutler (davec) 1-Jul-1991
+
+Environment:
+
+ User mode only.
+
+Revision History:
+
+--*/
+
+#include "flpt.h"
+
+VOID
+Test20 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULARGE_INTEGER DoubleOperand;
+ ULARGE_INTEGER DoubleResult;
+ ULONG Subtest;
+
+ //
+ // Test 20 - Absolute, move, and negate double test.
+ //
+
+ Subtest = 0;
+ printf(" Test 20 - absolute, move, and negate double ...");
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ Fsr.Data = AbsoluteDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x80000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | 0x80000;
+ Fsr.Data = AbsoluteDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x80000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = AbsoluteDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = AbsoluteDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = AbsoluteDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ Fsr.Data = MoveDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x80000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | 0x80000;
+ Fsr.Data = MoveDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x80000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = MoveDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = MoveDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_SIGNAL_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = MoveDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != (EV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_SIGNAL_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ Fsr.Data = NegateDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x80000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | 0x80000;
+ Fsr.Data = NegateDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x80000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = NegateDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = NegateDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = NegateDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 20.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 20 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
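+
+//
+// The trap subtests above use a structured-exception-handling idiom that is
+// repeated throughout this module: an operation is invoked with the relevant
+// trap enable (EV here) ORed into the rounding mode argument, and the filter
+// expression claims the exception only when GetExceptionCode() matches the
+// expected status; any other exception continues the search. The sketch
+// below restates the idiom in isolation - the helper name ExpectInvalidTrap
+// is illustrative only and is not part of this module.
+//
+//     ULONG
+//     ExpectInvalidTrap (
+//         IN PULARGE_INTEGER Operand
+//         )
+//     {
+//         FLOATING_STATUS Fsr;
+//         ULARGE_INTEGER Result;
+//         ULONG Trapped = 0;
+//
+//         try {
+//             Fsr.Data = NegateDouble(EV | ROUND_TO_NEAREST,
+//                                     Operand,
+//                                     &Result);
+//
+//         } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+//                   EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+//             Trapped = 1;
+//         }
+//
+//         return Trapped;
+//     }
+//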
+
+VOID
+Test21 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULARGE_INTEGER DoubleOperand;
+ ULONG SingleResult;
+ ULONG Subtest;
+
+ //
+ // Test 21 - Convert to single.
+ //
+
+ Subtest = 0;
+ printf(" Test 21 - convert to single ...");
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != MINUS_SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = ConvertToSingleFromDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart =
+ ((DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS) << (52 - 32)) | 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (SingleResult != 0x7ffffc)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN |
+ ((DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS) << (52 - 32)) | 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (SingleResult != (SIGN | 0x7ffffc))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0xf0000000;
+ DoubleOperand.HighPart =
+ ((DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS) << (52 - 32)) | 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | SU | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x7fffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0xf0000000;
+ DoubleOperand.HighPart = SIGN |
+ ((DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS) << (52 - 32)) | 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | SU | ROUND_TO_ZERO)) ||
+ (SingleResult != (SIGN | 0x7fffff))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0xf0000000;
+ DoubleOperand.HighPart = SIGN |
+ ((DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS) << (52 - 32)) | 0xfffff;
+ try {
+ Fsr.Data = ConvertToSingleFromDouble(EU | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0xf0000000;
+ DoubleOperand.HighPart = SIGN |
+ ((DOUBLE_EXPONENT_BIAS - SINGLE_EXPONENT_BIAS) << (52 - 32)) | 0xfffff;
+ try {
+ Fsr.Data = ConvertToSingleFromDouble(EI | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | SU | ROUND_TO_ZERO)) ||
+ (SingleResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | SU | ROUND_TO_ZERO)) ||
+ (SingleResult != (SIGN | 0x0))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0xfffff;
+ try {
+ Fsr.Data = ConvertToSingleFromDouble(EU | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_UNDERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0xfffff;
+ try {
+ Fsr.Data = ConvertToSingleFromDouble(EI | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS +
+ SINGLE_EXPONENT_BIAS + 1) << (52 - 32)) | 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | SO | XI | XO | ROUND_TO_ZERO)) ||
+ (SingleResult != SINGLE_MAXIMUM_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | ((DOUBLE_EXPONENT_BIAS +
+ SINGLE_EXPONENT_BIAS + 1) << (52 - 32)) | 0xfffff;
+ Fsr.Data = ConvertToSingleFromDouble(ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ if ((Fsr.Data != (SI | SO | XI | XO | ROUND_TO_ZERO)) ||
+ (SingleResult != (SIGN | SINGLE_MAXIMUM_VALUE))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS +
+ SINGLE_EXPONENT_BIAS + 1) << (52 - 32)) | 0xfffff;
+ try {
+ Fsr.Data = ConvertToSingleFromDouble(EO | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS +
+ SINGLE_EXPONENT_BIAS + 1) << (52 - 32)) | 0xfffff;
+ try {
+ Fsr.Data = ConvertToSingleFromDouble(EI | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 21.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 21 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
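+
+//
+// The boundary operands in test 21 are built directly from the exponent
+// biases. Assuming the usual IEEE values (DOUBLE_EXPONENT_BIAS = 1023,
+// SINGLE_EXPONENT_BIAS = 127), the high word
+//
+//     ((1023 - 127) << 20) | 0xfffff
+//
+// encodes a biased double exponent of 896, i.e. an unbiased exponent of
+// -127, so the operand is 1.11111111111111111111 (binary) * 2^-127, just
+// below 2^-126, the smallest normal single. The conversion therefore yields
+// a single denormal, (2^21 - 1) * 2^-147 = (2^23 - 4) * 2^-149, whose
+// fraction field 0x7ffffc is exactly representable, so no status bits are
+// expected. Adding low-order bits (LowPart = 0xf0000000) makes the value
+// inexact in single precision, which is why those subtests expect the SI
+// and SU sticky bits (inexact and underflow, judging by their use elsewhere
+// in these tests) and, under truncation, the largest denormal fraction
+// 0x7fffff. Symmetrically, a biased exponent of 1023 + 127 + 1 encodes a
+// magnitude of 2^128, which overflows single precision and truncates to
+// SINGLE_MAXIMUM_VALUE.
+//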
+
+VOID
+Test22 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULARGE_INTEGER DoubleResult;
+ ULONG SingleOperand;
+ ULONG Subtest;
+
+ //
+ // Test 22 - Convert to double.
+ //
+
+ Subtest = 0;
+ printf(" Test 22 - convert to double ...");
+ Subtest += 1;
+ SingleOperand = SINGLE_QUIET_NAN;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SINGLE_SIGNAL_NAN;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SINGLE_INFINITY_VALUE;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = MINUS_SINGLE_INFINITY_VALUE;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != MINUS_DOUBLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = SINGLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = ConvertToDoubleFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = 0x400000;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_ZERO,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x38000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SIGN | 0x400000;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_ZERO,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x38000000))) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = 0x440000;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_ZERO,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x38010000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SIGN | 0x440000;
+ Fsr.Data = ConvertToDoubleFromSingle(ROUND_TO_ZERO,
+ SingleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_ZERO) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != (SIGN | 0x38010000))) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 22.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 22 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
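+
+//
+// The denormal subtests in test 22 can be checked by hand. Assuming the
+// usual IEEE biases (1023 for double, 127 for single), the single operand
+// 0x00400000 is a denormal whose value is 2^22 * 2^-149 = 2^-127; as a
+// double that value is normal, with biased exponent 1023 - 127 = 896 =
+// 0x380 and a zero fraction, giving the expected bit pattern 0x38000000,
+// 0x00000000. Likewise 0x00440000 = (2^22 + 2^18) * 2^-149 =
+// 1.0001 (binary) * 2^-127, whose leading fraction bits land in bits 19:16
+// of the high word, giving 0x38010000, 0x00000000. Every single-precision
+// value is exactly representable in double precision, so no status bits
+// are expected for these conversions.
+//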
+
+VOID
+Test23 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG LongwordResult;
+ ULONG SingleOperand;
+ ULONG Subtest;
+
+ //
+ // Test 23 - Convert to longword from single.
+ //
+
+ Subtest = 0;
+ printf(" Test 23 - convert to longword from single ...");
+ Subtest += 1;
+ SingleOperand = SINGLE_QUIET_NAN;
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SINGLE_SIGNAL_NAN;
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SINGLE_INFINITY_VALUE;
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x7fffffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = MINUS_SINGLE_INFINITY_VALUE;
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = SINGLE_QUIET_NAN;
+ try {
+ Fsr.Data = ConvertToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = SINGLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = ConvertToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = SINGLE_INFINITY_VALUE;
+ try {
+ Fsr.Data = ConvertToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = MINUS_SINGLE_INFINITY_VALUE;
+ try {
+ Fsr.Data = ConvertToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = 0x400000;
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = 0x1;
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = 0x400000;
+ try {
+ Fsr.Data = ConvertToLongwordFromSingle(EI | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = ((SINGLE_EXPONENT_BIAS + 32) << 23);
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = ((SINGLE_EXPONENT_BIAS + 31) << 23);
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SIGN | ((SINGLE_EXPONENT_BIAS + 31) << 23);
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SIGN | ((SINGLE_EXPONENT_BIAS + 31) << 23) | 0x1;
+ Fsr.Data = ConvertToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = ((SINGLE_EXPONENT_BIAS + 31) << 23);
+ try {
+ Fsr.Data = ConvertToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SINGLE_QUIET_NAN;
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SINGLE_SIGNAL_NAN;
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SINGLE_INFINITY_VALUE;
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x7fffffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = MINUS_SINGLE_INFINITY_VALUE;
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = SINGLE_QUIET_NAN;
+ try {
+ Fsr.Data = RoundToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = SINGLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = RoundToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = SINGLE_INFINITY_VALUE;
+ try {
+ Fsr.Data = RoundToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = MINUS_SINGLE_INFINITY_VALUE;
+ try {
+ Fsr.Data = RoundToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = 0x400000;
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = 0x1;
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = 0x400000;
+ try {
+ Fsr.Data = RoundToLongwordFromSingle(EI | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = ((SINGLE_EXPONENT_BIAS + 32) << 23);
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = ((SINGLE_EXPONENT_BIAS + 31) << 23);
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SIGN | ((SINGLE_EXPONENT_BIAS + 31) << 23);
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ SingleOperand = SIGN | ((SINGLE_EXPONENT_BIAS + 31) << 23) | 0x1;
+ Fsr.Data = RoundToLongwordFromSingle(ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ SingleOperand = ((SINGLE_EXPONENT_BIAS + 31) << 23);
+ try {
+ Fsr.Data = RoundToLongwordFromSingle(EV | ROUND_TO_NEAREST,
+ SingleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 23.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 23 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ LongwordResult);
+
+ return;
+}
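+
+//
+// The 2^31 boundary subtests in test 23 follow directly from the format.
+// Assuming SINGLE_EXPONENT_BIAS = 127, the operand
+//
+//     (SINGLE_EXPONENT_BIAS + 31) << 23
+//
+// encodes +2^31 exactly, one past the largest signed longword 2^31 - 1, so
+// both the convert and round paths deliver INTEGER_NAN with the invalid
+// operation status. With SIGN set the value is exactly -2^31 = 0x80000000,
+// which is representable and converts cleanly, while setting the low
+// fraction bit as well (| 0x1) gives -(1 + 2^-23) * 2^31 = -(2^31 + 2^8),
+// which again lies outside the longword range and is treated as invalid.
+//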
+
+VOID
+Test24 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULARGE_INTEGER DoubleOperand;
+ ULONG LongwordResult;
+ ULONG Subtest;
+
+ //
+ // Test 24 - Convert to longword from double.
+ //
+
+ Subtest = 0;
+ printf(" Test 24 - convert to longword from double ...");
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x7fffffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x7fffffff)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ try {
+ Fsr.Data = ConvertToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = ConvertToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ try {
+ Fsr.Data = ConvertToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ try {
+ Fsr.Data = ConvertToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ try {
+ Fsr.Data = TruncateToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = TruncateToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ try {
+ Fsr.Data = TruncateToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ try {
+ Fsr.Data = TruncateToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x1;
+ DoubleOperand.HighPart = 0x0;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x1;
+ DoubleOperand.HighPart = 0x0;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SI | ROUND_TO_NEAREST)) ||
+ (LongwordResult != 0x0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ try {
+ Fsr.Data = ConvertToLongwordFromDouble(EI | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ try {
+ Fsr.Data = TruncateToLongwordFromDouble(EI | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INEXACT_RESULT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 32) << 20);
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 31) << 20);
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | ((DOUBLE_EXPONENT_BIAS + 31) << 20);
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | ((DOUBLE_EXPONENT_BIAS + 31) << 20) | 0x1;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 32) << 20);
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 31) << 20);
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | ((DOUBLE_EXPONENT_BIAS + 31) << 20);
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (LongwordResult != 0x80000000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | ((DOUBLE_EXPONENT_BIAS + 31) << 20) | 0x1;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | ((DOUBLE_EXPONENT_BIAS + 31) << 20) | 0x1;
+ try {
+ Fsr.Data = ConvertToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = SIGN | ((DOUBLE_EXPONENT_BIAS + 31) << 20) | 0x1;
+ try {
+ Fsr.Data = TruncateToLongwordFromDouble(EV | ROUND_TO_ZERO,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0xfff00000;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 30) << 20) | 0xfffff;
+ Fsr.Data = ConvertToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0xfff00000;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 30) << 20) | 0xfffff;
+ try {
+ Fsr.Data = ConvertToLongwordFromDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0xfff00000;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 30) << 20) | 0xfffff;
+ Fsr.Data = RoundToLongwordFromDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ if ((Fsr.Data != (SV | ROUND_TO_NEAREST)) ||
+ (LongwordResult != INTEGER_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = 0xfff00000;
+ DoubleOperand.HighPart = ((DOUBLE_EXPONENT_BIAS + 30) << 20) | 0xfffff;
+ try {
+ Fsr.Data = RoundToLongwordFromDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &LongwordResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ //
+ // End of test 24.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 24 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ LongwordResult);
+
+ return;
+}
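+
+//
+// The last group of subtests in test 24 exercises rounding at the longword
+// boundary. Assuming DOUBLE_EXPONENT_BIAS = 1023, the operand with high
+// word ((DOUBLE_EXPONENT_BIAS + 30) << 20) | 0xfffff and low word
+// 0xfff00000 has a 52-bit fraction of thirty-two ones followed by twenty
+// zeros, so its value is
+//
+//     (2 - 2^-32) * 2^30 = 2^31 - 0.25
+//
+// Under round-to-nearest this rounds up to 2^31, which does not fit in a
+// signed longword, so both ConvertToLongwordFromDouble (with
+// ROUND_TO_NEAREST in the status register) and RoundToLongwordFromDouble
+// report an invalid operation and produce INTEGER_NAN, even though the
+// truncated value 2^31 - 1 would have fit.
+//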
+
+VOID
+Test25 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULARGE_INTEGER DoubleOperand;
+ ULARGE_INTEGER DoubleResult;
+ ULONG Subtest;
+
+ //
+ // Test 25 - Square root double test.
+ //
+
+ Subtest = 0;
+ printf(" Test 25 - square root double ...");
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_QUIET_NAN;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ DoubleOperand.LowPart = DOUBLE_NAN_LOW;
+ DoubleOperand.HighPart = DOUBLE_SIGNAL_NAN;
+ try {
+ Fsr.Data = SquareRootDouble(EV | ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = DOUBLE_INFINITY_VALUE_HIGH;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != DOUBLE_INFINITY_VALUE_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_INFINITY_VALUE_HIGH)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+ DoubleOperand.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != DOUBLE_NAN_LOW) ||
+ (DoubleResult.HighPart != DOUBLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+// Subtest += 1;
+// Count = 0;
+// DoubleOperand.LowPart = DOUBLE_INFINITY_VALUE_LOW;
+// DoubleOperand.HighPart = MINUS_DOUBLE_INFINITY_VALUE;
+// try {
+// Fsr.Data = SquareRootDouble(EV | ROUND_TO_NEAREST,
+// &DoubleOperand,
+// &DoubleResult);
+//
+// } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+// EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+// Count += 1;
+// }
+//
+// if (Count == 0) {
+// goto TestFailed;
+// }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0;
+ DoubleOperand.HighPart = 0;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0) ||
+ (DoubleResult.HighPart != 0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0;
+ DoubleOperand.HighPart = 0 | SIGN;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0) ||
+ (DoubleResult.HighPart != (0 | SIGN))) {
+ goto TestFailed;
+ }
+
+// ****** //
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x40000;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x80000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x10000;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x40000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x4000;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x20000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x1000;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (DoubleResult.LowPart != 0x0) ||
+ (DoubleResult.HighPart != 0x10000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ DoubleOperand.LowPart = 0x0;
+ DoubleOperand.HighPart = 0x80000;
+ Fsr.Data = SquareRootDouble(ROUND_TO_NEAREST,
+ &DoubleOperand,
+ &DoubleResult);
+
+ if ((Fsr.Data != (SU | SI | ROUND_TO_NEAREST)) ||
+ (DoubleResult.LowPart != 0x333f9de6) ||
+ (DoubleResult.HighPart != 0xb504f)) {
+ goto TestFailed;
+ }
+
+// ****** //
+
+ //
+ // End of test 25.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 25 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx, %lx\n",
+ Subtest,
+ Fsr.Data,
+ DoubleResult.LowPart,
+ DoubleResult.HighPart);
+
+ return;
+}
+
+VOID
+Test26 (
+ VOID
+ )
+
+{
+
+ ULONG Count;
+ FLOATING_STATUS Fsr;
+ ULONG Subtest;
+ ULONG SingleResult;
+
+ //
+ // Test 26 - Square root single test.
+ //
+
+ Subtest = 0;
+ printf(" Test 26 - square root single ...");
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ SINGLE_QUIET_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Count = 0;
+ try {
+ Fsr.Data = SquareRootSingle(EV | ROUND_TO_NEAREST,
+ SINGLE_SIGNAL_NAN,
+ &SingleResult);
+
+ } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Count += 1;
+ }
+
+ if (Count == 0) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != SINGLE_INFINITY_VALUE)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ MINUS_SINGLE_INFINITY_VALUE,
+ &SingleResult);
+
+ if ((Fsr.Data != (SV | XV | ROUND_TO_NEAREST)) ||
+ (SingleResult != SINGLE_QUIET_NAN)) {
+ goto TestFailed;
+ }
+
+// Subtest += 1;
+// Count = 0;
+// try {
+// Fsr.Data = SquareRootSingle(EV | ROUND_TO_NEAREST,
+// MINUS_SINGLE_INFINITY_VALUE,
+// &SingleResult);
+//
+// } except ((GetExceptionCode() == STATUS_FLOAT_INVALID_OPERATION) ?
+// EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+// Count += 1;
+// }
+//
+// if (Count == 0) {
+// goto TestFailed;
+// }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ 0,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ 0 | SIGN,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != (0 | SIGN))) {
+ goto TestFailed;
+ }
+
+// ****** //
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ 0x200000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x400000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ 0x80000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x200000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ 0x20000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x100000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ 0x8000,
+ &SingleResult);
+
+ if ((Fsr.Data != ROUND_TO_NEAREST) ||
+ (SingleResult != 0x80000)) {
+ goto TestFailed;
+ }
+
+ Subtest += 1;
+ Fsr.Data = SquareRootSingle(ROUND_TO_NEAREST,
+ 0x400000,
+ &SingleResult);
+
+ if ((Fsr.Data != (SU | SI | ROUND_TO_NEAREST)) ||
+ (SingleResult != 0x5a8279)) {
+ goto TestFailed;
+ }
+
+// ****** //
+
+ //
+ // End of test 26.
+ //
+
+ printf("succeeded\n");
+ return;
+
+ //
+ // Test 26 failed.
+ //
+
+TestFailed:
+ printf(" subtest %d failed, fsr = %lx, result = %lx\n",
+ Subtest,
+ Fsr.Data,
+ SingleResult);
+
+ return;
+}
diff --git a/private/ntos/ke/tests/mipsflt/mips/flptx.s b/private/ntos/ke/tests/mipsflt/mips/flptx.s
new file mode 100644
index 000000000..66f118390
--- /dev/null
+++ b/private/ntos/ke/tests/mipsflt/mips/flptx.s
@@ -0,0 +1,1527 @@
+// TITLE("Floating Point Tests")
+//++
+//
+// Copyright (c) 1991 Microsoft Corporation
+//
+// Module Name:
+//
+// flptx.s
+//
+// Abstract:
+//
+// This module implements the floating point tests.
+//
+// Author:
+//
+// David N. Cutler (davec) 20-Jun-1991
+//
+// Environment:
+//
+// User mode only.
+//
+// Revision History:
+//
+//--
+
+#include "ksmips.h"
+
+ SBTTL("Add Double Test")
+//++
+//
+// ULONG
+// AddDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Addend1,
+// IN PULARGE_INTEGER Addend2,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the add double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the first addend value.
+// a2 - Supplies a pointer to the second addend value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(AddDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first addend value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second addend value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first operand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second operand value
+ mtc1 t3,f3 //
+ add.d f4,f0,f2 // form sum
+ mfc1 v0,f4 // get result value
+ mfc1 v1,f5 //
+ sw v0,0(a3) // store result value
+ sw v1,4(a3) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end AddDouble
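+
+// A C-language caller would invoke this routine as sketched below, following
+// the pattern used throughout flpt2.c for the other helpers; the operand
+// values are illustrative only and assume DOUBLE_EXPONENT_BIAS is the usual
+// IEEE bias of 1023, so the high word shown encodes 1.0.
+//
+//     FLOATING_STATUS Fsr;
+//     ULARGE_INTEGER Addend1;
+//     ULARGE_INTEGER Addend2;
+//     ULARGE_INTEGER Result;
+//
+//     Addend1.LowPart = 0x0;
+//     Addend1.HighPart = DOUBLE_EXPONENT_BIAS << 20;
+//     Addend2.LowPart = 0x0;
+//     Addend2.HighPart = DOUBLE_EXPONENT_BIAS << 20;
+//     Fsr.Data = AddDouble(ROUND_TO_NEAREST,
+//                          &Addend1,
+//                          &Addend2,
+//                          &Result);
+//
+// On return the result high word should be 0x40000000 (2.0) and Fsr.Data
+// should equal ROUND_TO_NEAREST with no sticky status bits set, since the
+// sum is exact.
+//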
+
+ SBTTL("Divide Double Test")
+//++
+//
+// ULONG
+// DivideDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Dividend,
+// IN PULARGE_INTEGER Divisor,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the divide double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the dividend value.
+// a2 - Supplies a pointer to the divisor value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(DivideDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get dividend value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get divisor value
+ lw t3,4(a2) //
+ mtc1 t0,f10 // set dividend value
+ mtc1 t1,f11 //
+ mtc1 t2,f12 // set divisor value
+ mtc1 t3,f13 //
+ div.d f14,f10,f12 // form quotient
+ mfc1 v0,f14 // get result value
+ mfc1 v1,f15 //
+ sw v0,0(a3) // store result value
+ sw v1,4(a3) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end DivideDouble
+
+ SBTTL("Multiply Double Test")
+//++
+//
+// ULONG
+// MultiplyDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Multiplicand,
+// IN PULARGE_INTEGER Multiplier,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the multiply double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the multiplicand value.
+// a2 - Supplies a pointer to the multiplier value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(MultiplyDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get multiplicand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get multiplier value
+ lw t3,4(a2) //
+ mtc1 t0,f18 // set multiplicand value
+ mtc1 t1,f19 //
+ mtc1 t2,f24 // set multiplier value
+ mtc1 t3,f25 //
+ mul.d f10,f18,f24 // form product
+ mfc1 v0,f10 // get result value
+ mfc1 v1,f11 //
+ sw v0,0(a3) // store result value
+ sw v1,4(a3) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end MultiplyDouble
+
+ SBTTL("Subtract Double Test")
+//++
+//
+// ULONG
+// SubtractDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Subtrahend,
+// IN PULARGE_INTEGER Minuend,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the subtract double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the subtrahend value.
+// a2 - Supplies a pointer to the minuend value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(SubtractDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get subtrahend value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get minuend value
+ lw t3,4(a2) //
+ mtc1 t0,f20 // set subtrahend value
+ mtc1 t1,f21 //
+ mtc1 t2,f22 // set minuend value
+ mtc1 t3,f23 //
+ sub.d f8,f20,f22 // form difference
+ mfc1 v0,f8 // get result value
+ mfc1 v1,f9 //
+ sw v0,0(a3) // store result value
+ sw v1,4(a3) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end SubtractDouble
+
+ SBTTL("Add Single Test")
+//++
+//
+// ULONG
+// AddSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Addend1,
+// IN ULONG Addend2,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the add single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the first addend value.
+// a2 - Supplies the second addend value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(AddSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ add.s f4,f0,f2 // form sum
+ mfc1 v0,f4 // get result value
+ sw v0,0(a3) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end AddSingle
+
+ SBTTL("Divide Single Test")
+//++
+//
+// ULONG
+// DivideSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Dividend,
+// IN ULONG Divisor,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the divide single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the dividend value.
+// a2 - Supplies the divisor value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(DivideSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f10 // set dividend value
+ mtc1 a2,f12 // set divisor value
+ div.s f14,f10,f12 // form quotient
+ mfc1 v0,f14 // get result value
+ sw v0,0(a3) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end DivideSingle
+
+ SBTTL("Multiply Single Test")
+//++
+//
+// ULONG
+// MultiplySingle (
+// IN ULONG RoundingMode,
+// IN ULONG Multiplicand,
+// IN ULONG Multiplier,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the multiply single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the multiplicand value.
+//    a2 - Supplies the multiplier value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(MultiplySingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f18 // set multiplicand value
+ mtc1 a2,f24 // set multiplier value
+ mul.s f10,f18,f24 // form product
+ mfc1 v0,f10 // get result value
+ sw v0,0(a3) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end MultiplySingle
+
+ SBTTL("Subtract Single Test")
+//++
+//
+// ULONG
+// SubtractSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Subtrahend,
+// IN ULONG Minuend,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the subtract single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the subtrahend value.
+// a2 - Supplies the minuend value.
+// a3 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(SubtractSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f20 // set subtrahend value
+ mtc1 a2,f22 // set minuend value
+ sub.s f8,f20,f22 // form difference
+ mfc1 v0,f8 // get result value
+ sw v0,0(a3) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end SubtractSingle
+
+ SBTTL("Convert To Double From Single")
+//++
+//
+// ULONG
+// ConvertToDoubleFromSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Source,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the convert to double from
+// single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(ConvertToDoubleFromSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set the source value
+ cvt.d.s f2,f0 // convert to double from single
+ mfc1 v0,f2 // get result value
+ mfc1 v1,f3 //
+ sw v0,0(a2) // store result value
+ sw v1,4(a2) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end ConvertToDoubleFromSingle
+
+ SBTTL("Convert To Longword From Double")
+//++
+//
+// ULONG
+// ConvertToLongwordFromDouble (
+// IN ULONG RoundingMode,
+//    IN PULARGE_INTEGER Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the convert to longword from
+// double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(ConvertToLongwordFromDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get the source value
+ lw t1,4(a1) //
+ mtc1 t0,f0 // set the source value
+ mtc1 t1,f1 //
+ cvt.w.d f2,f0 // convert to longword from double
+ mfc1 v0,f2 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end ConvertToLongwordFromDouble
+
+ SBTTL("Convert To Longword From Single")
+//++
+//
+// ULONG
+// ConvertToLongwordFromSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the convert to longword from
+// single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(ConvertToLongwordFromSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set the source value
+        cvt.w.s f2,f0                   // convert to longword from single
+ mfc1 v0,f2 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end ConvertToLongwordFromSingle
+
+ SBTTL("Convert To Single From Double")
+//++
+//
+// ULONG
+// ConvertToSingleFromDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the convert to single from
+// double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(ConvertToSingleFromDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get the source value
+ lw t1,4(a1) //
+ mtc1 t0,f0 // set the source value
+ mtc1 t1,f1 //
+ cvt.s.d f2,f0 // convert to single from double
+ mfc1 v0,f2 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end ConvertToSingleFromDouble
+
+ SBTTL("Compare Double Test")
+//++
+//
+// ULONG
+// CompareXyDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Comparand1,
+// IN PULARGE_INTEGER Comparand2
+// );
+//
+// Routine Description:
+//
+//    The following routines implement the compare double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the first comparand value.
+// a2 - Supplies a pointer to the second comparand value.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(CompareFDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.f.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareFDouble
+
+ LEAF_ENTRY(CompareUnDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.un.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUnDouble
+
+ LEAF_ENTRY(CompareEqDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.eq.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareEqDouble
+
+ LEAF_ENTRY(CompareUeqDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.ueq.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUeqDouble
+
+ LEAF_ENTRY(CompareOltDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.olt.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareOltDouble
+
+ LEAF_ENTRY(CompareUltDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.ult.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUltDouble
+
+ LEAF_ENTRY(CompareOleDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.ole.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareOleDouble
+
+ LEAF_ENTRY(CompareUleDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.ule.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUleDouble
+
+ LEAF_ENTRY(CompareSfDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.sf.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareSfDouble
+
+ LEAF_ENTRY(CompareNgleDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.ngle.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNgleDouble
+
+ LEAF_ENTRY(CompareSeqDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.seq.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareSeqDouble
+
+ LEAF_ENTRY(CompareNglDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.ngl.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNglDouble
+
+ LEAF_ENTRY(CompareLtDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.lt.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareLtDouble
+
+ LEAF_ENTRY(CompareNgeDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.nge.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNgeDouble
+
+ LEAF_ENTRY(CompareLeDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.le.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareLeDouble
+
+ LEAF_ENTRY(CompareNgtDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get first comparand value
+ lw t1,4(a1) //
+ lw t2,0(a2) // get second comparand value
+ lw t3,4(a2) //
+ mtc1 t0,f0 // set first comparand value
+ mtc1 t1,f1 //
+ mtc1 t2,f2 // set second comparand value
+ mtc1 t3,f3 //
+ c.ngt.d f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNgtDouble
+
+ SBTTL("Compare Single Test")
+//++
+//
+// ULONG
+// CompareXySingle (
+// IN ULONG RoundingMode,
+// IN ULONG Comparand1,
+// IN ULONG Comparand2
+// );
+//
+// Routine Description:
+//
+//    The following routines implement the compare single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the first comparand value.
+// a2 - Supplies the second comparand value.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(CompareFSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.f.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareFSingle
+
+ LEAF_ENTRY(CompareUnSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.un.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUnSingle
+
+ LEAF_ENTRY(CompareEqSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.eq.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareEqSingle
+
+ LEAF_ENTRY(CompareUeqSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.ueq.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUeqSingle
+
+ LEAF_ENTRY(CompareOltSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.olt.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareOltSingle
+
+ LEAF_ENTRY(CompareUltSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.ult.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUltSingle
+
+ LEAF_ENTRY(CompareOleSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.ole.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareOleSingle
+
+ LEAF_ENTRY(CompareUleSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.ule.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareUleSingle
+
+ LEAF_ENTRY(CompareSfSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.sf.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareSfSingle
+
+ LEAF_ENTRY(CompareNgleSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.ngle.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNgleSingle
+
+ LEAF_ENTRY(CompareSeqSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.seq.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareSeqSingle
+
+ LEAF_ENTRY(CompareNglSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.ngl.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNglSingle
+
+ LEAF_ENTRY(CompareLtSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.lt.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareLtSingle
+
+ LEAF_ENTRY(CompareNgeSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.nge.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNgeSingle
+
+ LEAF_ENTRY(CompareLeSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.le.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareLeSingle
+
+ LEAF_ENTRY(CompareNgtSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mtc1 a2,f2 // set second operand value
+ c.ngt.s f0,f2 // compare operands
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end CompareNgtSingle
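+
+//
+// Illustrative check of a compare result (a sketch, not part of this module):
+// the c.cond.s and c.cond.d instructions report their outcome in the
+// condition bit of the floating status word these routines return, assumed
+// here to be bit 23 of the FSR.
+//
+//     ULONG Fsr;
+//
+//     Fsr = CompareLtSingle(0, 0x3f800000, 0x40000000);   // 1.0f < 2.0f
+//     if ((Fsr & (1 << 23)) != 0) {
+//         // condition bit set, so the compare evaluated true
+//     }
+//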
+
+ SBTTL("Absolute Double Test")
+//++
+//
+// ULONG
+// AbsoluteDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Operand,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the absolute double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the operand value.
+// a2 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(AbsoluteDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get double operand value
+ lw t1,4(a1) //
+ mtc1 t0,f0 // set double operand value
+ mtc1 t1,f1 //
+ abs.d f4,f0 // form absolute value
+ mfc1 v0,f4 // get double result value
+ mfc1 v1,f5 //
+ sw v0,0(a2) // store double result value
+ sw v1,4(a2) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end AbsoluteDouble
+
+ SBTTL("Move Double Test")
+//++
+//
+// ULONG
+// MoveDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Operand,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the move double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the operand value.
+// a2 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(MoveDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get double operand value
+ lw t1,4(a1) //
+ mtc1 t0,f0 // set double operand value
+ mtc1 t1,f1 //
+ mov.d f4,f0 // move value
+ mfc1 v0,f4 // get double result value
+ mfc1 v1,f5 //
+ sw v0,0(a2) // store double result value
+ sw v1,4(a2) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end MoveDouble
+
+ SBTTL("Negate Double Test")
+//++
+//
+// ULONG
+// NegateDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Operand,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the negate double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the operand value.
+// a2 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(NegateDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get double operand value
+ lw t1,4(a1) //
+ mtc1 t0,f0 // set double operand value
+ mtc1 t1,f1 //
+ neg.d f4,f0 // form negative value
+ mfc1 v0,f4 // get double result value
+ mfc1 v1,f5 //
+ sw v0,0(a2) // store double result value
+ sw v1,4(a2) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end NegateDouble
+
+ SBTTL("Absolute Single Test")
+//++
+//
+// ULONG
+// AbsoluteSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Operand,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the absolute single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the operand value.
+// a2 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(AbsoluteSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ abs.s f4,f0 // form absolute value
+ mfc1 v0,f4 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end AbsoluteSingle
+
+ SBTTL("Move Single Test")
+//++
+//
+// ULONG
+// MoveSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Operand,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the move single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the operand value.
+// a2 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(MoveSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ mov.s f4,f0 // move value
+ mfc1 v0,f4 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end MoveSingle
+
+ SBTTL("Negate Single Test")
+//++
+//
+// ULONG
+// NegateSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Operand,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the negate single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the operand value.
+// a2 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(NegateSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set first operand value
+ neg.s f4,f0 // form negative value
+ mfc1 v0,f4 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end NegateSingle
+
+ SBTTL("Round To Longword From Double")
+//++
+//
+// ULONG
+// RoundToLongwordFromDouble (
+// IN ULONG RoundingMode,
+//    IN PULARGE_INTEGER Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the round to longword from double
+// test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(RoundToLongwordFromDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lw t0,0(a1) // get the source value
+ lw t1,4(a1) //
+ mtc1 t0,f0 // set the source value
+ mtc1 t1,f1 //
+        round.w.d f2,f0                 // round to longword from double
+ mfc1 v0,f2 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end RoundToLongwordFromDouble
+
+ SBTTL("Round To Longword From Single")
+//++
+//
+// ULONG
+// RoundToLongwordFromSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the round to longword from single
+// test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(RoundToLongwordFromSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set the source value
+        round.w.s f2,f0                 // round to longword from single
+ mfc1 v0,f2 // get result value
+ sw v0,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end RoundToLongwordFromSingle
+
+ SBTTL("Truncate To Longword From Double")
+//++
+//
+// ULONG
+// TruncateToLongwordFromDouble (
+// IN ULONG RoundingMode,
+//    IN PULARGE_INTEGER Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the truncate to longword from double
+// test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(TruncateToLongwordFromDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lwc1 f0,0(a1) // get the source value
+ lwc1 f1,4(a1) //
+        trunc.w.d f2,f0                 // truncate to longword from double
+ swc1 f2,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end TruncateToLongwordFromDouble
+
+ SBTTL("Truncate To Longword From Single")
+//++
+//
+// ULONG
+// TruncateToLongwordFromSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the truncate to longword from single
+// test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(TruncateToLongwordFromSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set the source value
+        trunc.w.s f2,f0                 // truncate to longword from single
+ swc1 f2,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end TruncateToLongwordFromSingle
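+
+//
+// Note: the longword conversion families above differ only in how the
+// fractional part is disposed of. cvt.w.* honors the rounding mode loaded
+// into the FSR, round.w.* always rounds to nearest, and trunc.w.* always
+// rounds toward zero, which is presumably what these variants are meant to
+// exercise.
+//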
+
+ SBTTL("Square Root Double Test")
+//++
+//
+// ULONG
+// SquareRootDouble (
+// IN ULONG RoundingMode,
+// IN PULARGE_INTEGER Operand,
+// OUT PULARGE_INTEGER Result
+// );
+//
+// Routine Description:
+//
+//    The following routine implements the square root double test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies a pointer to the operand value.
+// a2 - Supplies a pointer to a variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(SquareRootDouble)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ lwc1 f0,0(a1) // get double operand value
+ lwc1 f1,4(a1) //
+ sqrt.d f4,f0 // form square root double
+ swc1 f4,0(a2) // store double result value
+ swc1 f5,4(a2) //
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end SquareRootDouble
+
+        SBTTL("Square Root Single Test")
+//++
+//
+// ULONG
+// SquareRootSingle (
+// IN ULONG RoundingMode,
+// IN ULONG Source,
+// OUT PULONG Result
+// );
+//
+// Routine Description:
+//
+// The following routine implements the square root single test.
+//
+// Arguments:
+//
+// a0 - Supplies the rounding mode.
+// a1 - Supplies the source operand value.
+// a2 - Supplies a pointer to the variable that receives the result.
+//
+// Return Value:
+//
+// The resultant floating status is returned as the function value.
+//
+//--
+
+ LEAF_ENTRY(SquareRootSingle)
+
+ ctc1 a0,fsr // set rounding mode and enables
+ mtc1 a1,f0 // set the source value
+ sqrt.s f2,f0 // compute square root single
+ swc1 f2,0(a2) // store result value
+ cfc1 v0,fsr // get floating status
+ j ra // return
+
+ .end SquareRootSingle
diff --git a/private/ntos/ke/tests/x86div/i386/test.c b/private/ntos/ke/tests/x86div/i386/test.c
new file mode 100644
index 000000000..49821dc4d
--- /dev/null
+++ b/private/ntos/ke/tests/x86div/i386/test.c
@@ -0,0 +1,173 @@
+/*****************************************************************/
+/** Microsoft LAN Manager **/
+/** Copyright(c) Microsoft Corp., 1988-1991 **/
+/*****************************************************************/
+
+#include <stdio.h>
+#include <process.h>
+#include <setjmp.h>
+
+#include <time.h>
+
+#include <nt.h>
+#include <ntrtl.h>
+#include <nturtl.h>
+#include <windows.h>
+
+// declare BSS values - see what the generated assembly looks like
+
+CONTEXT RegContext;
+ULONG DefaultValue;
+ULONG TestCount;
+ULONG ExpectedException;
+
+extern ULONG DivOperand;
+extern ULONG DivRegPointer;
+extern LONG DivRegScaler;
+extern ULONG ExceptEip;
+extern ULONG ExceptEsp;
+extern ULONG TestTable[];
+extern ULONG TestTableCenter[];
+#define TESTTABLESIZE (128*sizeof(ULONG))
+
+extern TestDiv();
+
+BOOLEAN vInitialized;
+ULONG vZero = 0;
+ULONG vTwo = 0;
+ULONG vDivOk = 0x7f7f7f7f;
+
+
+VOID _CRTAPI1
+main (argc, argv)
+int argc;
+char *argv[];
+{
+
+    /***
+     * This program tests the kernel's MOD/RM & SIB decoding for
+     * a processor trap 0. The kernel needs to crack the MOD/RM & SIB
+     * bytes of the faulting div instruction to determine whether the
+     * exception is a divide-by-zero or an overflow exception.
+     */
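+
+    /***
+     * For example (an illustration only, not code that is executed here):
+     * given an instruction such as
+     *
+     *     div dword ptr [ebx*4 + esi + 1]
+     *
+     * the divisor lives in memory addressed through a SIB byte, so the
+     * trap handler must decode the MOD/RM and SIB bytes and read that
+     * operand to tell a zero divisor (STATUS_INTEGER_DIVIDE_BY_ZERO) from
+     * a nonzero one whose quotient overflowed (STATUS_INTEGER_OVERFLOW).
+     */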
+
+ try {
+ //
+ // Setup for divide by zero test
+ //
+
+ DivOperand = 0;
+ DivRegScaler = 0;
+        DivRegPointer = (ULONG)TestTableCenter;
+ DefaultValue = 0x01010101;
+ ExpectedException = STATUS_INTEGER_DIVIDE_BY_ZERO;
+
+ printf ("Begin divide by zero test\n");
+
+ for (DivRegScaler = -7; DivRegScaler < 7; DivRegScaler++) {
+ vInitialized = FALSE;
+ TestDiv ();
+ }
+
+ printf ("End divide by zero test\n\n");
+
+ //
+ // Setup for divide overflow test
+ //
+
+ DivOperand = 2;
+        DivRegPointer = (ULONG)TestTableCenter;
+ DefaultValue = 0;
+ ExpectedException = STATUS_INTEGER_OVERFLOW;
+
+ printf ("Begin divide overflow test\n");
+
+ for (DivRegScaler = -7; DivRegScaler < 7; DivRegScaler++) {
+ vInitialized = FALSE;
+ TestDiv ();
+ }
+ printf ("End divide overflow test\n\n");
+
+ } except (HandleException(GetExceptionInformation())) {
+        printf ("FAIL: in outer exception handler\n");
+ }
+
+    printf ("%ld variations run\n", TestCount);
+}
+
+HandleException (
+ IN PEXCEPTION_POINTERS ExceptionPointers
+ )
+{
+ ULONG i;
+ PUCHAR p;
+ PCONTEXT Context;
+ ULONG def;
+
+ switch (i = ExceptionPointers->ExceptionRecord->ExceptionCode) {
+    case 1:         // marker exception raised by DivMarker (see testa.asm)
+ Context = ExceptionPointers->ContextRecord;
+ Context->Eip = ExceptEip;
+ Context->Esp = ExceptEsp;
+
+ if (vInitialized) {
+ printf ("Divide failed - div instruction completed\n");
+ return EXCEPTION_CONTINUE_SEARCH; // to debugger
+ }
+ vInitialized = TRUE;
+ TestCount--;
+ // fall through...
+
+ case STATUS_INTEGER_OVERFLOW:
+ case STATUS_INTEGER_DIVIDE_BY_ZERO:
+ if (i != ExpectedException && i != 1) {
+ break;
+ }
+
+ TestCount++;
+
+ // set context
+ def = DefaultValue;
+ Context = ExceptionPointers->ContextRecord;
+ Context->Eax = def;
+ Context->Ebx = def;
+ Context->Ecx = def;
+ Context->Edx = def;
+ Context->Esi = def;
+ Context->Edi = def;
+ Context->Ebp = def;
+
+ // find next test
+ for (p = (PUCHAR) Context->Eip; ((PULONG) p)[0] != 0xCCCCCCCC; p++) ;
+ Context->Eip = (ULONG) (p + 4);
+
+        // clear the global test table
+ RtlFillMemoryUlong (TestTable, TESTTABLESIZE, def);
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+
+ printf ("\nFailed - unexpected exception code %lx (expected %lx)\n",
+ ExceptionPointers->ExceptionRecord->ExceptionCode,
+ ExpectedException
+ );
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+
+
+
+DivMarker()
+{
+ EXCEPTION_RECORD ExceptionRecord;
+
+ //
+ // Construct an exception record.
+ //
+
+ ExceptionRecord.ExceptionCode = 1;
+ ExceptionRecord.ExceptionRecord = (PEXCEPTION_RECORD)NULL;
+ ExceptionRecord.NumberParameters = 0;
+ ExceptionRecord.ExceptionFlags = 0;
+ RtlRaiseException(&ExceptionRecord);
+}
diff --git a/private/ntos/ke/tests/x86div/i386/testa.asm b/private/ntos/ke/tests/x86div/i386/testa.asm
new file mode 100644
index 000000000..ce50a18f8
--- /dev/null
+++ b/private/ntos/ke/tests/x86div/i386/testa.asm
@@ -0,0 +1,284 @@
+; Static Name Aliases
+;
+ TITLE testa.asm
+ NAME testa
+
+ .386p
+ include callconv.inc
+
+ EXTRNP _DivMarker,0
+
+_DATA SEGMENT DWORD USE32 PUBLIC 'DATA'
+
+ public _DivOperand, _DivRegPointer, _DivRegScaler, _ExceptEip, _ExceptEsp
+_DivOperand dd ?
+_DivRegPointer dd ?
+_DivRegScaler dd ?
+_ExceptEip dd ?
+_ExceptEsp dd ?
+
+ public _TestTable, _TestTableCenter
+_TestTable dd 64 dup (?)
+_TestTableCenter dd 64 dup (?)
+_DATA ENDS
+
+DivTest macro div,reg,type,labelmod
+ ; public &div&_&type&_&reg&labelmod - too many labels
+&div&_&type&_&reg&labelmod:
+endm
+
+endtest macro
+ call Marker
+        dd      0CCCCCCCCh              ; marker for exception
+ ; handler to find next test
+endm
+
+REGDiv macro type, reglist
+ irp reg,<reglist>
+ DivTest div, reg, type, _reg
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ mov reg, type ptr _DivOperand
+ div reg
+ endtest
+
+ DivTest idiv, reg, type, _reg
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ mov reg, type ptr _DivOperand
+ idiv reg
+ endtest
+ endm
+endm
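+
+; For reference, roughly what one expansion of the macro above generates (an
+; illustrative sketch, assuming REGDiv <byte>, <bl>); the div half comes out
+; approximately as:
+;
+;       div_byte_bl_reg:
+;               mov     eax, 7f7f7f7fh
+;               mov     edx, eax
+;               mov     bl, byte ptr _DivOperand
+;               div     bl
+;               call    Marker
+;               dd      0CCCCCCCCh      ; marker for the exception handler
+;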
+
+PTRDiv macro typelist
+ irp type,<typelist>
+
+ DivTest div, reg, type, _ptr
+ mov edx, _DivOperand
+ifidni <type>,<byte>
+ mov byte ptr _TestTableCenter, dl
+else
+ifidni <type>,<word>
+ mov word ptr _TestTableCenter, dx
+else
+ mov dword ptr _TestTableCenter, edx
+endif
+endif
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ div type ptr _TestTableCenter
+ endtest
+
+ DivTest idiv, reg, type, _ptr
+ mov edx, _DivOperand
+ifidni <type>,<byte>
+ mov byte ptr _TestTableCenter, dl
+else
+ifidni <type>,<word>
+ mov word ptr _TestTableCenter, dx
+else
+ mov dword ptr _TestTableCenter, edx
+endif
+endif
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ idiv type ptr _TestTableCenter
+ endtest
+ endm
+endm
+
+REGDivP1 macro type, offset, divpointer, labelmod, reglist
+ irp reg,<reglist>
+ DivTest div, reg, type, labelmod
+ mov eax, divpointer
+ mov edx, _DivOperand
+ifidni <type>,<byte>
+ mov byte ptr [eax + offset], dl
+else
+ifidni <type>,<word>
+ mov word ptr [eax + offset], dx
+else
+ mov dword ptr [eax + offset], edx
+endif
+endif
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ mov reg, divpointer
+ div type ptr [reg + offset]
+ endtest
+
+ DivTest idiv, reg, type, labelmod
+ mov eax, divpointer
+ mov edx, _DivOperand
+ifidni <type>,<byte>
+ mov byte ptr [eax + offset], dl
+else
+ifidni <type>,<word>
+ mov word ptr [eax + offset], dx
+else
+ mov dword ptr [eax + offset], edx
+endif
+endif
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ mov reg, divpointer
+ idiv type ptr [reg + offset]
+ endtest
+ endm
+endm
+
+REGDivSIB1 macro type, offset, divpointer, labelmod, toscale, scaler, reglist
+ irp reg,<reglist>
+ DivTest div, reg, type, labelmod
+ push ebx
+ mov eax, divpointer
+ mov edx, _DivOperand
+ mov ebx, _DivRegScaler
+ifidni <type>,<byte>
+ mov byte ptr [ebx * toscale + eax + offset], dl
+else
+ifidni <type>,<word>
+ mov word ptr [ebx * toscale + eax + offset], dx
+else
+ mov dword ptr [ebx * toscale + eax + offset], edx
+endif
+endif
+ pop ebx
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ mov reg, divpointer
+ mov scaler, _DivRegScaler
+ div type ptr [scaler * toscale + reg + offset]
+ endtest
+
+ DivTest idiv, reg, type, labelmod
+ push ebx
+ mov eax, divpointer
+ mov edx, _DivOperand
+ mov ebx, _DivRegScaler
+ifidni <type>,<byte>
+ mov byte ptr [ebx * toscale + eax + offset], dl
+else
+ifidni <type>,<word>
+ mov word ptr [ebx * toscale + eax + offset], dx
+else
+ mov dword ptr [ebx * toscale + eax + offset], edx
+endif
+endif
+ pop ebx
+ mov eax, 7f7f7f7fh
+ mov edx, eax
+ mov reg, divpointer
+ mov scaler, _DivRegScaler
+ idiv type ptr [scaler * toscale + reg + offset]
+ endtest
+ endm
+endm
+
+
+REGDivP macro typelist, reglist
+ irp type, <typelist>
+ REGDivP1 type, 0, _DivRegPointer, _d, <reglist>
+ REGDivP1 type, 1, _DivRegPointer, _p, <reglist>
+ REGDivP1 type, -1, _DivRegPointer, _m, <reglist>
+ REGDivP1 type, _TestTableCenter, 0, _rd, <reglist>
+ REGDivP1 type, _TestTableCenter, 1, _rp, <reglist>
+ REGDivP1 type, _TestTableCenter, -1, _rm, <reglist>
+ endm
+endm
+
+REGDivSIB macro typelist, scaler, reglist
+ irp type, <typelist>
+ REGDivSIB1 type, 0, _DivRegPointer, _&scaler&_d, 1, scaler, <reglist>
+ REGDivSIB1 type, 1, _DivRegPointer, _&scaler&_p, 1, scaler, <reglist>
+ REGDivSIB1 type, -1, _DivRegPointer, _&scaler&_m, 1, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 0, _&scaler&_rd, 1, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 1, _&scaler&_rp, 1, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, -1, _&scaler&_rm, 1, scaler, <reglist>
+ ;---
+ REGDivSIB1 type, 0, _DivRegPointer, _&scaler&_d2, 2, scaler, <reglist>
+ REGDivSIB1 type, 1, _DivRegPointer, _&scaler&_p2, 2, scaler, <reglist>
+ REGDivSIB1 type, -1, _DivRegPointer, _&scaler&_m2, 2, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 0, _&scaler&_r2d, 2, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 1, _&scaler&_r2p, 2, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, -1, _&scaler&_r2m, 2, scaler, <reglist>
+ ;---
+ REGDivSIB1 type, 0, _DivRegPointer, _&scaler&_d4, 4, scaler, <reglist>
+ REGDivSIB1 type, 1, _DivRegPointer, _&scaler&_p4, 4, scaler, <reglist>
+ REGDivSIB1 type, -1, _DivRegPointer, _&scaler&_m4, 4, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 0, _&scaler&_r4d, 4, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 1, _&scaler&_r4p, 4, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, -1, _&scaler&_r4m, 4, scaler, <reglist>
+ ;---
+ REGDivSIB1 type, 0, _DivRegPointer, _&scaler&_d8, 8, scaler, <reglist>
+ REGDivSIB1 type, 1, _DivRegPointer, _&scaler&_p8, 8, scaler, <reglist>
+ REGDivSIB1 type, -1, _DivRegPointer, _&scaler&_m8, 8, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 0, _&scaler&_r8d, 8, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, 1, _&scaler&_r8p, 8, scaler, <reglist>
+ REGDivSIB1 type, _TestTableCenter, -1, _&scaler&_r8m, 8, scaler, <reglist>
+ endm
+endm
+
+
+_TEXT SEGMENT DWORD PUBLIC USE32 'CODE' ; Start 32 bit code
+ ASSUME CS:FLAT, DS:FLAT, ES:FLAT, SS:NOTHING, FS:NOTHING, GS:NOTHING
+
+
+cPublicProc _TestDiv
+
+ ; save c runtime registers
+
+ push ebp
+ push ebx
+ push esi
+ push edi
+
+ ; prime outer loop with initial exception
+ endtest
+
+ ; start of div test
+
+ PTRDiv <byte, word, dword>
+
+ REGDiv <byte>, <bl,bh,cl,ch,dl,dh>
+ REGDiv <word>, <ax,bx,cx,si,di,bp>
+ REGDiv <dword>, <ebx,ecx,edx,esi,edi,ebp>
+
+ REGDivP <byte>, <ebx,ecx,edx,esi,edi,ebp>
+ REGDivP <word, dword>, <eax,ebx,ecx,esi,edi,ebp>
+
+ REGDivSIB <byte>, <ebx>, <ecx,edx,esi,edi,ebp>
+ REGDivSIB <byte>, <ecx>, <ebx,edx,esi,edi,ebp>
+ REGDivSIB <byte>, <edx>, <ebx,ecx,esi,edi,ebp>
+ REGDivSIB <byte>, <esi>, <ebx,ecx,edx,edi,ebp>
+ REGDivSIB <byte>, <edi>, <ebx,ecx,edx,esi,ebp>
+ REGDivSIB <byte>, <ebp>, <ebx,ecx,edx,edi,esi>
+
+ REGDivSIB <word, dword>, <eax>, <ebx,ecx,esi,edi,ebp>
+ REGDivSIB <word, dword>, <ebx>, <eax,ecx,esi,edi,ebp>
+ REGDivSIB <word, dword>, <ecx>, <eax,ebx,esi,edi,ebp>
+ REGDivSIB <word, dword>, <esi>, <eax,ebx,ecx,edi,ebp>
+ REGDivSIB <word, dword>, <edi>, <eax,ebx,ecx,esi,ebp>
+ REGDivSIB <word, dword>, <ebp>, <eax,ebx,ecx,edi,esi>
+
+ ; end of test
+
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+
+ stdRET _TestDiv
+stdENDP _TestDiv
+
+cPublicProc Marker
+ pop eax
+ mov _ExceptEip, eax
+ mov _ExceptEsp, esp
+ stdCall _DivMarker
+ int 3
+stdENDP Marker
+
+_TEXT ENDS
+END
diff --git a/private/ntos/ke/tests/x86div/makefile b/private/ntos/ke/tests/x86div/makefile
new file mode 100644
index 000000000..6ee4f43fa
--- /dev/null
+++ b/private/ntos/ke/tests/x86div/makefile
@@ -0,0 +1,6 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the components of NT OS/2
+#
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/private/ntos/ke/tests/x86div/sources b/private/ntos/ke/tests/x86div/sources
new file mode 100644
index 000000000..6a5a02ce1
--- /dev/null
+++ b/private/ntos/ke/tests/x86div/sources
@@ -0,0 +1,23 @@
+!IF 0
+Sources file for building the x86 divide decode test (test.lib and test.exe)
+
+Author: kenr
+!ENDIF
+
+MAJORCOMP=sdktools
+MINORCOMP=he
+
+TARGETNAME=test
+TARGETPATH=obj
+TARGETTYPE=LIBRARY
+
+INCLUDES=.
+
+SOURCES=
+
+i386_SOURCES=i386\test.c \
+ i386\testa.asm
+
+UMTYPE=console
+UMAPPL=test
+UMLIBS=\nt\public\sdk\lib\*\libcmt.lib \nt\public\sdk\lib\*\ntdll.lib obj\*\test.lib
diff --git a/private/ntos/ke/tests/xcphnd/makefile b/private/ntos/ke/tests/xcphnd/makefile
new file mode 100644
index 000000000..6ee4f43fa
--- /dev/null
+++ b/private/ntos/ke/tests/xcphnd/makefile
@@ -0,0 +1,6 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the components of NT OS/2
+#
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/private/ntos/ke/tests/xcphnd/sources b/private/ntos/ke/tests/xcphnd/sources
new file mode 100644
index 000000000..d42231993
--- /dev/null
+++ b/private/ntos/ke/tests/xcphnd/sources
@@ -0,0 +1,36 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+MAJORCOMP=ntos
+MINORCOMP=xcpt
+
+TARGETNAME=xcpt
+TARGETPATH=obj
+TARGETTYPE=PROGRAM
+
+SOURCES=xcpt4.c
+
+UMTYPE=console
+UMAPPL=xcpt4
+UMLIBS=$(BASEDIR)\public\sdk\lib\*\ntdll.lib
diff --git a/private/ntos/ke/tests/xcphnd/xcpt4.c b/private/ntos/ke/tests/xcphnd/xcpt4.c
new file mode 100644
index 000000000..affd0a39a
--- /dev/null
+++ b/private/ntos/ke/tests/xcphnd/xcpt4.c
@@ -0,0 +1,2400 @@
+/*++
+
+Copyright (c) 1990 Microsoft Corporation
+
+Module Name:
+
+ xcpt4.c
+
+Abstract:
+
+ This module implements user mode exception tests.
+
+Author:
+
+ David N. Cutler (davec) 18-Sep-1990
+
+Environment:
+
+    User mode only.
+
+Revision History:
+
+--*/
+
+#include "stdio.h"
+#include "nt.h"
+#include "ntrtl.h"
+#include "setjmpex.h"
+
+#include "float.h"
+
+
+
+//
+// Define switch constants.
+//
+
+#define BLUE 0
+#define RED 1
+
+//
+// Define function prototypes.
+//
+
+VOID
+addtwo (
+ IN LONG First,
+ IN LONG Second,
+ IN PLONG Place
+ );
+
+VOID
+bar1 (
+ IN NTSTATUS Status,
+ IN PLONG Counter
+ );
+
+VOID
+bar2 (
+ IN PLONG BlackHole,
+ IN PLONG BadAddress,
+ IN PLONG Counter
+ );
+
+VOID
+dojump (
+ IN jmp_buf JumpBuffer,
+ IN PLONG Counter
+ );
+
+LONG
+Echo(
+ IN LONG Value
+ );
+
+VOID
+eret (
+ IN NTSTATUS Status,
+ IN PLONG Counter
+ );
+
+VOID
+except1 (
+ IN PLONG Counter
+ );
+
+ULONG
+except2 (
+ IN PEXCEPTION_POINTERS ExceptionPointers,
+ IN PLONG Counter
+ );
+
+ULONG
+except3 (
+ IN PEXCEPTION_POINTERS ExceptionPointers,
+ IN PLONG Counter
+ );
+
+VOID
+foo1 (
+ IN NTSTATUS Status
+ );
+
+VOID
+foo2 (
+ IN PLONG BlackHole,
+ IN PLONG BadAddress
+ );
+
+VOID
+fret (
+ IN PLONG Counter
+ );
+
+BOOLEAN
+Tkm (
+ VOID
+ );
+
+VOID
+Test61Part2 (
+ IN OUT PULONG Counter
+ );
+
+VOID
+PerformFpTest(
+ VOID
+ );
+
+double
+SquareDouble (
+ IN double op
+ );
+
+VOID
+SquareDouble17E300 (
+ OUT PVOID ans
+ );
+
+
+VOID
+_CRTAPI1
+main(
+ int argc,
+ char *argv[]
+ )
+
+{
+
+ PLONG BadAddress;
+ PCHAR BadByte;
+ PLONG BlackHole;
+ ULONG Index1;
+ ULONG Index2 = RED;
+ jmp_buf JumpBuffer;
+ LONG Counter;
+ EXCEPTION_RECORD ExceptionRecord;
+ double doubleresult;
+
+ //
+ // Announce start of exception test.
+ //
+
+ printf("Start of exception test\n");
+
+ //
+ // Initialize exception record.
+ //
+
+ ExceptionRecord.ExceptionCode = STATUS_INTEGER_OVERFLOW;
+ ExceptionRecord.ExceptionFlags = 0;
+ ExceptionRecord.ExceptionRecord = NULL;
+ ExceptionRecord.NumberParameters = 0;
+
+ //
+ // Initialize pointers.
+ //
+
+ BadAddress = (PLONG)NULL;
+ BadByte = (PCHAR)NULL;
+ BadByte += 1;
+ BlackHole = &Counter;
+
+ //
+    // Simple try statement with a finally clause that is entered sequentially.
+ //
+
+ printf(" test1...");
+ Counter = 0;
+ try {
+ Counter += 1;
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 1;
+ }
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try statement with an exception clause that is never executed
+ // because there is no exception raised in the try clause.
+ //
+
+ printf(" test2...");
+ Counter = 0;
+ try {
+ Counter += 1;
+
+ } except (Counter) {
+ Counter += 1;
+ }
+
+ if (Counter != 1) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try statement with an exception handler that is never executed
+ // because the exception expression continues execution.
+ //
+
+ printf(" test3...");
+ Counter = 0;
+ try {
+ Counter -= 1;
+ RtlRaiseException(&ExceptionRecord);
+
+ } except (Counter) {
+ Counter -= 1;
+ }
+
+ if (Counter != - 1) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try statement with an exception clause that is always executed.
+ //
+
+ printf(" test4...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } except (Counter) {
+ Counter += 1;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try statement with an exception clause that is always executed.
+ //
+
+ printf(" test5...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ *BlackHole += *BadAddress;
+
+ } except (Counter) {
+ Counter += 1;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Simple try statement with a finally clause that is entered as the
+ // result of an exception.
+ //
+
+ printf(" test6...");
+ Counter = 0;
+ try {
+ try {
+ Counter += 1;
+ RtlRaiseException(&ExceptionRecord);
+
+ } finally {
+ if (abnormal_termination() != FALSE) {
+ Counter += 1;
+ }
+ }
+
+ } except (Counter) {
+ if (Counter == 2) {
+ Counter += 1;
+ }
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Simple try statement with a finally clause that is entered as the
+ // result of an exception.
+ //
+
+ printf(" test7...");
+ Counter = 0;
+ try {
+ try {
+ Counter += 1;
+ *BlackHole += *BadAddress;
+
+ } finally {
+ if (abnormal_termination() != FALSE) {
+ Counter += 1;
+ }
+ }
+
+ } except (Counter) {
+ if (Counter == 2) {
+ Counter += 1;
+ }
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try that calls a function which raises an exception.
+ //
+
+ printf(" test8...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ foo1(STATUS_ACCESS_VIOLATION);
+
+ } except ((GetExceptionCode() == STATUS_ACCESS_VIOLATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter += 1;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try that calls a function which raises an exception.
+ //
+
+ printf(" test9...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ foo2(BlackHole, BadAddress);
+
+ } except ((GetExceptionCode() == STATUS_ACCESS_VIOLATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter += 1;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try that calls a function which calls a function that
+ // raises an exception. The first function has a finally clause
+ // that must be executed for this test to work.
+ //
+
+ printf(" test10...");
+ Counter = 0;
+ try {
+ bar1(STATUS_ACCESS_VIOLATION, &Counter);
+
+ } except ((GetExceptionCode() == STATUS_ACCESS_VIOLATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter -= 1;
+ }
+
+ if (Counter != 98) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try that calls a function which calls a function that
+ // raises an exception. The first function has a finally clause
+ // that must be executed for this test to work.
+ //
+
+ printf(" test11...");
+ Counter = 0;
+ try {
+ bar2(BlackHole, BadAddress, &Counter);
+
+ } except ((GetExceptionCode() == STATUS_ACCESS_VIOLATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter -= 1;
+ }
+
+ if (Counter != 98) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A try within an except
+ //
+
+ printf(" test12...");
+ Counter = 0;
+ try {
+ foo1(STATUS_ACCESS_VIOLATION);
+
+ } except ((GetExceptionCode() == STATUS_ACCESS_VIOLATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter += 1;
+ try {
+ foo1(STATUS_SUCCESS);
+
+ } except ((GetExceptionCode() == STATUS_SUCCESS) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ if (Counter != 1) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded...");
+ }
+
+ Counter += 1;
+ }
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A try within an except
+ //
+
+ printf(" test13...");
+ Counter = 0;
+ try {
+ foo2(BlackHole, BadAddress);
+
+ } except ((GetExceptionCode() == STATUS_ACCESS_VIOLATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter += 1;
+ try {
+ foo1(STATUS_SUCCESS);
+
+ } except ((GetExceptionCode() == STATUS_SUCCESS) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ if (Counter != 1) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded...");
+ }
+
+ Counter += 1;
+ }
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A goto from an exception clause that needs to pass
+ // through a finally
+ //
+
+ printf(" test14...");
+ Counter = 0;
+ try {
+ try {
+ foo1(STATUS_ACCESS_VIOLATION);
+
+ } except ((GetExceptionCode() == STATUS_ACCESS_VIOLATION) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter += 1;
+ goto t9;
+ }
+
+ } finally {
+ Counter += 1;
+ }
+
+t9:;
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // A goto from a finally clause that needs to pass
+ // through a finally
+ //
+
+ printf(" test15...");
+ Counter = 0;
+ try {
+ try {
+ Counter += 1;
+
+ } finally {
+ Counter += 1;
+ goto t10;
+ }
+
+ } finally {
+ Counter += 1;
+ }
+
+t10:;
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A goto from an exception clause that needs to pass
+ // through a finally into the outer finally clause.
+ //
+
+ printf(" test16...");
+ Counter = 0;
+ try {
+ try {
+ try {
+ Counter += 1;
+ foo1(STATUS_INTEGER_OVERFLOW);
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 1;
+ goto t11;
+ }
+
+ } finally {
+ Counter += 1;
+ }
+t11:;
+ } finally {
+ Counter += 1;
+ }
+
+ if (Counter != 4) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // A goto from a finally clause that needs to pass
+ // through a finally into the outer finally clause.
+ //
+
+ printf(" test17...");
+ Counter = 0;
+ try {
+ try {
+ Counter += 1;
+
+ } finally {
+ Counter += 1;
+ goto t12;
+ }
+t12:;
+ } finally {
+ Counter += 1;
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A return from an except clause
+ //
+
+ printf(" test18...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ eret(STATUS_ACCESS_VIOLATION, &Counter);
+
+ } finally {
+ Counter += 1;
+ }
+
+ if (Counter != 4) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A return from a finally clause
+ //
+
+ printf(" test19...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ fret(&Counter);
+
+ } finally {
+ Counter += 1;
+ }
+
+ if (Counter != 5) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A simple set jump followed by a long jump.
+ //
+
+ printf(" test20...");
+ Counter = 0;
+ if (setjmp(JumpBuffer) == 0) {
+ Counter += 1;
+ longjmp(JumpBuffer, 1);
+
+ } else {
+ Counter += 1;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A set jump followed by a long jump out of a finally clause that is
+ // sequentially executed.
+ //
+
+ printf(" test21...");
+ Counter = 0;
+ if (setjmp(JumpBuffer) == 0) {
+ try {
+ Counter += 1;
+
+ } finally {
+ Counter += 1;
+ longjmp(JumpBuffer, 1);
+ }
+
+ } else {
+ Counter += 1;
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A set jump within a try clause followed by a long jump out of a
+ // finally clause that is sequentially executed.
+ //
+
+ printf(" test22...");
+ Counter = 0;
+ try {
+ if (setjmp(JumpBuffer) == 0) {
+ Counter += 1;
+
+ } else {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 1;
+ if (Counter == 2) {
+ Counter += 1;
+ longjmp(JumpBuffer, 1);
+ }
+ }
+
+ if (Counter != 5) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A set jump followed by a try/except, followed by a try/finally where
+ // the try body of the try/finally raises an exception that is handled
+    // by the try/except which causes the try/finally to do a long jump out
+ // of a finally clause. This will create a collided unwind.
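+    // (A collided unwind occurs when a second unwind, here the longjmp out
+    // of the finally clause, is started while the unwind for the original
+    // exception is still in progress.)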
+ //
+
+ printf(" test23...");
+ Counter = 0;
+ if (setjmp(JumpBuffer) == 0) {
+ try {
+ try {
+ Counter += 1;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } finally {
+ Counter += 1;
+ longjmp(JumpBuffer, 1);
+ }
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 1;
+ }
+
+ } else {
+ Counter += 1;
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // A set jump followed by a try/except, followed by several nested
+ // try/finally's where the inner try body of the try/finally raises an
+ // exception that is handled by the try/except which causes the
+ // try/finally to do a long jump out of a finally clause. This will
+ // create a collided unwind.
+ //
+
+ printf(" test24...");
+ Counter = 0;
+ if (setjmp(JumpBuffer) == 0) {
+ try {
+ try {
+ try {
+ try {
+ Counter += 1;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } finally {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 1;
+ longjmp(JumpBuffer, 1);
+ }
+
+ } finally {
+ Counter += 1;
+ }
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 1;
+ }
+
+ } else {
+ Counter += 1;
+ }
+
+ if (Counter != 5) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A set jump followed by a try/except, followed by a try/finally which
+    // calls a subroutine which contains a try/finally that raises an
+    // exception that is handled by the try/except.
+ //
+
+ printf(" test25...");
+ Counter = 0;
+ if (setjmp(JumpBuffer) == 0) {
+ try {
+ try {
+ try {
+ Counter += 1;
+ dojump(JumpBuffer, &Counter);
+
+ } finally {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 1;
+ }
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 1;
+ }
+
+ } else {
+ Counter += 1;
+ }
+
+ if (Counter != 7) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // A set jump followed by a try/except, followed by a try/finally which
+    // calls a subroutine which contains a try/finally that raises an
+    // exception that is handled by the try/except.
+ //
+
+ printf(" test26...");
+ Counter = 0;
+ if (setjmp(JumpBuffer) == 0) {
+ try {
+ try {
+ try {
+ try {
+ Counter += 1;
+ dojump(JumpBuffer, &Counter);
+
+ } finally {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 1;
+ longjmp(JumpBuffer, 1);
+ }
+
+ } finally {
+ Counter += 1;
+ }
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 1;
+ }
+
+ } else {
+ Counter += 1;
+ }
+
+ if (Counter != 8) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Test nested exceptions.
+ //
+
+ printf(" test27...");
+ Counter = 0;
+ try {
+ try {
+ Counter += 1;
+ except1(&Counter);
+
+ } except(except2(GetExceptionInformation(), &Counter)) {
+ Counter += 2;
+ }
+
+ } except(EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 3;
+ }
+
+ if (Counter != 55) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Simple try that causes an integer overflow exception.
+ //
+
+ printf(" test28...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ addtwo(0x7fff0000, 0x10000, &Counter);
+
+ } except ((GetExceptionCode() == STATUS_INTEGER_OVERFLOW) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter += 1;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Simple try that raises a misaligned data exception.
+ //
+
+#ifndef i386
+ printf(" test29...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ foo2(BlackHole, (PLONG)BadByte);
+
+ } except ((GetExceptionCode() == STATUS_DATATYPE_MISALIGNMENT) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ Counter += 1;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+#endif
+
+ //
+ // Continue from a try body with an exception clause in a loop.
+ //
+
+ printf(" test30...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ if ((Index1 & 0x1) == 0) {
+ continue;
+
+ } else {
+ Counter += 1;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 40;
+ }
+
+ Counter += 2;
+ }
+
+ if (Counter != 15) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Continue from a try body with a finally clause in a loop.
+ //
+
+ printf(" test31...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ if ((Index1 & 0x1) == 0) {
+ continue;
+
+ } else {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 3;
+ }
+
+ if (Counter != 40) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Continue from doubly nested try body with an exception clause in a
+ // loop.
+ //
+
+ printf(" test32...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 0) {
+ continue;
+
+ } else {
+ Counter += 1;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 10;
+ }
+
+ Counter += 2;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 20;
+ }
+
+ Counter += 3;
+ }
+
+ if (Counter != 30) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Continue from doubly nested try body with a finally clause in a loop.
+ //
+
+ printf(" test33...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 0) {
+ continue;
+
+ } else {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 3;
+
+ } finally {
+ Counter += 4;
+ }
+
+ Counter += 5;
+ }
+
+ if (Counter != 105) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Continue from a finally clause in a loop.
+ //
+
+ printf(" test34...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ if ((Index1 & 0x1) == 0) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ continue;
+ }
+
+ Counter += 4;
+ }
+
+ if (Counter != 25) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Continue from a doubly nested finally clause in a loop.
+ //
+
+ printf(" test35...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 0) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ continue;
+ }
+
+ Counter += 4;
+
+ } finally {
+ Counter += 5;
+ }
+
+ Counter += 6;
+ }
+
+ if (Counter != 75) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Continue from a doubly nested finally clause in a loop.
+ //
+
+ printf(" test36...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 0) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 4;
+
+ } finally {
+ Counter += 5;
+ continue;
+ }
+
+ Counter += 6;
+ }
+
+ if (Counter != 115) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a try body with an exception clause in a loop.
+ //
+
+ printf(" test37...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 40;
+ }
+
+ Counter += 2;
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Break from a try body with a finally clause in a loop.
+ //
+
+ printf(" test38...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 3;
+ }
+
+ if (Counter != 8) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from doubly nested try body with an exception clause in a
+ // loop.
+ //
+
+ printf(" test39...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 10;
+ }
+
+ Counter += 2;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 20;
+ }
+
+ Counter += 3;
+ }
+
+ if (Counter != 6) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Break from doubly nested try body with a finally clause in a loop.
+ //
+
+ printf(" test40...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 3;
+
+ } finally {
+ Counter += 4;
+ }
+
+ Counter += 5;
+ }
+
+ if (Counter != 21) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a finally clause in a loop.
+ //
+
+ printf(" test41...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ break;
+ }
+
+ Counter += 4;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a doubly nested finally clause in a loop.
+ //
+
+ printf(" test42...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ break;
+ }
+
+ Counter += 4;
+
+ } finally {
+ Counter += 5;
+ }
+
+ Counter += 6;
+ }
+
+ if (Counter != 7) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a doubly nested finally clause in a loop.
+ //
+
+ printf(" test43...");
+ Counter = 0;
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 4;
+
+ } finally {
+ Counter += 5;
+ break;
+ }
+
+ Counter += 6;
+ }
+
+ if (Counter != 11) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a try body with an exception clause in a switch.
+ //
+
+ printf(" test44...");
+ Counter = 0;
+ Index1 = 1;
+ switch (Index2) {
+ case BLUE:
+ Counter += 100;
+ break;
+
+ case RED:
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 40;
+ }
+
+ Counter += 2;
+ break;
+ }
+
+ if (Counter != 0) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Break from a try body with a finally clause in a switch.
+ //
+
+ printf(" test45...");
+ Counter = 0;
+ Index1 = 1;
+ switch (Index2) {
+ case BLUE:
+ Counter += 100;
+ break;
+
+ case RED:
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 3;
+ }
+
+ if (Counter != 2) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from doubly nested try body with an exception clause in a
+ // switch.
+ //
+
+ printf(" test46...");
+ Counter = 0;
+ Index1 = 1;
+ switch (Index2) {
+ case BLUE:
+ Counter += 100;
+ break;
+
+ case RED:
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 10;
+ }
+
+ Counter += 2;
+
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 20;
+ }
+
+ Counter += 3;
+ }
+
+ if (Counter != 0) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+    // Break from doubly nested try body with a finally clause in a switch.
+ //
+
+ printf(" test47...");
+ Counter = 0;
+ Index1 = 1;
+ switch (Index2) {
+ case BLUE:
+ Counter += 100;
+ break;
+
+ case RED:
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ break;
+
+ } else {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 3;
+
+ } finally {
+ Counter += 4;
+ }
+
+ Counter += 5;
+ }
+
+ if (Counter != 6) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a finally clause in a switch.
+ //
+
+ printf(" test48...");
+ Counter = 0;
+ Index1 = 1;
+ switch (Index2) {
+ case BLUE:
+ Counter += 100;
+ break;
+
+ case RED:
+ try {
+ if ((Index1 & 0x1) == 1) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ break;
+ }
+
+ Counter += 4;
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a doubly nested finally clause in a switch.
+ //
+
+ printf(" test49...");
+ Counter = 0;
+ Index1 = 1;
+ switch (Index2) {
+ case BLUE:
+ Counter += 100;
+ break;
+
+ case RED:
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ break;
+ }
+
+ Counter += 4;
+
+ } finally {
+ Counter += 5;
+ }
+
+ Counter += 6;
+ }
+
+ if (Counter != 8) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Break from a doubly nested finally clause in a switch.
+ //
+
+ printf(" test50...");
+ Counter = 0;
+ Index1 = 1;
+ switch (Index2) {
+ case BLUE:
+ Counter += 100;
+ break;
+
+ case RED:
+ try {
+ try {
+ if ((Index1 & 0x1) == 1) {
+ Counter += 1;
+ }
+
+ } finally {
+ Counter += 2;
+ }
+
+ Counter += 4;
+
+ } finally {
+ Counter += 5;
+ break;
+ }
+
+ Counter += 6;
+ }
+
+ if (Counter != 12) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Leave from an if in a simple try/finally.
+ //
+
+ printf(" test51...");
+ Counter = 0;
+ try {
+ if (Echo(Counter) == Counter) {
+ Counter += 3;
+ leave;
+
+ } else {
+ Counter += 100;
+ }
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+
+ if (Counter != 8) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Leave from a loop in a simple try/finally.
+ //
+
+ printf(" test52...");
+ Counter = 0;
+ try {
+ for (Index1 = 0; Index1 < 10; Index1 += 1) {
+ if (Echo(Index1) == Index1) {
+ Counter += 3;
+ leave;
+ }
+
+ Counter += 100;
+ }
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+
+ if (Counter != 8) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Leave from a switch in a simple try/finally.
+ //
+
+ printf(" test53...");
+ Counter = 0;
+ try {
+ switch (Index2) {
+ case BLUE:
+ break;
+
+ case RED:
+ Counter += 3;
+ leave;
+ }
+
+ Counter += 100;
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+
+ if (Counter != 8) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Leave from an if in doubly nested try/finally followed by a leave
+ // from an if in the outer try/finally.
+ //
+
+ printf(" test54...");
+ Counter = 0;
+ try {
+ try {
+ if (Echo(Counter) == Counter) {
+ Counter += 3;
+ leave;
+
+ } else {
+ Counter += 100;
+ }
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+
+ if (Echo(Counter) == Counter) {
+ Counter += 3;
+ leave;
+
+ } else {
+ Counter += 100;
+ }
+
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+
+ if (Counter != 16) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Leave from an if in doubly nested try/finally followed by a leave
+ // from the finally of the outer try/finally.
+ //
+
+ printf(" test55...");
+ Counter = 0;
+ try {
+ try {
+ if (Echo(Counter) == Counter) {
+ Counter += 3;
+ leave;
+
+ } else {
+ Counter += 100;
+ }
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ leave;
+ }
+ }
+
+ Counter += 100;
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+
+ if (Counter != 13) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Try/finally within the except clause of a try/except that is always
+ // executed.
+ //
+
+ printf(" test56...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } except (Counter) {
+ try {
+ Counter += 3;
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+ }
+
+ if (Counter != 9) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Try/finally within the finally clause of a try/finally.
+ //
+
+ printf(" test57...");
+ Counter = 0;
+ try {
+ Counter += 1;
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ try {
+ Counter += 3;
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+ }
+ }
+
+ if (Counter != 9) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Try/except within the finally clause of a try/finally.
+ //
+/*
+ printf(" test58...");
+ Counter = 0;
+ try {
+ Counter -= 1;
+
+ } finally {
+ try {
+ Counter += 2;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } except (Counter) {
+ try {
+ Counter += 3;
+
+ } finally {
+ if (abnormal_termination() == FALSE) {
+ Counter += 5;
+ }
+ }
+ }
+ }
+
+ if (Counter != 9) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+*/
+ //
+ // Try/except within the except clause of a try/except that is always
+ // executed.
+ //
+
+ printf(" test59...");
+ Counter = 0;
+ try {
+ Counter += 1;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } except (Counter) {
+ try {
+ Counter += 3;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } except(Counter - 3) {
+ Counter += 5;
+ }
+ }
+
+ if (Counter != 9) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Try with a Try which exits the scope with a goto
+ //
+
+ printf(" test60...");
+ Counter = 0;
+ try {
+ try {
+ goto outside;
+
+ } except(1) {
+ Counter += 1;
+ }
+
+outside:
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } except(1) {
+ Counter += 3;
+ }
+
+ if (Counter != 3) {
+ printf("failed, count = %d\n", Counter);
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Try/except which gets an exception from a subfunction within
+ // a try/finally which has a try/except in the finally clause
+ //
+
+ printf(" test61...");
+ Counter = 0;
+ try {
+ Test61Part2 (&Counter);
+ } except (EXCEPTION_EXECUTE_HANDLER) {
+ Counter += 11;
+ }
+
+ if (Counter != 24) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ //
+ // Check for precision of floating point exception
+ //
+
+ printf(" test62...");
+
+ /* enable floating point overflow */
+ _controlfp(_controlfp(0,0) & ~EM_OVERFLOW, _MCW_EM);
+
+ Counter = 0;
+ try {
+ doubleresult = SquareDouble (1.7e300);
+
+ try {
+ doubleresult = SquareDouble (1.0);
+
+ } except (1) {
+ Counter += 3;
+ }
+
+ } except (1) {
+ Counter += 1;
+ }
+
+ if (Counter != 1) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ _clearfp ();
+
+ //
+ // Callout for test #63 due to bogus compiler behaviour caused by test #62.
+ //
+
+ PerformFpTest ();
+
+ //
+ // Announce end of exception test.
+ //
+
+ printf("End of exception test\n");
+ return;
+}
+
+VOID
+PerformFpTest()
+{
+ LONG Counter;
+ double doubleresult;
+
+ //
+ // Check for precision of floating point exception in subfunction
+ //
+
+ printf(" test63...");
+
+ Counter = 0;
+ try {
+ SquareDouble17E300 ((PVOID) &doubleresult);
+
+ try {
+ SquareDouble17E300 ((PVOID) &doubleresult);
+
+ } except (1) {
+ Counter += 3;
+ }
+
+ } except (1) {
+ Counter += 1;
+ }
+
+ if (Counter != 1) {
+ printf("failed, count = %d\n", Counter);
+
+ } else {
+ printf("succeeded\n");
+ }
+
+ _clearfp ();
+
+}
+
+VOID
+addtwo (
+ long First,
+ long Second,
+ long *Place
+ )
+
+{
+
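+    //
+    // Raise an integer overflow exception before the sum is computed. The
+    // exception is handled by the caller, so the store below is not reached.
+    //
+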
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+ *Place = First + Second;
+ return;
+}
+
+VOID
+bar1 (
+ IN NTSTATUS Status,
+ IN PLONG Counter
+ )
+{
+
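+    //
+    // Call foo1 to raise the specified status within a try/finally and
+    // record in the finally clause whether the termination was abnormal.
+    //
+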
+ try {
+ foo1(Status);
+
+ } finally {
+ if (abnormal_termination() != FALSE) {
+ *Counter = 99;
+
+ } else {
+ *Counter = 100;
+ }
+ }
+
+ return;
+}
+
+VOID
+bar2 (
+ IN PLONG BlackHole,
+ IN PLONG BadAddress,
+ IN PLONG Counter
+ )
+{
+
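+    //
+    // Call foo2 to reference a bad address within a try/finally and
+    // record in the finally clause whether the termination was abnormal.
+    //
+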
+ try {
+ foo2(BlackHole, BadAddress);
+
+ } finally {
+ if (abnormal_termination() != FALSE) {
+ *Counter = 99;
+
+ } else {
+ *Counter = 100;
+ }
+ }
+
+ return;
+}
+
+VOID
+dojump (
+ IN jmp_buf JumpBuffer,
+ IN PLONG Counter
+ )
+
+{
+
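+    //
+    // Raise an integer overflow exception inside nested try/finally
+    // statements and then long jump out of the outer finally clause while
+    // the unwind is in progress (see tests 25 and 26).
+    //
+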
+ try {
+ try {
+ *Counter += 1;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } finally {
+ *Counter += 1;
+ }
+
+ } finally {
+ *Counter += 1;
+ longjmp(JumpBuffer, 1);
+ }
+}
+
+VOID
+eret(
+ IN NTSTATUS Status,
+ IN PLONG Counter
+ )
+
+{
+
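+    //
+    // Raise the specified status, handle it with an exception filter that
+    // matches the status, and return from the except clause. The enclosing
+    // finally clause executes as part of the unwind caused by the return.
+    //
+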
+ try {
+ try {
+ foo1(Status);
+
+ } except ((GetExceptionCode() == Status) ?
+ EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
+ *Counter += 1;
+ return;
+ }
+
+ } finally {
+ *Counter += 1;
+ }
+
+ return;
+}
+
+VOID
+except1 (
+ IN PLONG Counter
+ )
+
+{
+
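+    //
+    // Raise an integer overflow exception with a filter (except3) that
+    // itself raises a nested exception (see test27).
+    //
+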
+ try {
+ *Counter += 5;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+
+ } except (except3(GetExceptionInformation(), Counter)) {
+ *Counter += 7;
+ }
+
+ *Counter += 9;
+ return;
+}
+
+ULONG
+except2 (
+ IN PEXCEPTION_POINTERS ExceptionPointers,
+ IN PLONG Counter
+ )
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+
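+    //
+    // Execute the handler if the exception is STATUS_UNSUCCESSFUL and is
+    // not a nested exception; otherwise, continue the search.
+    //
+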
+ ExceptionRecord = ExceptionPointers->ExceptionRecord;
+ if ((ExceptionRecord->ExceptionCode == STATUS_UNSUCCESSFUL) &&
+ ((ExceptionRecord->ExceptionFlags & EXCEPTION_NESTED_CALL) == 0)) {
+ *Counter += 11;
+ return EXCEPTION_EXECUTE_HANDLER;
+
+ } else {
+ *Counter += 13;
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+}
+
+ULONG
+except3 (
+ IN PEXCEPTION_POINTERS ExceptionPointers,
+ IN PLONG Counter
+ )
+
+{
+
+ PEXCEPTION_RECORD ExceptionRecord;
+
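+    //
+    // Raise STATUS_UNSUCCESSFUL if the exception is an integer overflow
+    // that is not a nested exception, continue the search if it is a
+    // nested STATUS_UNSUCCESSFUL exception, and otherwise execute the
+    // handler.
+    //
+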
+ ExceptionRecord = ExceptionPointers->ExceptionRecord;
+ if ((ExceptionRecord->ExceptionCode == STATUS_INTEGER_OVERFLOW) &&
+ ((ExceptionRecord->ExceptionFlags & EXCEPTION_NESTED_CALL) == 0)) {
+ *Counter += 17;
+ RtlRaiseStatus(STATUS_UNSUCCESSFUL);
+
+ } else if ((ExceptionRecord->ExceptionCode == STATUS_UNSUCCESSFUL) &&
+ ((ExceptionRecord->ExceptionFlags & EXCEPTION_NESTED_CALL) != 0)) {
+ *Counter += 19;
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ *Counter += 23;
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+VOID
+foo1 (
+ IN NTSTATUS Status
+ )
+
+{
+
+ //
+ // Raise exception.
+ //
+
+ RtlRaiseStatus(Status);
+ return;
+}
+
+VOID
+foo2 (
+ IN PLONG BlackHole,
+ IN PLONG BadAddress
+ )
+
+{
+
+ //
+ // Raise exception.
+ //
+
+ *BlackHole += *BadAddress;
+ return;
+}
+
+VOID
+fret(
+ IN PLONG Counter
+ )
+
+{
+
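+    //
+    // Return from the inner finally clause. The return unwinds through the
+    // outer try/finally, whose finally clause also executes.
+    //
+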
+ try {
+ try {
+ *Counter += 1;
+
+ } finally {
+ *Counter += 1;
+ return;
+ }
+ } finally {
+ *Counter += 1;
+ }
+
+ return;
+}
+
+LONG
+Echo(
+ IN LONG Value
+ )
+
+{
+ return Value;
+}
+
+VOID
+Test61Part2 (
+ IN OUT PULONG Counter
+ )
+{
+
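+    //
+    // Raise an integer overflow exception within a try/finally. The finally
+    // clause adjusts the counter before the exception is handled by the
+    // caller's except clause (see test61).
+    //
+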
+ try {
+ *Counter -= 1;
+ RtlRaiseStatus(STATUS_INTEGER_OVERFLOW);
+ } finally {
+ *Counter += 2;
+ *Counter += 5;
+ *Counter += 7;
+ }
+}
+
+
+double
+SquareDouble (
+ IN double op
+ )
+{
+ return op * op;
+}
+
+VOID
+SquareDouble17E300 (
+ OUT PVOID output
+ )
+{
+ double ans;
+
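+    //
+    // Square 1.7e300, which overflows and, with floating point overflow
+    // exceptions unmasked, raises an exception in the caller (see test63).
+    //
+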
+ ans = SquareDouble (1.7e300);
+ *(double *) output = ans;
+}
diff --git a/private/ntos/ke/thredobj.c b/private/ntos/ke/thredobj.c
new file mode 100644
index 000000000..f7fa780e0
--- /dev/null
+++ b/private/ntos/ke/thredobj.c
@@ -0,0 +1,2216 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+    thredobj.c
+
+Abstract:
+
+ This module implements the machine independent functions to manipulate
+ the kernel thread object. Functions are provided to initialize, ready,
+ alert, test alert, boost priority, enable APC queuing, disable APC
+ queuing, confine, set affinity, set priority, suspend, resume, alert
+ resume, terminate, read thread state, freeze, unfreeze, query data
+ alignment handling mode, force resume, and enter and leave critical
+ regions for thread objects.
+
+Author:
+
+ David N. Cutler (davec) 4-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input thread object is
+// really a kthread and not something else, like deallocated pool.
+//
+
+#define ASSERT_THREAD(E) { \
+ ASSERT((E)->Header.Type == ThreadObject); \
+}
+
+VOID
+KeInitializeThread (
+ IN PKTHREAD Thread,
+ IN PVOID KernelStack,
+ IN PKSYSTEM_ROUTINE SystemRoutine,
+ IN PKSTART_ROUTINE StartRoutine OPTIONAL,
+ IN PVOID StartContext OPTIONAL,
+ IN PCONTEXT ContextFrame OPTIONAL,
+ IN PVOID Teb OPTIONAL,
+ IN PKPROCESS Process
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a thread object. The priority, affinity,
+ and initial quantum are taken from the parent process object. The
+ thread object is inserted at the end of the thread list for the
+ parent process.
+
+ N.B. This routine is carefully written so that if an access violation
+ occurs while reading the specified context frame, then no kernel
+ data structures will have been modified. It is the responsibility
+ of the caller to handle the exception and provide necessary clean
+ up.
+
+ N.B. It is assumed that the thread object is zeroed.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ KernelStack - Supplies a pointer to the base of a kernel stack on which
+ the context frame for the thread is to be constructed.
+
+ SystemRoutine - Supplies a pointer to the system function that is to be
+ called when the thread is first scheduled for execution.
+
+ StartRoutine - Supplies an optional pointer to a function that is to be
+ called after the system has finished initializing the thread. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+ StartContext - Supplies an optional pointer to an arbitrary data structure
+ which will be passed to the StartRoutine as a parameter. This
+ parameter is specified if the thread is a system thread and will
+ execute totally in kernel mode.
+
+    ContextFrame - Supplies an optional pointer to a context frame which contains
+ the initial user mode state of the thread. This parameter is specified
+ if the thread is a user thread and will execute in user mode. If this
+ parameter is not specified, then the Teb parameter is ignored.
+
+ Teb - Supplies an optional pointer to the user mode thread environment
+ block. This parameter is specified if the thread is a user thread and
+ will execute in user mode. This parameter is ignored if the ContextFrame
+ parameter is not specified.
+
+ Process - Supplies a pointer to a control object of type process.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+ KIRQL OldIrql;
+ PKTIMER Timer;
+ PKWAIT_BLOCK WaitBlock;
+
+ //
+ // Initialize the standard dispatcher object header and set the initial
+ // state of the thread object.
+ //
+
+ Thread->Header.Type = ThreadObject;
+ Thread->Header.Size = sizeof(KTHREAD) / sizeof(LONG);
+ InitializeListHead(&Thread->Header.WaitListHead);
+
+ //
+ // Initialize the owned mutant listhead.
+ //
+
+ InitializeListHead(&Thread->MutantListHead);
+
+ //
+ // Initialize the thread field of all builtin wait blocks.
+ //
+
+ for (Index = 0; Index < (THREAD_WAIT_OBJECTS + 1); Index += 1) {
+ Thread->WaitBlock[Index].Thread = Thread;
+ }
+
+ //
+ // Initialize the alerted, preempted, debugactive, autoalignment,
+ // kernel stack resident, enable kernel stack swap, and process
+ // ready queue boolean values.
+ //
+ // N.B. Only nonzero values are initialized.
+ //
+
+ Thread->AutoAlignment = Process->AutoAlignment;
+ Thread->EnableStackSwap = TRUE;
+ Thread->KernelStackResident = TRUE;
+
+ //
+ // Set the system service table pointer to the address of the static
+ // system service descriptor table. If the thread is later converted
+    // to a Win32 thread this pointer will be changed to a pointer to the
+ // shadow system service descriptor table.
+ //
+
+ Thread->ServiceTable = (PVOID)&KeServiceDescriptorTable[0];
+
+ //
+ // Initialize the APC state pointers, the current APC state, the saved
+ // APC state, and enable APC queuing.
+ //
+
+ Thread->ApcStatePointer[0] = &Thread->ApcState;
+ Thread->ApcStatePointer[1] = &Thread->SavedApcState;
+ InitializeListHead(&Thread->ApcState.ApcListHead[KernelMode]);
+ InitializeListHead(&Thread->ApcState.ApcListHead[UserMode]);
+ Thread->ApcState.Process = Process;
+ Thread->ApcQueueable = TRUE;
+
+ //
+    // Initialize the kernel mode suspend APC, the suspend semaphore object,
+    // and the builtin wait timeout timer object.
+ //
+
+ KeInitializeApc(&Thread->SuspendApc,
+ Thread,
+ OriginalApcEnvironment,
+ (PKKERNEL_ROUTINE)KiSuspendNop,
+ (PKRUNDOWN_ROUTINE)NULL,
+ KiSuspendThread,
+ KernelMode,
+ NULL);
+
+ KeInitializeSemaphore(&Thread->SuspendSemaphore, 0L, 2L);
+
+ //
+    // Initialize the builtin timer wait block.
+    //
+    // N.B. This is the only time the wait block is initialized since this
+    //      information is constant.
+ //
+
+ Timer = &Thread->Timer;
+ KeInitializeTimer(Timer);
+ WaitBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];
+ WaitBlock->Object = Timer;
+ WaitBlock->WaitKey = (CSHORT)STATUS_TIMEOUT;
+ WaitBlock->WaitType = WaitAny;
+ WaitBlock->WaitListEntry.Flink = &Timer->Header.WaitListHead;
+ WaitBlock->WaitListEntry.Blink = &Timer->Header.WaitListHead;
+
+ //
+ // Initialize the APC queue spinlock.
+ //
+
+ KeInitializeSpinLock(&Thread->ApcQueueLock);
+
+ //
+ // Initialize the Thread Environment Block (TEB) pointer (can be NULL).
+ //
+
+ Thread->Teb = Teb;
+
+ //
+ // Set the initial kernel stack and the initial thread context.
+ //
+
+ Thread->InitialStack = KernelStack;
+ Thread->StackBase = KernelStack;
+ Thread->StackLimit = (PVOID)((ULONG)KernelStack - KERNEL_STACK_SIZE);
+ KiInitializeContextThread(Thread,
+ SystemRoutine,
+ StartRoutine,
+ StartContext,
+ ContextFrame);
+
+ //
+ // Set the base thread priority, the thread priority, the thread affinity,
+ // the thread quantum, and the scheduling state.
+ //
+
+ Thread->BasePriority = Process->BasePriority;
+ Thread->Priority = Thread->BasePriority;
+ Thread->Affinity = Process->Affinity;
+ Thread->UserAffinity = Process->Affinity;
+ Thread->SystemAffinityActive = FALSE;
+ Thread->Quantum = Process->ThreadQuantum;
+ Thread->State = Initialized;
+ Thread->DisableBoost = Process->DisableBoost;
+
+#ifdef i386
+
+ Thread->Iopl = Process->Iopl;
+
+#endif
+
+ //
+ // Lock the dispatcher database, insert the thread in the process
+ // thread list, increment the kernel stack count, and unlock the
+ // dispatcher database.
+ //
+ // N.B. The distinguished value MAXSHORT is used to signify that no
+ // threads have been created for a process.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ InsertTailList(&Process->ThreadListHead, &Thread->ThreadListEntry);
+ if (Process->StackCount == MAXSHORT) {
+ Process->StackCount = 1;
+
+ } else {
+ Process->StackCount += 1;
+ }
+
+ //
+ // Initialize the ideal processor number for the thread.
+ //
+ // N.B. This must be done under the dispatcher lock to prevent byte
+ // granularity problems on Alpha.
+ //
+
+ Process->ThreadSeed += 1;
+ Thread->IdealProcessor = (UCHAR)(Process->ThreadSeed % KeNumberProcessors);
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+BOOLEAN
+KeAlertThread (
+ IN PKTHREAD Thread,
+ IN KPROCESSOR_MODE AlertMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function attempts to alert a thread and cause its execution to
+ be continued if it is currently in an alertable Wait state. Otherwise
+ it just sets the alerted variable for the specified processor mode.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ AlertMode - Supplies the processor mode for which the thread is
+ to be alerted.
+
+Return Value:
+
+ The previous state of the alerted variable for the specified processor
+ mode.
+
+--*/
+
+{
+
+ BOOLEAN Alerted;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level, lock dispatcher database, and lock
+ // APC queue.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ KiAcquireSpinLock(&Thread->ApcQueueLock);
+
+ //
+ // Capture the current state of the alerted variable for the specified
+ // processor mode.
+ //
+
+ Alerted = Thread->Alerted[AlertMode];
+
+ //
+ // If the alerted state for the specified processor mode is Not-Alerted,
+ // then attempt to alert the thread.
+ //
+
+ if (Alerted == FALSE) {
+
+ //
+ // If the thread is currently in a Wait state, the Wait is alertable,
+ // and the specified processor mode is less than or equal to the Wait
+ // mode, then the thread is unwaited with a status of "alerted".
+ //
+
+ if ((Thread->State == Waiting) && (Thread->Alertable == TRUE) &&
+ (AlertMode <= Thread->WaitMode)) {
+ KiUnwaitThread(Thread, STATUS_ALERTED, ALERT_INCREMENT);
+
+ } else {
+ Thread->Alerted[AlertMode] = TRUE;
+ }
+ }
+
+ //
+ // Unlock APC queue, unlock dispatcher database, lower IRQL to its
+ // previous value, and return the previous alerted state for the
+ // specified mode.
+ //
+
+ KiReleaseSpinLock(&Thread->ApcQueueLock);
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Alerted;
+}
+
+ULONG
+KeAlertResumeThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function attempts to alert a thread in kernel mode and cause its
+ execution to be continued if it is currently in an alertable Wait state.
+ In addition, a resume operation is performed on the specified thread.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The previous suspend count.
+
+--*/
+
+{
+
+ ULONG OldCount;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level, lock dispatcher database, and lock
+ // APC queue.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ KiAcquireSpinLock(&Thread->ApcQueueLock);
+
+ //
+ // If the kernel mode alerted state is FALSE, then attempt to alert
+ // the thread for kernel mode.
+ //
+
+ if (Thread->Alerted[KernelMode] == FALSE) {
+
+ //
+ // If the thread is currently in a Wait state and the Wait is alertable,
+ // then the thread is unwaited with a status of "alerted". Else set the
+ // kernel mode alerted variable.
+ //
+
+ if ((Thread->State == Waiting) && (Thread->Alertable == TRUE)) {
+ KiUnwaitThread(Thread, STATUS_ALERTED, ALERT_INCREMENT);
+
+ } else {
+ Thread->Alerted[KernelMode] = TRUE;
+ }
+ }
+
+ //
+ // Capture the current suspend count.
+ //
+
+ OldCount = Thread->SuspendCount;
+
+ //
+ // If the thread is currently suspended, then decrement its suspend count.
+ //
+
+ if (OldCount != 0) {
+ Thread->SuspendCount -= 1;
+
+ //
+ // If the resultant suspend count is zero and the freeze count is
+ // zero, then resume the thread by releasing its suspend semaphore.
+ //
+
+ if ((Thread->SuspendCount == 0) && (Thread->FreezeCount == 0)) {
+ Thread->SuspendSemaphore.Header.SignalState += 1;
+ KiWaitTest(&Thread->SuspendSemaphore, RESUME_INCREMENT);
+ }
+ }
+
+ //
+ // Unlock APC queue, unlock dispatcher database, lower IRQL to its
+ // previous value, and return the previous suspend count.
+ //
+
+ KiReleaseSpinLock(&Thread->ApcQueueLock);
+ KiUnlockDispatcherDatabase(OldIrql);
+ return OldCount;
+}
+
+VOID
+KeBoostPriorityThread (
+ IN PKTHREAD Thread,
+ IN KPRIORITY Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function boosts the priority of the specified thread using the
+ same algorithm used when a thread gets a boost from a wait operation.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Increment - Supplies the priority increment that is to be applied to
+ the thread's priority.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the thread does not run at a realtime priority level, then boost
+ // the thread priority.
+ //
+
+ if (Thread->Priority < LOW_REALTIME_PRIORITY) {
+ KiBoostPriorityThread(Thread, Increment);
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+KAFFINITY
+KeConfineThread (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function confines the execution of the current thread to the current
+ processor.
+
+Arguments:
+
+    None.
+
+Return Value:
+
+ The previous affinity value.
+
+--*/
+
+{
+
+ KAFFINITY Affinity;
+ KIRQL OldIrql;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ Thread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current affinity and compute new affinity value by
+ // shifting a one bit left by the current processor number.
+ //
+
+ Affinity = Thread->Affinity;
+ Thread->Affinity = (KAFFINITY)(1 << Thread->NextProcessor);
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return the previous affinity value.
+ //
+
+ return Affinity;
+}
+
+BOOLEAN
+KeDisableApcQueuingThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function disables the queuing of APC's to the specified thread.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The previous value of the APC queuing state variable.
+
+--*/
+
+{
+
+ BOOLEAN ApcQueueable;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current state of the APC queueable state variable and
+ // set its state to FALSE.
+ //
+
+ ApcQueueable = Thread->ApcQueueable;
+ Thread->ApcQueueable = FALSE;
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return the previous APC queueable state.
+ //
+
+ return ApcQueueable;
+}
+
+BOOLEAN
+KeEnableApcQueuingThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function enables the queuing of APC's to the specified thread.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The previous value of the APC queuing state variable.
+
+--*/
+
+{
+
+ BOOLEAN ApcQueueable;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current state of the APC queueable state variable and
+ // set its state to TRUE.
+ //
+
+ ApcQueueable = Thread->ApcQueueable;
+ Thread->ApcQueueable = TRUE;
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return previous APC queueable state.
+ //
+
+ return ApcQueueable;
+}
+
+ULONG
+KeForceResumeThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function forces resumption of thread execution if the thread is
+ suspended. If the specified thread is not suspended, then no operation
+ is performed.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The sum of the previous suspend count and the freeze count.
+
+--*/
+
+{
+
+ ULONG OldCount;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+    // Capture the sum of the current suspend count and the freeze count.
+ //
+
+ OldCount = Thread->SuspendCount + Thread->FreezeCount;
+
+ //
+ // If the thread is currently suspended, then force resumption of
+ // thread execution.
+ //
+
+ if (OldCount != 0) {
+ Thread->FreezeCount = 0;
+ Thread->SuspendCount = 0;
+ Thread->SuspendSemaphore.Header.SignalState += 1;
+ KiWaitTest(&Thread->SuspendSemaphore, RESUME_INCREMENT);
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+    // Return the sum of the previous suspend count and the freeze count.
+ //
+
+ return OldCount;
+}
+
+VOID
+KeFreezeAllThreads (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    This function suspends the execution of all threads in the current
+ process except the current thread. If the freeze count overflows
+ the maximum suspend count, then a condition is raised.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKTHREAD CurrentThread;
+ PLIST_ENTRY ListHead;
+ PLIST_ENTRY NextEntry;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+ ULONG OldCount;
+ KIRQL OldIrql;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Get the address of the current thread object, the current process
+ // object, raise IRQL to dispatch level, lock dispatcher database,
+ // and freeze the execution of all threads in the process except the
+ // current thread.
+ //
+
+ CurrentThread = KeGetCurrentThread();
+ Process = CurrentThread->ApcState.Process;
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If the freeze count of the current thread is not zero, then there
+ // is another thread that is trying to freeze this thread. Unlock the
+ // dispatcher, lower IRQL to its previous value, allow the suspend
+ // APC to occur, then raise IRQL to dispatch level, lock the dispatcher
+ // database, and try again.
+ //
+
+ while (CurrentThread->FreezeCount != 0) {
+ KiUnlockDispatcherDatabase(OldIrql);
+ KiLockDispatcherDatabase(&OldIrql);
+ }
+
+ KeEnterCriticalRegion();
+
+ //
+ // Freeze all threads except the current thread.
+ //
+
+ ListHead = &Process->ThreadListHead;
+ NextEntry = ListHead->Flink;
+ do {
+
+ //
+ // Get the address of the next thread and suspend it if it is
+ // not the current thread.
+ //
+
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, ThreadListEntry);
+ if (Thread != CurrentThread) {
+
+ //
+ // Increment the freeze count. If the thread was not previously
+ // suspended, then queue the thread's suspend APC.
+ //
+
+ OldCount = Thread->FreezeCount;
+
+ ASSERT(OldCount != MAXIMUM_SUSPEND_COUNT);
+
+ Thread->FreezeCount += 1;
+ if ((OldCount == 0) && (Thread->SuspendCount == 0)) {
+ if (KiInsertQueueApc(&Thread->SuspendApc, RESUME_INCREMENT) == FALSE) {
+ Thread->SuspendSemaphore.Header.SignalState -= 1;
+ }
+ }
+ }
+
+ NextEntry = NextEntry->Flink;
+ } while (NextEntry != ListHead);
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+BOOLEAN
+KeQueryAutoAlignmentThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the data alignment handling mode for the specified
+ thread.
+
+Arguments:
+
+    Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ A value of TRUE is returned if data alignment exceptions are being
+ automatically handled by the kernel. Otherwise, a value of FALSE
+ is returned.
+
+--*/
+
+{
+
+ ASSERT_THREAD(Thread);
+
+ //
+ // Return the data alignment handling mode for the thread.
+ //
+
+ return Thread->AutoAlignment;
+}
+
+LONG
+KeQueryBasePriorityThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function returns the base priority increment of the specified
+ thread.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The base priority increment of the specified thread.
+
+--*/
+
+{
+
+ LONG Increment;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+    // If priority saturation occurred the last time the thread base priority
+    // was set, then return the saturation increment value. Otherwise, compute
+ // the increment value as the difference between the thread base priority
+ // and the process base priority.
+ //
+
+ Process = Thread->ApcStatePointer[0]->Process;
+ Increment = Thread->BasePriority - Process->BasePriority;
+ if (Thread->Saturation != FALSE) {
+ if (Increment > 0) {
+ Increment = ((HIGH_PRIORITY + 1) / 2);
+
+ } else {
+ Increment = -((HIGH_PRIORITY + 1) / 2);
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+    // Return the base priority increment of the specified thread.
+ //
+
+ return Increment;
+}
+
+BOOLEAN
+KeReadStateThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function reads the current signal state of a thread object.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The current signal state of the thread object.
+
+--*/
+
+{
+
+ ASSERT_THREAD(Thread);
+
+ //
+ // Return current signal state of thread object.
+ //
+
+ return (BOOLEAN)Thread->Header.SignalState;
+}
+
+VOID
+KeReadyThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function readies a thread for execution. If the thread's process
+ is currently not in the balance set, then the thread is inserted in the
+ thread's process' ready queue. Else if the thread is higher priority than
+ another thread that is currently running on a processor then the thread
+ is selected for execution on that processor. Else the thread is inserted
+ in the dispatcher ready queue selected by its priority.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Ready the specified thread for execution.
+ //
+
+ KiReadyThread(Thread);
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+ULONG
+KeResumeThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function resumes the execution of a suspended thread. If the
+ specified thread is not suspended, then no operation is performed.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The previous suspend count.
+
+--*/
+
+{
+
+ ULONG OldCount;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current suspend count.
+ //
+
+ OldCount = Thread->SuspendCount;
+
+ //
+ // If the thread is currently suspended, then decrement its suspend count.
+ //
+
+ if (OldCount != 0) {
+ Thread->SuspendCount -= 1;
+
+ //
+ // If the resultant suspend count is zero and the freeze count is
+ // zero, then resume the thread by releasing its suspend semaphore.
+ //
+
+ if ((Thread->SuspendCount == 0) && (Thread->FreezeCount == 0)) {
+ Thread->SuspendSemaphore.Header.SignalState += 1;
+ KiWaitTest(&Thread->SuspendSemaphore, RESUME_INCREMENT);
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return the previous suspend count.
+ //
+
+ return OldCount;
+}
+
+VOID
+KeRevertToUserAffinityThread (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+    This function sets the affinity of the current thread to its user
+ affinity.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRKTHREAD CurrentThread;
+ PRKTHREAD NextThread;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+ ASSERT(KeGetCurrentThread()->SystemAffinityActive != FALSE);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ CurrentThread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Set the current affinity to the user affinity.
+ //
+ // If the current processor is not in the new affinity set and another
+ // thread has not already been selected for execution on the current
+ // processor, then select a new thread for the current processor.
+ //
+
+ CurrentThread->Affinity = CurrentThread->UserAffinity;
+ CurrentThread->SystemAffinityActive = FALSE;
+ Prcb = KeGetCurrentPrcb();
+ if (((Prcb->SetMember & CurrentThread->Affinity) == 0) &&
+ (Prcb->NextThread == NULL)) {
+ NextThread = KiSelectNextThread(CurrentThread);
+ NextThread->State = Standby;
+ Prcb->NextThread = NextThread;
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+VOID
+KeRundownThread (
+    VOID
+    )
+
+/*++
+
+Routine Description:
+
+ This function is called by the executive to rundown thread structures
+ which must be guarded by the dispatcher database lock and which must
+ be processed before actually terminating the thread. An example of such
+ a structure is the mutant ownership list that is anchored in the kernel
+ thread object.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKMUTANT Mutant;
+ PLIST_ENTRY NextEntry;
+ KIRQL OldIrql;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Rundown possible associated channel object or receive buffer.
+ //
+
+#if 0
+
+ KiRundownChannel();
+
+#endif
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ Thread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Scan the list of owned mutant objects and release the mutant objects
+ // with an abandoned status. If the mutant is a kernel mutex, then bug
+ // check.
+ //
+
+ NextEntry = Thread->MutantListHead.Flink;
+ while (NextEntry != &Thread->MutantListHead) {
+ Mutant = CONTAINING_RECORD(NextEntry, KMUTANT, MutantListEntry);
+ if (Mutant->ApcDisable != 0) {
+ KeBugCheckEx(THREAD_TERMINATE_HELD_MUTEX,
+ (ULONG)Thread,
+ (ULONG)Mutant, 0, 0);
+ }
+
+ RemoveEntryList(&Mutant->MutantListEntry);
+ Mutant->Header.SignalState = 1;
+ Mutant->Abandoned = TRUE;
+ Mutant->OwnerThread = (PKTHREAD)NULL;
+ if (IsListEmpty(&Mutant->Header.WaitListHead) != TRUE) {
+ KiWaitTest(Mutant, MUTANT_INCREMENT);
+ }
+
+ NextEntry = Thread->MutantListHead.Flink;
+ }
+
+ //
+    // Rundown any architecture specific structures.
+ //
+
+ KiRundownThread(Thread);
+
+ //
+ // Release dispatcher database lock and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+KAFFINITY
+KeSetAffinityThread (
+ IN PKTHREAD Thread,
+ IN KAFFINITY Affinity
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the affinity of a specified thread to a new
+ value. If the new affinity is not a proper subset of the parent
+ process affinity, or is null, then an error condition is raised.
+ If the specified thread is running on, or about to run on, a
+ processor for which it is no longer able to run, then the target
+ processor is rescheduled. If the specified thread is in a ready
+ state and is not in the parent process ready queue, then it is
+ rereadied to reevaluate any additional processors it may run on.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+    Affinity - Supplies the new set of processors on which the thread
+ can run.
+
+Return Value:
+
+ The previous affinity of the specified thread.
+
+--*/
+
+{
+
+ KAFFINITY OldAffinity;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+ PKPROCESS Process;
+ ULONG Processor;
+ KPRIORITY ThreadPriority;
+ PRKTHREAD Thread1;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+    // Capture the current affinity of the specified thread and get the
+    // address of the parent process object.
+ //
+
+ OldAffinity = Thread->UserAffinity;
+ Process = Thread->ApcState.Process;
+
+ //
+ // If new affinity is not a proper subset of the parent process affinity,
+ // or the new affinity is null, then bugcheck.
+ //
+
+ if (((Affinity & Process->Affinity) != (Affinity)) || (!Affinity)) {
+ KeBugCheck(INVALID_AFFINITY_SET);
+ }
+
+ //
+ // Set the thread user affinity to the specified value.
+ //
+    // If the thread is not currently executing with system affinity active,
+ // then set the thread current affinity and switch on the thread state.
+ //
+
+ Thread->UserAffinity = Affinity;
+ if (Thread->SystemAffinityActive == FALSE) {
+ Thread->Affinity = Affinity;
+ switch (Thread->State) {
+
+ //
+ // Ready State.
+ //
+ // If the thread is not in the process ready queue, then remove
+ // it from its current dispatcher ready queue and reready it for
+ // execution.
+ //
+
+ case Ready:
+ if (Thread->ProcessReadyQueue == FALSE) {
+ RemoveEntryList(&Thread->WaitListEntry);
+ ThreadPriority = Thread->Priority;
+ if (IsListEmpty(&KiDispatcherReadyListHead[ThreadPriority]) != FALSE) {
+ ClearMember(ThreadPriority, KiReadySummary);
+ }
+
+ KiReadyThread(Thread);
+ }
+
+ break;
+
+ //
+ // Standby State.
+ //
+ // If the target processor is not in the new affinity set, then
+ // set the next thread to null for the target processor, select
+ // a new thread to run on the target processor, and reready the
+ // thread for execution.
+ //
+
+ case Standby:
+ Processor = Thread->NextProcessor;
+ Prcb = KiProcessorBlock[Processor];
+ if ((Prcb->SetMember & Affinity) == 0) {
+ Prcb->NextThread = NULL;
+ Thread1 = KiSelectNextThread(Thread);
+ Thread1->State = Standby;
+ Prcb->NextThread = Thread1;
+ KiReadyThread(Thread);
+ }
+
+ break;
+
+ //
+ // Running State.
+ //
+ // If the target processor is not in the new affinity set and
+ // another thread has not already been selected for execution
+ // on the target processor, then select a new thread for the
+ // target processor, and cause the target processor to be
+ // redispatched.
+ //
+
+ case Running:
+ Processor = Thread->NextProcessor;
+ Prcb = KiProcessorBlock[Processor];
+ if (((Prcb->SetMember & Affinity) == 0) &&
+ (Prcb->NextThread == NULL)) {
+ Thread1 = KiSelectNextThread(Thread);
+ Thread1->State = Standby;
+ Prcb->NextThread = Thread1;
+ KiRequestDispatchInterrupt(Processor);
+ }
+
+ break;
+
+ //
+ // Initialized, Terminated, Waiting, Transition case - For these
+ // states it is sufficient to just set the new thread affinity.
+ //
+
+ default:
+ break;
+ }
+ }
+
+ //
+ // Unlock dispatcher database, lower IRQL to its previous value, and
+ // return the previous user affinity.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return OldAffinity;
+}
+
+VOID
+KeSetSystemAffinityThread (
+ IN KAFFINITY Affinity
+ )
+
+/*++
+
+Routine Description:
+
+    This function sets the system affinity of the current thread.
+
+Arguments:
+
+    Affinity - Supplies the new set of processors on which the thread
+ can run.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRKTHREAD CurrentThread;
+ PRKTHREAD NextThread;
+ KIRQL OldIrql;
+ PKPRCB Prcb;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+ ASSERT((Affinity & KeActiveProcessors) != 0);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ CurrentThread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Set the current affinity to the specified affinity.
+ //
+ // If the current processor is not in the new affinity set and another
+ // thread has not already been selected for execution on the current
+ // processor, then select a new thread for the current processor.
+ //
+
+ CurrentThread->Affinity = Affinity;
+ CurrentThread->SystemAffinityActive = TRUE;
+ Prcb = KeGetCurrentPrcb();
+ if (((Prcb->SetMember & CurrentThread->Affinity) == 0) &&
+ (Prcb->NextThread == NULL)) {
+ NextThread = KiSelectNextThread(CurrentThread);
+ NextThread->State = Standby;
+ Prcb->NextThread = NextThread;
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+LONG
+KeSetBasePriorityThread (
+ IN PKTHREAD Thread,
+ IN LONG Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the base priority of the specified thread to a
+ new value. The new base priority for the thread is the process base
+ priority plus the increment.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Increment - Supplies the base priority increment of the subject thread.
+
+ N.B. If the absolute value of the increment is such that saturation
+ of the base priority is forced, then subsequent changes to the
+ parent process base priority will not change the base priority
+ of the thread.
+
+Return Value:
+
+ The previous base priority increment of the specified thread.
+
+--*/
+
+{
+
+ KPRIORITY NewBase;
+ KPRIORITY NewPriority;
+ KPRIORITY OldBase;
+ LONG OldIncrement;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the base priority of the specified thread and determine
+    // whether saturation is being forced.
+ //
+
+ Process = Thread->ApcStatePointer[0]->Process;
+ OldBase = Thread->BasePriority;
+ OldIncrement = OldBase - Process->BasePriority;
+ Thread->Saturation = FALSE;
+ if (abs(Increment) >= (HIGH_PRIORITY + 1) / 2) {
+ Thread->Saturation = TRUE;
+ }
+
+ //
+ // Set the base priority of the specified thread. If the thread's process
+ // is in the realtime class, then limit the change to the realtime class.
+ // Otherwise, limit the change to the variable class.
+ //
+
+ NewBase = Process->BasePriority + Increment;
+ if (Process->BasePriority >= LOW_REALTIME_PRIORITY) {
+ if (NewBase < LOW_REALTIME_PRIORITY) {
+ NewBase = LOW_REALTIME_PRIORITY;
+
+ } else if (NewBase > HIGH_PRIORITY) {
+ NewBase = HIGH_PRIORITY;
+ }
+
+ //
+ // Set the new priority of the thread to the new base priority.
+ //
+
+ NewPriority = NewBase;
+
+ } else {
+ if (NewBase >= LOW_REALTIME_PRIORITY) {
+ NewBase = LOW_REALTIME_PRIORITY - 1;
+
+ } else if (NewBase <= LOW_PRIORITY) {
+ NewBase = 1;
+ }
+
+ //
+ // Compute the new thread priority. If the new priority is outside
+ // the variable class, then set the new priority to the highest
+ // variable priority.
+ //
+
+ NewPriority = Thread->Priority +
+ (NewBase - OldBase) - Thread->PriorityDecrement;
+
+ if (NewPriority >= LOW_REALTIME_PRIORITY) {
+ NewPriority = LOW_REALTIME_PRIORITY - 1;
+ }
+ }
+
+ Thread->BasePriority = (SCHAR)NewBase;
+ Thread->Quantum = Process->ThreadQuantum;
+ Thread->DecrementCount = 0;
+ Thread->PriorityDecrement = 0;
+ KiSetPriorityThread(Thread, NewPriority);
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return the previous thread base priority.
+ //
+
+ return OldIncrement;
+}
+
+LOGICAL
+KeSetDisableBoostThread (
+ IN PKTHREAD Thread,
+ IN LOGICAL Disable
+ )
+
+/*++
+
+Routine Description:
+
+ This function disables priority boosts for the specified thread.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Disable - Supplies a logical value that determines whether priority
+ boosts for the thread are disabled or enabled.
+
+Return Value:
+
+ The previous value of the disable boost state variable.
+
+--*/
+
+{
+
+ LOGICAL DisableBoost;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current state of the disable boost variable and set its
+ // state to the specified value.
+ //
+
+ DisableBoost = Thread->DisableBoost;
+ Thread->DisableBoost = (BOOLEAN)Disable;
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return the previous disable boost state.
+ //
+
+ return DisableBoost;
+}
+
+CCHAR
+KeSetIdealProcessorThread (
+ IN PKTHREAD Thread,
+ IN CCHAR Processor
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the ideal processor for execution of the specified thread.
+
+Arguments:
+
+ Thread - Supplies a pointer to the thread whose ideal processor number is
+ set to the specified value.
+
+ Processor - Supplies the number of the ideal processor (the distinguished
+ value MAXIMUM_PROCESSORS indicates that there is no ideal processor).
+
+Return Value:
+
+ The previous ideal processor number.
+
+--*/
+
+{
+
+ CCHAR OldProcessor;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+
+ //
+ // Capture the previous ideal processor value, set the new ideal
+ // processor value, and return the old ideal processor value for the
+ // specified thread.
+ //
+ // Note that this is done under the dispatcher lock in order to
+ // synchronize the updates with the other fields that share the
+ // same DWORD. Otherwise there is a granularity problem on Alpha.
+ //
+
+ ASSERT(Processor <= MAXIMUM_PROCESSORS);
+
+ KiLockDispatcherDatabase(&OldIrql);
+ OldProcessor = Thread->IdealProcessor;
+ if (Processor < MAXIMUM_PROCESSORS) {
+ Thread->IdealProcessor = Processor;
+
+ } else {
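+
+ //
+ // No explicit ideal processor was specified. Use the process thread
+ // seed to select the ideal processor in a round robin fashion across
+ // the set of processors.
+ //
+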
+ Process = Thread->ApcState.Process;
+ Process->ThreadSeed += 1;
+ Thread->IdealProcessor = (UCHAR)(Process->ThreadSeed % KeNumberProcessors);
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return OldProcessor;
+}
+
+BOOLEAN
+KeSetKernelStackSwapEnable (
+ IN BOOLEAN Enable
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the kernel stack swap enable value for the current
+ thread and returns the old swap enable value.
+
+Arguments:
+
+ Enable - Supplies the new kernel stack swap enable value.
+
+Return Value:
+
+ The previous kernel stack swap enable value.
+
+--*/
+
+{
+
+ BOOLEAN OldState;
+ PKTHREAD Thread;
+
+ //
+ // Capture the previous kernel stack swap enable value, set the new
+ // swap enable value, and return the old swap enable value for the
+ // current thread.
+ //
+
+ Thread = KeGetCurrentThread();
+ OldState = Thread->EnableStackSwap;
+ Thread->EnableStackSwap = Enable;
+ return OldState;
+}
+
+KPRIORITY
+KeSetPriorityThread (
+ IN PKTHREAD Thread,
+ IN KPRIORITY Priority
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the priority of the specified thread to a new value.
+ If the new thread priority is lower than the old thread priority, then
+ rescheduling may take place if the thread is currently running on, or
+ about to run on, a processor.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Priority - Supplies the new priority of the subject thread.
+
+Return Value:
+
+ The previous priority of the specified thread.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ KPRIORITY OldPriority;
+ PKPROCESS Process;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+ ASSERT(((Priority != 0) || (Thread->BasePriority == 0)) &&
+ (Priority <= HIGH_PRIORITY));
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current thread priority, set the thread priority to the
+ // new value, and replenish the thread quantum. It is assumed that the
+ // priority would not be set unless the thread had already lost its
+ // initial quantum.
+ //
+
+ OldPriority = Thread->Priority;
+ Process = Thread->ApcStatePointer[0]->Process;
+ Thread->Quantum = Process->ThreadQuantum;
+ Thread->DecrementCount = 0;
+ Thread->PriorityDecrement = 0;
+ KiSetPriorityThread(Thread, Priority);
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return the previous thread priority.
+ //
+
+ return OldPriority;
+}
+
+ULONG
+KeSuspendThread (
+ IN PKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function suspends the execution of a thread. If the suspend count
+ overflows the maximum suspend count, then an exception is raised.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The previous suspend count.
+
+--*/
+
+{
+
+ ULONG OldCount;
+ KIRQL OldIrql;
+
+ ASSERT_THREAD(Thread);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the current suspend count.
+ //
+
+ OldCount = Thread->SuspendCount;
+
+ //
+ // If the suspend count is at its maximum value, then unlock dispatcher
+ // database, lower IRQL to its previous value, and raise an error
+ // condition.
+ //
+
+ if (OldCount == MAXIMUM_SUSPEND_COUNT) {
+
+ //
+ // Unlock the dispatcher database and raise an exception.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ ExRaiseStatus(STATUS_SUSPEND_COUNT_EXCEEDED);
+ }
+
+ //
+ // Increment the suspend count. If the thread was not previously suspended,
+ // then queue the thread's suspend APC.
+ //
+
+ Thread->SuspendCount += 1;
+ if ((OldCount == 0) && (Thread->FreezeCount == 0)) {
+ if (KiInsertQueueApc(&Thread->SuspendApc, RESUME_INCREMENT) == FALSE) {
+ Thread->SuspendSemaphore.Header.SignalState -= 1;
+ }
+ }
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return the previous suspend count.
+ //
+
+ return OldCount;
+}
+
+VOID
+KeTerminateThread (
+ IN KPRIORITY Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function terminates the execution of the current thread, sets the
+ signal state of the thread to Signaled, and attempts to satisfy as many
+ Waits as possible. The scheduling state of the thread is set to terminated,
+ and a new thread is selected to run on the current processor. There is no
+ return from this function.
+
+Arguments:
+
+ Increment - Supplies the priority increment that is applied when satisfying
+ waits on the terminating thread.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRKTHREAD NextThread;
+ KIRQL OldIrql;
+ PKPROCESS Process;
+ PRKQUEUE Queue;
+ PRKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ Thread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Insert the thread in the reaper list.
+ //
+ // If a reaper thread is not currently active, then insert a work item in
+ // the hyper critical work queue.
+ //
+ // N.B. This code has knowledge of the reaper data structures and how
+ // worker threads are implemented.
+ //
+ // N.B. The dispatcher database lock is used to synchronize access to the
+ // reaper list.
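+ //
+ // N.B. The TerminationPortList entry of the executive thread object is
+ //      reused to link the terminating thread into the reaper list.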
+ //
+
+ InsertTailList(&PsReaperListHead, &((PETHREAD)Thread)->TerminationPortList);
+ if (PsReaperActive == FALSE) {
+ PsReaperActive = TRUE;
+ KiInsertQueue(&ExWorkerQueue[HyperCriticalWorkQueue],
+ &PsReaperWorkItem.List,
+ FALSE);
+ }
+
+ //
+ // If the current thread is processing a queue entry, then remove
+ // the thread from the queue object thread list and attempt to
+ // activate another thread that is blocked on the queue object.
+ //
+
+ Queue = Thread->Queue;
+ if (Queue != NULL) {
+ RemoveEntryList(&Thread->QueueListEntry);
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the state of the current thread object to Signaled, and attempt
+ // to satisfy as many Waits as possible.
+ //
+
+ Thread->Header.SignalState = TRUE;
+ if (IsListEmpty(&Thread->Header.WaitListHead) != TRUE) {
+ KiWaitTest((PVOID)Thread, Increment);
+ }
+
+ //
+ // Remove thread from its parent process' thread list.
+ //
+
+ RemoveEntryList(&Thread->ThreadListEntry);
+
+ //
+ // Set thread scheduling state to terminated, decrement the process'
+ // stack count, select a new thread to run on the current processor,
+ // and swap context to the new thread.
+ //
+
+ Thread->State = Terminated;
+ Process = Thread->ApcState.Process;
+ Process->StackCount -= 1;
+ if (Process->StackCount == 0) {
+ if (Process->ThreadListHead.Flink != &Process->ThreadListHead) {
+ Process->State = ProcessInTransition;
+ InsertTailList(&KiProcessOutSwapListHead, &Process->SwapListEntry);
+ KiSwapEvent.Header.SignalState = 1;
+ if (IsListEmpty(&KiSwapEvent.Header.WaitListHead) == FALSE) {
+ KiWaitTest(&KiSwapEvent, BALANCE_INCREMENT);
+ }
+ }
+ }
+
+ KiSwapThread();
+
+ return;
+}
+
+BOOLEAN
+KeTestAlertThread (
+ IN KPROCESSOR_MODE AlertMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function tests to determine if the alerted variable for the
+ specified processor mode has a value of TRUE or whether a user mode
+ APC should be delivered to the current thread.
+
+Arguments:
+
+ AlertMode - Supplies the processor mode which is to be tested
+ for an alerted condition.
+
+Return Value:
+
+ The previous state of the alerted variable for the specified processor
+ mode.
+
+--*/
+
+{
+
+ BOOLEAN Alerted;
+ KIRQL OldIrql;
+ PKTHREAD Thread;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock APC queue.
+ //
+
+ Thread = KeGetCurrentThread();
+ KiLockApcQueue(Thread, &OldIrql);
+
+ //
+ // If the current thread is alerted for the specified processor mode,
+ // then clear the alerted state. Else if the specified processor mode
+ // is user and the current thread's user mode APC queue contains an entry,
+ // then set user APC pending.
+ //
+
+ Alerted = Thread->Alerted[AlertMode];
+ if (Alerted == TRUE) {
+ Thread->Alerted[AlertMode] = FALSE;
+
+ } else if ((AlertMode == UserMode) &&
+ (IsListEmpty(&Thread->ApcState.ApcListHead[UserMode]) != TRUE)) {
+ Thread->ApcState.UserApcPending = TRUE;
+ }
+
+ //
+ // Unlock APC queue, lower IRQL to its previous value, and return the
+ // previous alerted state for the specified mode.
+ //
+
+ KiUnlockApcQueue(Thread, OldIrql);
+ return Alerted;
+}
+
+VOID
+KeThawAllThreads (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function resumes the execution of all frozen threads
+ in the current process.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PLIST_ENTRY ListHead;
+ PLIST_ENTRY NextEntry;
+ PKPROCESS Process;
+ PKTHREAD Thread;
+ ULONG OldCount;
+ KIRQL OldIrql;
+
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Get the address of the current process object, raise IRQL to
+ // dispatch level, lock the dispatcher database, and thaw the execution
+ // of all threads in the current process that have been frozen.
+ //
+
+ Process = KeGetCurrentThread()->ApcState.Process;
+ KiLockDispatcherDatabase(&OldIrql);
+ ListHead = &Process->ThreadListHead;
+ NextEntry = ListHead->Flink;
+ do {
+
+ //
+ // Get the address of the next thread and thaw its execution if
+ // it was previously frozen.
+ //
+
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, ThreadListEntry);
+ OldCount = Thread->FreezeCount;
+ if (OldCount != 0) {
+ Thread->FreezeCount -= 1;
+
+ //
+ // If the resultant freeze count is zero and the suspend count is
+ // zero, then resume the thread by releasing its suspend semaphore.
+ //
+
+ if ((Thread->SuspendCount == 0) && (Thread->FreezeCount == 0)) {
+ Thread->SuspendSemaphore.Header.SignalState += 1;
+ KiWaitTest(&Thread->SuspendSemaphore, RESUME_INCREMENT);
+ }
+ }
+
+ NextEntry = NextEntry->Flink;
+ } while (NextEntry != ListHead);
+
+ //
+ // Unlock dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ KeLeaveCriticalRegion();
+ return;
+}
diff --git a/private/ntos/ke/thredsup.c b/private/ntos/ke/thredsup.c
new file mode 100644
index 000000000..3d4f4049b
--- /dev/null
+++ b/private/ntos/ke/thredsup.c
@@ -0,0 +1,884 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ thredsup.c
+
+Abstract:
+
+ This module contains the support routines for the thread object. It
+ contains functions to boost the priority of a thread, find a ready
+ thread, select the next thread, ready a thread, set priority of a
+ thread, and to suspend a thread.
+
+Author:
+
+ David N. Cutler (davec) 5-Mar-1989
+
+Environment:
+
+ All of the functions in this module execute in kernel mode except
+ the function that raises a user mode alert condition.
+
+Revision History:
+
+
+--*/
+
+#include "ki.h"
+
+//
+// Define context switch data collection macro.
+//
+
+//#define _COLLECT_SWITCH_DATA_ 1
+
+#if defined(_COLLECT_SWITCH_DATA_)
+
+#define KiIncrementSwitchCounter(Member) KeThreadSwitchCounters.Member += 1
+
+#else
+
+#define KiIncrementSwitchCounter(Member)
+
+#endif
+
+VOID
+KiSuspendNop (
+ IN PKAPC Apc,
+ IN OUT PKNORMAL_ROUTINE *NormalRoutine,
+ IN OUT PVOID *NormalContext,
+ IN OUT PVOID *SystemArgument1,
+ IN OUT PVOID *SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is the kernel routine for the builtin suspend APC for a
+ thread. It is executed in kernel mode as the result of queuing the
+ builtin suspend APC and performs no operation. It is called just prior
+ to calling the normal routine and simply returns.
+
+Arguments:
+
+ Apc - Supplies a pointer to a control object of type APC.
+
+ NormalRoutine - not used
+
+ NormalContext - not used
+
+ SystemArgument1 - not used
+
+ SystemArgument2 - not used
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // No operation is performed by this routine.
+ //
+
+ return;
+}
+
+PKTHREAD
+FASTCALL
+KiFindReadyThread (
+ IN ULONG Processor,
+ IN KPRIORITY LowPriority
+ )
+
+/*++
+
+Routine Description:
+
+ This function searches the dispatcher ready queues from the specified
+ high priority to the specified low priority in an attempt to find a thread
+ that can execute on the specified processor.
+
+Arguments:
+
+ Processor - Supplies the number of the processor to find a thread for.
+
+ LowPriority - Supplies the lowest priority dispatcher ready queue to
+ examine.
+
+Return Value:
+
+ If a thread is located that can execute on the specified processor, then
+ the address of the thread object is returned. Otherwise a null pointer is
+ returned.
+
+--*/
+
+{
+
+ ULONG HighPriority;
+ PRLIST_ENTRY ListHead;
+ PRLIST_ENTRY NextEntry;
+ ULONG PrioritySet;
+ KAFFINITY ProcessorSet;
+ PRKTHREAD Thread;
+ PRKTHREAD Thread1;
+ ULONG TickLow;
+ ULONG WaitTime;
+
+ //
+ // Compute the set of priority levels that should be scanned in an attempt
+ // to find a thread that can run on the specified processor.
+ //
+
+ PrioritySet = (~((1 << LowPriority) - 1)) & KiReadySummary;
+
+#if !defined(NT_UP)
+
+ ProcessorSet = (KAFFINITY)(1 << Processor);
+
+#endif
+
+ FindFirstSetLeftMember(PrioritySet, &HighPriority);
+ ListHead = &KiDispatcherReadyListHead[HighPriority];
+ PrioritySet <<= (31 - HighPriority);
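+
+ //
+ // N.B. The priority set has been left justified so that the highest
+ //      candidate priority occupies the sign bit. The loop below tests
+ //      the sign bit and shifts the set left one bit per iteration,
+ //      thereby scanning the ready queues from highest to lowest
+ //      priority.
+ //
+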
+ while (PrioritySet != 0) {
+
+ //
+ // If the next bit in the priority set is a one, then examine the
+ // corresponding dispatcher ready queue.
+ //
+
+ if ((LONG)PrioritySet < 0) {
+ NextEntry = ListHead->Flink;
+
+ ASSERT(NextEntry != ListHead);
+
+#if defined(NT_UP)
+
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, WaitListEntry);
+ RemoveEntryList(&Thread->WaitListEntry);
+ if (IsListEmpty(ListHead)) {
+ ClearMember(HighPriority, KiReadySummary);
+ }
+
+ return (PKTHREAD)Thread;
+
+#else
+
+ //
+ // Scan the specified dispatcher ready queue for a suitable
+ // thread to execute.
+ //
+
+ while (NextEntry != ListHead) {
+ Thread = CONTAINING_RECORD(NextEntry, KTHREAD, WaitListEntry);
+ NextEntry = NextEntry->Flink;
+ if (Thread->Affinity & ProcessorSet) {
+
+ //
+ // If the found thread ran on the specified processor
+ // last, the processor is the ideal processor for the
+ // thread, the thread has been waiting for longer than
+ // a quantum, or its priority is greater than low realtime
+ // plus 8, then the selected thread is returned. Otherwise,
+ // an attempt is made to find a more appropriate thread.
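+ //
+ // N.B. If a thread select notify routine has been registered, it is
+ //      consulted in place of the last and ideal processor checks.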
+ //
+
+ TickLow = KiQueryLowTickCount();
+ WaitTime = TickLow - Thread->WaitTime;
+ if ((KiThreadSelectNotifyRoutine ?
+ (KiThreadSelectNotifyRoutine(((PETHREAD)Thread)->Cid.UniqueThread) == FALSE) :
+ (((ULONG)Thread->NextProcessor != Processor) &&
+ ((ULONG)Thread->IdealProcessor != Processor))) &&
+ (WaitTime < (READY_SKIP_QUANTUM + 1)) &&
+ (HighPriority < (LOW_REALTIME_PRIORITY + 9))) {
+
+ //
+ // Search forward in the ready queue until the end
+ // of the list is reached or a more appropriate
+ // thread is found.
+ //
+
+ while (NextEntry != ListHead) {
+ Thread1 = CONTAINING_RECORD(NextEntry,
+ KTHREAD,
+ WaitListEntry);
+
+ NextEntry = NextEntry->Flink;
+ if ((Thread1->Affinity & ProcessorSet) &&
+ (KiThreadSelectNotifyRoutine ?
+ (KiThreadSelectNotifyRoutine(((PETHREAD)Thread1)->Cid.UniqueThread) != FALSE) :
+ (((ULONG)Thread1->NextProcessor == Processor) ||
+ ((ULONG)Thread1->IdealProcessor == Processor)))) {
+ Thread = Thread1;
+ break;
+ }
+
+ WaitTime = TickLow - Thread1->WaitTime;
+ if (WaitTime >= (READY_SKIP_QUANTUM + 1)) {
+ break;
+ }
+ }
+ }
+
+ if (Processor == (ULONG)Thread->IdealProcessor) {
+ KiIncrementSwitchCounter(FindIdeal);
+
+ } else if (Processor == (ULONG)Thread->NextProcessor) {
+ KiIncrementSwitchCounter(FindLast);
+
+ } else {
+ KiIncrementSwitchCounter(FindAny);
+ }
+
+ Thread->NextProcessor = (CCHAR)Processor;
+
+ RemoveEntryList(&Thread->WaitListEntry);
+ if (IsListEmpty(ListHead)) {
+ ClearMember(HighPriority, KiReadySummary);
+ }
+
+ return (PKTHREAD)Thread;
+ }
+ }
+
+#endif
+
+ }
+
+ HighPriority -= 1;
+ ListHead -= 1;
+ PrioritySet <<= 1;
+ };
+
+ //
+ // No thread could be found, return a null pointer.
+ //
+
+ return (PKTHREAD)NULL;
+}
+
+VOID
+FASTCALL
+KiReadyThread (
+ IN PRKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function readies a thread for execution and attempts to immediately
+ dispatch the thread for execution by preempting another lower priority
+ thread. If a thread can be preempted, then the specified thread enters
+ the standby state and the target processor is requested to dispatch. If
+ another thread cannot be preempted, then the specified thread is inserted
+ // either at the head or tail of the dispatcher ready queue selected by its
+ // priority according to whether it was preempted or not.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRKPRCB Prcb;
+ BOOLEAN Preempted;
+ KPRIORITY Priority;
+ PRKPROCESS Process;
+ ULONG Processor;
+ KPRIORITY ThreadPriority;
+ PRKTHREAD Thread1;
+ KAFFINITY IdleSet;
+
+ //
+ // Save the value of the thread's preempted flag, set thread preempted FALSE,
+ // capture the thread priority, and set the thread wait time.
+ //
+
+ Preempted = Thread->Preempted;
+ Thread->Preempted = FALSE;
+ ThreadPriority = Thread->Priority;
+ Thread->WaitTime = KiQueryLowTickCount();
+
+ //
+ // If the thread's process is not in memory, then insert the thread in
+ // the process ready queue and inswap the process.
+ //
+
+ Process = Thread->ApcState.Process;
+ if (Process->State != ProcessInMemory) {
+ Thread->State = Ready;
+ Thread->ProcessReadyQueue = TRUE;
+ InsertTailList(&Process->ReadyListHead, &Thread->WaitListEntry);
+ if (Process->State == ProcessOutOfMemory) {
+ Process->State = ProcessInTransition;
+ InsertTailList(&KiProcessInSwapListHead, &Process->SwapListEntry);
+ KiSwapEvent.Header.SignalState = 1;
+ if (IsListEmpty(&KiSwapEvent.Header.WaitListHead) == FALSE) {
+ KiWaitTest(&KiSwapEvent, BALANCE_INCREMENT);
+ }
+ }
+
+ return;
+
+ } else if (Thread->KernelStackResident == FALSE) {
+
+ //
+ // The thread's kernel stack is not resident. Increment the process
+ // stack count, set the state of the thread to transition, insert
+ // the thread in the kernel stack inswap list, and set the kernel
+ // stack inswap event.
+ //
+
+ Process->StackCount += 1;
+ Thread->State = Transition;
+ InsertTailList(&KiStackInSwapListHead, &Thread->WaitListEntry);
+ KiSwapEvent.Header.SignalState = 1;
+ if (IsListEmpty(&KiSwapEvent.Header.WaitListHead) == FALSE) {
+ KiWaitTest(&KiSwapEvent, BALANCE_INCREMENT);
+ }
+
+ return;
+
+ } else {
+
+ //
+ // If there is an idle processor, then schedule the thread on an idle
+ // processor, giving preference to the thread's ideal processor, then
+ // the processor the thread last ran on, and then the current processor.
+ // Otherwise, try to preempt either a thread in the standby or running
+ // state.
+ //
+
+#if defined(NT_UP)
+
+ Prcb = KiProcessorBlock[0];
+ if (KiIdleSummary != 0) {
+ KiIdleSummary = 0;
+ KiIncrementSwitchCounter(IdleLast);
+
+#else
+
+ IdleSet = KiIdleSummary & Thread->Affinity;
+ if (IdleSet != 0) {
+ Processor = Thread->IdealProcessor;
+ if ((IdleSet & (1 << Processor)) == 0) {
+ Processor = Thread->NextProcessor;
+ if ((IdleSet & (1 << Processor)) == 0) {
+ Prcb = KeGetCurrentPrcb();
+ if ((IdleSet & Prcb->SetMember) == 0) {
+ FindFirstSetLeftMember(IdleSet, &Processor);
+ KiIncrementSwitchCounter(IdleAny);
+
+ } else {
+ Processor = Prcb->Number;
+ KiIncrementSwitchCounter(IdleCurrent);
+ }
+
+ } else {
+ KiIncrementSwitchCounter(IdleLast);
+ }
+
+ } else {
+ KiIncrementSwitchCounter(IdleIdeal);
+ }
+
+ Thread->NextProcessor = (CCHAR)Processor;
+ ClearMember(Processor, KiIdleSummary);
+ Prcb = KiProcessorBlock[Processor];
+
+#endif
+
+ Prcb->NextThread = Thread;
+ Thread->State = Standby;
+ return;
+
+ } else {
+
+#if !defined(NT_UP)
+
+ Processor = Thread->IdealProcessor;
+ if ((Thread->Affinity & (1 << Processor)) == 0) {
+ Processor = Thread->NextProcessor;
+ if ((Thread->Affinity & (1 << Processor)) == 0) {
+ FindFirstSetLeftMember(Thread->Affinity, &Processor);
+ }
+ }
+
+ Thread->NextProcessor = (CCHAR)Processor;
+ Prcb = KiProcessorBlock[Processor];
+
+#endif
+
+ if (Prcb->NextThread != NULL) {
+ Thread1 = Prcb->NextThread;
+ if (ThreadPriority > Thread1->Priority) {
+ Thread1->Preempted = TRUE;
+ Prcb->NextThread = Thread;
+ Thread->State = Standby;
+ KiReadyThread(Thread1);
+ KiIncrementSwitchCounter(PreemptLast);
+ return;
+ }
+
+ } else {
+ Thread1 = Prcb->CurrentThread;
+ if (ThreadPriority > Thread1->Priority) {
+ Thread1->Preempted = TRUE;
+ Prcb->NextThread = Thread;
+ Thread->State = Standby;
+ KiRequestDispatchInterrupt(Thread->NextProcessor);
+ KiIncrementSwitchCounter(PreemptLast);
+ return;
+ }
+ }
+ }
+ }
+
+ //
+ // No thread can be preempted. Insert the thread in the dispatcher
+ // queue selected by its priority. If the thread was preempted and
+ // runs at a realtime priority level, then insert the thread at the
+ // front of the queue. Else insert the thread at the tail of the queue.
+ //
+
+ Thread->State = Ready;
+ if (Preempted != FALSE) {
+ InsertHeadList(&KiDispatcherReadyListHead[ThreadPriority],
+ &Thread->WaitListEntry);
+
+ } else {
+ InsertTailList(&KiDispatcherReadyListHead[ThreadPriority],
+ &Thread->WaitListEntry);
+ }
+
+ SetMember(ThreadPriority, KiReadySummary);
+ return;
+}
+
+PRKTHREAD
+FASTCALL
+KiSelectNextThread (
+ IN PRKTHREAD Thread
+ )
+
+/*++
+
+Routine Description:
+
+ This function selects the next thread to run on the processor that the
+ specified thread is running on. If a thread cannot be found, then the
+ idle thread is selected.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+Return Value:
+
+ The address of the selected thread object.
+
+--*/
+
+{
+
+ PRKPRCB Prcb;
+ ULONG Processor;
+ PRKTHREAD Thread1;
+
+ //
+ // Get the processor number and the address of the processor control block.
+ //
+
+#if !defined(NT_UP)
+
+ Processor = Thread->NextProcessor;
+ Prcb = KiProcessorBlock[Processor];
+
+#else
+
+ Prcb = KiProcessorBlock[0];
+
+#endif
+
+ //
+ // If a thread has already been selected to run on the specified processor,
+ // then return that thread as the selected thread.
+ //
+
+ if ((Thread1 = Prcb->NextThread) != NULL) {
+ Prcb->NextThread = (PKTHREAD)NULL;
+
+ } else {
+
+ //
+ // Attempt to find a ready thread to run.
+ //
+
+#if !defined(NT_UP)
+
+ Thread1 = KiFindReadyThread(Processor, 0);
+
+#else
+
+ Thread1 = KiFindReadyThread(0, 0);
+
+#endif
+
+ //
+ // If a thread was not found, then select the idle thread and
+ // set the processor member in the idle summary.
+ //
+
+ if (Thread1 == NULL) {
+ KiIncrementSwitchCounter(SwitchToIdle);
+ Thread1 = Prcb->IdleThread;
+
+#if !defined(NT_UP)
+
+ SetMember(Processor, KiIdleSummary);
+
+#else
+ KiIdleSummary = 1;
+
+#endif
+
+ }
+ }
+
+ //
+ // Return address of selected thread object.
+ //
+
+ return Thread1;
+}
+
+VOID
+FASTCALL
+KiSetPriorityThread (
+ IN PRKTHREAD Thread,
+ IN KPRIORITY Priority
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the priority of the specified thread to the specified
+ value. If the thread is in the standby or running state, then the processor
+ may be redispatched. If the thread is in the ready state, then some other
+ thread may be preempted.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ Priority - Supplies the new thread priority value.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRKPRCB Prcb;
+ ULONG Processor;
+ KPRIORITY ThreadPriority;
+ PRKTHREAD Thread1;
+
+ ASSERT(Priority <= HIGH_PRIORITY);
+
+ //
+ // Capture the current priority of the specified thread.
+ //
+
+ ThreadPriority = Thread->Priority;
+
+ //
+ // If the new priority is not equal to the old priority, then set the
+ // new priority of the thread and redispatch a processor if necessary.
+ //
+
+ if (Priority != ThreadPriority) {
+ Thread->Priority = (SCHAR)Priority;
+
+ //
+ // Case on the thread state.
+ //
+
+ switch (Thread->State) {
+
+ //
+ // Ready case - If the thread is not in the process ready queue,
+ // then remove it from its current dispatcher ready queue. If the
+ // new priority is less than the old priority, then insert the
+ // thread at the tail of the dispatcher ready queue selected by
+ // the new priority. Else reready the thread for execution.
+ //
+
+ case Ready:
+ if (Thread->ProcessReadyQueue == FALSE) {
+ RemoveEntryList(&Thread->WaitListEntry);
+ if (IsListEmpty(&KiDispatcherReadyListHead[ThreadPriority])) {
+ ClearMember(ThreadPriority, KiReadySummary);
+ }
+
+ if (Priority < ThreadPriority) {
+ InsertTailList(&KiDispatcherReadyListHead[Priority],
+ &Thread->WaitListEntry);
+ SetMember(Priority, KiReadySummary);
+
+ } else {
+ KiReadyThread(Thread);
+ }
+ }
+
+ break;
+
+ //
+ // Standby case - If the thread's priority is being lowered, then
+ // attempt to find another thread to execute. If a new thread is
+ // found, then put the new thread in the standby state, and reready
+ // the old thread.
+ //
+
+ case Standby:
+
+#if !defined(NT_UP)
+
+ Processor = Thread->NextProcessor;
+
+#endif
+
+ if (Priority < ThreadPriority) {
+
+#if !defined(NT_UP)
+
+ Thread1 = KiFindReadyThread(Processor, Priority);
+
+#else
+
+ Thread1 = KiFindReadyThread(0, Priority);
+
+#endif
+
+ if (Thread1 != NULL) {
+
+#if !defined(NT_UP)
+
+ Prcb = KiProcessorBlock[Processor];
+
+#else
+
+ Prcb = KiProcessorBlock[0];
+
+#endif
+
+ Thread1->State = Standby;
+ Prcb->NextThread = Thread1;
+ KiReadyThread(Thread);
+ }
+ }
+
+ break;
+
+ //
+ // Running case - If there is not a thread in the standby state
+ // on the thread's processor and the thread's priority is being
+ // lowered, then attempt to find another thread to execute. If
+ // a new thread is found, then put the new thread in the standby
+ // state, and request a redispatch on the thread's processor.
+ //
+
+ case Running:
+
+#if !defined(NT_UP)
+
+ Processor = Thread->NextProcessor;
+ Prcb = KiProcessorBlock[Processor];
+
+#else
+
+ Prcb = KiProcessorBlock[0];
+
+#endif
+
+ if (Prcb->NextThread == NULL) {
+ if (Priority < ThreadPriority) {
+
+#if !defined(NT_UP)
+
+ Thread1 = KiFindReadyThread(Processor, Priority);
+
+#else
+
+ Thread1 = KiFindReadyThread(0, Priority);
+
+#endif
+
+ if (Thread1 != NULL) {
+ Thread1->State = Standby;
+ Prcb->NextThread = Thread1;
+
+#if !defined(NT_UP)
+
+ KiRequestDispatchInterrupt(Processor);
+
+#endif
+
+ }
+ }
+ }
+
+ break;
+
+ //
+ // Initialized, Terminated, Waiting, Transition case - For
+ // these states it is sufficient to just set the new thread
+ // priority.
+ //
+
+ default:
+ break;
+ }
+ }
+
+ return;
+}
+
+VOID
+KiSuspendThread (
+ IN PVOID NormalContext,
+ IN PVOID SystemArgument1,
+ IN PVOID SystemArgument2
+ )
+
+/*++
+
+Routine Description:
+
+ This function is the kernel routine for the builtin suspend APC of a
+ thread. It is executed in kernel mode as the result of queuing the builtin
+ suspend APC and suspends thread execution by waiting nonalertable on the
+ thread's builtin suspend semaphore. When the thread is resumed, execution
+ of the thread is continued by simply returning.
+
+Arguments:
+
+ NormalContext - not used
+
+ SystemArgument1 - not used
+
+ SystemArgument2 - not used
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PRKTHREAD Thread;
+
+ //
+ // Get the address of the current thread object and wait nonalertable on
+ // the thread's builtin suspend semaphore.
+ //
+
+ Thread = KeGetCurrentThread();
+ KeWaitForSingleObject(&Thread->SuspendSemaphore,
+ Suspended,
+ KernelMode,
+ FALSE,
+ (PLARGE_INTEGER)NULL);
+
+ return;
+}
+#if 0
+
+VOID
+KiVerifyReadySummary (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function verifies the correctness of ready summary.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ ULONG Index;
+ ULONG Summary;
+ PKTHREAD Thread;
+
+ extern ULONG InitializationPhase;
+
+ //
+ // If initialization has been completed, then check the ready summary.
+ //
+
+ if (InitializationPhase == 2) {
+
+ //
+ // Scan the ready queues and compute the ready summary.
+ //
+
+ Summary = 0;
+ for (Index = 0; Index < MAXIMUM_PRIORITY; Index += 1) {
+ if (IsListEmpty(&KiDispatcherReadyListHead[Index]) == FALSE) {
+ Summary |= (1 << Index);
+ }
+ }
+
+ //
+ // If the computed summary does not agree with the current ready
+ // summary, then break into the debugger.
+ //
+
+ if (Summary != KiReadySummary) {
+ DbgBreakPoint();
+ }
+
+ //
+ // If the priority of the current thread or the next thread is
+ // not greater than or equal to all ready threads, then break
+ // into the debugger.
+ //
+
+ Thread = KeGetCurrentPrcb()->NextThread;
+ if (Thread == NULL) {
+ Thread = KeGetCurrentPrcb()->CurrentThread;
+ }
+
+ if ((1 << Thread->Priority) < (Summary & ((1 << Thread->Priority) - 1))) {
+ DbgBreakPoint();
+ }
+ }
+
+ return;
+}
+#endif
diff --git a/private/ntos/ke/timerobj.c b/private/ntos/ke/timerobj.c
new file mode 100644
index 000000000..782c528dc
--- /dev/null
+++ b/private/ntos/ke/timerobj.c
@@ -0,0 +1,367 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ timerobj.c
+
+Abstract:
+
+ This module implements the kernel timer object. Functions are
+ provided to initialize, read, set, and cancel timer objects.
+
+Author:
+
+ David N. Cutler (davec) 2-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// The following assert macro is used to check that an input timer is
+// really a ktimer and not something else, like deallocated pool.
+//
+
+#define ASSERT_TIMER(E) { \
+ ASSERT(((E)->Header.Type == TimerNotificationObject) || \
+ ((E)->Header.Type == TimerSynchronizationObject)); \
+}
+
+VOID
+KeInitializeTimer (
+ IN PKTIMER Timer
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes a kernel timer object.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize extended timer object with a type of notification and a
+ // period of zero.
+ //
+
+ KeInitializeTimerEx(Timer, NotificationTimer);
+ return;
+}
+
+VOID
+KeInitializeTimerEx (
+ IN PKTIMER Timer,
+ IN TIMER_TYPE Type
+ )
+
+/*++
+
+Routine Description:
+
+ This function initializes an extended kernel timer object.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+ Type - Supplies the type of timer object; NotificationTimer or
+ SynchronizationTimer.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ //
+ // Initialize standard dispatcher object header and set initial
+ // state of timer.
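+ //
+ // N.B. The object type is computed by adding the timer type to the
+ //      notification object type. This relies on NotificationTimer being
+ //      zero and on the synchronization timer object type immediately
+ //      following the notification timer object type.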
+ //
+
+ Timer->Header.Type = TimerNotificationObject + Type;
+ Timer->Header.Inserted = FALSE;
+ Timer->Header.Size = sizeof(KTIMER) / sizeof(LONG);
+ Timer->Header.SignalState = FALSE;
+
+#if DBG
+
+ Timer->TimerListEntry.Flink = NULL;
+ Timer->TimerListEntry.Blink = NULL;
+
+#endif
+
+ InitializeListHead(&Timer->Header.WaitListHead);
+ Timer->DueTime.QuadPart = 0;
+ Timer->Period = 0;
+ return;
+}
+
+BOOLEAN
+KeCancelTimer (
+ IN PKTIMER Timer
+ )
+
+/*++
+
+Routine Description:
+
+ This function cancels a timer that was previously set to expire at
+ a specified time. If the timer is not currently set, then no operation
+ is performed. Canceling a timer does not set the state of the timer to
+ Signaled.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+Return Value:
+
+ A boolean value of TRUE is returned if the specified timer was
+ currently set. Else a value of FALSE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Inserted;
+ KIRQL OldIrql;
+
+ ASSERT_TIMER(Timer);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level, lock the dispatcher database, and
+ // capture the timer inserted status. If the timer is currently set,
+ // then remove it from the timer list.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+ Inserted = Timer->Header.Inserted;
+ if (Inserted != FALSE) {
+ KiRemoveTreeTimer(Timer);
+ }
+
+ //
+ // Unlock the dispatcher database, lower IRQL to its previous value, and
+ // return boolean value that signifies whether the timer was set or not.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return Inserted;
+}
+
+BOOLEAN
+KeReadStateTimer (
+ IN PKTIMER Timer
+ )
+
+/*++
+
+Routine Description:
+
+ This function reads the current signal state of a timer object.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+Return Value:
+
+ The current signal state of the timer object.
+
+--*/
+
+{
+
+ ASSERT_TIMER(Timer);
+
+ //
+ // Return current signal state of timer object.
+ //
+
+ return (BOOLEAN)Timer->Header.SignalState;
+}
+
+BOOLEAN
+KeSetTimer (
+ IN PKTIMER Timer,
+ IN LARGE_INTEGER DueTime,
+ IN PKDPC Dpc OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets a timer to expire at a specified time. If the timer is
+ already set, then it is implicitly canceled before it is set to expire at
+ the specified time. Setting a timer causes its due time to be computed,
+ its state to be set to Not-Signaled, and the timer object itself to be
+ inserted in the timer list.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+ DueTime - Supplies an absolute or relative time at which the timer
+ is to expire.
+
+ Dpc - Supplies an optional pointer to a control object of type DPC.
+
+Return Value:
+
+ A boolean value of TRUE is returned if the specified timer was
+ currently set. Else a value of FALSE is returned.
+
+--*/
+
+{
+
+ //
+ // Set the timer with a period of zero.
+ //
+
+ return KeSetTimerEx(Timer, DueTime, 0, Dpc);
+}
+
+BOOLEAN
+KeSetTimerEx (
+ IN PKTIMER Timer,
+ IN LARGE_INTEGER DueTime,
+ IN LONG Period OPTIONAL,
+ IN PKDPC Dpc OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets a timer to expire at a specified time. If the timer is
+ already set, then it is implicitly canceled before it is set to expire at
+ the specified time. Setting a timer causes its due time to be computed,
+ its state to be set to Not-Signaled, and the timer object itself to be
+ inserted in the timer list.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+ DueTime - Supplies an absolute or relative time at which the timer
+ is to expire.
+
+ Period - Supplies an optional period for the timer in milliseconds.
+
+ Dpc - Supplies an optional pointer to a control object of type DPC.
+
+Return Value:
+
+ A boolean value of TRUE is returned if the specified timer was
+ currently set. Else a value of FALSE is returned.
+
+--*/
+
+{
+
+ BOOLEAN Inserted;
+ LARGE_INTEGER Interval;
+ KIRQL OldIrql;
+ LARGE_INTEGER SystemTime;
+
+ ASSERT_TIMER(Timer);
+ ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+ //
+ // Raise IRQL to dispatcher level and lock dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // Capture the timer inserted status and if the timer is currently
+ // set, then remove it from the timer list.
+ //
+
+ Inserted = Timer->Header.Inserted;
+ if (Inserted != FALSE) {
+ KiRemoveTreeTimer(Timer);
+ }
+
+ //
+ // Clear the signal state, set the period, set the DPC address, and
+ // insert the timer in the timer tree. If the timer is not inserted
+ // in the timer tree, then it has already expired and as many waiters
+ // as possible should be continued, and a DPC, if specified, should be
+ // queued.
+ //
+ // N.B. The signal state must be cleared in case the period is not
+ // zero.
+ //
+
+ Timer->Header.SignalState = FALSE;
+ Timer->Dpc = Dpc;
+ Timer->Period = Period;
+ if (KiInsertTreeTimer((PRKTIMER)Timer, DueTime) == FALSE) {
+ if (IsListEmpty(&Timer->Header.WaitListHead) == FALSE) {
+ KiWaitTest(Timer, TIMER_EXPIRE_INCREMENT);
+ }
+
+ //
+ // If a DPC is specified, then insert the DPC in the DPC queue.
+ //
+
+ if (Dpc != NULL) {
+ KiQuerySystemTime(&SystemTime);
+ KeInsertQueueDpc(Timer->Dpc,
+ (PVOID)SystemTime.LowPart,
+ (PVOID)SystemTime.HighPart);
+ }
+
+ //
+ // If the timer is periodic, then compute the next interval time
+ // and reinsert the timer in the timer tree.
+ //
+ // N.B. Since the period is relative, the timer tree insertion
+ // cannot fail.
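+ //
+ // N.B. The period is specified in milliseconds and is converted here to
+ //      a relative time expressed in 100 nanosecond units.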
+ //
+
+ if (Period != 0) {
+ Interval.QuadPart = Int32x32To64(Timer->Period, - 10 * 1000);
+ KiInsertTreeTimer(Timer, Interval);
+ }
+ }
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value.
+ //
+
+ KiUnlockDispatcherDatabase(OldIrql);
+
+ //
+ // Return boolean value that signifies whether the timer was set or
+ // not.
+ //
+
+ return Inserted;
+}
diff --git a/private/ntos/ke/timersup.c b/private/ntos/ke/timersup.c
new file mode 100644
index 000000000..4d0677558
--- /dev/null
+++ b/private/ntos/ke/timersup.c
@@ -0,0 +1,306 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ timersup.c
+
+Abstract:
+
+ This module contains the support routines for the timer object. It
+ contains functions to insert and remove from the timer queue.
+
+Author:
+
+ David N. Cutler (davec) 13-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward referenced function prototypes.
+//
+
+LOGICAL
+FASTCALL
+KiInsertTimerTable (
+ LARGE_INTEGER Interval,
+ LARGE_INTEGER CurrentTime,
+ IN PRKTIMER Timer
+ );
+
+LOGICAL
+FASTCALL
+KiInsertTreeTimer (
+ IN PRKTIMER Timer,
+ IN LARGE_INTEGER Interval
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a timer object in the timer queue.
+
+ N.B. This routine assumes that the dispatcher database lock has been acquired.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+ Interval - Supplies the absolute or relative time at which the timer
+ is to expire.
+
+Return Value:
+
+ If the timer is inserted in the timer tree, then a value of TRUE is
+ returned. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ LARGE_INTEGER CurrentTime;
+ LARGE_INTEGER SystemTime;
+ LARGE_INTEGER TimeDifference;
+
+ //
+ // Clear the signal state of timer if the timer period is zero and set
+ // the inserted state to TRUE.
+ //
+
+ Timer->Header.Inserted = TRUE;
+ Timer->Header.Absolute = FALSE;
+ if (Timer->Period == 0) {
+ Timer->Header.SignalState = FALSE;
+ }
+
+ //
+ // If the specified interval is not a relative time (i.e., is an absolute
+ // time), then convert it to relative time.
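+ //
+ // N.B. By convention, relative expiration times are specified as negative
+ //      values and absolute expiration times as positive values, so the
+ //      sign of the high part distinguishes the two cases.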
+ //
+
+ if (Interval.HighPart >= 0) {
+ KiQuerySystemTime(&SystemTime);
+ TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
+
+ //
+ // If the resultant relative time is greater than or equal to zero,
+ // then the timer has already expired.
+ //
+
+ if (TimeDifference.HighPart >= 0) {
+ Timer->Header.SignalState = TRUE;
+ Timer->Header.Inserted = FALSE;
+ return FALSE;
+ }
+
+ Interval = TimeDifference;
+ Timer->Header.Absolute = TRUE;
+ }
+
+ //
+ // Get the current interrupt time, insert the timer in the timer table,
+ // and return the inserted state.
+ //
+
+ KiQueryInterruptTime(&CurrentTime);
+ return KiInsertTimerTable(Interval, CurrentTime, Timer);
+}
+
+LOGICAL
+FASTCALL
+KiReinsertTreeTimer (
+ IN PRKTIMER Timer,
+ IN ULARGE_INTEGER DueTime
+ )
+
+/*++
+
+Routine Description:
+
+ This function reinserts a timer object in the timer queue.
+
+ N.B. This routine assumes that the dispatcher database lock has been acquired.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+ DueTime - Supplies the absolute time the timer is to expire.
+
+Return Value:
+
+ If the timer is inserted in the timer tree, then a value of TRUE is
+ returned. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ LARGE_INTEGER CurrentTime;
+ LARGE_INTEGER Interval;
+
+ //
+ // Clear the signal state of timer if the timer period is zero and set
+ // the inserted state to TRUE.
+ //
+
+ Timer->Header.Inserted = TRUE;
+ if (Timer->Period == 0) {
+ Timer->Header.SignalState = FALSE;
+ }
+
+ //
+ // Compute the interval between the current time and the due time.
+ // If the resultant relative time is greater than or equal to zero,
+ // then the timer has already expired.
+ //
+
+ KiQueryInterruptTime(&CurrentTime);
+ Interval.QuadPart = CurrentTime.QuadPart - DueTime.QuadPart;
+ if (Interval.QuadPart >= 0) {
+ Timer->Header.SignalState = TRUE;
+ Timer->Header.Inserted = FALSE;
+ return FALSE;
+ }
+
+ //
+ // Insert the timer in the timer table and return the inserted state.
+ //
+
+ return KiInsertTimerTable(Interval, CurrentTime, Timer);
+}
+
+LOGICAL
+FASTCALL
+KiInsertTimerTable (
+ LARGE_INTEGER Interval,
+ LARGE_INTEGER CurrentTime,
+ IN PRKTIMER Timer
+ )
+
+/*++
+
+Routine Description:
+
+ This function inserts a timer object in the timer table.
+
+ N.B. This routine assumes that the dispatcher database lock has been acquired.
+
+Arguments:
+
+ Interval - Supplies the relative time before the timer is to expire.
+
+ CurrentTime - Supplies the current interrupt time.
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+Return Value:
+
+ If the timer is inserted in the timer tree, then a value of TRUE is
+ returned. Otherwise, a value of FALSE is returned.
+
+--*/
+
+{
+
+ ULONG Index;
+ PLIST_ENTRY ListHead;
+ PLIST_ENTRY NextEntry;
+ PRKTIMER NextTimer;
+ ULONG SearchCount;
+
+ //
+ // Compute the timer table index and set the timer expiration time.
+ //
+
+ Index = KiComputeTimerTableIndex(Interval, CurrentTime, Timer);
+
+ //
+ // If the timer is due before the first entry in the computed list
+ // or the computed list is empty, then insert the timer at the front
+ // of the list and check if the timer has already expired. Otherwise,
+ // insert the timer in the sorted order of the list searching from
+ // the back of the list forward.
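+ //
+ // N.B. Each timer table list is maintained in ascending order of due
+ //      time, with the timer due to expire first at the front of the
+ //      list.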
+ //
+ // N.B. The sequence of operations below is critical to avoid the race
+ // condition that exists between this code and the clock interrupt
+ // code that examines the timer table lists to determine when timers
+ // expire.
+ //
+
+ ListHead = &KiTimerTableListHead[Index];
+ NextEntry = ListHead->Blink;
+
+#if DBG
+
+ SearchCount = 0;
+
+#endif
+
+ while (NextEntry != ListHead) {
+
+ //
+ // Compute the maximum search count.
+ //
+
+#if DBG
+
+ SearchCount += 1;
+ if (SearchCount > KiMaximumSearchCount) {
+ KiMaximumSearchCount = SearchCount;
+ }
+
+#endif
+
+ NextTimer = CONTAINING_RECORD(NextEntry, KTIMER, TimerListEntry);
+ if (((Timer->DueTime.HighPart == NextTimer->DueTime.HighPart) &&
+ (Timer->DueTime.LowPart >= NextTimer->DueTime.LowPart)) ||
+ (Timer->DueTime.HighPart > NextTimer->DueTime.HighPart)) {
+ InsertHeadList(NextEntry, &Timer->TimerListEntry);
+ return TRUE;
+ }
+
+ NextEntry = NextEntry->Blink;
+ }
+
+ //
+ // The computed list is empty or the timer is due to expire before
+ // the first entry in the list. Insert the entry in the computed
+ // timer table list, then check if the timer has expired.
+ //
+ // Note that it is critical that the interrupt time not be captured
+ // until after the timer has been completely inserted into the list.
+ //
+ // Otherwise, the clock interrupt code can think the list is empty,
+ // and the code here that checks if the timer has expired will use
+ // a stale interrupt time.
+ //
+
+ InsertHeadList(ListHead, &Timer->TimerListEntry);
+ KiQueryInterruptTime(&CurrentTime);
+ if (((Timer->DueTime.HighPart == (ULONG)CurrentTime.HighPart) &&
+ (Timer->DueTime.LowPart <= CurrentTime.LowPart)) ||
+ (Timer->DueTime.HighPart < (ULONG)CurrentTime.HighPart)) {
+
+ //
+ // The timer is due to expire before the current time. Remove the
+ // timer from the computed list, set its status to Signaled, and set
+ // its inserted state to FALSE.
+ //
+
+ KiRemoveTreeTimer(Timer);
+ Timer->Header.SignalState = TRUE;
+ }
+
+ return Timer->Header.Inserted;
+}
diff --git a/private/ntos/ke/up/makefile b/private/ntos/ke/up/makefile
new file mode 100644
index 000000000..6ee4f43fa
--- /dev/null
+++ b/private/ntos/ke/up/makefile
@@ -0,0 +1,6 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the components of NT OS/2
+#
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/private/ntos/ke/up/makefile.inc b/private/ntos/ke/up/makefile.inc
new file mode 100644
index 000000000..38703da23
--- /dev/null
+++ b/private/ntos/ke/up/makefile.inc
@@ -0,0 +1,8 @@
+obj\$(TARGET_DIRECTORY)\sysstubs.obj: ..\$(TARGET_DIRECTORY)\sysstubs.$(ASM_SUFFIX)
+
+obj\$(TARGET_DIRECTORY)\systable.obj: ..\$(TARGET_DIRECTORY)\systable.$(ASM_SUFFIX)
+
+obj\$(TARGET_DIRECTORY)\gen$(TARGET_DIRECTORY).obj: ..\$(TARGET_DIRECTORY)\gen$(TARGET_DIRECTORY).c
+
+..\$(TARGET_DIRECTORY)\sysstubs.$(ASM_SUFFIX) ..\$(TARGET_DIRECTORY)\systable.$(ASM_SUFFIX): ..\services.tab ..\$(TARGET_DIRECTORY)\table.stb ..\$(TARGET_DIRECTORY)\services.stb
+ gensrv -d $(TARGET_DIRECTORY) -e $(ASM_SUFFIX) -g .. $(TARGET_BRACES)
diff --git a/private/ntos/ke/up/sources b/private/ntos/ke/up/sources
new file mode 100644
index 000000000..6dca9c583
--- /dev/null
+++ b/private/ntos/ke/up/sources
@@ -0,0 +1,27 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+TARGETPATH=..\..\obj
+
+!include ..\sources.inc
diff --git a/private/ntos/ke/wait.c b/private/ntos/ke/wait.c
new file mode 100644
index 000000000..b4dea5bcb
--- /dev/null
+++ b/private/ntos/ke/wait.c
@@ -0,0 +1,1776 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ wait.c
+
+Abstract:
+
+ This module implements the generic kernel wait routines. Functions
+ are provided to wait for a single object, wait for multiple objects,
+ wait for event pair low, wait for event pair high, release and wait
+ for semaphore, and to delay thread execution.
+
+ N.B. This module is written to be as fast as possible and not as small
+ as possible. Therefore some code sequences are duplicated to avoid
+ procedure calls. It would also be possible to combine wait for
+ single object into wait for multiple objects at the cost of some
+ speed. Since wait for single object is the most common case, the
+ two routines have been separated.
+
+Author:
+
+ David N. Cutler (davec) 23-Mar-89
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Test for alertable condition.
+//
+// If alertable is TRUE and the thread is alerted for a processor
+// mode that is equal to the wait mode, then return immediately
+// with a wait completion status of ALERTED.
+//
+// Else if alertable is TRUE, the wait mode is user, and the user APC
+// queue is not empty, then set user APC pending, and return immediately
+// with a wait completion status of USER_APC.
+//
+// Else if alertable is TRUE and the thread is alerted for kernel
+// mode, then return immediately with a wait completion status of
+// ALERTED.
+//
+// Else if alertable is FALSE and the wait mode is user and there is a
+// user APC pending, then return immediately with a wait completion
+// status of USER_APC.
+//
+
+#define TestForAlertPending(Alertable) \
+ if (Alertable) { \
+ if (Thread->Alerted[WaitMode] != FALSE) { \
+ Thread->Alerted[WaitMode] = FALSE; \
+ WaitStatus = STATUS_ALERTED; \
+ break; \
+ } else if ((WaitMode != KernelMode) && \
+ (IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])) == FALSE) { \
+ Thread->ApcState.UserApcPending = TRUE; \
+ WaitStatus = STATUS_USER_APC; \
+ break; \
+ } else if (Thread->Alerted[KernelMode] != FALSE) { \
+ Thread->Alerted[KernelMode] = FALSE; \
+ WaitStatus = STATUS_ALERTED; \
+ break; \
+ } \
+ } else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending)) { \
+ WaitStatus = STATUS_USER_APC; \
+ break; \
+ }
+
+NTSTATUS
+KeDelayExecutionThread (
+ IN KPROCESSOR_MODE WaitMode,
+ IN BOOLEAN Alertable,
+ IN PLARGE_INTEGER Interval
+ )
+
+/*++
+
+Routine Description:
+
+ This function delays the execution of the current thread for the specified
+ interval of time.
+
+Arguments:
+
+ WaitMode - Supplies the processor mode in which the delay is to occur.
+
+ Alertable - Supplies a boolean value that specifies whether the delay
+ is alertable.
+
+ Interval - Supplies a pointer to the absolute or relative time over which
+ the delay is to occur.
+
+Return Value:
+
+ The wait completion status. A value of STATUS_SUCCESS is returned if
+ the delay occurred. A value of STATUS_ALERTED is returned if the wait
+ was aborted to deliver an alert to the current thread. A value of
+ STATUS_USER_APC is returned if the wait was aborted to deliver a user
+ APC to the current thread.
+
+--*/
+
+{
+
+ LARGE_INTEGER NewTime;
+ PLARGE_INTEGER OriginalTime;
+ PKPRCB Prcb;
+ KPRIORITY Priority;
+ PRKQUEUE Queue;
+ PRKTHREAD Thread;
+ PRKTIMER Timer;
+ PKWAIT_BLOCK WaitBlock;
+ NTSTATUS WaitStatus;
+
+ //
+ // If the dispatcher database lock is not already held, then set the wait
+ // IRQL and lock the dispatcher database. Otherwise, clear the wait next
+ // flag.
+ //
+
+ Thread = KeGetCurrentThread();
+ if (Thread->WaitNext) {
+ Thread->WaitNext = FALSE;
+
+ } else {
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ }
+
+ //
+ // Start of delay loop.
+ //
+ // Note this loop is repeated if a kernel APC is delivered in the middle
+ // of the delay or a kernel APC is pending on the first attempt through
+ // the loop.
+ //
+
+ OriginalTime = Interval;
+ WaitBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];
+ do {
+
+ //
+ // Test to determine if a kernel APC is pending.
+ //
+ // If a kernel APC is pending and the previous IRQL was less than
+ // APC_LEVEL, then a kernel APC was queued by another processor just
+ // after IRQL was raised to DISPATCH_LEVEL, but before the dispatcher
+ // database was locked.
+ //
+ // N.B. This can only happen in a multiprocessor system.
+ //
+
+ if (Thread->ApcState.KernelApcPending && (Thread->WaitIrql < APC_LEVEL)) {
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value. An APC interrupt will immediately occur which will result
+ // in the delivery of the kernel APC if possible.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+
+ } else {
+
+ //
+ // Test for alert pending.
+ //
+
+ TestForAlertPending(Alertable);
+
+ //
+ // Initialize wait block, insert wait block in timer wait list,
+ // insert timer in timer queue, put thread in wait state, select
+ // next thread to execute, and context switch to next thread.
+ //
+ // N.B. The timer wait block is initialized when the respective
+ // thread is initialized. Thus the constant fields are not
+ // reinitialized. These include the wait object, wait key,
+ // wait type, and the wait list entry link pointers.
+ //
+
+ Thread->WaitBlockList = WaitBlock;
+ Thread->WaitStatus = (NTSTATUS)0;
+ Timer = &Thread->Timer;
+ WaitBlock->NextWaitBlock = WaitBlock;
+ Timer->Header.WaitListHead.Flink = &WaitBlock->WaitListEntry;
+ Timer->Header.WaitListHead.Blink = &WaitBlock->WaitListEntry;
+
+ //
+ // If the timer is inserted in the timer tree, then place the
+ // current thread in a wait state. Otherwise, attempt to force
+ // the current thread to yield the processor to another thread.
+ //
+
+ if (KiInsertTreeTimer(Timer, *Interval) == FALSE) {
+
+ //
+ // If the thread is not a realtime thread, then drop the
+ // thread priority to the base priority.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ Priority = Thread->Priority;
+ if (Priority < LOW_REALTIME_PRIORITY) {
+ if (Priority != Thread->BasePriority) {
+ Thread->PriorityDecrement = 0;
+ KiSetPriorityThread(Thread, Thread->BasePriority);
+ }
+ }
+
+ //
+ // If a new thread has not been selected, then attempt to round
+ // robin the thread with other threads at the same priority.
+ //
+
+ if (Prcb->NextThread == NULL) {
+ Prcb->NextThread = KiFindReadyThread(Thread->NextProcessor,
+ Thread->Priority);
+ }
+
+ //
+ // If a new thread has been selected for execution, then
+ // switch immediately to the selected thread.
+ //
+
+ if (Prcb->NextThread != NULL) {
+
+ //
+ // Give the current thread a new quantum and switch
+ // context to the selected thread.
+ //
+ // N.B. Control is returned at the original IRQL.
+ //
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+ ASSERT(Thread->WaitIrql <= DISPATCH_LEVEL);
+
+ Thread->Preempted = FALSE;
+ Thread->Quantum = Thread->ApcState.Process->ThreadQuantum;
+
+ KiReadyThread(Thread);
+ WaitStatus = KiSwapThread();
+ goto WaitComplete;
+
+ } else {
+ WaitStatus = (NTSTATUS)STATUS_SUCCESS;
+ break;
+ }
+ }
+
+ //
+ // If the current thread is processing a queue entry, then attempt
+ // to activate another thread that is blocked on the queue object.
+ //
+
+ Queue = Thread->Queue;
+ if (Queue != NULL) {
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the thread wait parameters, set the thread dispatcher state
+ // to Waiting, and insert the thread in the wait list.
+ //
+
+ Thread->Alertable = Alertable;
+ Thread->WaitMode = WaitMode;
+ Thread->WaitReason = DelayExecution;
+ Thread->WaitTime = KiQueryLowTickCount();
+ Thread->State = Waiting;
+ KiInsertWaitList(WaitMode, Thread);
+
+ //
+ // Switch context to selected thread.
+ //
+ // N.B. Control is returned at the original IRQL.
+ //
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+ ASSERT(Thread->WaitIrql <= DISPATCH_LEVEL);
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return the wait status.
+ //
+
+ WaitComplete:
+ if (WaitStatus != STATUS_KERNEL_APC) {
+ if (WaitStatus == STATUS_TIMEOUT) {
+ WaitStatus = STATUS_SUCCESS;
+ }
+
+ return WaitStatus;
+ }
+
+ //
+ // Reduce the time remaining before the time delay expires.
+ //
+
+ Interval = KiComputeWaitInterval(Timer, OriginalTime, &NewTime);
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ } while (TRUE);
+
+ //
+ // The thread is alerted or a user APC should be delivered. Unlock the
+ // dispatcher database, lower IRQL to its previous value, and return the
+ // wait status.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ return WaitStatus;
+}
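+
+ //
+ // Editorial note: a delay of the form implemented by the loop above is
+ // normally issued through the documented KeDelayExecutionThread
+ // interface. A minimal caller-side sketch (not part of this module) that
+ // delays for 10 milliseconds follows; relative intervals are negative
+ // and expressed in 100-nanosecond units.
+ //
+ //     LARGE_INTEGER Interval;
+ //
+ //     Interval.QuadPart = -(10 * 10000);      // 10 ms, relative.
+ //     KeDelayExecutionThread(KernelMode, FALSE, &Interval);
+ //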
+
+NTSTATUS
+KeReleaseWaitForSemaphore (
+ IN PKSEMAPHORE Server,
+ IN PKSEMAPHORE Client,
+ IN ULONG WaitReason,
+ IN ULONG WaitMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function releases a semaphore and waits on another semaphore. The
+ wait is performed such that an optimal switch to the waiting thread
+ occurs if possible. No timeout is associated with the wait, and thus,
+ the issuing thread will wait until the semaphore is signaled or an APC
+ is delivered.
+
+Arguments:
+
+ Server - Supplies a pointer to a dispatcher object of type semaphore.
+
+ Client - Supplies a pointer to a dispatcher object of type semaphore.
+
+ WaitReason - Supplies the reason for the wait.
+
+ WaitMode - Supplies the processor mode in which the wait is to occur.
+
+Return Value:
+
+ The wait completion status. A value of STATUS_SUCCESS is returned if
+ the specified object satisfied the wait. A value of STATUS_USER_APC is
+ returned if the wait was aborted to deliver a user APC to the current
+ thread.
+
+--*/
+
+{
+
+ PRKTHREAD NextThread;
+ LONG OldState;
+ PRKQUEUE Queue;
+ PRKTHREAD Thread;
+ PKWAIT_BLOCK WaitBlock;
+ PLIST_ENTRY WaitEntry;
+
+ //
+ // Raise the IRQL to dispatch level and lock the dispatcher database.
+ //
+
+ Thread = KeGetCurrentThread();
+
+ ASSERT(Thread->WaitNext == FALSE);
+
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+
+ //
+ // If the client semaphore is not in the Signaled state and the server
+ // semaphore wait queue is not empty, then attempt a direct dispatch
+ // to the target thread.
+ //
+
+ if ((Client->Header.SignalState == 0) &&
+ (IsListEmpty(&Server->Header.WaitListHead) == FALSE)) {
+
+ //
+ // Get the address of the first waiting server thread.
+ //
+
+ WaitEntry = Server->Header.WaitListHead.Flink;
+ WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
+ NextThread = WaitBlock->Thread;
+
+ //
+ // Remove the wait block from the semaphore wait list and remove the
+ // target thread from the system wait list.
+ //
+
+ RemoveEntryList(&WaitBlock->WaitListEntry);
+ RemoveEntryList(&NextThread->WaitListEntry);
+
+ //
+ // If the next thread is processing a queue entry, then increment
+ // the current number of threads.
+ //
+
+ Queue = NextThread->Queue;
+ if (Queue != NULL) {
+ Queue->CurrentCount += 1;
+ }
+
+ //
+ // Attempt to switch directly to the target thread.
+ //
+
+ return KiSwitchToThread(NextThread, WaitReason, WaitMode, Client);
+
+ } else {
+
+ //
+ // If the server semaphore is at the maximum limit, then unlock the
+ // dispatcher database and raise an exception.
+ //
+
+ OldState = Server->Header.SignalState;
+ if (OldState == Server->Limit) {
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ ExRaiseStatus(STATUS_SEMAPHORE_LIMIT_EXCEEDED);
+ }
+
+ //
+ // Signal the server semaphore and test to determine if any wait can be
+ // satisfied.
+ //
+
+ Server->Header.SignalState += 1;
+ if ((OldState == 0) && (IsListEmpty(&Server->Header.WaitListHead) == FALSE)) {
+ KiWaitTest(Server, 1);
+ }
+
+ //
+ // Continue the semaphore wait and return the wait completion status.
+ //
+ // N.B. The wait continuation routine is called with the dispatcher
+ // database locked.
+ //
+
+ return KiContinueClientWait(Client, WaitReason, WaitMode);
+ }
+}
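+
+ //
+ // Editorial note: the routine above is an optimized form of releasing one
+ // semaphore and immediately waiting on another. A sketch of the general
+ // purpose equivalent (not part of this module) uses the exported routines
+ // with the Wait parameter of KeReleaseSemaphore set to TRUE so that the
+ // release and the following wait execute as one atomic sequence; the
+ // increment and wait reason values shown are illustrative.
+ //
+ //     KeReleaseSemaphore(Server, SEMAPHORE_INCREMENT, 1, TRUE);
+ //     KeWaitForSingleObject(Client, Executive, KernelMode, FALSE, NULL);
+ //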
+
+NTSTATUS
+KeWaitForMultipleObjects (
+ IN ULONG Count,
+ IN PVOID Object[],
+ IN WAIT_TYPE WaitType,
+ IN KWAIT_REASON WaitReason,
+ IN KPROCESSOR_MODE WaitMode,
+ IN BOOLEAN Alertable,
+ IN PLARGE_INTEGER Timeout OPTIONAL,
+ IN PKWAIT_BLOCK WaitBlockArray OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function waits until the specified objects attain a state of
+ Signaled. The wait can be specified to wait until all of the objects
+ attain a state of Signaled or until one of the objects attains a state
+ of Signaled. An optional timeout can also be specified. If a timeout
+ is not specified, then the wait will not be satisfied until the objects
+ attain a state of Signaled. If a timeout is specified, and the objects
+ have not attained a state of Signaled when the timeout expires, then
+ the wait is automatically satisfied. If an explicit timeout value of
+ zero is specified, then no wait will occur if the wait cannot be satisfied
+ immediately. The wait can also be specified as alertable.
+
+Arguments:
+
+ Count - Supplies a count of the number of objects that are to be waited
+ on.
+
+ Object[] - Supplies an array of pointers to dispatcher objects.
+
+ WaitType - Supplies the type of wait to perform (WaitAll, WaitAny).
+
+ WaitReason - Supplies the reason for the wait.
+
+ WaitMode - Supplies the processor mode in which the wait is to occur.
+
+ Alertable - Supplies a boolean value that specifies whether the wait is
+ alertable.
+
+ Timeout - Supplies a pointer to an optional absolute or relative time over
+ which the wait is to occur.
+
+ WaitBlockArray - Supplies an optional pointer to an array of wait blocks
+ that are to be used to describe the wait operation.
+
+Return Value:
+
+ The wait completion status. A value of STATUS_TIMEOUT is returned if a
+ timeout occurred. The index of the object (zero based) in the object
+ pointer array is returned if an object satisfied the wait. A value of
+ STATUS_ALERTED is returned if the wait was aborted to deliver an alert
+ to the current thread. A value of STATUS_USER_APC is returned if the
+ wait was aborted to deliver a user APC to the current thread.
+
+--*/
+
+{
+
+ ULONG Index;
+ LARGE_INTEGER NewTime;
+ PRKTHREAD NextThread;
+ PKMUTANT Objectx;
+ PLARGE_INTEGER OriginalTime;
+ PRKQUEUE Queue;
+ PRKTHREAD Thread;
+ PRKTIMER Timer;
+ PRKWAIT_BLOCK WaitBlock;
+ BOOLEAN WaitSatisfied;
+ NTSTATUS WaitStatus;
+ PKWAIT_BLOCK WaitTimer;
+
+ //
+ // If the dispatcher database lock is not already held, then set the wait
+ // IRQL and lock the dispatcher database. Else set boolean wait variable
+ // to FALSE.
+ //
+
+ Thread = KeGetCurrentThread();
+ if (Thread->WaitNext) {
+ Thread->WaitNext = FALSE;
+
+ } else {
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ }
+
+ //
+ // If a wait block array has been specified, then the maximum number of
+ // objects that can be waited on is specified by MAXIMUM_WAIT_OBJECTS.
+ // Otherwise the builtin wait blocks in the thread object are used and
+ // the maximum number of objects that can be waited on is specified by
+ // THREAD_WAIT_OBJECTS. If the specified number of objects is not within
+ // limits, then bug check.
+ //
+
+ if (ARGUMENT_PRESENT(WaitBlockArray)) {
+ if (Count > MAXIMUM_WAIT_OBJECTS) {
+ KeBugCheck(MAXIMUM_WAIT_OBJECTS_EXCEEDED);
+ }
+
+ } else {
+ if (Count > THREAD_WAIT_OBJECTS) {
+ KeBugCheck(MAXIMUM_WAIT_OBJECTS_EXCEEDED);
+ }
+
+ WaitBlockArray = &Thread->WaitBlock[0];
+ }
+
+ //
+ // Start of wait loop.
+ //
+ // Note this loop is repeated if a kernel APC is delivered in the middle
+ // of the wait or a kernel APC is pending on the first attempt through
+ // the loop.
+ //
+
+ OriginalTime = Timeout;
+ do {
+
+ //
+ // Set address of wait block list in thread object.
+ //
+
+ Thread->WaitBlockList = WaitBlockArray;
+
+ //
+ // Test to determine if a kernel APC is pending.
+ //
+ // If a kernel APC is pending and the previous IRQL was less than
+ // APC_LEVEL, then a kernel APC was queued by another processor just
+ // after IRQL was raised to DISPATCH_LEVEL, but before the dispatcher
+ // database was locked.
+ //
+ // N.B. This can only happen in a multiprocessor system.
+ //
+
+ if (Thread->ApcState.KernelApcPending && (Thread->WaitIrql < APC_LEVEL)) {
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value. An APC interrupt will immediately occur which will result
+ // in the delivery of the kernel APC if possible.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+
+ } else {
+
+ //
+ // Construct wait blocks and check to determine if the wait is
+ // already satisfied. If the wait is satisfied, then perform
+ // wait completion and return. Else put current thread in a wait
+ // state if an explicit timeout value of zero is not specified.
+ //
+
+ Thread->WaitStatus = (NTSTATUS)0;
+ WaitSatisfied = TRUE;
+ for (Index = 0; Index < Count; Index += 1) {
+
+ //
+ // Test if wait can be satisfied immediately.
+ //
+
+ Objectx = (PKMUTANT)Object[Index];
+
+ ASSERT(Objectx->Header.Type != QueueObject);
+
+ if (WaitType == WaitAny) {
+
+ //
+ // If the object is a mutant object and the mutant object
+ // has been recursively acquired MINLONG times, then raise
+ // an exception. Otherwise if the signal state of the mutant
+ // object is greater than zero, or the current thread is
+ // the owner of the mutant object, then satisfy the wait.
+ //
+
+ if (Objectx->Header.Type == MutantObject) {
+ if ((Objectx->Header.SignalState > 0) ||
+ (Thread == Objectx->OwnerThread)) {
+ if (Objectx->Header.SignalState != MINLONG) {
+ KiWaitSatisfyMutant(Objectx, Thread);
+ WaitStatus = (NTSTATUS)(Index) | Thread->WaitStatus;
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ return WaitStatus;
+
+ } else {
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ ExRaiseStatus(STATUS_MUTANT_LIMIT_EXCEEDED);
+ }
+ }
+
+ //
+ // If the signal state is greater than zero, then satisfy
+ // the wait.
+ //
+
+ } else if (Objectx->Header.SignalState > 0) {
+ KiWaitSatisfyOther(Objectx);
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ return (NTSTATUS)(Index);
+ }
+
+ } else {
+
+ //
+ // If the object is a mutant object and the mutant object
+ // has been recursively acquired MINLONG times, then raise
+ // an exception. Otherwise if the signal state of the mutant
+ // object is less than or equal to zero and the current
+ // thread is not the owner of the mutant object, then the
+ // wait cannot be satisfied.
+ //
+
+ if (Objectx->Header.Type == MutantObject) {
+ if ((Thread == Objectx->OwnerThread) &&
+ (Objectx->Header.SignalState == MINLONG)) {
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ ExRaiseStatus(STATUS_MUTANT_LIMIT_EXCEEDED);
+
+ } else if ((Objectx->Header.SignalState <= 0) &&
+ (Thread != Objectx->OwnerThread)) {
+ WaitSatisfied = FALSE;
+ }
+
+ //
+ // If the signal state is less than or equal to zero, then
+ // the wait cannot be satisfied.
+ //
+
+ } else if (Objectx->Header.SignalState <= 0) {
+ WaitSatisfied = FALSE;
+ }
+ }
+
+ //
+ // Construct wait block for the current object.
+ //
+
+ WaitBlock = &WaitBlockArray[Index];
+ WaitBlock->Object = (PVOID)Objectx;
+ WaitBlock->WaitKey = (CSHORT)(Index);
+ WaitBlock->WaitType = WaitType;
+ WaitBlock->Thread = Thread;
+ WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1];
+ }
+
+ //
+ // If the wait type is wait all, then check to determine if the
+ // wait can be satisfied immediately.
+ //
+
+ if ((WaitType == WaitAll) && (WaitSatisfied)) {
+ WaitBlock->NextWaitBlock = &WaitBlockArray[0];
+ KiWaitSatisfyAll(WaitBlock);
+ WaitStatus = Thread->WaitStatus;
+ break;
+ }
+
+ //
+ // Test for alert pending.
+ //
+
+ TestForAlertPending(Alertable);
+
+ //
+ // The wait cannot be satisfied immediately. Check to determine if
+ // a timeout value is specified.
+ //
+
+ if (ARGUMENT_PRESENT(Timeout)) {
+
+ //
+ // If the timeout value is zero, then return immediately without
+ // waiting.
+ //
+
+ if (!(Timeout->LowPart | Timeout->HighPart)) {
+ WaitStatus = (NTSTATUS)(STATUS_TIMEOUT);
+ break;
+ }
+
+ //
+ // Initialize a wait block for the thread specific timer,
+ // initialize timer wait list head, insert the timer in the
+ // timer tree, and increment the number of wait objects.
+ //
+ // N.B. The timer wait block is initialized when the respective
+ // thread is initialized. Thus the constant fields are not
+ // reinitialized. These include the wait object, wait key,
+ // wait type, and the wait list entry link pointers.
+ //
+
+ WaitTimer = &Thread->WaitBlock[TIMER_WAIT_BLOCK];
+ WaitBlock->NextWaitBlock = WaitTimer;
+ WaitBlock = WaitTimer;
+ Timer = &Thread->Timer;
+ InitializeListHead(&Timer->Header.WaitListHead);
+ if (KiInsertTreeTimer(Timer, *Timeout) == FALSE) {
+ WaitStatus = (NTSTATUS)STATUS_TIMEOUT;
+ break;
+ }
+ }
+
+ //
+ // Close up the circular list of wait control blocks.
+ //
+
+ WaitBlock->NextWaitBlock = &WaitBlockArray[0];
+
+ //
+ // Insert wait blocks in object wait lists.
+ //
+
+ WaitBlock = &WaitBlockArray[0];
+ do {
+ Objectx = (PKMUTANT)WaitBlock->Object;
+ InsertTailList(&Objectx->Header.WaitListHead, &WaitBlock->WaitListEntry);
+ WaitBlock = WaitBlock->NextWaitBlock;
+ } while (WaitBlock != &WaitBlockArray[0]);
+
+ //
+ // If the current thread is processing a queue entry, then attempt
+ // to activate another thread that is blocked on the queue object.
+ //
+
+ Queue = Thread->Queue;
+ if (Queue != NULL) {
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the thread wait parameters, set the thread dispatcher state
+ // to Waiting, and insert the thread in the wait list.
+ //
+
+ Thread->Alertable = Alertable;
+ Thread->WaitMode = WaitMode;
+ Thread->WaitReason = WaitReason;
+ Thread->WaitTime = KiQueryLowTickCount();
+ Thread->State = Waiting;
+ KiInsertWaitList(WaitMode, Thread);
+
+ //
+ // Switch context to selected thread.
+ //
+ // Control is returned at the original IRQL.
+ //
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+ ASSERT(Thread->WaitIrql <= DISPATCH_LEVEL);
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return the wait status.
+ //
+
+ if (WaitStatus != STATUS_KERNEL_APC) {
+ return WaitStatus;
+ }
+
+ if (ARGUMENT_PRESENT(Timeout)) {
+
+ //
+ // Reduce the amount of time remaining before timeout occurs.
+ //
+
+ Timeout = KiComputeWaitInterval(Timer, OriginalTime, &NewTime);
+ }
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ } while (TRUE);
+
+ //
+ // The thread is alerted, a user APC should be delivered, or the wait is
+ // satisfied. Unlock dispatcher database, lower IRQL to its previous value,
+ // and return the wait status.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ return WaitStatus;
+}
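+
+ //
+ // Editorial sketch (not part of this module): a kernel mode caller waits
+ // for either of two event objects with no timeout. The built-in thread
+ // wait blocks suffice because the count does not exceed
+ // THREAD_WAIT_OBJECTS; larger counts require a caller supplied wait
+ // block array. The event variables are illustrative.
+ //
+ //     PVOID WaitObjects[2];
+ //     NTSTATUS Status;
+ //
+ //     WaitObjects[0] = &StopEvent;
+ //     WaitObjects[1] = &WorkEvent;
+ //     Status = KeWaitForMultipleObjects(2,
+ //                                       WaitObjects,
+ //                                       WaitAny,
+ //                                       Executive,
+ //                                       KernelMode,
+ //                                       FALSE,
+ //                                       NULL,
+ //                                       NULL);
+ //
+ //     if (Status == STATUS_WAIT_1) {
+ //         // WorkEvent was signaled.
+ //     }
+ //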
+
+NTSTATUS
+KeWaitForSingleObject (
+ IN PVOID Object,
+ IN KWAIT_REASON WaitReason,
+ IN KPROCESSOR_MODE WaitMode,
+ IN BOOLEAN Alertable,
+ IN PLARGE_INTEGER Timeout OPTIONAL
+ )
+
+/*++
+
+Routine Description:
+
+ This function waits until the specified object attains a state of
+ Signaled. An optional timeout can also be specified. If a timeout
+ is not specified, then the wait will not be satisfied until the object
+ attains a state of Signaled. If a timeout is specified, and the object
+ has not attained a state of Signaled when the timeout expires, then
+ the wait is automatically satisfied. If an explicit timeout value of
+ zero is specified, then no wait will occur if the wait cannot be satisfied
+ immediately. The wait can also be specified as alertable.
+
+Arguments:
+
+ Object - Supplies a pointer to a dispatcher object.
+
+ WaitReason - Supplies the reason for the wait.
+
+ WaitMode - Supplies the processor mode in which the wait is to occur.
+
+ Alertable - Supplies a boolean value that specifies whether the wait is
+ alertable.
+
+ Timeout - Supplies a pointer to an optional absolute or relative time over
+ which the wait is to occur.
+
+Return Value:
+
+ The wait completion status. A value of STATUS_TIMEOUT is returned if a
+ timeout occurred. A value of STATUS_SUCCESS is returned if the specified
+ object satisfied the wait. A value of STATUS_ALERTED is returned if the
+ wait was aborted to deliver an alert to the current thread. A value of
+ STATUS_USER_APC is returned if the wait was aborted to deliver a user
+ APC to the current thread.
+
+--*/
+
+{
+
+ LARGE_INTEGER NewTime;
+ PRKTHREAD NextThread;
+ PKMUTANT Objectx;
+ PLARGE_INTEGER OriginalTime;
+ PRKQUEUE Queue;
+ PRKTHREAD Thread;
+ PRKTIMER Timer;
+ PKWAIT_BLOCK WaitBlock;
+ NTSTATUS WaitStatus;
+ PKWAIT_BLOCK WaitTimer;
+
+ //
+ // Collect call data.
+ //
+
+#if defined(_COLLECT_WAIT_SINGLE_CALLDATA_)
+
+ RECORD_CALL_DATA(&KiWaitSingleCallData);
+
+#endif
+
+ //
+ // If the dispatcher database lock is not already held, then set the wait
+ // IRQL and lock the dispatcher database. Else set boolean wait variable
+ // to FALSE.
+ //
+
+ Thread = KeGetCurrentThread();
+ if (Thread->WaitNext) {
+ Thread->WaitNext = FALSE;
+
+ } else {
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ }
+
+ //
+ // Start of wait loop.
+ //
+ // Note this loop is repeated if a kernel APC is delivered in the middle
+ // of the wait or a kernel APC is pending on the first attempt through
+ // the loop.
+ //
+
+ OriginalTime = Timeout;
+ WaitBlock = &Thread->WaitBlock[0];
+ do {
+
+ //
+ // Test to determine if a kernel APC is pending.
+ //
+ // If a kernel APC is pending and the previous IRQL was less than
+ // APC_LEVEL, then a kernel APC was queued by another processor just
+ // after IRQL was raised to DISPATCH_LEVEL, but before the dispatcher
+ // database was locked.
+ //
+ // N.B. This can only happen in a multiprocessor system.
+ //
+
+ if (Thread->ApcState.KernelApcPending && (Thread->WaitIrql < APC_LEVEL)) {
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value. An APC interrupt will immediately occur which will result
+ // in the delivery of the kernel APC if possible.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+
+ } else {
+
+ //
+ // Test if the wait can be immediately satisfied.
+ //
+
+ Objectx = (PKMUTANT)Object;
+ Thread->WaitStatus = (NTSTATUS)0;
+
+ ASSERT(Objectx->Header.Type != QueueObject);
+
+ //
+ // If the object is a mutant object and the mutant object has been
+ // recursively acquired MINLONG times, then raise an exception.
+ // Otherwise if the signal state of the mutant object is greater
+ // than zero, or the current thread is the owner of the mutant
+ // object, then satisfy the wait.
+ //
+
+ if (Objectx->Header.Type == MutantObject) {
+ if ((Objectx->Header.SignalState > 0) ||
+ (Thread == Objectx->OwnerThread)) {
+ if (Objectx->Header.SignalState != MINLONG) {
+ KiWaitSatisfyMutant(Objectx, Thread);
+ WaitStatus = (NTSTATUS)(0) | Thread->WaitStatus;
+ break;
+
+ } else {
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ ExRaiseStatus(STATUS_MUTANT_LIMIT_EXCEEDED);
+ }
+ }
+
+ //
+ // If the signal state is greater than zero, then satisfy the wait.
+ //
+
+ } else if (Objectx->Header.SignalState > 0) {
+ KiWaitSatisfyOther(Objectx);
+ WaitStatus = (NTSTATUS)(0);
+ break;
+ }
+
+ //
+ // Construct a wait block for the object.
+ //
+
+ Thread->WaitBlockList = WaitBlock;
+ WaitBlock->Object = Object;
+ WaitBlock->WaitKey = (CSHORT)(STATUS_SUCCESS);
+ WaitBlock->WaitType = WaitAny;
+
+ //
+ // Test for alert pending.
+ //
+
+ TestForAlertPending(Alertable);
+
+ //
+ // The wait cannot be satisfied immediately. Check to determine if
+ // a timeout value is specified.
+ //
+
+ if (ARGUMENT_PRESENT(Timeout)) {
+
+ //
+ // If the timeout value is zero, then return immediately without
+ // waiting.
+ //
+
+ if (!(Timeout->LowPart | Timeout->HighPart)) {
+ WaitStatus = (NTSTATUS)(STATUS_TIMEOUT);
+ break;
+ }
+
+ //
+ // Initialize a wait block for the thread specific timer, insert
+ // wait block in timer wait list, insert the timer in the timer
+ // tree.
+ //
+ // N.B. The timer wait block is initialized when the respective
+ // thread is initialized. Thus the constant fields are not
+ // reinitialized. These include the wait object, wait key,
+ // wait type, and the wait list entry link pointers.
+ //
+
+ Timer = &Thread->Timer;
+ WaitTimer = &Thread->WaitBlock[TIMER_WAIT_BLOCK];
+ WaitBlock->NextWaitBlock = WaitTimer;
+ Timer->Header.WaitListHead.Flink = &WaitTimer->WaitListEntry;
+ Timer->Header.WaitListHead.Blink = &WaitTimer->WaitListEntry;
+ WaitTimer->NextWaitBlock = WaitBlock;
+ if (KiInsertTreeTimer(Timer, *Timeout) == FALSE) {
+ WaitStatus = (NTSTATUS)STATUS_TIMEOUT;
+ break;
+ }
+
+ } else {
+ WaitBlock->NextWaitBlock = WaitBlock;
+ }
+
+ //
+ // Insert wait block in object wait list.
+ //
+
+ InsertTailList(&Objectx->Header.WaitListHead, &WaitBlock->WaitListEntry);
+
+ //
+ // If the current thread is processing a queue entry, then attempt
+ // to activate another thread that is blocked on the queue object.
+ //
+
+ Queue = Thread->Queue;
+ if (Queue != NULL) {
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the thread wait parameters, set the thread dispatcher state
+ // to Waiting, and insert the thread in the wait list.
+ //
+
+ Thread->Alertable = Alertable;
+ Thread->WaitMode = WaitMode;
+ Thread->WaitReason = WaitReason;
+ Thread->WaitTime = KiQueryLowTickCount();
+ Thread->State = Waiting;
+ KiInsertWaitList(WaitMode, Thread);
+
+ //
+ // Switch context to selected thread.
+ //
+ // Control is returned at the original IRQL.
+ //
+
+ ASSERT(KeIsExecutingDpc() == FALSE);
+ ASSERT(Thread->WaitIrql <= DISPATCH_LEVEL);
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return wait status.
+ //
+
+ if (WaitStatus != STATUS_KERNEL_APC) {
+ return WaitStatus;
+ }
+
+ if (ARGUMENT_PRESENT(Timeout)) {
+
+ //
+ // Reduce the amount of time remaining before timeout occurs.
+ //
+
+ Timeout = KiComputeWaitInterval(Timer, OriginalTime, &NewTime);
+ }
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ } while (TRUE);
+
+ //
+ // The thread is alerted, a user APC should be delivered, or the wait is
+ // satisfied. Unlock dispatcher database, lower IRQL to its previous value,
+ // and return the wait status.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ return WaitStatus;
+}
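+
+ //
+ // Editorial sketch (not part of this module): a kernel mode caller waits
+ // on an event with a one second relative timeout. Relative times are
+ // negative and expressed in 100-nanosecond units; the event variable is
+ // illustrative.
+ //
+ //     LARGE_INTEGER Timeout;
+ //     NTSTATUS Status;
+ //
+ //     Timeout.QuadPart = -(10 * 1000 * 1000);     // One second, relative.
+ //     Status = KeWaitForSingleObject(&DoneEvent,
+ //                                    Executive,
+ //                                    KernelMode,
+ //                                    FALSE,
+ //                                    &Timeout);
+ //
+ //     if (Status == STATUS_TIMEOUT) {
+ //         // The event was not signaled within one second.
+ //     }
+ //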
+
+NTSTATUS
+KiSetServerWaitClientEvent (
+ IN PKEVENT ServerEvent,
+ IN PKEVENT ClientEvent,
+ IN ULONG WaitMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function sets the specified server event and waits on specified
+ client event. The wait is performed such that an optimal switch to
+ the waiting thread occurs if possible. No timeout is associated with
+ the wait, and thus, the issuing thread will wait until the client event
+ is signaled or an APC is delivered.
+
+Arguments:
+
+ ServerEvent - Supplies a pointer to a dispatcher object of type event.
+
+ ClientEvent - Supplies a pointer to a dispatcher object of type event.
+
+ WaitMode - Supplies the processor mode in which the wait is to occur.
+
+Return Value:
+
+ The wait completion status. A value of STATUS_SUCCESS is returned if
+ the specified object satisfied the wait. A value of STATUS_USER_APC is
+ returned if the wait was aborted to deliver a user APC to the current
+ thread.
+
+--*/
+
+{
+
+ PKTHREAD NextThread;
+ KPRIORITY NewPriority;
+ LONG OldState;
+ PKQUEUE Queue;
+ PKTHREAD Thread;
+ PKWAIT_BLOCK WaitBlock;
+ PLIST_ENTRY WaitEntry;
+
+ //
+ // Raise the IRQL to dispatch level and lock the dispatcher database.
+ //
+
+ Thread = KeGetCurrentThread();
+
+ ASSERT(Thread->WaitNext == FALSE);
+
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+
+ //
+ // If the client event is not in the Signaled state and the server
+ // event wait queue is not empty, then attempt to do a direct dispatch
+ // to the target thread.
+ //
+
+ if ((ClientEvent->Header.SignalState == 0) &&
+ (IsListEmpty(&ServerEvent->Header.WaitListHead) == FALSE)) {
+
+ //
+ // Get the address of the first waiting server thread.
+ //
+
+ WaitEntry = ServerEvent->Header.WaitListHead.Flink;
+ WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
+ NextThread = WaitBlock->Thread;
+
+ //
+ // Remove the wait block from the event wait list and remove the
+ // target thread from the system wait list.
+ //
+
+ RemoveEntryList(&WaitBlock->WaitListEntry);
+ RemoveEntryList(&NextThread->WaitListEntry);
+
+ //
+ // If the next thread is processing a queue entry, then increment
+ // the current number of threads.
+ //
+
+ Queue = NextThread->Queue;
+ if (Queue != NULL) {
+ Queue->CurrentCount += 1;
+ }
+
+ //
+ // Attempt to switch directly to the target thread.
+ //
+
+ return KiSwitchToThread(NextThread, WrEventPair, WaitMode, ClientEvent);
+
+ } else {
+
+ //
+ // Signal the server event and test to determine if any wait can be
+ // satisfied.
+ //
+
+ OldState = ServerEvent->Header.SignalState;
+ ServerEvent->Header.SignalState = 1;
+ if ((OldState == 0) && (IsListEmpty(&ServerEvent->Header.WaitListHead) == FALSE)) {
+ KiWaitTest(ServerEvent, 1);
+ }
+
+ //
+ // Continue the event pair wait and return the wait completion status.
+ //
+ // N.B. The wait continuation routine is called with the dispatcher
+ // database locked.
+ //
+
+ return KiContinueClientWait(ClientEvent, WrEventPair, WaitMode);
+ }
+}
+
+NTSTATUS
+KiContinueClientWait (
+ IN PVOID ClientObject,
+ IN ULONG WaitReason,
+ IN ULONG WaitMode
+ )
+
+/*++
+
+Routine Description:
+
+ This function continues a wait operation that could not be completed by
+ an optimal switch from a client to a server.
+
+ N.B. This function is entered with the dispatcher database locked.
+
+Arguments:
+
+ ClientObject - Supplies a pointer to a dispatcher object of type event
+ or semaphore.
+
+ WaitReason - Supplies the reason for the wait operation.
+
+ WaitMode - Supplies the processor mode in which the wait is to occur.
+
+Return Value:
+
+ The wait completion status. A value of STATUS_SUCCESS is returned if
+ the specified object satisfied the wait. A value of STATUS_USER_APC is
+ returned if the wait was aborted to deliver a user APC to the current
+ thread.
+
+--*/
+
+{
+
+ PKEVENT ClientEvent;
+ PRKTHREAD NextThread;
+ PRKQUEUE Queue;
+ PRKTHREAD Thread;
+ PKWAIT_BLOCK WaitBlock;
+ NTSTATUS WaitStatus;
+
+ //
+ // Start of wait loop.
+ //
+ // Note this loop is repeated if a kernel APC is delivered in the middle
+ // of the wait or a kernel APC is pending on the first attempt through
+ // the loop.
+ //
+
+ ClientEvent = (PKEVENT)ClientObject;
+ Thread = KeGetCurrentThread();
+ WaitBlock = &Thread->WaitBlock[0];
+ do {
+
+ //
+ // Set address of wait block list in thread object.
+ //
+
+ Thread->WaitBlockList = WaitBlock;
+
+ //
+ // Test to determine if a kernel APC is pending.
+ //
+ // If a kernel APC is pending and the previous IRQL was less than
+ // APC_LEVEL, then a kernel APC was queued by another processor just
+ // after IRQL was raised to DISPATCH_LEVEL, but before the dispatcher
+ // database was locked.
+ //
+ // N.B. This can only happen in a multiprocessor system.
+ //
+
+ if (Thread->ApcState.KernelApcPending && (Thread->WaitIrql < APC_LEVEL)) {
+
+ //
+ // Unlock the dispatcher database and lower IRQL to its previous
+ // value. An APC interrupt will immediately occur which will result
+ // in the delivery of the kernel APC if possible.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+
+ } else {
+
+ //
+ // Test if a user APC is pending.
+ //
+
+ if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending)) {
+ WaitStatus = STATUS_USER_APC;
+ break;
+ }
+
+ //
+ // Initialize the event/semaphore wait block and check to determine
+ // if the wait is already satisfied. If the wait is satisfied, then
+ // perform wait completion and return. Otherwise, put current thread
+ // in a wait state.
+ //
+
+ Thread->WaitStatus = (NTSTATUS)0;
+ WaitBlock->Object = ClientEvent;
+ WaitBlock->NextWaitBlock = WaitBlock;
+ WaitBlock->WaitKey = (CSHORT)(STATUS_SUCCESS);
+ WaitBlock->WaitType = WaitAny;
+
+ //
+ // If the signal state is not equal to zero, then satisfy the wait.
+ //
+
+ if (ClientEvent->Header.SignalState != 0) {
+ KiWaitSatisfyOther(ClientEvent);
+ WaitStatus = (NTSTATUS)(0);
+ break;
+ }
+
+ //
+ // Insert wait block in object wait list.
+ //
+
+ InsertTailList(&ClientEvent->Header.WaitListHead,
+ &WaitBlock->WaitListEntry);
+
+ //
+ // If the current thread is processing a queue entry, then attempt
+ // to activate another thread that is blocked on the queue object.
+ //
+
+ Queue = Thread->Queue;
+ if (Queue != NULL) {
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the thread wait parameters, set the thread dispatcher state
+ // to Waiting, and insert the thread in the wait list.
+ //
+
+ Thread->Alertable = FALSE;
+ Thread->WaitMode = (KPROCESSOR_MODE)WaitMode;
+ Thread->WaitReason = (UCHAR)WaitReason;
+ Thread->WaitTime = KiQueryLowTickCount();
+ Thread->State = Waiting;
+ KiInsertWaitList(WaitMode, Thread);
+
+ //
+ // Switch context to selected thread.
+ //
+ // Control is returned at the original IRQL.
+ //
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return wait status.
+ //
+
+ if (WaitStatus != STATUS_KERNEL_APC) {
+ return WaitStatus;
+ }
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ } while (TRUE);
+
+ //
+ // The thread is alerted, a user APC should be delivered, or the wait is
+ // satisfied. Unlock dispatcher database, lower IRQL to its previous value,
+ // and return the wait status.
+ //
+
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ return WaitStatus;
+}
+
+PLARGE_INTEGER
+FASTCALL
+KiComputeWaitInterval (
+ IN PRKTIMER Timer,
+ IN PLARGE_INTEGER OriginalTime,
+ IN OUT PLARGE_INTEGER NewTime
+ )
+
+/*++
+
+Routine Description:
+
+ This function recomputes the wait interval after a thread has been
+ awakened to deliver a kernel APC.
+
+Arguments:
+
+ Timer - Supplies a pointer to a dispatcher object of type timer.
+
+ OriginalTime - Supplies a pointer to the original timeout value.
+
+ NewTime - Supplies a pointer to a variable that receives the
+ recomputed wait interval.
+
+Return Value:
+
+ A pointer to the new time is returned as the function value.
+
+--*/
+
+{
+
+ //
+ // If the original wait time was absolute, then return the same
+ // absolute time. Otherwise, reduce the wait time remaining before
+ // the time delay expires.
+ //
+
+ if (Timer->Header.Absolute != FALSE) {
+ return OriginalTime;
+
+ } else {
+ KiQueryInterruptTime(NewTime);
+ NewTime->QuadPart -= Timer->DueTime.QuadPart;
+ return NewTime;
+ }
+}
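+
+ //
+ // Editorial note: for a relative wait the value computed above is again a
+ // negative relative time. For example, assuming a one second timeout
+ // (-10,000,000 in 100-nanosecond units) inserted when the interrupt time
+ // was T, the timer due time is T + 10,000,000. If a kernel APC wakes the
+ // thread 4,000,000 units later, the recomputed interval is
+ // (T + 4,000,000) - (T + 10,000,000) = -6,000,000, i.e. the remaining
+ // 600 milliseconds expressed as a relative timeout.
+ //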
+
+#if !defined(_MIPS_) && !defined(_ALPHA_) && !defined(_PPC_) && !defined(_X86_)
+
+
+NTSTATUS
+KiSwitchToThread (
+ IN PKTHREAD NextThread,
+ IN ULONG WaitReason,
+ IN ULONG WaitMode,
+ IN PKEVENT WaitObject
+ )
+
+/*++
+
+Routine Description:
+
+ This function performs an optimal switch to the specified target thread
+ if possible. No timeout is associated with the wait, thus the issuing
+ thread will wait until the wait event is signaled or an APC is delivered.
+
+ N.B. This routine is called with the dispatcher database locked.
+
+ N.B. The wait IRQL is assumed to be set for the current thread and the
+ wait status is assumed to be set for the target thread.
+
+ N.B. It is assumed that if a queue is associated with the target thread,
+ then the concurrency count has been incremented.
+
+ N.B. Control is returned from this function with the dispatcher database
+ unlocked.
+
+Arguments:
+
+ NextThread - Supplies a pointer to a dispatcher object of type thread.
+
+ WaitReason - Supplies the reason for the wait operation.
+
+ WaitMode - Supplies the processor wait mode.
+
+ WaitObject - Supplies a pointer to a dispatcher object of type event
+ or semaphore.
+
+Return Value:
+
+ The wait completion status. A value of STATUS_SUCCESS is returned if
+ the specified object satisfied the wait. A value of STATUS_USER_APC is
+ returned if the wait was aborted to deliver a user APC to the current
+ thread.
+
+--*/
+
+{
+
+ KPRIORITY NewPriority;
+ PKPRCB Prcb;
+ PKPROCESS Process;
+ ULONG Processor;
+ PKQUEUE Queue;
+ PKTHREAD Thread;
+ PKWAIT_BLOCK WaitBlock;
+ PLIST_ENTRY WaitEntry;
+ PKEVENT WaitForEvent = (PKEVENT)WaitObject;
+ NTSTATUS WaitStatus;
+
+ //
+ // If the target thread's kernel stack is resident, the target
+ // thread's process is in the balance set, the target thread
+ // can run on the current processor, and another thread has not
+ // already been selected to run on the current processor, then
+ // do a direct dispatch to the target thread bypassing all the
+ // general wait logic, thread priorities permitting.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ Process = NextThread->ApcState.Process;
+ Thread = KeGetCurrentThread();
+
+#if !defined(NT_UP)
+
+ Processor = Thread->NextProcessor;
+
+#endif
+
+ if ((NextThread->KernelStackResident != FALSE) &&
+
+#if !defined(NT_UP)
+
+ ((NextThread->Affinity & (1 << Processor)) != 0) &&
+ (Prcb->NextThread == NULL) &&
+
+#endif
+
+ (Process->State == ProcessInMemory)) {
+
+ //
+ // Compute the new thread priority and check if a direct switch
+ // to the target thread can be made.
+ //
+
+ if (Thread->Priority < LOW_REALTIME_PRIORITY) {
+ if (NextThread->Priority < LOW_REALTIME_PRIORITY) {
+
+ //
+ // Both the current and target threads run at a variable
+ // priority level. If the target thread is not running
+ // at a boosted level, then attempt to boost its priority
+ // to a level that is equal or greater than the current
+ // thread.
+ //
+
+ if (NextThread->PriorityDecrement == 0) {
+
+ //
+ // The target thread is not running at a boosted level.
+ //
+
+ NewPriority = NextThread->BasePriority + 1;
+ if (NewPriority >= Thread->Priority) {
+ if (NewPriority >= LOW_REALTIME_PRIORITY) {
+ NextThread->Priority = LOW_REALTIME_PRIORITY - 1;
+
+ } else {
+ NextThread->Priority = (SCHAR)NewPriority;
+ }
+
+ } else {
+ if (NextThread->BasePriority >= BASE_PRIORITY_THRESHOLD) {
+ NextThread->PriorityDecrement =
+ Thread->Priority - NextThread->BasePriority;
+ NextThread->DecrementCount = ROUND_TRIP_DECREMENT_COUNT;
+ NextThread->Priority = Thread->Priority;
+
+ } else {
+ NextThread->Priority = (SCHAR)NewPriority;
+ goto LongWay;
+ }
+ }
+
+ } else {
+
+ //
+ // The target thread is running at a boosted level.
+ //
+
+ NextThread->DecrementCount -= 1;
+ if (NextThread->DecrementCount == 0) {
+ NextThread->Priority = NextThread->BasePriority;
+ NextThread->PriorityDecrement = 0;
+ goto LongWay;
+ }
+
+ if (NextThread->Priority < Thread->Priority) {
+ goto LongWay;
+ }
+ }
+
+ } else {
+
+ //
+ // The current thread runs at a variable priority level
+ // and the target thread runs at a realtime priority
+ // level. A direct switch to the target thread can be
+ // made.
+ //
+
+ NextThread->Quantum = Process->ThreadQuantum;
+ }
+
+ } else {
+
+ //
+ // The current thread runs at a realtime priority level.
+ // If the priority of the current thread is less than or
+ // equal to the priority of the target thread, then a direct
+ // switch to the target thread can be made.
+ //
+
+ if (NextThread->Priority < Thread->Priority) {
+ goto LongWay;
+ }
+
+ NextThread->Quantum = Process->ThreadQuantum;
+ }
+
+ //
+ // Set the next processor number.
+ //
+
+#if !defined(NT_UP)
+
+ NextThread->NextProcessor = (CCHAR)Processor;
+
+#endif
+
+ //
+ // Initialize the event wait block and insert the wait block
+ // in the wait for event wait list.
+ //
+
+ WaitBlock = &Thread->WaitBlock[0];
+ Thread->WaitBlockList = WaitBlock;
+ Thread->WaitStatus = (NTSTATUS)0;
+ WaitBlock->Object = WaitForEvent;
+ WaitBlock->NextWaitBlock = WaitBlock;
+ WaitBlock->WaitKey = (CSHORT)(STATUS_SUCCESS);
+ WaitBlock->WaitType = WaitAny;
+ InsertTailList(&WaitForEvent->Header.WaitListHead,
+ &WaitBlock->WaitListEntry);
+
+ //
+ // If the current thread is processing a queue entry, then attempt
+ // to activate another thread that is blocked on the queue object.
+ //
+
+ Queue = Thread->Queue;
+ Prcb->NextThread = NextThread;
+ if (Queue != NULL) {
+ KiActivateWaiterQueue(Queue);
+ }
+
+ //
+ // Set the current thread wait parameters, set the thread state
+ // to Waiting, and insert the thread in the wait list.
+ //
+ // N.B. It is not necessary to increment and decrement the wait
+ // reason count since both the server and the client have
+ // the same wait reason.
+ //
+
+ Thread->Alertable = FALSE;
+ Thread->WaitMode = (KPROCESSOR_MODE)WaitMode;
+ Thread->WaitReason = (UCHAR)WaitReason;
+ Thread->WaitTime = KiQueryLowTickCount();
+ Thread->State = Waiting;
+ KiInsertWaitList(WaitMode, Thread);
+
+ //
+ // Switch context to target thread.
+ //
+ // Control is returned at the original IRQL.
+ //
+
+ WaitStatus = KiSwapThread();
+
+ //
+ // If the thread was not awakened to deliver a kernel mode APC,
+ // then return wait status.
+ //
+
+ if (WaitStatus != STATUS_KERNEL_APC) {
+ return WaitStatus;
+ }
+
+ //
+ // Raise IRQL to DISPATCH_LEVEL and lock the dispatcher database.
+ //
+
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ goto ContinueWait;
+ }
+
+ //
+ // Ready the target thread for execution and wait on the specified
+ // object.
+ //
+
+LongWay:
+
+ KiReadyThread(NextThread);
+
+ //
+ // Continue the wait and return the wait completion status.
+ //
+ // N.B. The wait continuation routine is called with the dispatcher
+ // database locked.
+ //
+
+ContinueWait:
+
+ return KiContinueClientWait(WaitForEvent, WaitReason, WaitMode);
+}
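+
+ //
+ // Editorial note: as a worked example of the direct switch test above,
+ // assume the current thread runs at priority 10 and the target thread has
+ // a base priority of 8 with no boost active. The tentative new priority
+ // is 8 + 1 = 9, which is below 10, so the target is instead raised to
+ // priority 10 with a priority decrement of 2 provided its base priority
+ // is at least BASE_PRIORITY_THRESHOLD; otherwise the target keeps
+ // priority 9 and the general wait path (LongWay) is taken.
+ //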
+
+#endif
diff --git a/private/ntos/ke/waitsup.c b/private/ntos/ke/waitsup.c
new file mode 100644
index 000000000..a9cb5f072
--- /dev/null
+++ b/private/ntos/ke/waitsup.c
@@ -0,0 +1,378 @@
+/*++
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ waitsup.c
+
+Abstract:
+
+ This module contains the support routines necessary to support the
+ generic kernel wait functions. Functions are provided to test if a
+ wait can be satisfied, to satisfy a wait, and to unwait a thread.
+
+Author:
+
+ David N. Cutler (davec) 24-Mar-1989
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define time critical priority class base.
+//
+
+#define TIME_CRITICAL_PRIORITY_BOUND 14
+
+VOID
+FASTCALL
+KiUnwaitThread (
+ IN PRKTHREAD Thread,
+ IN NTSTATUS WaitStatus,
+ IN KPRIORITY Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function unwaits a thread, sets the thread's wait completion status,
+ calculates the thread's new priority, and readies the thread for execution.
+
+Arguments:
+
+ Thread - Supplies a pointer to a dispatcher object of type thread.
+
+ WaitStatus - Supplies the wait completion status.
+
+ Increment - Supplies the priority increment that is to be applied to
+ the thread's priority.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KPRIORITY NewPriority;
+ PKPROCESS Process;
+ PKQUEUE Queue;
+ PKTIMER Timer;
+ PRKWAIT_BLOCK WaitBlock;
+
+ //
+ // Set wait completion status, remove wait blocks from object wait
+ // lists, and remove thread from wait list.
+ //
+
+ Thread->WaitStatus |= WaitStatus;
+ WaitBlock = Thread->WaitBlockList;
+ do {
+ RemoveEntryList(&WaitBlock->WaitListEntry);
+ WaitBlock = WaitBlock->NextWaitBlock;
+ } while (WaitBlock != Thread->WaitBlockList);
+
+ RemoveEntryList(&Thread->WaitListEntry);
+
+ //
+ // If thread timer is still active, then cancel thread timer.
+ //
+
+ Timer = &Thread->Timer;
+ if (Timer->Header.Inserted != FALSE) {
+ KiRemoveTreeTimer(Timer);
+ }
+
+ //
+ // If the thread is processing a queue entry, then increment the
+ // count of currently active threads.
+ //
+
+ Queue = Thread->Queue;
+ if (Queue != NULL) {
+ Queue->CurrentCount += 1;
+ }
+
+ //
+ // If the thread runs at a realtime priority level, then reset the
+ // thread quantum. Otherwise, compute the next thread priority and
+ // charge the thread for the wait operation.
+ //
+
+ Process = Thread->ApcState.Process;
+ if (Thread->Priority < LOW_REALTIME_PRIORITY) {
+ if ((Thread->PriorityDecrement == 0) &&
+ (Thread->DisableBoost == FALSE)) {
+ NewPriority = Thread->BasePriority + Increment;
+ if (((PEPROCESS)Process)->Vm.MemoryPriority == MEMORY_PRIORITY_FOREGROUND) {
+ NewPriority += PsPrioritySeperation;
+ }
+
+ if (NewPriority > Thread->Priority) {
+ if (NewPriority >= LOW_REALTIME_PRIORITY) {
+ Thread->Priority = LOW_REALTIME_PRIORITY - 1;
+
+ } else {
+ Thread->Priority = (SCHAR)NewPriority;
+ }
+ }
+ }
+
+ if (Thread->BasePriority >= TIME_CRITICAL_PRIORITY_BOUND) {
+ Thread->Quantum = Process->ThreadQuantum;
+
+ } else {
+ Thread->Quantum -= WAIT_QUANTUM_DECREMENT;
+ if (Thread->Quantum <= 0) {
+ Thread->Quantum = Process->ThreadQuantum;
+ Thread->Priority -= (Thread->PriorityDecrement + 1);
+ if (Thread->Priority < Thread->BasePriority) {
+ Thread->Priority = Thread->BasePriority;
+ }
+
+ Thread->PriorityDecrement = 0;
+ }
+ }
+
+ } else {
+ Thread->Quantum = Process->ThreadQuantum;
+ }
+
+ //
+ // Reready the thread for execution.
+ //
+
+ KiReadyThread(Thread);
+ return;
+}
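+
+ //
+ // Editorial note: as an example of the variable priority path above, a
+ // thread with a base priority of 8 that is awakened with an increment of
+ // 1 and no foreground memory priority is raised to priority 9, but only
+ // if that exceeds its current priority, no priority decrement is active,
+ // and priority boosts have not been disabled for the thread.
+ //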
+
+VOID
+KeBoostCurrentThread(
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function boosts the priority of the current thread for one quantum,
+ then reduces the thread priority to the base priority of the thread.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PKTHREAD Thread;
+
+ //
+ // Get current thread address, raise IRQL to synchronization level, and
+ // lock the dispatcher database
+ //
+
+ Thread = KeGetCurrentThread();
+
+redoboost:
+ KiLockDispatcherDatabase(&OldIrql);
+
+ //
+ // If a priority boost is not already active for the current thread
+ // and the thread priority is less than 14, then boost the thread
+ // priority to 14 and give the thread a large quantum. Otherwise,
+ // if a priority boost is active, then decrement the round trip
+ // count. If the count goes to zero, then release the dispatcher
+ // database lock, lower the thread priority to the base priority,
+ // and then attempt to boost the priority again. This will give
+ // other threads a chance to run. If the count does not reach zero,
+ // then give the thread another large quantum.
+ //
+ // If the thread priority is above 14, then no boost is applied.
+ //
+
+ if ((Thread->PriorityDecrement == 0) && (Thread->Priority < 14)) {
+ Thread->PriorityDecrement = 14 - Thread->BasePriority;
+ Thread->DecrementCount = ROUND_TRIP_DECREMENT_COUNT;
+ Thread->Priority = 14;
+ Thread->Quantum = Thread->ApcState.Process->ThreadQuantum * 2;
+
+ } else if (Thread->PriorityDecrement != 0) {
+ Thread->DecrementCount -= 1;
+ if (Thread->DecrementCount == 0) {
+ KiUnlockDispatcherDatabase(OldIrql);
+ KeSetPriorityThread(Thread, Thread->BasePriority);
+ goto redoboost;
+
+ } else {
+ Thread->Quantum = Thread->ApcState.Process->ThreadQuantum * 2;
+ }
+ }
+
+ KiUnlockDispatcherDatabase(OldIrql);
+ return;
+}
+
+VOID
+FASTCALL
+KiWaitSatisfyAll (
+ IN PRKWAIT_BLOCK WaitBlock
+ )
+
+/*++
+
+Routine Description:
+
+ This function satisfies a wait all and performs any side effects that
+ are necessary.
+
+Arguments:
+
+ WaitBlock - Supplies a pointer to a wait block.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKMUTANT Object;
+ PRKTHREAD Thread;
+ PRKWAIT_BLOCK WaitBlock1;
+
+ //
+ // If the wait type was WaitAny, then perform necessary side effects on
+ // the object specified by the wait block. Else perform necessary side
+ // effects on all the objects that were involved in the wait operation.
+ //
+
+ WaitBlock1 = WaitBlock;
+ Thread = WaitBlock1->Thread;
+ do {
+ if (WaitBlock1->WaitKey != (CSHORT)STATUS_TIMEOUT) {
+ Object = (PKMUTANT)WaitBlock1->Object;
+ KiWaitSatisfyAny(Object, Thread);
+ }
+
+ WaitBlock1 = WaitBlock1->NextWaitBlock;
+ } while (WaitBlock1 != WaitBlock);
+
+ return;
+}
+
+VOID
+FASTCALL
+KiWaitTest (
+ IN PVOID Object,
+ IN KPRIORITY Increment
+ )
+
+/*++
+
+Routine Description:
+
+ This function tests if a wait can be satisfied when an object attains
+ a state of signaled. If a wait can be satisfied, then the subject thread
+ is unwaited with a completion status that is the WaitKey of the wait
+ block from the object wait list. As many waits as possible are satisfied.
+
+Arguments:
+
+ Object - Supplies a pointer to a dispatcher object.
+
+ Increment - Supplies the priority increment that is to be applied to
+ the thread's priority.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKEVENT Event;
+ PLIST_ENTRY ListHead;
+ PRKWAIT_BLOCK NextBlock;
+ PKMUTANT Mutant;
+ PRKTHREAD Thread;
+ PRKWAIT_BLOCK WaitBlock;
+ PLIST_ENTRY WaitEntry;
+
+ //
+ // As long as the signal state of the specified object is Signaled and
+ // there are waiters in the object wait list, then try to satisfy a wait.
+ //
+
+ Event = (PKEVENT)Object;
+ ListHead = &Event->Header.WaitListHead;
+ WaitEntry = ListHead->Flink;
+ while ((Event->Header.SignalState > 0) &&
+ (WaitEntry != ListHead)) {
+ WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);
+ Thread = WaitBlock->Thread;
+ if (WaitBlock->WaitType != WaitAny) {
+
+ //
+ // The wait type is wait all - if all the objects are in
+ // a Signaled state, then satisfy the wait.
+ //
+
+ NextBlock = WaitBlock->NextWaitBlock;
+ while (NextBlock != WaitBlock) {
+ if (NextBlock->WaitKey != (CSHORT)(STATUS_TIMEOUT)) {
+ Mutant = (PKMUTANT)NextBlock->Object;
+ if ((Mutant->Header.Type == MutantObject) &&
+ (Mutant->Header.SignalState <= 0) &&
+ (Thread != Mutant->OwnerThread)) {
+ goto scan;
+
+ } else if (Mutant->Header.SignalState <= 0) {
+ goto scan;
+ }
+ }
+
+ NextBlock = NextBlock->NextWaitBlock;
+ }
+
+ //
+ // All objects associated with the wait are in the Signaled
+ // state - satisfy the wait.
+ //
+
+ WaitEntry = WaitEntry->Blink;
+ KiWaitSatisfyAll(WaitBlock);
+
+ } else {
+
+ //
+ // The wait type is wait any - satisfy the wait.
+ //
+
+ WaitEntry = WaitEntry->Blink;
+ KiWaitSatisfyAny((PKMUTANT)Event, Thread);
+ }
+
+ KiUnwaitThread(Thread, (NTSTATUS)WaitBlock->WaitKey, Increment);
+
+ scan:
+ WaitEntry = WaitEntry->Flink;
+ }
+
+ return;
+}
diff --git a/private/ntos/ke/xipi.c b/private/ntos/ke/xipi.c
new file mode 100644
index 000000000..b1d517119
--- /dev/null
+++ b/private/ntos/ke/xipi.c
@@ -0,0 +1,244 @@
+/*++
+
+Copyright (c) 1993-1995 Microsoft Corporation
+
+Module Name:
+
+ xipi.c
+
+Abstract:
+
+ This module implements portable interprocessor interrupt routines.
+
+Author:
+
+ David N. Cutler (davec) 24-Apr-1993
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+//
+// Define forward reference function prototypes.
+//
+
+VOID
+KiIpiGenericCallTarget (
+ IN PULONG SignalDone,
+ IN PVOID BroadcastFunction,
+ IN PVOID Context,
+ IN PVOID Parameter3
+ );
+
+ULONG
+KiIpiGenericCall (
+ IN PKIPI_BROADCAST_WORKER BroadcastFunction,
+ IN ULONG Context
+ )
+
+/*++
+
+Routine Description:
+
+ This function executes the specified function on every processor in
+ the host configuration in a synchronous manner, i.e., the function
+ is executed on each target in series with the execution of the source
+ processor.
+
+Arguments:
+
+ BroadcastFunction - Supplies the address of function that is executed
+ on each of the target processors.
+
+ Context - Supplies the value of the context parameter that is passed
+ to each function.
+
+Return Value:
+
+ The value returned by the specified function on the source processor
+ is returned as the function value.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ ULONG Status;
+ KAFFINITY TargetProcessors;
+
+ //
+ // Raise IRQL to the higher of the current level and synchronization
+ // level to avoid a possible context switch.
+ //
+
+ KeRaiseIrql((KIRQL)(max(KiSynchIrql, KeGetCurrentIrql())), &OldIrql);
+
+ //
+ // Initialize the broadcast packet, compute the set of target processors,
+ // and send the packet to the target processors for execution.
+ //
+
+#if !defined(NT_UP)
+
+ TargetProcessors = KeActiveProcessors & ~KeGetCurrentPrcb()->SetMember;
+ if (TargetProcessors != 0) {
+ KiIpiSendPacket(TargetProcessors,
+ KiIpiGenericCallTarget,
+ (PVOID)BroadcastFunction,
+ (PVOID)Context,
+ NULL);
+ }
+
+#endif
+
+ //
+ // Execute the function on the source processor and capture the return status.
+ //
+
+ Status = BroadcastFunction(Context);
+
+ //
+ // Wait until all of the target processors have finished capturing the
+ // function parameters.
+ //
+
+#if !defined(NT_UP)
+
+ if (TargetProcessors != 0) {
+ KiIpiStallOnPacketTargets();
+ }
+
+#endif
+
+ //
+ // Lower IRQL to its previous level and return the function execution
+ // status.
+ //
+
+ KeLowerIrql(OldIrql);
+ return Status;
+}
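+
+ //
+ // Editorial sketch (not part of this module): a caller broadcasts a
+ // worker that runs once on every processor and receives the status
+ // returned by the worker on the calling processor. The worker and its
+ // context value are illustrative; the signature matches the
+ // PKIPI_BROADCAST_WORKER type used above.
+ //
+ //     ULONG
+ //     ExampleBroadcastWorker (
+ //         IN ULONG Context
+ //         )
+ //     {
+ //         // Perform the per-processor operation here.
+ //
+ //         return Context;
+ //     }
+ //
+ //     Status = KiIpiGenericCall(ExampleBroadcastWorker, 0);
+ //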
+
+#if !defined(NT_UP)
+
+
+VOID
+KiIpiGenericCallTarget (
+ IN PULONG SignalDone,
+ IN PVOID BroadcastFunction,
+ IN PVOID Context,
+ IN PVOID Parameter3
+ )
+
+/*++
+
+Routine Description:
+
+ This function is the target jacket function to execute a broadcast
+ function on a set of target processors. The broadcast packet address
+ is obtained, the specified parameters are captured, the broadcast
+ packet address is cleared to signal the source processor to continue,
+ and the specified function is executed.
+
+Arguments:
+
+ SignalDone - Supplies a pointer to a variable that is cleared when the
+ requested operation has been performed.
+
+ BroadcastFunction - Supplies the address of function that is executed
+ on each of the target processors.
+
+ Context - Supplies the value of the context parameter that is passed
+ to each function.
+
+ Parameter3 - Not used.
+
+Return Value:
+
+ None
+
+--*/
+
+{
+
+ //
+ // Execute the specified function.
+ //
+
+ ((PKIPI_BROADCAST_WORKER)(BroadcastFunction))((ULONG)Context);
+ KiIpiSignalPacketDone(SignalDone);
+ return;
+}
+
+
+
+VOID
+KiIpiStallOnPacketTargets (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function waits until the specified set of processors have signaled
+ their completion of a requested function.
+
+ N.B. The exact protocol used between the source and the target of an
+ interprocessor request is not specified. Minimally the source
+ must construct an appropriate packet and send the packet to a set
+ of specified targets. Each target receives the address of the packet
+ address as an argument, and minimally must clear the packet address
+ when the mutually agreed upon protocol allows. The target has three
+ options:
+
+ 1. Capture necessary information, release the source by clearing
+ the packet address, execute the request in parallel with the
+ source, and return from the interrupt.
+
+ 2. Execute the request in series with the source, release the
+ source by clearing the packet address, and return from the
+ interrupt.
+
+ 3. Execute the request in series with the source, release the
+ source, wait for a reply from the source based on a packet
+ parameter, and return from the interrupt.
+
+ This function is provided to enable the source to synchronize with the
+ target for cases 2 and 3 above.
+
+ N.B. There is no support for method 3 above.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ None.
+
+--*/
+
+{
+
+ PKPRCB Prcb;
+
+ //
+ // Wait until the target set of processors is zero in the current
+ // processor's packet.
+ //
+
+ Prcb = KeGetCurrentPrcb();
+ do {
+ } while (Prcb->TargetSet != 0);
+
+ return;
+}
+
+#endif
diff --git a/private/ntos/ke/yield.c b/private/ntos/ke/yield.c
new file mode 100644
index 000000000..7e5803d8d
--- /dev/null
+++ b/private/ntos/ke/yield.c
@@ -0,0 +1,118 @@
+/*++
+
+Copyright (c) 1996 Microsoft Corporation
+
+Module Name:
+
+ yield.c
+
+Abstract:
+
+ This module implements the function to yield execution for one quantum
+ to any other runnable thread.
+
+Author:
+
+ David N. Cutler (davec) 15-Mar-1996
+
+Environment:
+
+ Kernel mode only.
+
+Revision History:
+
+--*/
+
+#include "ki.h"
+
+NTSTATUS
+NtYieldExecution (
+ VOID
+ )
+
+/*++
+
+Routine Description:
+
+ This function yields execution to any ready thread for up to one
+ quantum.
+
+Arguments:
+
+ None.
+
+Return Value:
+
+ STATUS_SUCCESS is returned if execution was yielded to another thread.
+ Otherwise, STATUS_NO_YIELD_PERFORMED is returned.
+
+--*/
+
+{
+
+ KIRQL OldIrql;
+ PRKPRCB Prcb;
+ KPRIORITY Priority;
+ NTSTATUS Status;
+ PRKTHREAD Thread;
+
+ //
+ // If any other threads are ready, then attempt to yield execution.
+ //
+
+ Status = STATUS_NO_YIELD_PERFORMED;
+ if (KiReadySummary != 0) {
+
+ //
+ // If a thread has not already been selected for execution, then
+ // attempt to select another thread for execution.
+ //
+
+ Thread = KeGetCurrentThread();
+ KiLockDispatcherDatabase(&Thread->WaitIrql);
+ Prcb = KeGetCurrentPrcb();
+ if (Prcb->NextThread == NULL) {
+ Prcb->NextThread = KiFindReadyThread(Thread->NextProcessor, 1);
+ }
+
+ //
+ // If a new thread has been selected for execution, then switch
+ // immediately to the selected thread.
+ //
+
+ if (Prcb->NextThread != NULL) {
+
+ //
+ // Give the current thread a new quantum, simulate a quantum
+ // end, insert the current thread in the appropriate ready list,
+ // and switch context to selected thread.
+ //
+
+ Thread->Quantum = Thread->ApcState.Process->ThreadQuantum;
+ Thread->State = Ready;
+ Priority = Thread->Priority;
+ if (Priority < LOW_REALTIME_PRIORITY) {
+ Priority = Priority - Thread->PriorityDecrement - 1;
+ if (Priority < Thread->BasePriority) {
+ Priority = Thread->BasePriority;
+ }
+
+ Thread->PriorityDecrement = 0;
+
+ }
+
+ Thread->Priority = (SCHAR)Priority;
+
+ InsertTailList(&KiDispatcherReadyListHead[Priority],
+ &Thread->WaitListEntry);
+
+ SetMember(Priority, KiReadySummary);
+ KiSwapThread();
+ Status = STATUS_SUCCESS;
+
+ } else {
+ KiUnlockDispatcherDatabase(Thread->WaitIrql);
+ }
+ }
+
+ return Status;
+}
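+
+ //
+ // Editorial note: this service is what the Win32 SwitchToThread API
+ // ultimately invokes. A minimal sketch of a cooperative polling loop
+ // (the WorkAvailable predicate is illustrative) that yields the
+ // processor between polls:
+ //
+ //     while (WorkAvailable() == FALSE) {
+ //         NtYieldExecution();
+ //     }
+ //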