File: cpu.cc

/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.165 2006/06/25 21:44:46 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA


#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "iodev/iodev.h"

#if BX_EXTERNAL_DEBUGGER
#include "extdb.h"
#endif

#if BX_PROVIDE_CPU_MEMORY
#if BX_ADDRESS_SPACES==1
BOCHSAPI BX_MEM_C bx_mem;
#else
BOCHSAPI BX_MEM_C bx_mem_array[BX_ADDRESS_SPACES];
#endif
#endif

#if BX_SUPPORT_ICACHE

bxPageWriteStampTable pageWriteStampTable;

void purgeICaches(void)
{
#if BX_SUPPORT_SMP
  for (unsigned i=0; i<BX_SMP_PROCESSORS; i++)
    BX_CPU(i)->iCache.purgeICacheEntries();
#else
  BX_CPU(0)->iCache.purgeICacheEntries();
#endif

  pageWriteStampTable.purgeWriteStamps();
}

void flushICaches(void)
{
#if BX_SUPPORT_SMP
  for (unsigned i=0; i<BX_SMP_PROCESSORS; i++)
    BX_CPU(i)->iCache.flushICacheEntries();
#else
  BX_CPU(0)->iCache.flushICacheEntries();
#endif

  pageWriteStampTable.resetWriteStamps();
}
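
// Note: both helpers above drop every decoded entry from every CPU's
// iCache; they differ only in how the page write-stamp table is treated
// (purged versus reset).  Either way, the next fetch of any instruction
// takes the decode (miss) path in fetchInstruction() below.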

#define InstrumentICACHE 0

#if InstrumentICACHE
static unsigned iCacheLookups=0;
static unsigned iCacheMisses=0;

#define InstrICache_StatsMask 0xffffff

#define InstrICache_Stats() {\
  if ((iCacheLookups & InstrICache_StatsMask) == 0) { \
    BX_INFO(("ICACHE lookups: %u, misses: %u, hit rate = %6.2f%% ", \
          iCacheLookups, \
          iCacheMisses,  \
          (iCacheLookups-iCacheMisses) * 100.0 / iCacheLookups)); \
    iCacheLookups = iCacheMisses = 0; \
  } \
}
#define InstrICache_Increment(v) (v)++

#else
#define InstrICache_Stats()
#define InstrICache_Increment(v)
#endif
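
// When InstrumentICACHE is enabled, the macros above log the iCache hit
// rate once every (InstrICache_StatsMask+1) = 16M lookups and then reset
// the counters; when disabled, they compile away to nothing.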

#endif // BX_SUPPORT_ICACHE

// notes:

// The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
// instructions and then return so that the other processors have a chance to
// run.  This is used only when simulating multiple processors.
// 
// If the maximum number of instructions has been executed, return.  A
// count of zero means run forever.
#define CHECK_MAX_INSTRUCTIONS(count) \
  if (count > 0) {                    \
    count--;                          \
    if (count == 0) return;           \
  }
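
// For example, cpu_loop(100) returns to its caller after at most 100
// instructions, while cpu_loop(0) only returns when some other event
// (debugger guard, kill request, ...) forces it to.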

#if BX_SUPPORT_SMP
#  define BX_TICK1_IF_SINGLE_PROCESSOR()
#else
#  define BX_TICK1_IF_SINGLE_PROCESSOR() BX_TICK1()
#endif

// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
#define RIP EIP
#define RSP ESP
#endif
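
// On builds without x86-64 support, RIP and RSP simply alias the 32-bit
// registers, so the shared code below can use the 64-bit names
// unconditionally.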

BX_CPP_INLINE bxInstruction_c* BX_CPU_C::fetchInstruction(bxInstruction_c *iStorage, bx_address eipBiased)
{
  unsigned ret;
  bxInstruction_c *i = iStorage;

#if BX_SUPPORT_ICACHE
  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrA20Page + eipBiased;
  unsigned iCacheHash = BX_CPU_THIS_PTR iCache.hash(pAddr);
  bxICacheEntry_c *cache_entry = &(BX_CPU_THIS_PTR iCache.entry[iCacheHash]);
  i = &(cache_entry->i);

  Bit32u pageWriteStamp = *(BX_CPU_THIS_PTR currPageWriteStampPtr);

  InstrICache_Increment(iCacheLookups);
  InstrICache_Stats();

  if ((cache_entry->pAddr == pAddr) &&
      (cache_entry->writeStamp == pageWriteStamp))
  {
    // iCache hit. Instruction is already decoded and stored in the
    // instruction cache.
#if BX_INSTRUMENTATION
    // An instruction was found in the iCache.
    BX_INSTR_OPCODE(BX_CPU_ID, BX_CPU_THIS_PTR eipFetchPtr + eipBiased,
       i->ilen(), BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
#endif
    return i;
  }
#endif

  // iCache miss. No validated instruction with matching fetch parameters
  // is in the iCache. Or we're not compiling iCache support in, in which
  // case we always have an iCache miss.  :^)
  bx_address remainingInPage = (BX_CPU_THIS_PTR eipPageWindowSize - eipBiased);
  unsigned maxFetch = 15;
  if (remainingInPage < 15) maxFetch = remainingInPage;
  Bit8u *fetchPtr = BX_CPU_THIS_PTR eipFetchPtr + eipBiased;

#if BX_SUPPORT_ICACHE
  // The entry will be marked valid only if fetchdecode succeeds
  cache_entry->writeStamp = ICacheWriteStampInvalid;
  InstrICache_Increment(iCacheMisses);
#endif

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
    ret = fetchDecode64(fetchPtr, i, maxFetch);
  else
#endif
    ret = fetchDecode(fetchPtr, i, maxFetch);

  if (ret==0) {
    // return iStorage and leave icache entry invalid (do not cache instr)
    boundaryFetch(fetchPtr, remainingInPage, iStorage);
    return iStorage;
  }
  else
  {
#if BX_SUPPORT_ICACHE
    cache_entry->pAddr = pAddr;
    cache_entry->writeStamp = pageWriteStamp;
#endif
#if BX_INSTRUMENTATION
    // An instruction was just fetched and decoded (an iCache hit returns earlier).
    BX_INSTR_OPCODE(BX_CPU_ID, fetchPtr, i->ilen(),
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
#endif
  }

  return i;
}
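
// In summary: on an iCache hit, fetchInstruction() returns the already
// decoded bxInstruction_c stored in the cache entry.  On a miss, the
// bytes at eipFetchPtr+eipBiased are decoded and the entry revalidated,
// unless the instruction spans a page boundary, in which case it is
// decoded into the caller's iStorage and deliberately left uncached.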

void BX_CPU_C::cpu_loop(Bit32u max_instr_count)
{
  bxInstruction_c iStorage BX_CPP_AlignN(32);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
#if BX_MAGIC_BREAKPOINT
  BX_CPU_THIS_PTR magic_break = 0;
#endif
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) 
  { 
    // we can get here only from the exception() function ...
    BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) {
      return;
    }
#endif
  }

#if BX_DEBUGGER
  // If the exception() routine has encountered a nasty fault scenario,
  // the debugger may request that control be returned to it so that
  // the situation may be examined.
  if (bx_guard.interrupt_requested) {
    BX_ERROR(("CPU_LOOP bx_guard.interrupt_requested=%d", bx_guard.interrupt_requested));
    return;
  }
#endif

  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call.  In either case, commit the
  // new EIP/ESP, and set up other environmental fields.  This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
  BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
  BX_CPU_THIS_PTR EXT = 0;
  BX_CPU_THIS_PTR errorno = 0;

  while (1) {

    // First check on events which occurred for previous instructions
    // (traps) and ones which are asynchronous to the CPU
    // (hardware interrupts).
    if (BX_CPU_THIS_PTR async_event) {
      if (handleAsyncEvent()) {
        // The handler requested an immediate return to the caller.
        return;
      }
    }

    bx_address eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;

    if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
      prefetch();
      eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
    }

    // fetch and decode next instruction
    bxInstruction_c *i = fetchInstruction(&iStorage, eipBiased);
    BxExecutePtr_tR resolveModRM = i->ResolveModrm; // Get as soon as possible for speculation
    BxExecutePtr_t execute = i->execute; // fetch as soon as possible for speculation
    if (resolveModRM)
      BX_CPU_CALL_METHODR(resolveModRM, (i));

    // An instruction will have been fetched using either the normal case,
    // or the boundary fetch (across pages), by this point.
    BX_INSTR_FETCH_DECODE_COMPLETED(BX_CPU_ID, i);

#if BX_DEBUGGER
    if(dbg_check_begin_instr_bpoint()) return;
#endif

#if BX_EXTERNAL_DEBUGGER
    bx_external_debugger(BX_CPU_THIS);
#endif

#if BX_DISASM
    if (BX_CPU_THIS_PTR trace) {
      // print the instruction that is about to be executed
#if BX_DEBUGGER
      bx_dbg_disassemble_current(BX_CPU_ID, 1); // only one cpu, print time stamp
#else
      debug_disasm_instruction(BX_CPU_THIS_PTR prev_eip);
#endif
    }
#endif

    // decoding instruction completed -> continue with execution
    BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
    RIP += i->ilen();

    if ( !(i->repUsedL() && i->repeatableL()) ) {
      // non repeating instruction
      BX_CPU_CALL_METHOD(execute, (i));
    }
    else {

repeat_loop:

      if (i->repeatableZFL()) {
#if BX_SUPPORT_X86_64
        if (i->as64L()) {
          if (RCX != 0) {
            BX_CPU_CALL_METHOD(execute, (i));
            BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
            RCX --;
          }
          if ((i->repUsedValue()==3) && (get_ZF()==0)) goto debugger_check;
          if ((i->repUsedValue()==2) && (get_ZF()!=0)) goto debugger_check;
          if (RCX == 0) goto debugger_check;
        }
        else
#endif
        if (i->as32L()) {
          if (ECX != 0) {
            BX_CPU_CALL_METHOD(execute, (i));
            BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
            ECX --;
          }
          if ((i->repUsedValue()==3) && (get_ZF()==0)) goto debugger_check;
          if ((i->repUsedValue()==2) && (get_ZF()!=0)) goto debugger_check;
          if (ECX == 0) goto debugger_check;
        }
        else {
          if (CX != 0) {
            BX_CPU_CALL_METHOD(execute, (i));
            BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
            CX --;
          }
          if ((i->repUsedValue()==3) && (get_ZF()==0)) goto debugger_check;
          if ((i->repUsedValue()==2) && (get_ZF()!=0)) goto debugger_check;
          if (CX == 0) goto debugger_check;
        }
      }
      else { // normal repeat, no concern for ZF
#if BX_SUPPORT_X86_64
        if (i->as64L()) {
          if (RCX != 0) {
            BX_CPU_CALL_METHOD(execute, (i));
            BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
            RCX --;
          }
          if (RCX == 0) goto debugger_check;
        }
        else
#endif
        if (i->as32L()) {
          if (ECX != 0) {
            BX_CPU_CALL_METHOD(execute, (i));
            BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
            ECX --;
          }
          if (ECX == 0) goto debugger_check;
        }
        else { // 16bit addrsize
          if (CX != 0) {
            BX_CPU_CALL_METHOD(execute, (i));
            BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
            CX --;
          }
          if (CX == 0) goto debugger_check;
        }
      }

      BX_TICK1_IF_SINGLE_PROCESSOR();
#if BX_DEBUGGER == 0
      // when debugger is not enabled, directly jump to next iteration
      if (! BX_CPU_THIS_PTR async_event) goto repeat_loop;
#endif
//    invalidate_prefetch_q(); // why do we need invalidate_prefetch_q ?
      RIP = BX_CPU_THIS_PTR prev_eip; // repeat loop not done, restore RIP
      goto debugger_check;
    }
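
    // Note on the repeat handling above: each iteration executes the
    // instruction body once and decrements the count register, ticking
    // the clock between iterations.  When no async event is pending (and
    // no debugger is compiled in) we jump straight back to repeat_loop;
    // if an event needs service before the count is exhausted, RIP is
    // restored to the start of the instruction so the remaining
    // iterations resume once the event has been handled.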

debugger_check:
    BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
    BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
    BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
    BX_TICK1_IF_SINGLE_PROCESSOR();

    // inform instrumentation about new instruction
    BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);

#if BX_DEBUGGER
    // note: instructions generating exceptions never reach this point.
    if (dbg_check_end_instr_bpoint()) return;
#endif

#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) {
      unsigned reason = bx_gdbstub_check(EIP);
      if (reason != GDBSTUB_STOP_NO_REASON) return;
    }
#endif

#if BX_SUPPORT_SMP || BX_DEBUGGER
    // The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
    // instructions and then return so that the other processors have a chance
    // to run. This is used only when simulating multiple processors. If only
    // one processor, don't waste any cycles on it!
    CHECK_MAX_INSTRUCTIONS(max_instr_count);
#endif

  }  // while (1)
}

unsigned BX_CPU_C::handleAsyncEvent(void)
{
  //
  // This area is where we process special conditions and events.
  //
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_HALT_STATE) {
    // I made up the bitmask above to mean HALT state.
#if BX_SUPPORT_SMP == 0
    // for one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
#if BX_DEBUGGER
    while (bx_guard.interrupt_requested != 1)
#else
    while (1)
#endif
    {
      if ((BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) || 
           BX_CPU_THIS_PTR nmi_pending || BX_CPU_THIS_PTR smi_pending)
      {
        // interrupt ends the HALT condition
        BX_CPU_THIS_PTR debug_trap = 0; // clear traps for after resume
        BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
        break;
      }
      if ((BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_HALT_STATE) == 0) {
        BX_INFO(("handleAsyncEvent: reset detected in HLT state"));
        break;
      }
      BX_TICK1();
    }
#else   /* BX_SUPPORT_SMP */
    // for multiprocessor simulation, even if this CPU is halted we still
    // must give the others a chance to simulate.  If an interrupt has 
    // arrived, then clear the HALT condition; otherwise just return from
    // the CPU loop with stop_reason STOP_CPU_HALTED.
    if ((BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) || 
         BX_CPU_THIS_PTR nmi_pending || BX_CPU_THIS_PTR smi_pending)
    {
      // interrupt ends the HALT condition
      BX_CPU_THIS_PTR debug_trap = 0; // clear traps for after resume
      BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
    } else {
      // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
      BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
      return 1; // Return to caller of cpu_loop.
    }
#endif
  } else if (bx_pc_system.kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return 1; // Return to caller of cpu_loop.
  }

  // Priority 1: Hardware Reset and Machine Checks
  //   RESET
  //   Machine Check
  // (bochs doesn't support these)

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & 0x00008000) {
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }

  // Priority 3: External Hardware Interventions
  //   FLUSH
  //   STOPCLK
  //   SMI
  //   INIT
  // (bochs doesn't support these)
  if (BX_CPU_THIS_PTR smi_pending && ! BX_CPU_THIS_PTR smm_mode())
  {
    // clear SMI pending flag and disable NMI when SMM was accepted
    BX_CPU_THIS_PTR smi_pending = 0;
    BX_CPU_THIS_PTR nmi_disable = 1;
    enter_system_management_mode();
  }
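
  // SMIs are accepted here, at an instruction boundary; NMI delivery is
  // disabled on entry to SMM (and presumably re-enabled when the handler
  // returns via RSM, which is handled elsewhere).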

  // Priority 4: Traps on Previous Instruction
  //   Breakpoints
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if (BX_CPU_THIS_PTR debug_trap &&
       !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG))
  {
    // A trap may be inhibited on this boundary due to an instruction
    // which loaded SS.  If so we clear the inhibit_mask below
    // and don't execute this code until the next boundary.
    // Commit debug events to DR6
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }

  // Priority 5: External Interrupts
  //   NMI Interrupts
  //   Maskable Hardware Interrupts
  if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
    // inhibit_mask is cleared below, in which case we will have
    // an opportunity to check interrupts on the next instruction
    // boundary.
  }
  else if (BX_CPU_THIS_PTR nmi_pending) {
    BX_CPU_THIS_PTR nmi_pending = 0;
    BX_CPU_THIS_PTR nmi_disable = 1;
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(2, 0, 0, 0);
  }
  else if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF() && BX_DBG_ASYNC_INTR)
  {
    Bit8u vector;

    // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
    if (BX_CPU_THIS_PTR local_apic.INTR)
      vector = BX_CPU_THIS_PTR local_apic.acknowledge_int();
    else
      vector = DEV_pic_iac(); // may set INTR with next interrupt
#else
    // if no local APIC, always acknowledge the PIC.
    vector = DEV_pic_iac(); // may set INTR with next interrupt
#endif
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(vector, 0, 0, 0);
    // Set up the environment as it would be when this main cpu loop
    // gets invoked.  At the end of normal instructions, we always commit
    // the new EIP/ESP values.  But here, we call interrupt() much as if
    // it were a software interrupt instruction, and need to effect the
    // commit here.  This code mirrors similar code above.
    BX_CPU_THIS_PTR prev_eip = RIP; // commit new RIP
    BX_CPU_THIS_PTR prev_esp = RSP; // commit new RSP
    BX_CPU_THIS_PTR EXT = 0;
    BX_CPU_THIS_PTR errorno = 0;
  }
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    DEV_dma_raise_hlda();
  }

  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Illegal opcode
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Overflow
  //   Bound error
  //   Invalid TSS
  //   Segment not present
  //   Stack fault
  //   General protection
  //   Data page fault
  //   Alignment check
  // (handled by rest of the code)

  if (BX_CPU_THIS_PTR get_TF())
  {
    // TF is set before execution of next instruction.  Schedule
    // a debug trap (#DB) after execution.  After completion of
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= 0x00004000; // BS flag in DR6
  }

  // Now we can handle things which are synchronous to instruction
  // execution.
  if (BX_CPU_THIS_PTR get_RF()) {
    BX_CPU_THIS_PTR clear_RF();
  }
#if BX_X86_DEBUGGER
  else {
    // only bother comparing if any breakpoints enabled
    if (BX_CPU_THIS_PTR dr7 & 0x000000ff) {
      bx_address iaddr = BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + BX_CPU_THIS_PTR prev_eip;
      Bit32u dr6_bits = hwdebug_compare(iaddr, 1, BX_HWDebugInstruction, BX_HWDebugInstruction);
      if (dr6_bits)
      {
        // Add to the list of debug events thus far.
        BX_CPU_THIS_PTR async_event = 1;
        BX_CPU_THIS_PTR debug_trap |= dr6_bits;
        // If debug events are not inhibited on this boundary,
        // fire off a debug fault.  Otherwise handle it on the next
        // boundary. (becomes a trap)
        if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG)) {
          // Commit debug events to DR6
          BX_CPU_THIS_PTR dr6 = BX_CPU_THIS_PTR debug_trap;
          exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
        }
      }
    }
  }
#endif

  // We have ignored processing of external interrupts and
  // debug events on this boundary.  Reset the mask so they
  // will be processed on the next boundary.
  BX_CPU_THIS_PTR inhibit_mask = 0;

  if ( !(BX_CPU_INTR ||
         BX_CPU_THIS_PTR debug_trap ||
         BX_HRQ ||
         BX_CPU_THIS_PTR get_TF() 
#if BX_X86_DEBUGGER
         || (BX_CPU_THIS_PTR dr7 & 0xff)
#endif
        ))
    BX_CPU_THIS_PTR async_event = 0;

  return 0; // Continue executing cpu_loop.
}
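
// async_event stays set only while at least one source (INTR, a pending
// debug trap, HRQ, TF single-stepping, or an enabled hardware breakpoint)
// may still need service; clearing it above lets cpu_loop skip the call
// into handleAsyncEvent() entirely on its fast path.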


// boundaries of consideration:
//
//  * physical memory boundary: 1024k (1 Megabyte) (increments of...)
//  * A20 boundary:             1024k (1 Megabyte)
//  * page boundary:            4k
//  * ROM boundary:             2k (don't care since we are only reading)
//  * segment boundary:         any
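//
// prefetch() (re)establishes the direct-fetch window for the current code
// page: eipPageBias is chosen so that RIP + eipPageBias is the offset of
// RIP within the window, eipPageWindowSize is how many bytes may be
// fetched linearly (at most 4096, less if the CS limit ends inside the
// page), and eipFetchPtr is the host address of the window's first byte.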

void BX_CPU_C::prefetch(void)
{
  bx_address temp_rip = RIP;
  bx_address laddr = BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + temp_rip;
  bx_phy_address pAddr;

  // Calculate RIP at the beginning of the page.
  bx_address eipPageOffset0 = RIP - (laddr & 0xfff);
  BX_CPU_THIS_PTR eipPageBias = -eipPageOffset0;
  BX_CPU_THIS_PTR eipPageWindowSize = 4096;

  if (! Is64BitMode()) {
    Bit32u temp_limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
    if (((Bit32u) temp_rip) > temp_limit) {
      BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", (Bit32u) temp_rip, temp_limit));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
    if (temp_limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
      BX_CPU_THIS_PTR eipPageWindowSize = temp_limit + BX_CPU_THIS_PTR eipPageBias + 1;
    }
  }

#if BX_SUPPORT_PAGING
  if (BX_CPU_THIS_PTR cr0.pg) {
    // aligned block guaranteed to be all in one page, same A20 address
    pAddr = itranslate_linear(laddr, CPL==3);
    pAddr = A20ADDR(pAddr);
  }
  else
#endif // BX_SUPPORT_PAGING
  {
    pAddr = A20ADDR(laddr);
  }

  BX_CPU_THIS_PTR pAddrA20Page = pAddr & 0xfffff000;
  BX_CPU_THIS_PTR eipFetchPtr =
    BX_CPU_THIS_PTR mem->getHostMemAddr(BX_CPU_THIS, 
          BX_CPU_THIS_PTR pAddrA20Page, BX_READ, CODE_ACCESS);

  // Sanity checks
  if (! BX_CPU_THIS_PTR eipFetchPtr) {
    if (pAddr >= BX_CPU_THIS_PTR mem->len) {
      BX_PANIC(("prefetch: running in bogus memory, pAddr=0x%08x", pAddr));
    }
    else {
      BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x%08x", pAddr));
    }
  }

#if BX_SUPPORT_ICACHE
  BX_CPU_THIS_PTR currPageWriteStampPtr = pageWriteStampTable.getPageWriteStampPtr(pAddr);
  Bit32u pageWriteStamp = *(BX_CPU_THIS_PTR currPageWriteStampPtr);
  Bit32u fetchModeMask  = BX_CPU_THIS_PTR fetchModeMask;
  if ((pageWriteStamp & ICacheFetchModeMask) != fetchModeMask)
  {
    // The current CPU mode does not match iCache entries for this
    // physical page.
    pageWriteStamp &= ICacheWriteStampMask; // Clear out old fetch mode bits.
    pageWriteStamp |= fetchModeMask;        // Add in new ones.
    pageWriteStampTable.setPageWriteStamp(pAddr, pageWriteStamp);
  }
#endif
}

void BX_CPU_C::boundaryFetch(Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i)
{
  unsigned j;
  Bit8u fetchBuffer[16]; // Really only need 15
  unsigned ret;

  if (remainingInPage >= 15) {
    BX_INFO(("fetchDecode #GP(0): too many instruction prefixes"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Read all leftover bytes in current page up to boundary.
  for (j=0; j<remainingInPage; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

  // The 2nd chunk of the instruction is on the next page.
  // Set RIP to the 0th byte of the 2nd page, and force a
  // prefetch so direct access of that physical page is possible, and
  // all the associated info is updated.
  RIP += remainingInPage;
  prefetch();
  if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
    BX_PANIC(("fetch_decode: small window size after prefetch"));
  }

  // We can fetch straight from the 0th byte, which is eipFetchPtr.
  fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;

  // read leftover bytes in next page
  for (; j<15; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    ret = fetchDecode64(fetchBuffer, i, 15);
  }
  else
#endif
  {
    ret = fetchDecode(fetchBuffer, i, 15);
  }

  if (ret==0) {
    BX_INFO(("fetchDecode #GP(0): too many instruction prefixes"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Restore EIP since we fudged it to start at the 2nd page boundary.
  RIP = BX_CPU_THIS_PTR prev_eip;

  // Since the instruction crossed a page boundary, note that we need a
  // prefetch() again on the next instruction.  Perhaps we can optimize
  // this to eliminate the extra prefetch() since we do it above, but we
  // have to think about repeated instructions, etc.
  invalidate_prefetch_q();

  BX_INSTR_OPCODE(BX_CPU_ID, fetchBuffer, i->ilen(),
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
}
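
// boundaryFetch() thus decodes from a local buffer holding at most 15
// bytes assembled from the two pages; the decoded instruction lives only
// in the caller-supplied storage, and invalidate_prefetch_q() marks the
// fetch window stale so that cpu_loop will prefetch() again.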

void BX_CPU_C::deliver_NMI(void)
{
  BX_CPU_THIS_PTR nmi_pending = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

void BX_CPU_C::deliver_SMI(void)
{
  BX_CPU_THIS_PTR smi_pending = 1;
  BX_CPU_THIS_PTR async_event = 1;
}
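
// deliver_NMI() and deliver_SMI() merely latch the request; actual
// delivery happens at the next instruction boundary, when cpu_loop sees
// async_event set and calls handleAsyncEvent().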

#if BX_EXTERNAL_DEBUGGER
void BX_CPU_C::ask(int level, const char *prefix, const char *fmt, va_list ap)
{
  char buf1[1024];
  vsnprintf (buf1, sizeof(buf1), fmt, ap); // bounded, so a long message cannot overflow buf1
  printf ("%s %s\n", prefix, buf1);
  trap_debugger(1);
}
#endif

#if BX_DEBUGGER
extern unsigned dbg_show_mask;

bx_bool BX_CPU_C::dbg_check_begin_instr_bpoint(void)
{ 
  Bit64u tt = bx_pc_system.time_ticks();
  bx_address debug_eip = BX_CPU_THIS_PTR prev_eip;
  Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

  BX_CPU_THIS_PTR guard_found.cs  = cs;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr = 
    BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + debug_eip;
  BX_CPU_THIS_PTR guard_found.is_32bit_code = 
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();

  // mode switch breakpoint
  // Instructions which generate exceptions never reach the end of the
  // loop, due to a long jump.  That's why we check at the start of the
  // instruction.  The downside is that we show the instruction about to
  // be executed (not the one generating the mode switch).
  if (BX_CPU_THIS_PTR mode_break && 
     (BX_CPU_THIS_PTR dbg_cpu_mode != BX_CPU_THIS_PTR get_cpu_mode()))
  {
    BX_INFO(("[" FMT_LL "d] Caught mode switch breakpoint, switching from '%s' to '%s'",
        bx_pc_system.time_ticks(), cpu_mode_string(BX_CPU_THIS_PTR dbg_cpu_mode),
        cpu_mode_string(BX_CPU_THIS_PTR get_cpu_mode())));
    BX_CPU_THIS_PTR dbg_cpu_mode = BX_CPU_THIS_PTR get_cpu_mode();
    BX_CPU_THIS_PTR stop_reason = STOP_MODE_BREAK_POINT;
    return(1);
  }

  // support for 'show' command in debugger
  if(dbg_show_mask) {
    int rv = bx_dbg_show_symbolic();
    if (rv) return(rv);
  }

  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if BX_DBG_SUPPORT_VIR_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_virtual; i++) {
          if (bx_guard.iaddr.vir[i].enabled &&
             (bx_guard.iaddr.vir[i].cs  == cs) &&
             (bx_guard.iaddr.vir[i].eip == debug_eip))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if BX_DBG_SUPPORT_LIN_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_linear; i++) {
          if (bx_guard.iaddr.lin[i].enabled && 
             (bx_guard.iaddr.lin[i].addr == BX_CPU_THIS_PTR guard_found.laddr))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if BX_DBG_SUPPORT_PHY_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      bx_phy_address phy;
      bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
      // The "guard_found.icount!=0" condition allows you to step or
      // continue beyond a breakpoint.  Bryce tried removing it once,
      // and once you get to a breakpoint you are stuck there forever.
      // Not pretty.
      if (valid && ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick)))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_physical; i++) {
          if (bx_guard.iaddr.phy[i].enabled && (bx_guard.iaddr.phy[i].addr == phy))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
  }

  return(0); // not on a breakpoint
}
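
// The time_tick/icount bookkeeping above is what lets the user step or
// continue off a breakpoint: a guard re-fires only once the instruction
// count or the tick count has moved past the value recorded when that
// guard last fired.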

bx_bool BX_CPU_C::dbg_check_end_instr_bpoint(void)
{
  bx_address debug_eip = BX_CPU_THIS_PTR prev_eip;
  BX_CPU_THIS_PTR guard_found.icount++;
  BX_CPU_THIS_PTR guard_found.cs  = 
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr = 
    BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_CS) + debug_eip;
  BX_CPU_THIS_PTR guard_found.is_32bit_code = 
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();

  // Check if we hit read/write or time breakpoint
  if (BX_CPU_THIS_PTR break_point) {
    switch (BX_CPU_THIS_PTR break_point) {
    case BREAK_POINT_TIME:
      BX_INFO(("[" FMT_LL "d] Caught time breakpoint", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_READ:
      BX_INFO(("[" FMT_LL "d] Caught read watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_WRITE:
      BX_INFO(("[" FMT_LL "d] Caught write watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
      return(1); // on a breakpoint
    default:
      BX_PANIC(("Weird break point condition"));
    }
  }

#if BX_MAGIC_BREAKPOINT
  if (BX_CPU_THIS_PTR magic_break) {
    BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
    BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
    return(1); // on a breakpoint
  }
#endif

  // convenient point to see if user typed Ctrl-C
  if (bx_guard.interrupt_requested &&
     (bx_guard.guard_for & BX_DBG_GUARD_CTRL_C))
  {
    BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_CTRL_C;
    return(1); // Ctrl-C pressed
  }

  return(0); // no breakpoint
}

void BX_CPU_C::dbg_take_irq(void)
{
  // NOTE: similar code in ::cpu_loop()

  if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) {
    if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
      // normal return from setjmp setup
      unsigned vector = DEV_pic_iac(); // may set INTR with next interrupt
      BX_CPU_THIS_PTR errorno = 0;
      BX_CPU_THIS_PTR EXT = 1; // external event
      BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
      interrupt(vector, 0, 0, 0);
    }
  }
}

void BX_CPU_C::dbg_force_interrupt(unsigned vector)
{
  // Used to force the simulator to take an interrupt, without
  // regard to IF

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
    // normal return from setjmp setup
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; // external event
    BX_CPU_THIS_PTR async_event = 1; // probably don't need this
    interrupt(vector, 0, 0, 0);
  }
}
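
// In both helpers above, the setjmp() guards against a longjmp out of
// exception(): if delivering the interrupt itself faults (a bad IDT
// entry, for instance), control comes back here rather than unwinding
// into cpu_loop.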

void BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  if (BX_HRQ) {
    BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
    DEV_dma_raise_hlda();
  }
}

#endif  // #if BX_DEBUGGER