File: head.S (memtest86 3.5-2.2)

/*
 *  linux/boot/head.S
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 *  1-Jan-96 Modified by Chris Brady for use as a boot/loader for MemTest-86.
 *  Set up the memory management for flat, non-paged linear addressing.
 *  17 May 2004 : Added X86_PWRCAP for AMD64 (Memtest86+ - Samuel D.)
 */

.text
#define __ASSEMBLY__
#include "defs.h"
#include "config.h"
#include "test.h"

/*
 * References to members of the boot_cpu_data structure.
 */
#define CPU_PARAMS	cpu_id
#define X86		0
#define X86_MODEL	1
#define X86_MASK	2
#define X86_CPUID	4
#define X86_CAPABILITY	8
#define X86_VENDOR_ID	12
#define X86_CACHE	24
#define X86_PWRCAP 40
#define X86_EXT	44
#define X86_FFL	48

	.code32
	.globl startup_32
startup_32:
	cld
	cli

	/* Ensure I have a boot_stack pointer */
	testl	%esp, %esp
	jnz 0f
	movl	$(LOW_TEST_ADR + _GLOBAL_OFFSET_TABLE_), %esp
	leal	boot_stack_top@GOTOFF(%esp), %esp
0:

	/* Load the GOT pointer */
	call	0f
0:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-0b], %ebx
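	/* The call/pop pair above is the usual 32-bit PIC idiom: it
	 * loads the runtime address of label 0 into %ebx, then adds the
	 * link-time offset so %ebx points at the GOT.
	 */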

	/* Pick the appropriate boot_stack address */
	leal	boot_stack_top@GOTOFF(%ebx), %esp

	/* Reload all of the segment registers */
	leal	gdt@GOTOFF(%ebx), %eax
	movl	%eax, 2 + gdt_descr@GOTOFF(%ebx)
	lgdt	gdt_descr@GOTOFF(%ebx)
	leal	flush@GOTOFF(%ebx), %eax
	pushl	$KERNEL_CS
	pushl	%eax
	lret
flush:	movl	$KERNEL_DS, %eax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss

/*
 *  Zero BSS
 */
	cmpl	$1, zerobss@GOTOFF(%ebx)
	jnz	zerobss_done
	xorl	%eax, %eax
	leal	_bss@GOTOFF(%ebx), %edi
	leal	_end@GOTOFF(%ebx), %ecx
	subl	%edi, %ecx
1:	movl	%eax, (%edi)
	addl	$4, %edi
	subl	$4, %ecx
	jnz	1b
	movl	$0, zerobss@GOTOFF(%ebx)
zerobss_done:

/*
 * Clear the video display
 */
	cmpl	$1, clear_display@GOTOFF(%ebx)
	jnz	clear_display_done
	movw	$0x0720, %ax
	movl	$0xb8000, %edi
	movl	$0xc0000, %ecx
1:	movw	%ax, (%edi)
	addl	$2, %edi
	cmpl	%ecx, %edi
	jnz	1b
	movl	$0, clear_display@GOTOFF(%ebx)
clear_display_done:


/*
 * Set up the exception handlers
 */
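/* Each 8-byte IDT gate stores the handler offset split across its
 * first and last words, the code-segment selector in the second word,
 * and the type word 0x8E00 (32-bit interrupt gate, dpl=0, present)
 * in the third.
 */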
	leal	idt@GOTOFF(%ebx), %edi

	/* Install one gate per exception vector, vec0..vec19 */
	.irp	n, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19
	leal	vec\n@GOTOFF(%ebx), %edx
	movl	$(KERNEL_CS << 16), %eax
	movw	%dx, %ax	/* selector = 0x0010 = cs */
	movw	$0x8E00, %dx	/* interrupt gate - dpl=0, present */
	movl	%eax, (%edi)
	movl	%edx, 4(%edi)
	addl	$8, %edi
	.endr

	/* Now that it is initialized, load the interrupt descriptor table */
	leal	idt@GOTOFF(%ebx), %eax
	movl	%eax, 2 + idt_descr@GOTOFF(%ebx)
	lidt	idt_descr@GOTOFF(%ebx)

/* Find out the CPU type */

	leal	cpu_id@GOTOFF(%ebx), %esi
	movl	%ebx, %edi

	movl	$-1, X86_CPUID(%esi)	#  -1 for no CPUID initially

/* check if it is 486 or 386. */

	movl	$3, X86(%esi)	# at least 386
	pushfl			# push EFLAGS
	popl	%eax		# get EFLAGS
	movl	%eax, %ecx	# save original EFLAGS
	xorl	$0x40000, %eax	# flip AC bit in EFLAGS
	pushl	%eax		# copy to EFLAGS
	popfl			# set EFLAGS
	pushfl			# get new EFLAGS
	popl	%eax		# put it in eax
	xorl	%ecx, %eax	# change in flags
	andl	$0x40000, %eax	# check if AC bit changed
	je	id_done

	movl	$4, X86(%esi)	# at least 486
	movl	%ecx, %eax
	xorl	$0x200000, %eax	# check ID flag
	pushl	%eax
	popfl			# if we are on a straight 486DX, SX, or
	pushfl			# 487SX we can't change it
	popl	%eax
	xorl	%ecx, %eax
	pushl	%ecx		# restore original EFLAGS
	popfl
	andl	$0x200000, %eax
	jne	have_cpuid

	/* Test for Cyrix CPU types */
	xorw	%ax, %ax	# clear ax
	sahf			# clear flags
	movw	$5, %ax
	movw	$2, %bx
	div	%bl		# do operation that does not change flags
	lahf			# get flags
	cmp	$2, %ah		# check for change in flags
	jne	id_done		# if not Cyrix
	movl	$2, X86(%esi)	# Use two to identify as Cyrix
	jmp	id_done

have_cpuid:
	/* get vendor info */
	xorl	%eax, %eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl	%eax, X86_CPUID(%esi)		# save CPUID level
	movl	%ebx, X86_VENDOR_ID(%esi)	# first 4 chars
	movl	%edx, X86_VENDOR_ID+4(%esi)	# next 4 chars
	movl	%ecx, X86_VENDOR_ID+8(%esi)	# last 4 chars

	orl	%eax, %eax			# do we have processor info as well?
	je	id_done

	movl	$1, %eax		# Use the CPUID instruction to get CPU type
	cpuid


	#
	# CDH start
	# Check FPU, initialize if present
	#
	testl	$1, %edx	# FPU available?
	jz	no_fpu
	finit

no_fpu:
	#
	# CDH end
	#

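	/*
	 * CPUID leaf 1 left the processor signature in %eax:
	 * bits 3:0 stepping (mask), bits 7:4 model, bits 11:8 family.
	 * Unpack those fields into the cpu_id structure.
	 */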
	movl  %eax, X86_EXT(%esi) # save the full CPUID(1) signature in X86_EXT
	movl  %ecx, X86_FFL(%esi) # save ECX Feature Flags to X86_FFL
	movb	%al, %cl		# save reg for future use
	andb	$0x0f, %ah		# mask processor family
	movb	%ah, X86(%esi)
	andb	$0xf0, %al		# mask model
	shrb	$4, %al
	movb	%al, X86_MODEL(%esi)
	andb	$0x0f, %cl		# mask the mask (stepping) revision
	movb	%cl, X86_MASK(%esi)
	movl	%edx, X86_CAPABILITY(%esi)

	movl	$0, X86_CACHE(%esi)
	movl	$0, X86_CACHE+4(%esi)
	movl	$0, X86_CACHE+8(%esi)
	movl	$0, X86_CACHE+12(%esi)

	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x6c65746e,%eax	# Is this an Intel CPU? "GenuineIntel"
	jne	not_intel
	movb	%bl, X86_PWRCAP(%esi) 	# On Intel, reuse the AMD PWRCAP slot to store the BrandID
	movl	$2, %eax		# Use the CPUID instruction to get cache info
	cpuid
	movl	%eax, X86_CACHE(%esi)
	movl	%ebx, X86_CACHE+4(%esi)
	movl	%ecx, X86_CACHE+8(%esi)
	movl	%edx, X86_CACHE+12(%esi)
	jmp	id_done

not_intel:
	movl	X86_VENDOR_ID+8(%esi),%eax
	cmpl	$0x444d4163, %eax	# Is this an AMD CPU? "AuthenticAMD"
	jne	not_amd

	movl	$0x80000005, %eax	# Use the CPUID instruction to get cache info
	cpuid
	movl	%ecx, X86_CACHE(%esi)
	movl	%edx, X86_CACHE+4(%esi)
	movl	$0x80000006,%eax	# Use the CPUID instruction to get cache info
	cpuid
	movl	%ecx,X86_CACHE+8(%esi)
	movl	%edx,X86_CACHE+12(%esi)
	movl	$0x80000007,%eax	# Use the CPUID instruction to get AMD Powercap
	cpuid
	movl	%edx,X86_PWRCAP(%esi)

not_amd:
	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x3638784D, %eax	# Is this a Transmeta CPU? "GenuineTMx86"
	jne	not_transmeta

	movl	$0x80000000, %eax	# Use the CPUID instruction to check for cache info
	cpuid
	cmp	$6, %al			# Is cache info available?
	jb	id_done

	movl	$0x80000005, %eax	# Use the CPUID instruction to get L1 cache info
	cpuid
	movl	%ecx, X86_CACHE(%esi)
	movl	%edx, X86_CACHE+4(%esi)
	movl	$0x80000006, %eax	# Use the CPUID instruction to get L2 cache info
	cpuid
	movl	%ecx, X86_CACHE+8(%esi)

not_transmeta:
	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x64616574, %eax	# Is this a Via/Cyrix CPU? "CyrixInstead"
	jne	not_cyrix

	movl	X86_CPUID(%esi), %eax	# get CPUID level
	cmpl	$2, %eax		# Is there cache information available ?
	jne	id_done

	movl	$2, %eax		# Use the CPUID instruction to get cache info
	cpuid
	movl	%edx, X86_CACHE(%esi)

not_cyrix:
	movl	X86_VENDOR_ID+8(%esi), %eax
	cmpl	$0x736C7561, %eax	# Is this a Via/Centaur CPU "CentaurHauls"
	jne	not_centaur

	movl	$0x80000000, %eax	# Use the CPUID instruction to check for cache info
	cpuid
	cmp	$6, %al			# Is cache info available?
	jb	id_done

	movl	$0x80000005, %eax	# Use the CPUID instruction to get L1 cache info
	cpuid
	movl	%ecx, X86_CACHE(%esi)
	movl	%edx, X86_CACHE+4(%esi)
	movl	$0x80000006, %eax	# Use the CPUID instruction to get L2 cache info
	cpuid
	movl	%ecx, X86_CACHE+8(%esi)


not_centaur:
id_done:
	movl	%edi, %ebx	/* Restore GOT pointer */

	cmpl	$1, do_reloc@GOTOFF(%ebx)
	jnz	reloc_done
	leal	_dl_start@GOTOFF(%ebx), %eax
	call	*%eax
	movl	$0, do_reloc@GOTOFF(%ebx)
reloc_done:
	call	test_start
	/* In case we return, simulate an exception */
	pushfl
	pushl	%cs
	call	0f
0:	pushl	$0 /* error code */
	pushl	$257 /* vector */
	jmp	int_hand

vec0:
	pushl	$0 /* error code */
	pushl	$0 /* vector */
	jmp int_hand
vec1:
	pushl	$0 /* error code */
	pushl	$1 /* vector */
	jmp int_hand

vec2:
	pushl	$0 /* error code */
	pushl	$2 /* vector */
	jmp int_hand

vec3:
	pushl	$0 /* error code */
	pushl	$3 /* vector */
	jmp	int_hand

vec4:
	pushl	$0 /* error code */
	pushl	$4 /* vector */
	jmp	int_hand

vec5:
	pushl	$0 /* error code */
	pushl	$5 /* vector */
	jmp	int_hand

vec6:
	pushl	$0 /* error code */
	pushl	$6 /* vector */
	jmp	int_hand

vec7:
	pushl	$0 /* error code */
	pushl	$7 /* vector */
	jmp	int_hand

vec8:
	/* error code */
	pushl	$8 /* vector */
	jmp	int_hand

vec9:
	pushl	$0 /* error code */
	pushl	$9 /* vector */
	jmp int_hand

vec10:
	/* error code */
	pushl	$10 /* vector */
	jmp	int_hand

vec11:
	/* error code */
	pushl	$11 /* vector */
	jmp	int_hand

vec12:
	/* error code */
	pushl	$12 /* vector */
	jmp	int_hand

vec13:
	/* error code */
	pushl	$13 /* vector */
	jmp	int_hand

vec14:
	/* error code */
	pushl	$14 /* vector */
	jmp	int_hand

vec15:
	pushl	$0 /* error code */
	pushl	$15 /* vector */
	jmp	int_hand

vec16:
	pushl	$0 /* error code */
	pushl	$16 /* vector */
	jmp	int_hand

vec17:
	/* error code */
	pushl	$17 /* vector */
	jmp	int_hand

vec18:
	pushl	$0 /* error code */
	pushl	$18 /* vector */
	jmp	int_hand

vec19:
	pushl	$0 /* error code */
	pushl	$19 /* vector */
	jmp	int_hand

int_hand:
	pushl	%eax
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	pushl	%edi
	pushl	%esi
	pushl	%ebp

	/* original boot_stack pointer */
	leal	20(%esp), %eax
	pushl	%eax

	pushl	%esp /* pointer to structure on the boot_stack */
	pushl	%ds  
	pushl	%ss 
	call	inter
	addl	$8, %esp

	popl	%ebp
	popl	%esi
	popl	%edi
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	iret

/*
 * The interrupt descriptor table has room for 20 entries
 */
.align 4
.word 0
idt_descr:
	.word 20*8-1	       # idt contains 20 entries
	.long 0

idt:
	.fill 20,8,0	       # idt is uninitialized

gdt_descr:
	.word gdt_end - gdt - 1
	.long 0

.align 4
.globl gdt, gdt_end
gdt:
	.quad 0x0000000000000000	/* NULL descriptor */
	.quad 0x0000000000000000	/* not used */
	.quad 0x00cf9a000000ffff	/* 0x10 main 4gb code at 0x000000 */
	.quad 0x00cf92000000ffff	/* 0x18 main 4gb data at 0x000000 */

	.word	0xFFFF				# 16bit 64KB - (0x10000*1 = 64KB)
	.word	0				# base address, patched at runtime (see query_pcbios)
	.byte	0x00, 0x9b			# code read/exec/accessed
	.byte	0x00, 0x00			# granularity = bytes


	.word	0xFFFF				# 16bit 64KB - (0x10000*1 = 64KB)
	.word	0				# base address, patched at runtime (see query_pcbios)
	.byte	0x00, 0x93			# data read/write/accessed
	.byte	0x00, 0x00			# granularity = bytes

gdt_end:

.data

.macro ptes64 start, count=64
.quad \start + 0x0000000 + 0xE3
.quad \start + 0x0200000 + 0xE3
.quad \start + 0x0400000 + 0xE3
.quad \start + 0x0600000 + 0xE3
.quad \start + 0x0800000 + 0xE3
.quad \start + 0x0A00000 + 0xE3
.quad \start + 0x0C00000 + 0xE3
.quad \start + 0x0E00000 + 0xE3
.if \count-1
ptes64 "(\start+0x01000000)",\count-1
.endif
.endm
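/* Each ptes64 expansion emits eight 2MB page-directory entries
 * (16MB) per iteration and recurses \count times, so the default
 * count of 64 maps 1GB. The 0xE3 low bits mark each PDE Present,
 * Writable, Accessed, Dirty, and Page-Size (2MB page).
 */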

.macro maxdepth depth=1
.if \depth-1
maxdepth \depth-1
.endif
.endm

maxdepth

.balign 4096
.globl pd0
pd0:
	ptes64 0x0000000000000000

.balign 4096
.globl pd1
pd1:
	ptes64 0x0000000040000000

.balign 4096
.globl pd2
pd2:
	ptes64 0x0000000080000000

.balign 4096
.globl pd3
pd3:
	ptes64 0x00000000C0000000

.balign 4096
.globl pdp
pdp:
	.long pd0 + 1
	.long 0
	.long pd1 + 1
	.long 0

	.long pd2 + 1
	.long 0

	.long pd3 + 1
	.long 0
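	/* pdp is a PAE page-directory-pointer table: four 64-bit entries,
	 * each pointing at one of pd0..pd3 with the low bit (the "+ 1")
	 * marking the entry present, covering 4GB with 2MB pages.
	 */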
.previous

#define RSTART startup_32

	.globl query_pcbios
query_pcbios:
	/* Save the caller save registers */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	call	1f
1:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-1b], %ebx

	/* Compute the reloc address */
	leal	RSTART@GOTOFF(%ebx), %esi

	/* Fixup real code pointer */
	movl	%esi, %eax
	shrl	$4, %eax
	movw	%ax, 2 + realptr@GOTOFF(%ebx)
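	/* A real-mode %cs holds its segment base >> 4, so the word
	 * patched into realptr+2 becomes the segment of the far jump
	 * back into this relocated code.
	 */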

	/* Fixup protected code pointer */
	leal	prot@GOTOFF(%ebx), %eax
	movl	%eax, protptr@GOTOFF(%ebx)

	/* Compute the gdt fixup */
	movl	%esi, %eax
	shll	$16, %eax	# Base low

	movl	%esi, %ecx
	shrl	$16, %ecx
	andl	$0xff, %ecx

	movl	%esi, %edx
	andl	$0xff000000, %edx
	orl	%edx, %ecx
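	/* A descriptor scatters its 32-bit base: bits 15:0 in the high
	 * word of the first dword, bits 23:16 in the low byte of the
	 * second dword, and bits 31:24 in its top byte. %eax now carries
	 * the first-dword patch and %ecx the second-dword patch.
	 */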

	/* Fixup the gdt */
	andl	$0x0000ffff, REAL_CS + 0 + gdt@GOTOFF(%ebx)
	orl	%eax,        REAL_CS + 0 + gdt@GOTOFF(%ebx)
	andl	$0x00ffff00, REAL_CS + 4 + gdt@GOTOFF(%ebx)
	orl	%ecx,        REAL_CS + 4 + gdt@GOTOFF(%ebx)
	andl	$0x0000ffff, REAL_DS + 0 + gdt@GOTOFF(%ebx)
	orl	%eax,        REAL_DS + 0 + gdt@GOTOFF(%ebx)
	andl	$0x00ffff00, REAL_DS + 4 + gdt@GOTOFF(%ebx)
	orl	%ecx,        REAL_DS + 4 + gdt@GOTOFF(%ebx)

	/* Fixup the gdt_descr */
	leal	gdt@GOTOFF(%ebx), %eax
	movl	%eax, 2 + gdt_descr@GOTOFF(%ebx)

	lidt	idt_real@GOTOFF(%ebx)

	/* Don't disable the a20 line */

	/* Load 16bit data segments, to ensure the segment limits are set */
	movl	$REAL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs

	/* Compute the boot_stack base */
	leal	boot_stack@GOTOFF(%ebx), %ecx
	/* Compute the address of meminfo */
	leal	mem_info@GOTOFF(%ebx), %edi

	/* switch to 16bit mode */
	ljmp	$REAL_CS, $1f - RSTART
1:
	.code16
	/* Disable Paging and protected mode */
	/* clear the PG & PE bits of CR0 */
	movl	%cr0,%eax
	andl	$~((1 << 31)|(1<<0)),%eax
	movl	%eax,%cr0

	/* make intersegment jmp to flush the processor pipeline
	 * and reload %cs:%eip (to clear upper 16 bits of %eip).
	 */
	ljmp	*(realptr - RSTART)
real:
	/* we are in real mode now
	 * set up the real mode segment registers : %ds, %ss, %es, %gs, %fs
	 */
	movw	%cs, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss

	/* Adjust the boot_stack pointer */
	movl	%ecx, %eax
	shrl	$4, %eax
	movw	%ax, %ss
	subl	%ecx, %esp

	/* Save my base pointer */
	pushl	%ebx

	/* Setup %ds to point to my data area */
	shrl	$4, %edi
	movl	%edi, %ds

	/* Enable interrupts or BIOSes go crazy */
	sti

# Get memory size (extended mem, kB)

#define SMAP	0x534d4150

	xorl	%eax, %eax
	movl	%eax, (E88)
	movl	%eax, (E801)
	movl	%eax, (E820NR)

# Try three different memory detection schemes.  First, try
# e820h, which lets us assemble a memory map, then try e801h,
# which returns a 32-bit memory size, and finally 88h, which
# returns 0-64m

# method E820H:
# the memory map from hell.  e820h returns memory classified into
# a whole bunch of different types, and allows memory holes and
# everything.  We scan through this memory map and build a list
# of the first 32 memory areas, which we return at [E820MAP].
# This is documented at http://www.teleport.com/~acpi/acpihtml/topic245.htm
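# Each e820 record the BIOS writes at %es:%di is 20 bytes:
# an 8-byte base address, an 8-byte length and a 4-byte type
# (type 1 = usable RAM).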

meme820:
	xorl	%ebx, %ebx			# continuation counter
	movw	$E820MAP, %di			# point into the whitelist
						# so we can have the bios
						# directly write into it.

jmpe820:
	movl	$0x0000e820, %eax		# e820, upper word zeroed
	movl	$SMAP, %edx			# ascii 'SMAP'
	movl	$20, %ecx			# size of the e820rec
	pushw	%ds				# data record.
	popw	%es
	int	$0x15				# make the call
	jc	bail820				# fall to e801 if it fails

	cmpl	$SMAP, %eax			# check the return is `SMAP'
	jne	bail820				# fall to e801 if it fails

#	cmpl	$1, 16(%di)			# is this usable memory?
#	jne	again820

	# If this is usable memory, we save it by simply advancing %di by
	# sizeof(e820rec).
	#
good820:
	movb	(E820NR), %al			# up to 32 entries
	cmpb	$E820MAX, %al
	jnl	bail820

	incb	(E820NR)
	movw	%di, %ax
	addw	$E820ENTRY_SIZE, %ax
	movw	%ax, %di
again820:
	cmpl	$0, %ebx			# check to see if
	jne	jmpe820				# %ebx is set to EOF
bail820:


# method E801H:
# memory size is in 1k chunksizes, to avoid confusing loadlin.
# we store the 0xe801 memory size in a completely different place,
# because it will most likely be longer than 16 bits.

meme801:
	stc					# fix to work around buggy
	xorw	%cx,%cx				# BIOSes which don't clear/set
	xorw	%dx,%dx				# carry on pass/error of
						# e801h memory size call
						# or merely pass cx,dx though
						# without changing them.
	movw	$0xe801, %ax
	int	$0x15
	jc	mem88

	cmpw	$0x0, %cx			# Kludge to handle BIOSes
	jne	e801usecxdx			# which report their extended
	cmpw	$0x0, %dx			# memory in AX/BX rather than
	jne	e801usecxdx			# CX/DX.  The spec I have read
	movw	%ax, %cx			# seems to indicate AX/BX
	movw	%bx, %dx			# are more reasonable anyway...

e801usecxdx:
	andl	$0xffff, %edx			# clear sign extend
	shll	$6, %edx			# and go from 64k to 1k chunks
	movl	%edx, (E801)			# store extended memory size
	andl	$0xffff, %ecx			# clear sign extend
 	addl	%ecx, (E801)			# and add lower memory into
						# total size.

# Ye Olde Traditional Methode.  Returns the memory size (up to 16mb or
# 64mb, depending on the bios) in ax.
mem88:

	movb	$0x88, %ah
	int	$0x15
	movw	%ax, (E88)

#ifdef APM_OFF
# check for APM BIOS
	movw	$0x5300, %ax    # APM BIOS installation check
	xorw	%bx, %bx
	int	$0x15
	jc	done_apm_bios   # error -> no APM BIOS

	cmpw	$0x504d, %bx    # check for "PM" signature
	jne	done_apm_bios   # no signature -> no APM BIOS

	movw	$0x5304, %ax    # Disconnect first just in case
	xorw	%bx, %bx
	int	$0x15           # ignore return code

	movw	$0x5301, %ax    # Real Mode connect
	xorw	%bx, %bx
	int	$0x15
	jc	done_apm_bios   # error

	movw	$0x5308, %ax    # Disable APM
	mov	$0xffff, %bx
	xorw	%cx, %cx
	int	$0x15

done_apm_bios:
#endif

	/* O.k., the BIOS query is done; switch back to protected mode */
	cli

	/* Restore my saved variables */
	popl	%ebx

	/* Get a convenient %ds */
	movw	%cs, %ax
	movw	%ax, %ds

	/* Load the global descriptor table */
	addr32 lgdt	gdt_descr - RSTART

	/* Turn on protected mode */
	/* Set the PE bit in CR0 */
	movl	%cr0,%eax
	orl	$(1<<0),%eax
	movl	%eax,%cr0

	/* flush the prefetch queue, and reload %cs:%eip */
	data32 ljmp	*(protptr - RSTART)
prot:
	.code32
	/* Reload other segment registers */
	movl	$KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss

	/* Adjust the boot_stack pointer */
	leal	boot_stack@GOTOFF(%ebx), %eax
	addl	%eax, %esp

	/* Restore the caller saved registers */
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	movl	$1, %eax
	ret

realptr:
	.word	real - RSTART
	.word	0x0000
protptr:
	.long	0
	.long	KERNEL_CS

idt_real:
	.word	0x400 - 1			# idt limit (256 entries)
	.word	0, 0				# idt base = 0L
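/* In real mode the vector table sits at linear address 0 and holds
 * 256 four-byte vectors, hence the 0x400 - 1 limit.
 */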

/* _ap_trampoline_start is the entry point for CPUs other than the
 * bootstrap CPU. The code between _ap_trampoline_start and
 * _ap_trampoline_protmode is copied to BootCodeStart(0x9000).
 * The ljmp after turning on CR0.PE jumps to the relocatable code,
 * which usually resides at 0x10000 + _ap_trampoline_protmode.
 *
 * The trampoline code uses a temporary GDT. The entries of this
 * temporary GDT must match the first few entries of the GDT used
 * by the relocatable memtest code (see the 'gdt' symbol in this file).
 */
	.globl _ap_trampoline_start
	.globl _ap_trampoline_protmode
	.code16
_ap_trampoline_start:
	lgdt    0x0 /* will be fixed up later, see smp.c:BootAP()*/
	movl	%cr0, %eax
	orl	$1, %eax
	movl	%eax, %cr0
	data32 ljmp    $KERNEL_CS, $_ap_trampoline_protmode
_ap_trampoline_protmode:
	.code32
	movw	$KERNEL_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	movl	$(LOW_TEST_ADR + _GLOBAL_OFFSET_TABLE_), %esp
	leal	boot_stack_top@GOTOFF(%esp), %esp
	pushl   $0
	popf
	call    startup_32
	/* if we ever return, we'll just loop forever */
	cli
2:	hlt
	jmp 2b	
.data
zerobss:	.long	1
clear_display:	.long	1
	.globl do_reloc
/* After we relocate the code, we need to patch up the relocation
 * entries, but only once. Since this piece of code (head.S) is
 * executed by every CPU, there is no need for each CPU to repeat
 * the relocation; let only the BSP do it.
 */
do_reloc:	.long	1
.previous
.data
.balign 16
	.globl mem_info
mem_info:
	. = . + MEMINFO_SIZE
.previous
.bss
.balign 16
boot_stack:
	.globl boot_stack
	. = . + 4096
boot_stack_top:
	.globl boot_stack_top
.previous