1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627
|
--- a/src/hotspot/cpu/zero/vm_version_zero.cpp
+++ b/src/hotspot/cpu/zero/vm_version_zero.cpp
@@ -116,8 +116,9 @@ void VM_Version::initialize() {
}
// Enable error context decoding on known platforms
-#if defined(AMD64) || defined(ARM) || defined(AARCH64) || \
- defined(PPC) || defined(RISCV) || defined(S390)
+#if defined(IA32) || defined(AMD64) || defined(ARM) || \
+ defined(AARCH64) || defined(PPC) || defined(RISCV) || \
+ defined(S390)
if (FLAG_IS_DEFAULT(DecodeErrorContext)) {
FLAG_SET_DEFAULT(DecodeErrorContext, true);
}
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -231,6 +231,8 @@ size_t os::rss() {
// Cpu architecture string
#if defined(ZERO)
static char cpu_arch[] = ZERO_LIBARCH;
+#elif defined(IA32)
+static char cpu_arch[] = "i386";
#elif defined(AMD64)
static char cpu_arch[] = "amd64";
#elif defined(ARM)
@@ -1009,6 +1011,7 @@ bool os::dll_address_to_library_name(add
// same architecture as Hotspot is running on
void *os::Bsd::dlopen_helper(const char *filename, int mode, char *ebuf, int ebuflen) {
+#ifndef IA32
bool ieee_handling = IEEE_subnormal_handling_OK();
if (!ieee_handling) {
Events::log_dll_message(nullptr, "IEEE subnormal handling check failed before loading %s", filename);
@@ -1031,9 +1034,14 @@ void *os::Bsd::dlopen_helper(const char
// numerical "accuracy", but we need to protect Java semantics first
// and foremost. See JDK-8295159.
+ // This workaround is ineffective on IA32 systems because the MXCSR
+ // register (which controls flush-to-zero mode) is not stored in the
+ // legacy fenv.
+
fenv_t default_fenv;
int rtn = fegetenv(&default_fenv);
assert(rtn == 0, "fegetenv must succeed");
+#endif // IA32
void* result;
JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
@@ -1053,6 +1061,7 @@ void *os::Bsd::dlopen_helper(const char
} else {
Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
log_info(os)("shared library load of %s was successful", filename);
+#ifndef IA32
if (! IEEE_subnormal_handling_OK()) {
// We just dlopen()ed a library that mangled the floating-point
// flags. Silently fix things now.
@@ -1077,6 +1086,7 @@ void *os::Bsd::dlopen_helper(const char
assert(false, "fesetenv didn't work");
}
}
+#endif // IA32
}
return result;
@@ -1185,7 +1195,9 @@ void * os::dll_load(const char *filename
{EM_68K, EM_68K, ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}
};
- #if (defined AMD64)
+ #if (defined IA32)
+ static Elf32_Half running_arch_code=EM_386;
+ #elif (defined AMD64)
static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined __powerpc64__)
static Elf32_Half running_arch_code=EM_PPC64;
@@ -1207,7 +1219,7 @@ void * os::dll_load(const char *filename
static Elf32_Half running_arch_code=EM_68K;
#else
#error Method os::dll_load requires that one of following is defined:\
- AMD64, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
+ IA32, AMD64, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
#endif
// Identify compatibility class for VM's architecture and library's architecture
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -1795,7 +1795,9 @@ void * os::dll_load(const char *filename
{EM_LOONGARCH, EM_LOONGARCH, ELFCLASS64, ELFDATA2LSB, (char*)"LoongArch"},
};
-#if (defined AMD64)
+#if (defined IA32)
+ static Elf32_Half running_arch_code=EM_386;
+#elif (defined AMD64) || (defined X32)
static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined __sparc) && (defined _LP64)
static Elf32_Half running_arch_code=EM_SPARCV9;
@@ -1829,7 +1831,7 @@ void * os::dll_load(const char *filename
static Elf32_Half running_arch_code=EM_LOONGARCH;
#else
#error Method os::dll_load requires that one of following is defined:\
- AARCH64, ALPHA, ARM, AMD64, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
+ AARCH64, ALPHA, ARM, AMD64, IA32, LOONGARCH64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, RISCV, S390, SH, __sparc
#endif
// Identify compatibility class for VM's architecture and library's architecture
@@ -1891,6 +1893,7 @@ void * os::dll_load(const char *filename
}
void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
+#ifndef IA32
bool ieee_handling = IEEE_subnormal_handling_OK();
if (!ieee_handling) {
Events::log_dll_message(nullptr, "IEEE subnormal handling check failed before loading %s", filename);
@@ -1913,9 +1916,14 @@ void * os::Linux::dlopen_helper(const ch
// numerical "accuracy", but we need to protect Java semantics first
// and foremost. See JDK-8295159.
+ // This workaround is ineffective on IA32 systems because the MXCSR
+ // register (which controls flush-to-zero mode) is not stored in the
+ // legacy fenv.
+
fenv_t default_fenv;
int rtn = fegetenv(&default_fenv);
assert(rtn == 0, "fegetenv must succeed");
+#endif // IA32
void* result;
JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
@@ -1935,6 +1943,7 @@ void * os::Linux::dlopen_helper(const ch
} else {
Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
log_info(os)("shared library load of %s was successful", filename);
+#ifndef IA32
// Quickly test to make sure subnormals are correctly handled.
if (! IEEE_subnormal_handling_OK()) {
// We just dlopen()ed a library that mangled the floating-point flags.
@@ -1960,6 +1969,7 @@ void * os::Linux::dlopen_helper(const ch
assert(false, "fesetenv didn't work");
}
}
+#endif // IA32
}
return result;
}
@@ -2603,7 +2613,7 @@ void os::print_memory_info(outputStream*
// before "flags" so if we find a second "model name", then the
// "flags" field is considered missing.
static bool print_model_name_and_flags(outputStream* st, char* buf, size_t buflen) {
-#if defined(AMD64)
+#if defined(IA32) || defined(AMD64)
// Other platforms have less repetitive cpuinfo files
FILE *fp = os::fopen("/proc/cpuinfo", "r");
if (fp) {
@@ -2662,7 +2672,7 @@ static void print_sys_devices_cpu_info(o
}
// we miss the cpufreq entries on Power and s390x
-#if defined(AMD64)
+#if defined(IA32) || defined(AMD64)
_print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st);
_print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st);
_print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st);
@@ -2715,7 +2725,7 @@ void os::jfr_report_memory_info() {
#endif // INCLUDE_JFR
-#if defined(AMD64)
+#if defined(AMD64) || defined(IA32) || defined(X32)
const char* search_string = "model name";
#elif defined(M68K)
const char* search_string = "CPU";
@@ -2768,6 +2778,8 @@ void os::get_summary_cpu_info(char* cpui
strncpy(cpuinfo, "x86_64", length);
#elif defined(ARM) // Order wrt. AARCH64 is relevant!
strncpy(cpuinfo, "ARM", length);
+#elif defined(IA32)
+ strncpy(cpuinfo, "x86_32", length);
#elif defined(PPC)
strncpy(cpuinfo, "PPC64", length);
#elif defined(RISCV)
@@ -3071,9 +3083,14 @@ int os::Linux::sched_getcpu_syscall(void
unsigned int cpu = 0;
long retval = -1;
-#if defined(AMD64)
- // Unfortunately we have to bring all these macros here from vsyscall.h
- // to be able to compile on old linuxes.
+#if defined(IA32)
+ #ifndef SYS_getcpu
+ #define SYS_getcpu 318
+ #endif
+ retval = syscall(SYS_getcpu, &cpu, nullptr, nullptr);
+#elif defined(AMD64)
+// Unfortunately we have to bring all these macros here from vsyscall.h
+// to be able to compile on old linuxes.
#define __NR_vgetcpu 2
#define VSYSCALL_START (-10UL << 20)
#define VSYSCALL_SIZE 1024
@@ -4446,6 +4463,87 @@ void os::Linux::disable_numa(const char*
FLAG_SET_ERGO(UseNUMAInterleaving, false);
}
+#if defined(IA32) && !defined(ZERO)
+/*
+ * Work-around (execute code at a high address) for broken NX emulation using CS limit,
+ * Red Hat patch "Exec-Shield" (IA32 only).
+ *
+ * Map and execute at a high VA to prevent CS lazy updates race with SMP MM
+ * invalidation.Further code generation by the JVM will no longer cause CS limit
+ * updates.
+ *
+ * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
+ * @see JDK-8023956
+ */
+static void workaround_expand_exec_shield_cs_limit() {
+ assert(os::Linux::initial_thread_stack_bottom() != nullptr, "sanity");
+ size_t page_size = os::vm_page_size();
+
+ /*
+ * JDK-8197429
+ *
+ * Expand the stack mapping to the end of the initial stack before
+ * attempting to install the codebuf. This is needed because newer
+ * Linux kernels impose a distance of a megabyte between stack
+ * memory and other memory regions. If we try to install the
+ * codebuf before expanding the stack the installation will appear
+ * to succeed but we'll get a segfault later if we expand the stack
+ * in Java code.
+ *
+ */
+ if (os::is_primordial_thread()) {
+ address limit = os::Linux::initial_thread_stack_bottom();
+ if (! DisablePrimordialThreadGuardPages) {
+ limit += StackOverflow::stack_red_zone_size() +
+ StackOverflow::stack_yellow_zone_size();
+ }
+ os::Linux::expand_stack_to(limit);
+ }
+
+ /*
+ * Take the highest VA the OS will give us and exec
+ *
+ * Although using -(pagesz) as an mmap hint works on newer kernels as you would
+ * think, older variants affected by this work-around don't (search forward only).
+ *
+ * On the affected distributions, we understand the memory layout to be:
+ *
+ * TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
+ *
+ * A few pages south main stack will do it.
+ *
+ * If we are embedded in an app other than launcher (initial != main stack),
+ * we don't have much control or understanding of the address space, just let it slide.
+ */
+ char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
+ (StackOverflow::stack_guard_zone_size() + page_size));
+ char* codebuf = os::attempt_reserve_memory_at(hint, page_size, mtThread);
+
+ if (codebuf == nullptr) {
+ // JDK-8197429: There may be a stack gap of one megabyte between
+ // the limit of the stack and the nearest memory region: this is a
+ // Linux kernel workaround for CVE-2017-1000364. If we failed to
+ // map our codebuf, try again at an address one megabyte lower.
+ hint -= 1 * M;
+ codebuf = os::attempt_reserve_memory_at(hint, page_size, mtThread);
+ }
+
+ if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) {
+ return; // No matter, we tried, best effort.
+ }
+
+ log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
+
+ // Some code to exec: the 'ret' instruction
+ codebuf[0] = 0xC3;
+
+ // Call the code in the codebuf
+ __asm__ volatile("call *%0" : : "r"(codebuf));
+
+ // keep the page mapped so CS limit isn't reduced.
+}
+#endif // defined(IA32) && !defined(ZERO)
+
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
@@ -4466,10 +4564,17 @@ jint os::init_2(void) {
return JNI_ERR;
}
+#if defined(IA32) && !defined(ZERO)
+ // Need to ensure we've determined the process's initial stack to
+ // perform the workaround
+ Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+ workaround_expand_exec_shield_cs_limit();
+#else
suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
if (!suppress_primordial_thread_resolution) {
Linux::capture_initial_stack(JavaThread::stack_size_at_create());
}
+#endif
Linux::libpthread_init();
Linux::sched_getcpu_init();
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -3150,6 +3150,7 @@ void os::large_page_init() {
_large_page_size = os::win32::large_page_init_decide_size();
const size_t default_page_size = os::vm_page_size();
if (_large_page_size > default_page_size) {
+#if !defined(IA32)
if (EnableAllLargePageSizesForWindows) {
size_t min_size = GetLargePageMinimum();
@@ -3158,6 +3159,7 @@ void os::large_page_init() {
_page_sizes.add(page_size);
}
}
+#endif
_page_sizes.add(_large_page_size);
}
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
@@ -86,7 +86,9 @@ char* os::non_memory_address_word() {
address os::Posix::ucontext_get_pc(const ucontext_t* uc) {
if (DecodeErrorContext) {
-#if defined(AMD64)
+#if defined(IA32)
+ return (address)uc->uc_mcontext.gregs[REG_EIP];
+#elif defined(AMD64)
return (address)uc->uc_mcontext.gregs[REG_RIP];
#elif defined(ARM)
return (address)uc->uc_mcontext.arm_pc;
@@ -115,7 +117,9 @@ void os::Posix::ucontext_set_pc(ucontext
intptr_t* os::Linux::ucontext_get_sp(const ucontext_t* uc) {
if (DecodeErrorContext) {
-#if defined(AMD64)
+#if defined(IA32)
+ return (intptr_t*)uc->uc_mcontext.gregs[REG_UESP];
+#elif defined(AMD64)
return (intptr_t*)uc->uc_mcontext.gregs[REG_RSP];
#elif defined(ARM)
return (intptr_t*)uc->uc_mcontext.arm_sp;
@@ -140,7 +144,9 @@ intptr_t* os::Linux::ucontext_get_sp(con
intptr_t* os::Linux::ucontext_get_fp(const ucontext_t* uc) {
if (DecodeErrorContext) {
-#if defined(AMD64)
+#if defined(IA32)
+ return (intptr_t*)uc->uc_mcontext.gregs[REG_EBP];
+#elif defined(AMD64)
return (intptr_t*)uc->uc_mcontext.gregs[REG_RBP];
#elif defined(ARM)
return (intptr_t*)uc->uc_mcontext.arm_fp;
--- a/src/hotspot/share/adlc/output_c.cpp
+++ b/src/hotspot/share/adlc/output_c.cpp
@@ -2323,7 +2323,7 @@ private:
if (strcmp(rep_var,"$Register") == 0) return "as_Register";
if (strcmp(rep_var,"$KRegister") == 0) return "as_KRegister";
if (strcmp(rep_var,"$FloatRegister") == 0) return "as_FloatRegister";
-#if defined(AMD64)
+#if defined(IA32) || defined(AMD64)
if (strcmp(rep_var,"$XMMRegister") == 0) return "as_XMMRegister";
#endif
if (strcmp(rep_var,"$CondRegister") == 0) return "as_ConditionRegister";
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp
@@ -138,7 +138,7 @@ class ConversionStub: public CodeStub {
public:
ConversionStub(Bytecodes::Code bytecode, LIR_Opr input, LIR_Opr result)
: _bytecode(bytecode), _input(input), _result(result) {
- ShouldNotReachHere();
+ NOT_IA32( ShouldNotReachHere(); ) // used only on x86-32
}
Bytecodes::Code bytecode() { return _bytecode; }
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp
@@ -527,6 +527,16 @@ void LIR_Assembler::emit_op1(LIR_Op1* op
safepoint_poll(op->in_opr(), op->info());
break;
+#ifdef IA32
+ case lir_fxch:
+ fxch(op->in_opr()->as_jint());
+ break;
+
+ case lir_fld:
+ fld(op->in_opr()->as_jint());
+ break;
+#endif // IA32
+
case lir_branch:
break;
@@ -602,6 +612,12 @@ void LIR_Assembler::emit_op0(LIR_Op0* op
osr_entry();
break;
+#ifdef IA32
+ case lir_fpop_raw:
+ fpop();
+ break;
+#endif // IA32
+
case lir_breakpoint:
breakpoint();
break;
--- a/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.cpp
+++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahGenerationalMode.cpp
@@ -31,7 +31,7 @@
void ShenandoahGenerationalMode::initialize_flags() const {
-#if !(defined AARCH64 || defined AMD64 || defined PPC64 || defined RISCV64)
+#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined RISCV64)
vm_exit_during_initialization("Shenandoah Generational GC is not supported on this platform.");
#endif
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
@@ -39,7 +39,7 @@
#include "utilities/defaultStream.hpp"
void ShenandoahArguments::initialize() {
-#if !(defined AARCH64 || defined AMD64 || defined PPC64 || defined RISCV64)
+#if !(defined AARCH64 || defined AMD64 || defined IA32 || defined PPC64 || defined RISCV64)
vm_exit_during_initialization("Shenandoah GC is not supported on this platform.");
#endif
--- a/src/hotspot/share/interpreter/abstractInterpreter.hpp
+++ b/src/hotspot/share/interpreter/abstractInterpreter.hpp
@@ -255,7 +255,7 @@ class AbstractInterpreter: AllStatic {
return stackElementWords * i;
}
-#if !defined(ZERO) && defined(AMD64)
+#if !defined(ZERO) && (defined(IA32) || defined(AMD64))
static Address::ScaleFactor stackElementScale() {
return NOT_LP64(Address::times_4) LP64_ONLY(Address::times_8);
}
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp
@@ -1460,7 +1460,7 @@ JRT_ENTRY(void, InterpreterRuntime::prep
// preparing the same method will be sure to see non-null entry & mirror.
JRT_END
-#if defined(AMD64) || defined(ARM)
+#if defined(IA32) || defined(AMD64) || defined(ARM)
JRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* current, void* src_address, void* dest_address))
assert(current == JavaThread::current(), "pre-condition");
if (src_address == dest_address) {
--- a/src/hotspot/share/interpreter/interpreterRuntime.hpp
+++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp
@@ -146,8 +146,8 @@ private:
Method* method,
intptr_t* from, intptr_t* to);
-#if defined(AMD64) || defined(ARM)
- // Popframe support (only needed on AMD64 and ARM)
+#if defined(IA32) || defined(AMD64) || defined(ARM)
+ // Popframe support (only needed on x86, AMD64 and ARM)
static void popframe_move_outgoing_args(JavaThread* current, void* src_address, void* dest_address);
#endif
--- a/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
+++ b/src/hotspot/share/jfr/utilities/jfrBigEndian.hpp
@@ -101,7 +101,7 @@ inline R JfrBigEndian::read_unaligned(co
}
inline bool JfrBigEndian::platform_supports_unaligned_reads(void) {
-#if defined(AMD64) || defined(PPC) || defined(S390)
+#if defined(IA32) || defined(AMD64) || defined(PPC) || defined(S390)
return true;
#elif defined(ARM) || defined(AARCH64) || defined(RISCV)
return false;
--- a/src/hotspot/share/opto/chaitin.cpp
+++ b/src/hotspot/share/opto/chaitin.cpp
@@ -975,6 +975,7 @@ void PhaseChaitin::gather_lrg_masks( boo
// ------------------- reg_pressure --------------------
// Each entry is reg_pressure_per_value,number_of_regs
// RegL RegI RegFlags RegF RegD INTPRESSURE FLOATPRESSURE
+ // IA32 2 1 1 1 1 6 6
// SPARC 2 2 2 2 2 48 (24) 52 (26)
// SPARCV9 2 2 2 2 2 48 (24) 52 (26)
// AMD64 1 1 1 1 1 14 15
@@ -990,6 +991,12 @@ void PhaseChaitin::gather_lrg_masks( boo
// Define platform specific register pressure
#if defined(ARM32)
lrg.set_reg_pressure(2);
+#elif defined(IA32)
+ if( ireg == Op_RegL ) {
+ lrg.set_reg_pressure(2);
+ } else {
+ lrg.set_reg_pressure(1);
+ }
#else
lrg.set_reg_pressure(1); // normally one value per register
#endif
--- a/src/hotspot/share/opto/divnode.cpp
+++ b/src/hotspot/share/opto/divnode.cpp
@@ -938,11 +938,15 @@ const Type* DivDNode::Value(PhaseGVN* ph
if( t2 == TypeD::ONE )
return t1;
+ // IA32 would only execute this for non-strict FP, which is never the
+ // case now.
+#if ! defined(IA32)
// If divisor is a constant and not zero, divide them numbers
if( t1->base() == Type::DoubleCon &&
t2->base() == Type::DoubleCon &&
t2->getd() != 0.0 ) // could be negative zero
return TypeD::make( t1->getd()/t2->getd() );
+#endif
// If the dividend is a constant zero
// Note: if t1 and t2 are zero then result is NaN (JVMS page 213)
--- a/src/hotspot/share/opto/machnode.hpp
+++ b/src/hotspot/share/opto/machnode.hpp
@@ -99,7 +99,7 @@ public:
return ::as_FloatRegister(reg(ra_, node, idx));
}
-#if defined(AMD64)
+#if defined(IA32) || defined(AMD64)
KRegister as_KRegister(PhaseRegAlloc *ra_, const Node *node) const {
return ::as_KRegister(reg(ra_, node));
}
--- a/src/hotspot/share/opto/mulnode.cpp
+++ b/src/hotspot/share/opto/mulnode.cpp
@@ -202,6 +202,14 @@ const Type* MulNode::Value(PhaseGVN* pha
if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
return bottom_type();
+#if defined(IA32)
+ // Can't trust native compilers to properly fold strict double
+ // multiplication with round-to-zero on this platform.
+ if (op == Op_MulD) {
+ return TypeD::DOUBLE;
+ }
+#endif
+
return mul_ring(t1,t2); // Local flavor of type multiplication
}
--- a/src/hotspot/share/runtime/abstract_vm_version.cpp
+++ b/src/hotspot/share/runtime/abstract_vm_version.cpp
@@ -204,6 +204,7 @@ const char* Abstract_VM_Version::vm_rele
#else
#define CPU AARCH64_ONLY("aarch64") \
AMD64_ONLY("amd64") \
+ IA32_ONLY("x86") \
S390_ONLY("s390") \
RISCV64_ONLY("riscv64")
#endif // !ZERO
--- a/src/hotspot/share/utilities/debug.cpp
+++ b/src/hotspot/share/utilities/debug.cpp
@@ -664,6 +664,7 @@ void help() {
tty->print_cr(" pns(void* sp, void* fp, void* pc) - print native (i.e. mixed) stack trace, e.g.");
#ifdef LINUX
AMD64_ONLY( tty->print_cr(" pns($sp, $rbp, $pc) on Linux/amd64"));
+ IA32_ONLY( tty->print_cr(" pns($sp, $ebp, $pc) on Linux/x86"));
AARCH64_ONLY(tty->print_cr(" pns($sp, $fp, $pc) on Linux/AArch64"));
RISCV_ONLY( tty->print_cr(" pns($sp, $fp, $pc) on Linux/RISC-V"));
PPC64_ONLY( tty->print_cr(" pns($sp, 0, $pc) on Linux/ppc64"));
--- a/src/hotspot/share/utilities/macros.hpp
+++ b/src/hotspot/share/utilities/macros.hpp
@@ -452,7 +452,7 @@
#define NOT_ZERO_RETURN
#endif
-#if defined(AMD64)
+#if defined(IA32) || defined(AMD64)
#define X86
#define X86_ONLY(code) code
#define NOT_X86(code)
@@ -462,6 +462,14 @@
#define NOT_X86(code) code
#endif
+#ifdef IA32
+#define IA32_ONLY(code) code
+#define NOT_IA32(code)
+#else
+#define IA32_ONLY(code)
+#define NOT_IA32(code) code
+#endif
+
#ifdef AMD64
#define AMD64_ONLY(code) code
#define NOT_AMD64(code)
--- a/test/hotspot/gtest/runtime/test_os_windows.cpp
+++ b/test/hotspot/gtest/runtime/test_os_windows.cpp
@@ -756,6 +756,7 @@ TEST_VM(os_windows, large_page_init_mult
size_t decided_large_page_size = os::win32::large_page_init_decide_size();
EXPECT_GT(decided_large_page_size, default_page_size) << "Large page size should be greater than the default page size for LargePageSizeInBytes = 4 * min_size";
+#if !defined(IA32)
size_t page_size_count = 0;
size_t page_size = os::page_sizes().largest();
@@ -772,6 +773,7 @@ TEST_VM(os_windows, large_page_init_mult
EXPECT_TRUE(page_size % min_size == 0) << "Each page size should be a multiple of the minimum large page size.";
EXPECT_LE(page_size, large_page_size) << "Page size should not exceed the determined large page size.";
}
+#endif
}
TEST_VM(os_windows, large_page_init_decide_size) {
@@ -807,11 +809,11 @@ TEST_VM(os_windows, large_page_init_deci
EXPECT_EQ(decided_size, 2 * M) << "Expected decided size to be 2M when large page is 1M and OS reported size is 2M";
}
-#if defined(AMD64)
+#if defined(IA32) || defined(AMD64)
FLAG_SET_CMDLINE(LargePageSizeInBytes, 5 * M); // Set large page size to 5MB
if (!EnableAllLargePageSizesForWindows) {
decided_size = os::win32::large_page_init_decide_size(); // Recalculate decided size
- EXPECT_EQ(decided_size, 0) << "Expected decided size to be 0 for large pages bigger than 4mb on AMD64";
+ EXPECT_EQ(decided_size, 0) << "Expected decided size to be 0 for large pages bigger than 4mb on IA32 or AMD64";
}
#endif
|