/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"

# include <sys/sysinfo.h>

bool VM_Version::_is_determine_features_test_running = false;
uint64_t VM_Version::_dscr_val = 0;

#define MSG(flag)   \
  if (flag && !FLAG_IS_DEFAULT(flag))                                  \
      jio_fprintf(defaultStream::error_stream(),                       \
                  "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
                  "         -XX:+" #flag " will be disabled!\n");

void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the detected CPU features.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }

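  // The cases below fall through intentionally: a given PowerArchitecturePPC64
  // level requires the features of all lower levels as well, so each missing
  // feature breaks out of the switch and leaves PowerArchitecturePPC64_ok false.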
  bool PowerArchitecturePPC64_ok = false;
  switch (PowerArchitecturePPC64) {
    case 8: if (!VM_Version::has_lqarx()  ) break;
    case 7: if (!VM_Version::has_popcntw()) break;
    case 6: if (!VM_Version::has_cmpb()   ) break;
    case 5: if (!VM_Version::has_popcntb()) break;
    case 0: PowerArchitecturePPC64_ok = true; break;
    default: break;
  }
  guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
            UINTX_FORMAT " on this machine", PowerArchitecturePPC64);

  // Power 8: Configure Data Stream Control Register.
  if (PowerArchitecturePPC64 >= 8 && has_mfdscr()) {
    config_dscr();
  }

  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks, false);
  }

#ifdef COMPILER2
  if (!UseSIGTRAP) {
    MSG(TrapBasedRangeChecks);
    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
  }

  // On Power 6, determine the section size.
  if (PowerArchitecturePPC64 == 6) {
    determine_section_size();
  // TODO: PPC port } else {
  // TODO: PPC port   PdScheduling::power6SectorSize = 0x20;
  }

  MaxVectorSize = 8;
#endif

  // Create and print feature-string.
  char buf[(num_features+1) * 16]; // Max 16 chars per feature.
  jio_snprintf(buf, sizeof(buf),
               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_fsqrt()   ? " fsqrt"   : ""),
               (has_isel()    ? " isel"    : ""),
               (has_lxarxeh() ? " lxarxeh" : ""),
               (has_cmpb()    ? " cmpb"    : ""),
               //(has_mftgpr()? " mftgpr"  : ""),
               (has_popcntb() ? " popcntb" : ""),
               (has_popcntw() ? " popcntw" : ""),
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : ""),
               (has_lqarx()   ? " lqarx"   : ""),
               (has_vcipher() ? " aes"     : ""),
               (has_vpmsumb() ? " vpmsumb" : ""),
               (has_tcheck()  ? " tcheck"  : ""),
               (has_mfdscr()  ? " mfdscr"  : ""),
               (has_vsx()     ? " vsx"     : ""),
               (has_ldbrx()   ? " ldbrx"   : ""),
               (has_stdbrx()  ? " stdbrx"  : ""),
               (has_vshasig() ? " sha"     : "")
               // Make sure number of %s matches num_features!
               );
  _features_string = os::strdup(buf);
  if (Verbose) {
    print_features();
  }

  // PPC64 supports 8-byte compare-exchange operations (see
  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
  _supports_cx8 = true;

  // Used by C1.
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  UseSSE = 0; // Only on x86 and x64.

  intx cache_line_size = L1_data_cache_line_size();

  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;

  if (AllocatePrefetchStyle == 4) {
    AllocatePrefetchStepSize = cache_line_size; // Need exact value.
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // Default is not defined?
  } else {
    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // Default is not defined?
  }

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
    AllocatePrefetchLines = 1; // Conservative value.
  }

  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
    AllocatePrefetchStyle = 1; // Fall back if inappropriate.
  }

  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should not be negative");

  // If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
  // the implementation uses the vector instructions available with Power8.
  // In all other cases, the implementation uses only generally available instructions.
  if (!UseCRC32Intrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
    }
  }

  // Implementation does not use any of the vector instructions available with Power8.
  // Their exploitation is still pending (aka "work in progress").
  if (!UseCRC32CIntrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
    }
  }

  // TODO: Provide implementation.
  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support.
#if defined(VM_LITTLE_ENDIAN)
  if (has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAES && has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

#else
  if (UseAES) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }
  if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }
#endif

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseGHASHIntrinsics) {
    warning("GHASH intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseFMA)) {
    FLAG_SET_DEFAULT(UseFMA, true);
  }

  if (has_vshasig()) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    if (!FLAG_IS_DEFAULT(UseSHA))
      warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA && has_vshasig()) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA && has_vshasig()) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }


  // Adjust RTM (Restricted Transactional Memory) flags.
  if (UseRTMLocking) {
    // If the CPU or the OS is too old:
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    if (!has_tcheck()) {
      vm_exit_during_initialization("RTM instructions are not available on this CPU");
    }
    bool os_too_old = true;
#ifdef AIX
    // Actually, this is supported since AIX 7.1. Unfortunately, the first
    // releases contained bugs, so it can only be enabled after AIX 7.1.3.30.
    // The Java property os.version, which is used in RTM tests to decide
    // whether the feature is available, only knows major and minor versions.
    // We don't want to change this property, as user code might depend on it.
    // So the tests cannot check for subversion 3.30, and we only enable RTM
    // with AIX 7.2.
    if (os::Aix::os_version() >= 0x07020000) { // At least AIX 7.2.
      os_too_old = false;
    }
#endif
#ifdef LINUX
    // We require at least Linux kernel 4.2, where the problematic behavior of
    // syscalls being called in the middle of a transaction has been addressed.
    // Please refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894
    // in the Linux kernel source tree: https://goo.gl/Kc5i7A
    if (os::Linux::os_version_is_known()) {
      if (os::Linux::os_version() >= 0x040200)
        os_too_old = false;
    } else {
      vm_exit_during_initialization("RTM cannot be enabled: kernel version is unknown.");
    }
#endif
    if (os_too_old) {
      vm_exit_during_initialization("RTM is not supported on this OS version.");
    }
  }

  if (UseRTMLocking) {
#if INCLUDE_RTM_OPT
    if (!UnlockExperimentalVMOptions) {
      vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
                                    "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
    } else {
      warning("UseRTMLocking is only available as experimental option on this platform.");
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should only be set on the command line");
    }
#else
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
#endif
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }

  // This machine allows unaligned memory accesses.
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::print_features() {
  tty->print_cr("Version: %s L1_data_cache_line_size=%d", features_string(), L1_data_cache_line_size());
}

#ifdef COMPILER2
// Determine the section size on Power 6: If the section size is 8 instructions,
// there should be a difference of ~15% between the two test loops below. If no
// difference is detected, the section size is assumed to be 32 instructions.
void VM_Version::determine_section_size() {

  int unroll = 80;

  const int code_size = (2* unroll * 32 + 100)*BytesPerInstWord;
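  // Room for two test loops of 'unroll' iterations with 32 instructions each,
  // plus roughly 100 instructions of slack for setup, compare/branch code and
  // code entry alignment.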

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_section_size", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  uint32_t *code = (uint32_t *)a->pc();
  // Emit code.
  void (*test1)() = (void(*)())(void *)a->function_entry();

  Label l1;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l1);
  a->align(CodeEntryAlignment);

  a->bind(l1);

  for (int i = 0; i < unroll; i++) {
    // Loop 1
    // ------- sector 0 ------------
    // ;; 0
    a->nop();             // 1
    a->fpnop0();          // 2
    a->fpnop1();          // 3
    a->addi(R4,R4, -1);   // 4

    // ;; 1
    a->nop();             // 5
    a->fmr(F6, F6);       // 6
    a->fmr(F7, F7);       // 7
    a->endgroup();        // 8
    // ------- sector 8 ------------

    // ;; 2
    a->nop();             // 9
    a->nop();             // 10
    a->fmr(F8, F8);       // 11
    a->fmr(F9, F9);       // 12

    // ;; 3
    a->nop();             // 13
    a->fmr(F10, F10);     // 14
    a->fmr(F11, F11);     // 15
    a->endgroup();        // 16
    // -------- sector 16 -------------

    // ;; 4
    a->nop();             // 17
    a->nop();             // 18
    a->fmr(F15, F15);     // 19
    a->fmr(F16, F16);     // 20

    // ;; 5
    a->nop();             // 21
    a->fmr(F17, F17);     // 22
    a->fmr(F18, F18);     // 23
    a->endgroup();        // 24
    // ------- sector 24 ------------

    // ;; 6
    a->nop();             // 25
    a->nop();             // 26
    a->fmr(F19, F19);     // 27
    a->fmr(F20, F20);     // 28

    // ;; 7
    a->nop();             // 29
    a->fmr(F21, F21);     // 30
    a->fmr(F22, F22);     // 31
    a->brnop0();          // 32

    // ------- sector 32 ------------
  }

  // ;; 8
  a->cmpdi(CCR0, R4, unroll); // 33
  a->bge(CCR0, l1);           // 34
  a->blr();

  // Emit code.
  void (*test2)() = (void(*)())(void *)a->function_entry();
  // uint32_t *code = (uint32_t *)a->pc();

  Label l2;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l2);
  a->align(CodeEntryAlignment);

  a->bind(l2);

  for (int i = 0; i < unroll; i++) {
    // Loop 2
    // ------- sector 0 ------------
    // ;; 0
    a->brnop0();          // 1
    a->nop();             // 2
    //a->cmpdi(CCR0, R4, unroll);
    a->fpnop0();          // 3
    a->fpnop1();          // 4
    a->addi(R4,R4, -1);   // 5

    // ;; 1

    a->nop();             // 6
    a->fmr(F6, F6);       // 7
    a->fmr(F7, F7);       // 8
    // ------- sector 8 ---------------

    // ;; 2
    a->endgroup();        // 9

    // ;; 3
    a->nop();             // 10
    a->nop();             // 11
    a->fmr(F8, F8);       // 12

    // ;; 4
    a->fmr(F9, F9);       // 13
    a->nop();             // 14
    a->fmr(F10, F10);     // 15

    // ;; 5
    a->fmr(F11, F11);     // 16
    // -------- sector 16 -------------

    // ;; 6
    a->endgroup();        // 17

    // ;; 7
    a->nop();             // 18
    a->nop();             // 19
    a->fmr(F15, F15);     // 20

    // ;; 8
    a->fmr(F16, F16);     // 21
    a->nop();             // 22
    a->fmr(F17, F17);     // 23

    // ;; 9
    a->fmr(F18, F18);     // 24
    // -------- sector 24 -------------

    // ;; 10
    a->endgroup();        // 25

    // ;; 11
    a->nop();             // 26
    a->nop();             // 27
    a->fmr(F19, F19);     // 28

    // ;; 12
    a->fmr(F20, F20);     // 29
    a->nop();             // 30
    a->fmr(F21, F21);     // 31

    // ;; 13
    a->fmr(F22, F22);     // 32
  }

  // -------- sector 32 -------------
  // ;; 14
  a->cmpdi(CCR0, R4, unroll); // 33
  a->bge(CCR0, l2);           // 34

  a->blr();
  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  double loop1_seconds, loop2_seconds, rel_diff;
  uint64_t start1, stop1;

  start1 = os::current_thread_cpu_time(false);
  (*test1)();
  stop1 = os::current_thread_cpu_time(false);
  loop1_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);


  start1 = os::current_thread_cpu_time(false);
  (*test2)();
  stop1 = os::current_thread_cpu_time(false);

  loop2_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);

  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds * 100;

  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
    tty->print_cr("Time loop1 :%f", loop1_seconds);
    tty->print_cr("Time loop2 :%f", loop2_seconds);
    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);

    if (rel_diff > 12.0) {
      tty->print_cr("Section Size 8 Instructions");
    } else {
      tty->print_cr("Section Size 32 Instructions or Power5");
    }
  }

#if 0 // TODO: PPC port
  // Set sector size (if not set explicitly).
  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
    if (rel_diff > 12.0) {
      PdScheduling::power6SectorSize = 0x20;
    } else {
      PdScheduling::power6SectorSize = 0x80;
    }
  } else if (Power6SectorSize128PPC64) {
    PdScheduling::power6SectorSize = 0x80;
  } else {
    PdScheduling::power6SectorSize = 0x20;
  }
#endif
  if (UsePower6SchedulerPPC64) Unimplemented();
}
#endif // COMPILER2

void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
  // 1 InstWord per call for the blr instruction.
  const int code_size = (num_features+1+2*1)*BytesPerInstWord;
#else
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
  int features = 0;

  // Create the test area.
  enum { BUFFER_SIZE = 2*4*K }; // Needs to be >= 2 * max cache line size (cache line size can't exceed min page size).
  char test_area[BUFFER_SIZE];
  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];
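  // Note: dcbz zeroes the whole cache line containing the address passed to it.
  // Clearing a line in the middle of the buffer guarantees that all zeroed bytes
  // lie inside test_area, so the line size can be measured by counting them
  // (see the cache line measurement below).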

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_cpu_features", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Must be set to true so we can generate the test code.
  _features = VM_Version::all_features_m;

  // Emit code.
  void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  // Don't use R0 in ldarx.
  // Keep R3_ARG1 unmodified, it contains &field (see below).
  // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
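  // Each probe below occupies exactly one instruction word. Instructions that
  // are illegal on this CPU are overwritten with 0 by the SIGILL handler, so
  // after the test run code[i] is non-zero exactly for the supported features
  // (evaluated in the feature_cntr loop further down).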
  a->fsqrt(F3, F4);                            // code[0]  -> fsqrt_m
  a->fsqrts(F3, F4);                           // code[1]  -> fsqrts_m
  a->isel(R7, R5, R6, 0);                      // code[2]  -> isel_m
  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3]  -> lxarx_m
  a->cmpb(R7, R5, R6);                         // code[4]  -> cmpb
  a->popcntb(R7, R5);                          // code[5]  -> popcntb
  a->popcntw(R7, R5);                          // code[6]  -> popcntw
  a->fcfids(F3, F4);                           // code[7]  -> fcfids
  a->vand(VR0, VR0, VR0);                      // code[8]  -> vand
  // arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16.
  a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9]  -> lqarx_m
  a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
  a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
  a->tcheck(0);                                // code[12] -> tcheck
  a->mfdscr(R0);                               // code[13] -> mfdscr
  a->lxvd2x(VSR0, R3_ARG1);                    // code[14] -> vsx
  a->ldbrx(R7, R3_ARG1, R4_ARG2);              // code[15] -> ldbrx
  a->stdbrx(R7, R3_ARG1, R4_ARG2);             // code[16] -> stdbrx
  a->vshasigmaw(VR0, VR1, 1, 0xF);             // code[17] -> vshasig
  a->blr();

  // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
  a->dcbz(R3_ARG1); // R3_ARG1 = addr
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();
  _features = VM_Version::unknown_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Measure cache line size.
  memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
  (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle.
  int count = 0; // Count zeroed bytes.
  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
  _L1_data_cache_line_size = count;

  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  // We must align the first argument to 16 bytes because of the lqarx check.
  (*test)(align_up((address)mid_of_test_area, 16), 0);
  VM_Version::_is_determine_features_test_running = false;

  // Determine which instructions are legal.
  int feature_cntr = 0;
  if (code[feature_cntr++]) features |= fsqrt_m;
  if (code[feature_cntr++]) features |= fsqrts_m;
  if (code[feature_cntr++]) features |= isel_m;
  if (code[feature_cntr++]) features |= lxarxeh_m;
  if (code[feature_cntr++]) features |= cmpb_m;
  if (code[feature_cntr++]) features |= popcntb_m;
  if (code[feature_cntr++]) features |= popcntw_m;
  if (code[feature_cntr++]) features |= fcfids_m;
  if (code[feature_cntr++]) features |= vand_m;
  if (code[feature_cntr++]) features |= lqarx_m;
  if (code[feature_cntr++]) features |= vcipher_m;
  if (code[feature_cntr++]) features |= vpmsumb_m;
  if (code[feature_cntr++]) features |= tcheck_m;
  if (code[feature_cntr++]) features |= mfdscr_m;
  if (code[feature_cntr++]) features |= vsx_m;
  if (code[feature_cntr++]) features |= ldbrx_m;
  if (code[feature_cntr++]) features |= stdbrx_m;
  if (code[feature_cntr++]) features |= vshasig_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  _features = features;
}

// Power 8: Configure Data Stream Control Register.
void VM_Version::config_dscr() {
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (2+2*7)*BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("config_dscr", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Emit code.
  uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  a->mfdscr(R3);
  a->blr();

  void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
  a->mtdscr(R3);
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Apply the configuration if needed.
  _dscr_val = (*get_dscr)();
  if (Verbose) {
    tty->print_cr("dscr value was 0x%lx" , _dscr_val);
  }
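  // The flags below only patch the register image in _dscr_val:
  // DSCR_PPC64 (if set) replaces the whole value, while DSCR_DPFD_PPC64 and
  // DSCR_URG_PPC64 each patch only their 3-bit field (bits 0..2 resp. 6..8 of
  // the value); a flag value > 7 leaves the corresponding field untouched.
  // The hardware register is only rewritten if something actually changed.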
  bool change_requested = false;
  if (DSCR_PPC64 != (uintx)-1) {
    _dscr_val = DSCR_PPC64;
    change_requested = true;
  }
  if (DSCR_DPFD_PPC64 <= 7) {
    uint64_t mask = 0x7;
    if ((_dscr_val & mask) != DSCR_DPFD_PPC64) {
      _dscr_val = (_dscr_val & ~mask) | (DSCR_DPFD_PPC64);
      change_requested = true;
    }
  }
  if (DSCR_URG_PPC64 <= 7) {
    uint64_t mask = 0x7 << 6;
    if ((_dscr_val & mask) != DSCR_URG_PPC64 << 6) {
      _dscr_val = (_dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
      change_requested = true;
    }
  }
  if (change_requested) {
    (*set_dscr)(_dscr_val);
    if (Verbose) {
      tty->print_cr("dscr was set to 0x%lx" , (*get_dscr)());
    }
  }
}

static uint64_t saved_features = 0;

void VM_Version::allow_all() {
  saved_features = _features;
  _features = all_features_m;
}

void VM_Version::revert() {
  _features = saved_features;
}