/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvm.h"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"

#include <sys/sysinfo.h>

bool VM_Version::_is_determine_features_test_running = false;
uint64_t VM_Version::_dscr_val = 0;

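// Warn when a trap-based check was requested explicitly on the command line
// but cannot take effect because UseSIGTRAP is off (the flag is cleared below).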
#define MSG(flag) \
  if (flag && !FLAG_IS_DEFAULT(flag))                                  \
      jio_fprintf(defaultStream::error_stream(),                       \
                  "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
                  "         -XX:+" #flag " will be disabled!\n");

void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the detected features.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    if (VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }

  bool PowerArchitecturePPC64_ok = false;
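  // The switch below intentionally falls through: a given architecture level is
  // only accepted if the features required by all lower levels are present as well.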
  switch (PowerArchitecturePPC64) {
    case 8: if (!VM_Version::has_lqarx()  ) break;
    case 7: if (!VM_Version::has_popcntw()) break;
    case 6: if (!VM_Version::has_cmpb()   ) break;
    case 5: if (!VM_Version::has_popcntb()) break;
    case 0: PowerArchitecturePPC64_ok = true; break;
    default: break;
  }
  guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
            UINTX_FORMAT " on this machine", PowerArchitecturePPC64);

  // Power 8: Configure Data Stream Control Register.
  if (PowerArchitecturePPC64 >= 8 && has_mfdscr()) {
    config_dscr();
  }

  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks, false);
  }

#ifdef COMPILER2
  if (!UseSIGTRAP) {
    MSG(TrapBasedRangeChecks);
    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
  }

  // On Power6, determine the section size.
  if (PowerArchitecturePPC64 == 6) {
    determine_section_size();
  // TODO: PPC port } else {
  // TODO: PPC port   PdScheduling::power6SectorSize = 0x20;
  }

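  // Note: MaxVectorSize is in bytes, so this limits C2 to 8-byte vectors.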
  MaxVectorSize = 8;
#endif

  // Create and print feature-string.
  char buf[(num_features+1) * 16]; // Max 16 chars per feature.
  jio_snprintf(buf, sizeof(buf),
               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_fsqrt()   ? " fsqrt"   : ""),
               (has_isel()    ? " isel"    : ""),
               (has_lxarxeh() ? " lxarxeh" : ""),
               (has_cmpb()    ? " cmpb"    : ""),
               //(has_mftgpr()? " mftgpr"  : ""),
               (has_popcntb() ? " popcntb" : ""),
               (has_popcntw() ? " popcntw" : ""),
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : ""),
               (has_lqarx()   ? " lqarx"   : ""),
               (has_vcipher() ? " aes"     : ""),
               (has_vpmsumb() ? " vpmsumb" : ""),
               (has_tcheck()  ? " tcheck"  : ""),
               (has_mfdscr()  ? " mfdscr"  : ""),
               (has_vsx()     ? " vsx"     : ""),
               (has_ldbrx()   ? " ldbrx"   : ""),
               (has_stdbrx()  ? " stdbrx"  : "")
               // Make sure number of %s matches num_features!
              );
  _features_string = os::strdup(buf);
  if (Verbose) {
    print_features();
  }

  // PPC64 supports 8-byte compare-exchange operations (see
  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
  _supports_cx8 = true;

  // Used by C1.
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  UseSSE = 0; // Only on x86 and x64.

  intx cache_line_size = L1_data_cache_line_size();

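  // Adjust the allocation-prefetch parameters to the measured L1 data cache line size.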
  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;

  if (AllocatePrefetchStyle == 4) {
    AllocatePrefetchStepSize = cache_line_size; // Need exact value.
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // Default is not defined?
  } else {
    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // Default is not defined?
  }

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
    AllocatePrefetchLines = 1; // Conservative value.
  }

  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
    AllocatePrefetchStyle = 1; // Fall back if inappropriate.
  }

  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle must not be negative");

  // On little-endian systems (defined(VM_LITTLE_ENDIAN)) running on Power8 or newer
  // hardware, the CRC32 implementation uses the vector instructions available with Power8.
  // In all other cases, the implementation uses only generally available instructions.
  if (!UseCRC32Intrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
    }
  }

  // The implementation does not use any of the vector instructions available with Power8.
  // Exploiting them is still work in progress.
  if (!UseCRC32CIntrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
    }
  }

  // TODO: Provide implementation.
  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support.
#if defined(VM_LITTLE_ENDIAN)
  if (has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAES && has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

#else
  if (UseAES) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }
  if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }
#endif

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseGHASHIntrinsics) {
    warning("GHASH intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseFMA)) {
    FLAG_SET_DEFAULT(UseFMA, true);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }
  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
    warning("SHA intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

#if defined(VM_LITTLE_ENDIAN)
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
#endif
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

  // Adjust RTM (Restricted Transactional Memory) flags.
  if (UseRTMLocking) {
    // If the CPU or OS is too old:
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    if (!has_tcheck()) {
      vm_exit_during_initialization("RTM instructions are not available on this CPU");
    }
    bool os_too_old = true;
#ifdef AIX
    // Actually, this is supported since AIX 7.1. Unfortunately, the first releases
    // contained bugs, so it can only be enabled after AIX 7.1.3.30.
    // The Java property os.version, which is used in RTM tests to decide
    // whether the feature is available, only knows major and minor versions.
    // We don't want to change this property, as user code might depend on it.
    // So the tests cannot check for subversion 3.30, and we only enable RTM
    // with AIX 7.2.
    if (os::Aix::os_version() >= 0x07020000) { // At least AIX 7.2.
      os_too_old = false;
    }
#endif
#ifdef LINUX
    // At least Linux kernel 4.2 is required, as the problematic behavior of syscalls
    // being called in the middle of a transaction has been addressed there.
    // Please refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894
    // in the Linux kernel source tree: https://goo.gl/Kc5i7A
    if (os::Linux::os_version_is_known()) {
      if (os::Linux::os_version() >= 0x040200)
        os_too_old = false;
    } else {
      vm_exit_during_initialization("RTM cannot be enabled: kernel version is unknown.");
    }
#endif
    if (os_too_old) {
      vm_exit_during_initialization("RTM is not supported on this OS version.");
    }
  }

  if (UseRTMLocking) {
#if INCLUDE_RTM_OPT
    if (!UnlockExperimentalVMOptions) {
      vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. "
                                    "It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
    } else {
      warning("UseRTMLocking is only available as experimental option on this platform.");
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should only be set on the command line");
    }
#else
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
#endif
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }

  // This machine allows unaligned memory accesses.
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }
}

bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

void VM_Version::print_features() {
  tty->print_cr("Version: %s L1_data_cache_line_size=%d", features_string(), L1_data_cache_line_size());
}

#ifdef COMPILER2
// Determine the section size on Power6: if the section size is 8 instructions,
// there should be a difference of ~15% between the two test loops. If no
// difference is detected, the section size is assumed to be 32 instructions.
void VM_Version::determine_section_size() {

  int unroll = 80;

  const int code_size = (2 * unroll * 32 + 100) * BytesPerInstWord;
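  // code_size: two test loops of 'unroll' iterations with 32 instructions each,
  // plus some slack for the loop setup, compare/branch and return sequences.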

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_section_size", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  uint32_t *code = (uint32_t *)a->pc();
  // Emit code.
  void (*test1)() = (void(*)())(void *)a->function_entry();

  Label l1;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l1);
  a->align(CodeEntryAlignment);

  a->bind(l1);

  for (int i = 0; i < unroll; i++) {
    // Loop 1
    // ------- sector 0 ------------
    // ;; 0
    a->nop();             // 1
    a->fpnop0();          // 2
    a->fpnop1();          // 3
    a->addi(R4, R4, -1);  // 4

    // ;; 1
    a->nop();             // 5
    a->fmr(F6, F6);       // 6
    a->fmr(F7, F7);       // 7
    a->endgroup();        // 8
    // ------- sector 8 ------------

    // ;; 2
    a->nop();             // 9
    a->nop();             // 10
    a->fmr(F8, F8);       // 11
    a->fmr(F9, F9);       // 12

    // ;; 3
    a->nop();             // 13
    a->fmr(F10, F10);     // 14
    a->fmr(F11, F11);     // 15
    a->endgroup();        // 16
    // -------- sector 16 -------------

    // ;; 4
    a->nop();             // 17
    a->nop();             // 18
    a->fmr(F15, F15);     // 19
    a->fmr(F16, F16);     // 20

    // ;; 5
    a->nop();             // 21
    a->fmr(F17, F17);     // 22
    a->fmr(F18, F18);     // 23
    a->endgroup();        // 24
    // ------- sector 24 ------------

    // ;; 6
    a->nop();             // 25
    a->nop();             // 26
    a->fmr(F19, F19);     // 27
    a->fmr(F20, F20);     // 28

    // ;; 7
    a->nop();             // 29
    a->fmr(F21, F21);     // 30
    a->fmr(F22, F22);     // 31
    a->brnop0();          // 32

    // ------- sector 32 ------------
  }

  // ;; 8
  a->cmpdi(CCR0, R4, unroll); // 33
  a->bge(CCR0, l1);           // 34
  a->blr();

  // Emit code.
  void (*test2)() = (void(*)())(void *)a->function_entry();
  // uint32_t *code = (uint32_t *)a->pc();

  Label l2;

  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l2);
  a->align(CodeEntryAlignment);

  a->bind(l2);

  for (int i = 0; i < unroll; i++) {
    // Loop 2
    // ------- sector 0 ------------
    // ;; 0
    a->brnop0();          // 1
    a->nop();             // 2
    //a->cmpdi(CCR0, R4, unroll);
    a->fpnop0();          // 3
    a->fpnop1();          // 4
    a->addi(R4, R4, -1);  // 5

    // ;; 1

    a->nop();             // 6
    a->fmr(F6, F6);       // 7
    a->fmr(F7, F7);       // 8
    // ------- sector 8 ---------------

    // ;; 2
    a->endgroup();        // 9

    // ;; 3
    a->nop();             // 10
    a->nop();             // 11
    a->fmr(F8, F8);       // 12

    // ;; 4
    a->fmr(F9, F9);       // 13
    a->nop();             // 14
    a->fmr(F10, F10);     // 15

    // ;; 5
    a->fmr(F11, F11);     // 16
    // -------- sector 16 -------------

    // ;; 6
    a->endgroup();        // 17

    // ;; 7
    a->nop();             // 18
    a->nop();             // 19
    a->fmr(F15, F15);     // 20

    // ;; 8
    a->fmr(F16, F16);     // 21
    a->nop();             // 22
    a->fmr(F17, F17);     // 23

    // ;; 9
    a->fmr(F18, F18);     // 24
    // -------- sector 24 -------------

    // ;; 10
    a->endgroup();        // 25

    // ;; 11
    a->nop();             // 26
    a->nop();             // 27
    a->fmr(F19, F19);     // 28

    // ;; 12
    a->fmr(F20, F20);     // 29
    a->nop();             // 30
    a->fmr(F21, F21);     // 31

    // ;; 13
    a->fmr(F22, F22);     // 32
  }

  // -------- sector 32 -------------
  // ;; 14
  a->cmpdi(CCR0, R4, unroll); // 33
  a->bge(CCR0, l2);           // 34

  a->blr();
  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

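  // Run both loops under the thread's CPU-time clock and compare them; a clearly
  // slower second loop indicates 8-instruction dispatch sections (see above).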
  double loop1_seconds, loop2_seconds, rel_diff;
  uint64_t start1, stop1;

  start1 = os::current_thread_cpu_time(false);
  (*test1)();
  stop1 = os::current_thread_cpu_time(false);
  loop1_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);

  start1 = os::current_thread_cpu_time(false);
  (*test2)();
  stop1 = os::current_thread_cpu_time(false);
  loop2_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);

  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds * 100;

  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
    tty->print_cr("Time loop1 :%f", loop1_seconds);
    tty->print_cr("Time loop2 :%f", loop2_seconds);
    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);

    if (rel_diff > 12.0) {
      tty->print_cr("Section Size 8 Instructions");
    } else {
      tty->print_cr("Section Size 32 Instructions or Power5");
    }
  }

#if 0 // TODO: PPC port
  // Set sector size (if not set explicitly).
  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
    if (rel_diff > 12.0) {
      PdScheduling::power6SectorSize = 0x20;
    } else {
      PdScheduling::power6SectorSize = 0x80;
    }
  } else if (Power6SectorSize128PPC64) {
    PdScheduling::power6SectorSize = 0x80;
  } else {
    PdScheduling::power6SectorSize = 0x20;
  }
#endif
  if (UsePower6SchedulerPPC64) Unimplemented();
}
#endif // COMPILER2

void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
  // 1 InstWord per call for the blr instruction.
  const int code_size = (num_features+1+2*1)*BytesPerInstWord;
#else
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
  int features = 0;

  // Create the test area.
  enum { BUFFER_SIZE = 2*4*K }; // Needs to be >= 2 * max cache line size (cache line size can't exceed min page size).
  char test_area[BUFFER_SIZE];
  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_cpu_features", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Enable all features so we can generate the test code below.
  _features = VM_Version::all_features_m;

  // Emit code.
  void (*test)(address addr, uint64_t offset) = (void(*)(address addr, uint64_t offset))(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  // Don't use R0 in ldarx.
  // Keep R3_ARG1 unmodified, it contains &field (see below).
  // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
  a->fsqrt(F3, F4);                            // code[0]  -> fsqrt_m
  a->fsqrts(F3, F4);                           // code[1]  -> fsqrts_m
  a->isel(R7, R5, R6, 0);                      // code[2]  -> isel_m
  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3]  -> lxarx_m
  a->cmpb(R7, R5, R6);                         // code[4]  -> cmpb
  a->popcntb(R7, R5);                          // code[5]  -> popcntb
  a->popcntw(R7, R5);                          // code[6]  -> popcntw
  a->fcfids(F3, F4);                           // code[7]  -> fcfids
  a->vand(VR0, VR0, VR0);                      // code[8]  -> vand
  // arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16.
  a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9]  -> lqarx_m
  a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
  a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
  a->tcheck(0);                                // code[12] -> tcheck
  a->mfdscr(R0);                               // code[13] -> mfdscr
  a->lxvd2x(VSR0, R3_ARG1);                    // code[14] -> vsx
  a->ldbrx(R7, R3_ARG1, R4_ARG2);              // code[15] -> ldbrx
  a->stdbrx(R7, R3_ARG1, R4_ARG2);             // code[16] -> stdbrx
  a->blr();

  // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
  a->dcbz(R3_ARG1); // R3_ARG1 = addr
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();
  _features = VM_Version::unknown_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Measure cache line size.
  memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
  (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle.
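  // dcbz zeroes exactly one data cache line, so the number of zeroed bytes equals the line size.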
  int count = 0; // Count zeroed bytes.
  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
  _L1_data_cache_line_size = count;

  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  // We must align the first argument to 16 bytes because of the lqarx check.
  (*test)(align_up((address)mid_of_test_area, 16), 0);
  VM_Version::_is_determine_features_test_running = false;

  // Determine which instructions are legal.
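  // A non-zero instruction word means the instruction was not overwritten by the
  // SIGILL handler, i.e. it executed successfully and the feature is available.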
  int feature_cntr = 0;
  if (code[feature_cntr++]) features |= fsqrt_m;
  if (code[feature_cntr++]) features |= fsqrts_m;
  if (code[feature_cntr++]) features |= isel_m;
  if (code[feature_cntr++]) features |= lxarxeh_m;
  if (code[feature_cntr++]) features |= cmpb_m;
  if (code[feature_cntr++]) features |= popcntb_m;
  if (code[feature_cntr++]) features |= popcntw_m;
  if (code[feature_cntr++]) features |= fcfids_m;
  if (code[feature_cntr++]) features |= vand_m;
  if (code[feature_cntr++]) features |= lqarx_m;
  if (code[feature_cntr++]) features |= vcipher_m;
  if (code[feature_cntr++]) features |= vpmsumb_m;
  if (code[feature_cntr++]) features |= tcheck_m;
  if (code[feature_cntr++]) features |= mfdscr_m;
  if (code[feature_cntr++]) features |= vsx_m;
  if (code[feature_cntr++]) features |= ldbrx_m;
  if (code[feature_cntr++]) features |= stdbrx_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  _features = features;
}

// Power 8: Configure Data Stream Control Register.
void VM_Version::config_dscr() {
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (2+2*7)*BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("config_dscr", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Emit code.
  uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  a->mfdscr(R3);
  a->blr();

  void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
  a->mtdscr(R3);
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Apply the configuration if needed.
  _dscr_val = (*get_dscr)();
  if (Verbose) {
    tty->print_cr("dscr value was 0x%lx", _dscr_val);
  }
  bool change_requested = false;
  if (DSCR_PPC64 != (uintx)-1) {
    _dscr_val = DSCR_PPC64;
    change_requested = true;
  }
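  // As used here, DSCR_DPFD_PPC64 configures the default prefetch depth field (the low
  // three bits of the DSCR value) and DSCR_URG_PPC64 the depth attainment urgency field
  // (bits 6..8); values above 7 leave the corresponding field unchanged.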
  if (DSCR_DPFD_PPC64 <= 7) {
    uint64_t mask = 0x7;
    if ((_dscr_val & mask) != DSCR_DPFD_PPC64) {
      _dscr_val = (_dscr_val & ~mask) | (DSCR_DPFD_PPC64);
      change_requested = true;
    }
  }
  if (DSCR_URG_PPC64 <= 7) {
    uint64_t mask = 0x7 << 6;
    if ((_dscr_val & mask) != DSCR_URG_PPC64 << 6) {
      _dscr_val = (_dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
      change_requested = true;
    }
  }
  if (change_requested) {
    (*set_dscr)(_dscr_val);
    if (Verbose) {
      tty->print_cr("dscr was set to 0x%lx", (*get_dscr)());
    }
  }
}

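// allow_all() temporarily enables every feature bit so that any instruction can be
// assembled; revert() restores the feature set saved by the last allow_all() call.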
static uint64_t saved_features = 0;

void VM_Version::allow_all() {
  saved_features = _features;
  _features = all_features_m;
}

void VM_Version::revert() {
  _features = saved_features;
}