Line data Source code
1 : /* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
2 :
3 : Copyright (C) 1996-2013
4 : CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 : Copyright (C) 2009 Theobroma Systems Ltd.
6 :
7 : This file is part of CACAO.
8 :
9 : This program is free software; you can redistribute it and/or
10 : modify it under the terms of the GNU General Public License as
11 : published by the Free Software Foundation; either version 2, or (at
12 : your option) any later version.
13 :
14 : This program is distributed in the hope that it will be useful, but
15 : WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 : General Public License for more details.
18 :
19 : You should have received a copy of the GNU General Public License
20 : along with this program; if not, write to the Free Software
21 : Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 : 02110-1301, USA.
23 :
24 : All functions assume the following code area / data area layout:
25 :
26 : +-----------+
27 : | |
28 : | code area | code area grows to higher addresses
29 : | |
30 : +-----------+ <-- start of procedure
31 : | |
32 : | data area | data area grows to lower addresses
33 : | |
34 : +-----------+
35 :
36 : The functions first write into a temporary code/data area allocated by
37 : "codegen_setup". "codegen_finish" copies the code and data area into permanent
38 : memory. All functions writing values into the data area return the offset
39 : relative the begin of the code area (start of procedure).
40 : relative to the beginning of the code area (start of procedure).
41 : */
42 :
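/* A minimal usage sketch of the conventions described above (illustrative
   only, not part of this file; assumes a jitdata *jd prepared by
   codegen_setup):

       s4 disp = dseg_add_unique_address(jd->cd, some_pointer);
       // disp is relative to the start of the procedure; since the data
       // area grows to lower addresses, dseg offsets are negative.
       M_ALD_DSEG(REG_ITMP1, disp);   // load the dseg slot at offset disp

   codegen_finish() later copies data and code into one contiguous block,
   so these entrypoint-relative offsets stay valid at run time. */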
43 :
44 : #include "config.h"
45 :
46 : #include <cassert>
47 : #include <cstring>
48 :
49 : #include "vm/types.hpp"
50 :
51 : #include "codegen.hpp"
52 : #include "md.hpp"
53 : #include "md-abi.hpp"
54 :
55 : #include "mm/codememory.hpp"
56 : #include "mm/memory.hpp"
57 :
58 : #include "toolbox/avl.hpp"
59 : #include "toolbox/list.hpp"
60 : #include "toolbox/logging.hpp"
61 :
62 : #include "native/llni.hpp"
63 : #include "native/localref.hpp"
64 : #include "native/native.hpp"
65 :
66 : #include "vm/descriptor.hpp"
67 : #include "vm/exceptions.hpp"
68 : #include "vm/field.hpp"
69 : #include "vm/options.hpp"
70 : #include "vm/statistics.hpp"
71 :
72 : #include "vm/jit/abi.hpp"
73 : #include "vm/jit/code.hpp"
74 : #include "vm/jit/codegen-common.hpp"
75 :
76 : #include "vm/jit/builtin.hpp"
77 : #include "vm/jit/dseg.hpp"
78 : #include "vm/jit/disass.hpp"
79 : #include "vm/jit/exceptiontable.hpp"
80 : #include "vm/jit/emit-common.hpp"
81 : #include "vm/jit/jit.hpp"
82 : #include "vm/jit/linenumbertable.hpp"
83 : #include "vm/jit/methodheader.hpp"
84 : #include "vm/jit/methodtree.hpp"
85 : #include "vm/jit/patcher-common.hpp"
86 : #include "vm/jit/replace.hpp"
87 : #include "vm/jit/show.hpp"
88 : #include "vm/jit/stacktrace.hpp"
89 : #include "vm/jit/trace.hpp"
90 :
91 : #include "vm/jit/optimizing/profile.hpp"
92 :
93 : #if defined(ENABLE_SSA)
94 : # include "vm/jit/optimizing/lsra.hpp"
95 : # include "vm/jit/optimizing/ssa.hpp"
96 : #elif defined(ENABLE_LSRA)
97 : # include "vm/jit/allocator/lsra.hpp"
98 : #endif
99 :
100 : #if defined(ENABLE_INTRP)
101 : #include "vm/jit/intrp/intrp.h"
102 : #endif
103 :
104 :
105 : STAT_REGISTER_VAR(int,count_branches_unresolved,0,"unresolved branches","unresolved branches")
106 : STAT_DECLARE_GROUP(function_call_stat)
107 : STAT_REGISTER_GROUP_VAR(u8,count_calls_java_to_native,0,"calls java to native","java-to-native calls",function_call_stat)
108 :
109 : STAT_REGISTER_GROUP(memory_stat,"mem. stat.","Memory usage")
110 : STAT_REGISTER_SUM_SUBGROUP(code_data_stat,"code data","Code and data usage",memory_stat)
111 : STAT_REGISTER_GROUP_VAR(int,count_code_len,0,"code len","code length",code_data_stat)
112 : STAT_REGISTER_GROUP_VAR(int,count_data_len,0,"data len","data length",code_data_stat)
113 :
114 : struct methodinfo;
115 :
116 : using namespace cacao;
117 :
118 :
119 : /* codegen_init ****************************************************************
120 :
121 : TODO
122 :
123 : *******************************************************************************/
124 :
125 163 : void codegen_init(void)
126 : {
127 163 : }
128 :
129 :
130 : /* codegen_setup ***************************************************************
131 :
132 : Allocates and initialises code area, data area and references.
133 :
134 : *******************************************************************************/
135 :
136 105324 : void codegen_setup(jitdata *jd)
137 : {
138 : //methodinfo *m;
139 : codegendata *cd;
140 :
141 : /* get required compiler data */
142 :
143 : //m = jd->m;
144 105324 : cd = jd->cd;
145 :
146 : /* initialize members */
147 :
148 : // Set flags as requested.
149 105324 : if (opt_AlwaysEmitLongBranches) {
150 0 : cd->flags = CODEGENDATA_FLAG_LONGBRANCHES;
151 : }
152 : else {
153 105324 : cd->flags = 0;
154 : }
155 :
156 105324 : cd->mcodebase = (u1*) DumpMemory::allocate(MCODEINITSIZE);
157 105324 : cd->mcodeend = cd->mcodebase + MCODEINITSIZE;
158 105324 : cd->mcodesize = MCODEINITSIZE;
159 :
160 : /* initialize mcode variables */
161 :
162 105324 : cd->mcodeptr = cd->mcodebase;
163 105324 : cd->lastmcodeptr = cd->mcodebase;
164 :
165 : #if defined(ENABLE_INTRP)
166 : /* native dynamic superinstructions variables */
167 :
168 : if (opt_intrp) {
169 : cd->ncodebase = (u1*) DumpMemory::allocate(NCODEINITSIZE);
170 : cd->ncodesize = NCODEINITSIZE;
171 :
172 : /* initialize ncode variables */
173 :
174 : cd->ncodeptr = cd->ncodebase;
175 :
176 : cd->lastinstwithoutdispatch = ~0; /* no inst without dispatch */
177 : cd->superstarts = NULL;
178 : }
179 : #endif
180 :
181 105324 : cd->dseg = NULL;
182 105324 : cd->dseglen = 0;
183 :
184 105324 : cd->jumpreferences = NULL;
185 :
186 : #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
187 105324 : cd->datareferences = NULL;
188 : #endif
189 :
190 105324 : cd->brancheslabel = new DumpList<branch_label_ref_t*>();
191 105324 : cd->linenumbers = new DumpList<Linenumber>();
192 105324 : }
193 :
194 :
195 : /* codegen_reset ***************************************************************
196 :
197 : Resets the codegen data structure so we can recompile the method.
198 :
199 : *******************************************************************************/
200 :
201 0 : static void codegen_reset(jitdata *jd)
202 : {
203 : codeinfo *code;
204 : codegendata *cd;
205 : basicblock *bptr;
206 :
207 : /* get required compiler data */
208 :
209 0 : code = jd->code;
210 0 : cd = jd->cd;
211 :
212 : /* reset error flag */
213 :
214 0 : cd->flags &= ~CODEGENDATA_FLAG_ERROR;
215 :
216 : /* reset some members; we reuse the code memory already allocated,
217 : as it should have almost the correct size */
218 :
219 0 : cd->mcodeptr = cd->mcodebase;
220 0 : cd->lastmcodeptr = cd->mcodebase;
221 :
222 0 : cd->dseg = NULL;
223 0 : cd->dseglen = 0;
224 :
225 0 : cd->jumpreferences = NULL;
226 :
227 : #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
228 0 : cd->datareferences = NULL;
229 : #endif
230 :
231 0 : cd->brancheslabel = new DumpList<branch_label_ref_t*>();
232 0 : cd->linenumbers = new DumpList<Linenumber>();
233 :
234 : /* We need to clear the mpc and the branch references from all
235 : basic blocks as they will definitely change. */
236 :
237 0 : for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
238 0 : bptr->mpc = -1;
239 0 : bptr->branchrefs = NULL;
240 : }
241 :
242 : /* We need to clear all the patcher references from the codeinfo
243 : since they all will be regenerated */
244 :
245 0 : patcher_list_reset(code);
246 :
247 : #if defined(ENABLE_REPLACEMENT)
248 : code->rplpoints = NULL;
249 : code->rplpointcount = 0;
250 : code->regalloc = NULL;
251 : code->regalloccount = 0;
252 : code->globalcount = 0;
253 : #endif
254 0 : }
255 :
256 :
257 : /* codegen_generate ************************************************************
258 :
259 : Generates the code for the currently compiled method.
260 :
261 : *******************************************************************************/
262 :
263 86399 : bool codegen_generate(jitdata *jd)
264 : {
265 : codegendata *cd;
266 :
267 : /* get required compiler data */
268 :
269 86399 : cd = jd->cd;
270 :
271 : /* call the machine-dependent code generation function */
272 :
273 86399 : if (!codegen_emit(jd))
274 0 : return false;
275 :
276 : /* check for an error */
277 :
278 86399 : if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
279 : /* check for the long-branches flag; if it is set we recompile the
280 : method */
281 :
282 : #if !defined(NDEBUG)
283 0 : if (compileverbose)
284 0 : log_message_method("Re-generating code: ", jd->m);
285 : #endif
286 :
287 : /* XXX maybe we should tag long-branches-methods for recompilation */
288 :
289 0 : if (CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)) {
290 : /* we have to reset the codegendata structure first */
291 :
292 0 : codegen_reset(jd);
293 :
294 : /* and restart the compiler run */
295 :
296 0 : if (!codegen_emit(jd))
297 0 : return false;
298 : }
299 : else {
300 0 : os::abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
301 : }
302 :
303 : #if !defined(NDEBUG)
304 0 : if (compileverbose)
305 0 : log_message_method("Re-generating code done: ", jd->m);
306 : #endif
307 : }
308 :
309 : /* reallocate the memory and finish the code generation */
310 :
311 86399 : codegen_finish(jd);
312 :
313 : /* everything's ok */
314 :
315 86399 : return true;
316 : }
317 :
318 :
319 : /* codegen_close ***************************************************************
320 :
321 : TODO
322 :
323 : *******************************************************************************/
324 :
325 0 : void codegen_close(void)
326 : {
327 : /* TODO: release avl tree on i386 and x86_64 */
328 0 : }
329 :
330 :
331 : /* codegen_increase ************************************************************
332 :
333 : Doubles the code area.
334 :
335 : *******************************************************************************/
336 :
337 11 : void codegen_increase(codegendata *cd)
338 : {
339 : u1 *oldmcodebase;
340 :
341 : /* save old mcodebase pointer */
342 :
343 11 : oldmcodebase = cd->mcodebase;
344 :
345 : /* reallocate to new, doubled memory */
346 :
347 : cd->mcodebase = (u1*) DumpMemory::reallocate(cd->mcodebase,
348 : cd->mcodesize,
349 11 : cd->mcodesize * 2);
350 11 : cd->mcodesize *= 2;
351 11 : cd->mcodeend = cd->mcodebase + cd->mcodesize;
352 :
353 : /* set new mcodeptr */
354 :
355 11 : cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
356 :
357 : #if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(ENABLE_INTRP) \
358 : || defined(__SPARC_64__)
359 : /* adjust the pointer to the last patcher position */
360 :
361 11 : if (cd->lastmcodeptr != NULL)
362 11 : cd->lastmcodeptr = cd->mcodebase + (cd->lastmcodeptr - oldmcodebase);
363 : #endif
364 11 : }
365 :
366 :
367 : /* codegen_ncode_increase ******************************************************
368 :
369 : Doubles the native code (ncode) area used for dynamic superinstructions.
370 :
371 : *******************************************************************************/
372 :
373 : #if defined(ENABLE_INTRP)
374 : u1 *codegen_ncode_increase(codegendata *cd, u1 *ncodeptr)
375 : {
376 : u1 *oldncodebase;
377 :
378 : /* save old ncodebase pointer */
379 :
380 : oldncodebase = cd->ncodebase;
381 :
382 : /* reallocate to new, doubled memory */
383 :
384 : cd->ncodebase = DMREALLOC(cd->ncodebase,
385 : u1,
386 : cd->ncodesize,
387 : cd->ncodesize * 2);
388 : cd->ncodesize *= 2;
389 :
390 : /* return the new ncodeptr */
391 :
392 : return (cd->ncodebase + (ncodeptr - oldncodebase));
393 : }
394 : #endif
395 :
396 :
397 : /* codegen_add_branch_ref ******************************************************
398 :
399 : Prepends a branch to the list.
400 :
401 : *******************************************************************************/
402 :
403 128968 : void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
404 : {
405 : branchref *br;
406 : s4 branchmpc;
407 :
408 : STATISTICS(count_branches_unresolved++);
409 :
410 : /* calculate the mpc of the branch instruction */
411 :
412 128968 : branchmpc = cd->mcodeptr - cd->mcodebase;
413 :
414 128968 : br = (branchref*) DumpMemory::allocate(sizeof(branchref));
415 :
416 128968 : br->branchmpc = branchmpc;
417 128968 : br->condition = condition;
418 128968 : br->reg = reg;
419 128968 : br->options = options;
420 128968 : br->next = target->branchrefs;
421 :
422 128968 : target->branchrefs = br;
423 128968 : }
424 :
425 :
426 : /* codegen_resolve_branchrefs **************************************************
427 :
428 : Resolves and patches the branch references of a given basic block.
429 :
430 : *******************************************************************************/
431 :
432 319280 : void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
433 : {
434 : branchref *br;
435 : u1 *mcodeptr;
436 :
437 : /* Save the mcodeptr because the branch emitting functions
438 : generate code somewhere inside already generated code,
439 : while we're still in the actual code generation phase. */
440 :
441 319280 : mcodeptr = cd->mcodeptr;
442 :
443 : /* just to make sure */
444 :
445 319280 : assert(bptr->mpc >= 0);
446 :
447 448248 : for (br = bptr->branchrefs; br != NULL; br = br->next) {
448 : /* temporarily set the mcodeptr */
449 :
450 128968 : cd->mcodeptr = cd->mcodebase + br->branchmpc;
451 :
452 : /* emit_bccz and emit_branch emit the correct code, even if we
453 : pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
454 :
455 128968 : emit_bccz(cd, bptr, br->condition, br->reg, br->options);
456 : }
457 :
458 : /* restore mcodeptr */
459 :
460 319280 : cd->mcodeptr = mcodeptr;
461 319280 : }
462 :
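/* Sketch of the two-phase forward-branch handling implemented by
   codegen_add_branch_ref() and codegen_resolve_branchrefs() above
   (hedged illustration, not actual emitter code):

       // target block not yet generated: remember the branch site ...
       codegen_add_branch_ref(cd, target, condition, reg, options);
       // ... and leave space for the instruction (placeholder code).

       // later, once target->mpc is known (its block header was reached):
       codegen_resolve_branchrefs(cd, target);
       // this rewinds cd->mcodeptr to every recorded site, re-emits the
       // real branch via emit_bccz() and finally restores cd->mcodeptr.
*/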
463 :
464 : /* codegen_branch_label_add ****************************************************
465 :
466 : Appends a branch to the label-branch list.
467 :
468 : *******************************************************************************/
469 :
470 29858 : void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
471 : {
472 : // Calculate the current mpc.
473 29858 : int32_t mpc = cd->mcodeptr - cd->mcodebase;
474 :
475 29858 : branch_label_ref_t* br = (branch_label_ref_t*) DumpMemory::allocate(sizeof(branch_label_ref_t));
476 :
477 29858 : br->mpc = mpc;
478 29858 : br->label = label;
479 29858 : br->condition = condition;
480 29858 : br->reg = reg;
481 29858 : br->options = options;
482 :
483 : // Add the branch to the list.
484 29858 : cd->brancheslabel->push_back(br);
485 29858 : }
486 :
487 :
488 : /* codegen_set_replacement_point_notrap ****************************************
489 :
490 : Record the position of a non-trappable replacement point.
491 :
492 : *******************************************************************************/
493 :
494 : #if defined(ENABLE_REPLACEMENT)
495 : #if !defined(NDEBUG)
496 : void codegen_set_replacement_point_notrap(codegendata *cd, s4 type)
497 : #else
498 : void codegen_set_replacement_point_notrap(codegendata *cd)
499 : #endif
500 : {
501 : assert(cd->replacementpoint);
502 : assert(cd->replacementpoint->type == type);
503 : assert(cd->replacementpoint->flags & rplpoint::FLAG_NOTRAP);
504 :
505 : cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
506 :
507 : cd->replacementpoint++;
508 : }
509 : #endif /* defined(ENABLE_REPLACEMENT) */
510 :
511 :
512 : /* codegen_set_replacement_point ***********************************************
513 :
514 : Record the position of a trappable replacement point.
515 :
516 : *******************************************************************************/
517 :
518 : #if defined(ENABLE_REPLACEMENT)
519 : #if !defined(NDEBUG)
520 : void codegen_set_replacement_point(codegendata *cd, s4 type)
521 : #else
522 : void codegen_set_replacement_point(codegendata *cd)
523 : #endif
524 : {
525 : assert(cd->replacementpoint);
526 : assert(cd->replacementpoint->type == type);
527 : assert(!(cd->replacementpoint->flags & rplpoint::FLAG_NOTRAP));
528 :
529 : cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
530 :
531 : cd->replacementpoint++;
532 :
533 : #if !defined(NDEBUG)
534 : /* XXX actually we should use our own REPLACEMENT_NOPS here! */
535 : if (opt_TestReplacement)
536 : PATCHER_NOPS;
537 : #endif
538 :
539 : /* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */
540 :
541 : cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
542 : }
543 : #endif /* defined(ENABLE_REPLACEMENT) */
544 :
545 :
546 : /* codegen_finish **************************************************************
547 :
548 : Finishes the code generation. A new memory area, large enough for both
549 : data and code, is allocated, and data and code are copied together
550 : into their final layout; unresolved jumps are resolved, ...
551 :
552 : *******************************************************************************/
553 :
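/* Worked example of the final layout produced below (illustrative
   numbers; assumes MAX_ALIGN == 16):

       mcodelen        = 100                        // bytes of machine code
       alignedmcodelen = MEMORY_ALIGN(100, 16)      // -> 112
       cd->dseglen     = MEMORY_ALIGN(dseglen, 16)  // e.g. 48
       alignedlen      = 112 + 48                   // one CNEW allocation

       code->mcode      --> [ data segment | machine code ]
       code->entrypoint  =  code->mcode + cd->dseglen   // start of procedure

   Data-segment entries therefore end up at negative offsets relative to
   the entrypoint, matching the layout described in the file header. */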
554 99456 : void codegen_finish(jitdata *jd)
555 : {
556 : s4 mcodelen;
557 : #if defined(ENABLE_INTRP)
558 : s4 ncodelen;
559 : #endif
560 : s4 alignedmcodelen;
561 : jumpref *jr;
562 : u1 *epoint;
563 : s4 alignedlen;
564 :
565 : /* Get required compiler data. */
566 :
567 99456 : codeinfo* code = jd->code;
568 99456 : codegendata* cd = jd->cd;
569 99456 : registerdata* rd = jd->rd;
570 :
571 : /* prevent compiler warning */
572 :
573 : #if defined(ENABLE_INTRP)
574 : ncodelen = 0;
575 : #endif
576 :
577 : /* calculate the code length */
578 :
579 99456 : mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
580 :
581 : STATISTICS(count_code_len += mcodelen);
582 : STATISTICS(count_data_len += cd->dseglen);
583 :
584 99456 : alignedmcodelen = MEMORY_ALIGN(mcodelen, MAX_ALIGN);
585 :
586 : #if defined(ENABLE_INTRP)
587 : if (opt_intrp)
588 : ncodelen = cd->ncodeptr - cd->ncodebase;
589 : else {
590 : ncodelen = 0; /* avoid compiler warning */
591 : }
592 : #endif
593 :
594 99456 : cd->dseglen = MEMORY_ALIGN(cd->dseglen, MAX_ALIGN);
595 99456 : alignedlen = alignedmcodelen + cd->dseglen;
596 :
597 : #if defined(ENABLE_INTRP)
598 : if (opt_intrp) {
599 : alignedlen += ncodelen;
600 : }
601 : #endif
602 :
603 : /* allocate new memory */
604 :
605 99456 : code->mcodelength = mcodelen + cd->dseglen;
606 99456 : code->mcode = CNEW(u1, alignedlen);
607 :
608 : /* set the entrypoint of the method */
609 :
610 99456 : assert(code->entrypoint == NULL);
611 99456 : code->entrypoint = epoint = (code->mcode + cd->dseglen);
612 :
613 : /* fill the data segment (code->entrypoint must already be set!) */
614 :
615 99456 : dseg_finish(jd);
616 :
617 : /* copy code to the new location */
618 :
619 99456 : MCOPY((void *) code->entrypoint, cd->mcodebase, u1, mcodelen);
620 :
621 : #if defined(ENABLE_INTRP)
622 : /* relocate native dynamic superinstruction code (if any) */
623 :
624 : if (opt_intrp) {
625 : cd->mcodebase = code->entrypoint;
626 :
627 : if (ncodelen > 0) {
628 : u1 *ncodebase = code->mcode + cd->dseglen + alignedmcodelen;
629 :
630 : MCOPY((void *) ncodebase, cd->ncodebase, u1, ncodelen);
631 :
632 : /* flush the instruction and data caches */
633 :
634 : md_cacheflush(ncodebase, ncodelen);
635 :
636 : /* set some cd variables for dynamic_super_rewrite */
637 :
638 : cd->ncodebase = ncodebase;
639 :
640 : } else {
641 : cd->ncodebase = NULL;
642 : }
643 :
644 : dynamic_super_rewrite(cd);
645 : }
646 : #endif
647 :
648 : /* Fill runtime information about generated code. */
649 :
650 99456 : code->stackframesize = cd->stackframesize;
651 99456 : code->synchronizedoffset = rd->memuse * 8;
652 99456 : code->savedintcount = INT_SAV_CNT - rd->savintreguse;
653 99456 : code->savedfltcount = FLT_SAV_CNT - rd->savfltreguse;
654 :
655 : /* Create the exception table. */
656 :
657 99456 : exceptiontable_create(jd);
658 :
659 : /* Create the linenumber table. */
660 :
661 99456 : code->linenumbertable = new LinenumberTable(jd);
662 :
663 : /* jump table resolving */
664 :
665 100530 : for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
666 : *((functionptr *) ((ptrint) epoint + jr->tablepos)) =
667 1074 : (functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
668 :
669 : /* patcher resolving */
670 :
671 99456 : patcher_resolve(jd);
672 :
673 : #if defined(ENABLE_REPLACEMENT)
674 : /* replacement point resolving */
675 : {
676 : int i;
677 : rplpoint *rp;
678 :
679 : rp = code->rplpoints;
680 : for (i=0; i<code->rplpointcount; ++i, ++rp) {
681 : rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
682 : }
683 : }
684 : #endif /* defined(ENABLE_REPLACEMENT) */
685 :
686 : /* Insert method into methodtree to find the entrypoint. */
687 :
688 99456 : methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);
689 :
690 : #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
691 : /* resolve data segment references */
692 :
693 99456 : dseg_resolve_datareferences(jd);
694 : #endif
695 :
696 : /* flush the instruction and data caches */
697 :
698 99456 : md_cacheflush(code->mcode, code->mcodelength);
699 99456 : }
700 :
701 : namespace {
702 : /**
703 : * Outsource stack adjustment logic to reduce in-code `#if defined`s.
704 : *
705 : * @note should be moved to a backend code unit.
706 : */
707 : #if defined(__ALPHA__)
708 : struct FrameInfo {
709 : u1 *sp;
710 : int32_t framesize;
711 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
712 : uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
713 : uint8_t *get_javasp() const { return sp + framesize; }
714 : uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
715 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
716 : uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
717 : };
718 : #elif defined(__ARM__)
719 : struct FrameInfo {
720 : u1 *sp;
721 : int32_t framesize;
722 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
723 : uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
724 : uint8_t *get_javasp() const { return sp + framesize; }
725 : uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
726 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
727 : uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
728 : };
729 : #elif defined(__I386__)
730 : struct FrameInfo {
731 : u1 *sp;
732 : int32_t framesize;
733 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
734 : uint8_t *get_datasp() const { return sp + framesize; }
735 : uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
736 : uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
737 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
738 : uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 2 * SIZEOF_VOID_P); }
739 : };
740 : #elif defined(__MIPS__)
741 : struct FrameInfo {
742 : u1 *sp;
743 : int32_t framesize;
744 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
745 : /* MIPS always uses 8 bytes to store the RA */
746 : uint8_t *get_datasp() const { return sp + framesize - 8; }
747 : uint8_t *get_javasp() const { return sp + framesize; }
748 : uint64_t *get_arg_regs() const {
749 : # if SIZEOF_VOID_P == 8
750 : return (uint64_t *) sp;
751 : # else
752 : return (uint64_t *) (sp + 5 * 8);
753 : # endif
754 : }
755 : uint64_t *get_ret_regs() const {
756 : # if SIZEOF_VOID_P == 8
757 : return (uint64_t *) sp;
758 : # else
759 : return (uint64_t *) (sp + 1 * 8);
760 : # endif
761 : }
762 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
763 : };
764 : #elif defined(__S390__)
765 : struct FrameInfo {
766 : u1 *sp;
767 : int32_t framesize;
768 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
769 : uint8_t *get_datasp() const { return sp + framesize - 8; }
770 : uint8_t *get_javasp() const { return sp + framesize; }
771 : uint64_t *get_arg_regs() const { return (uint64_t *) (sp + 96); }
772 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
773 : uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 96); }
774 : };
775 : #elif defined(__POWERPC__)
776 : struct FrameInfo {
777 : u1 *sp;
778 : int32_t framesize;
779 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
780 : uint8_t *get_datasp() const { return sp + framesize; }
781 : uint8_t *get_javasp() const { return sp + framesize; }
782 : uint64_t *get_arg_regs() const {
783 : return (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
784 : }
785 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
786 : uint64_t *get_ret_regs() const {
787 : return (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
788 : }
789 : };
790 : #elif defined(__POWERPC64__)
791 : struct FrameInfo {
792 : u1 *sp;
793 : int32_t framesize;
794 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
795 : uint8_t *get_datasp() const { return sp + framesize; }
796 : uint8_t *get_javasp() const { return sp + framesize; }
797 : uint64_t *get_arg_regs() const {
798 : return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
799 : }
800 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
801 : uint64_t *get_ret_regs() const {
802 : return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
803 : }
804 : };
805 : #elif defined(__X86_64__)
806 : struct FrameInfo {
807 : u1 *sp;
808 : int32_t framesize;
809 6200305 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
810 6200197 : uint8_t *get_datasp() const { return sp + framesize; }
811 3100515 : uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
812 3100466 : uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
813 3100482 : uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
814 3100322 : uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
815 : };
816 : #else
817 : // dummy
818 : struct FrameInfo {
819 : FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {
820 : /* XXX I was unable to do this port for SPARC64, sorry. (-michi) */
821 : /* XXX maybe we need to pass the RA as argument there */
822 : os::abort("codegen_start_native_call: unsupported architecture");
823 : }
824 : uint8_t *get_datasp() const { return NULL; }
825 : uint8_t *get_javasp() const { return NULL; }
826 : uint64_t *get_arg_regs() const { return NULL; }
827 : uint64_t *get_arg_stack() const { return NULL; }
828 : uint64_t *get_ret_regs() const { return NULL; }
829 : };
830 : #endif
831 :
832 : } // end anonymous namespace
833 :
834 : /* codegen_start_native_call ***************************************************
835 :
836 : Prepares the stuff required for a native (JNI) function call:
837 :
838 : - adds a stackframe info structure to the chain, for stacktraces
839 : - prepares the local references table on the stack
840 :
841 : The layout of the native stub stackframe should look like this:
842 :
843 : +---------------------------+ <- java SP (of parent Java function)
844 : | return address |
845 : +---------------------------+ <- data SP
846 : | |
847 : | stackframe info structure |
848 : | |
849 : +---------------------------+
850 : | |
851 : | local references table |
852 : | |
853 : +---------------------------+
854 : | |
855 : | saved registers (if any) |
856 : | |
857 : +---------------------------+
858 : | |
859 : | arguments (if any) |
860 : | |
861 : +---------------------------+ <- current SP (native stub)
862 :
863 : *******************************************************************************/
864 :
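/* Hedged example of how this function navigates the layout above, using
   the x86_64 FrameInfo helpers from the anonymous namespace (values and
   names are illustrative; SIZEOF_VOID_P == 8 assumed):

       FrameInfo FI(sp, framesize);
       uint8_t  *datasp = FI.get_datasp();   // sp + framesize: just below
                                             // the return address
       uint8_t  *javasp = FI.get_javasp();   // sp + framesize + 8: the SP
                                             // of the calling Java frame
       uint64_t *args   = FI.get_arg_regs(); // register arguments saved at sp

       // stackframeinfo and localref_table are carved out below datasp:
       sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
*/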
865 3100520 : java_handle_t *codegen_start_native_call(u1 *sp, u1 *pv)
866 : {
867 : stackframeinfo_t *sfi;
868 : localref_table *lrt;
869 : codeinfo *code;
870 : methodinfo *m;
871 : int32_t framesize;
872 :
873 : STATISTICS(count_calls_java_to_native++);
874 :
875 : // Get information from method header.
876 3100520 : code = code_get_codeinfo_for_pv(pv);
877 3100514 : assert(code != NULL);
878 :
879 3100514 : framesize = md_stacktrace_get_framesize(code);
880 3100509 : assert(framesize >= (int32_t) (sizeof(stackframeinfo_t) + sizeof(localref_table)));
881 :
882 : // Get the methodinfo.
883 3100509 : m = code_get_methodinfo_for_pv(pv);
884 3100546 : assert(m);
885 :
886 : /* calculate needed values */
887 :
888 3100546 : FrameInfo FI(sp,framesize);
889 :
890 3100511 : uint8_t *datasp = FI.get_datasp();
891 : //uint8_t *javasp = FI.get_javasp();
892 : #if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
893 3100486 : uint64_t *arg_regs = FI.get_arg_regs();
894 3100480 : uint64_t *arg_stack = FI.get_arg_stack();
895 : #endif
896 :
897 : /* get data structures from stack */
898 :
899 3100555 : sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
900 : lrt = (localref_table *) (datasp - sizeof(stackframeinfo_t) -
901 3100555 : sizeof(localref_table));
902 :
903 : #if defined(ENABLE_JNI)
904 : /* add current JNI local references table to this thread */
905 :
906 3100555 : localref_table_add(lrt);
907 : #endif
908 :
909 : #if !defined(NDEBUG)
910 : # if defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
911 : /* print the call-trace if necessary */
912 : /* BEFORE: filling the local reference table */
913 :
914 3100598 : if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
915 0 : trace_java_call_enter(m, arg_regs, arg_stack);
916 : # endif
917 : #endif
918 :
919 : #if defined(ENABLE_HANDLES)
920 : /* place all references into the local reference table */
921 : /* BEFORE: creating stackframeinfo */
922 :
923 : localref_native_enter(m, arg_regs, arg_stack);
924 : #endif
925 :
926 : /* Add a stackframeinfo for this native method. We don't have RA
927 : and XPC here. These are determined in
928 : stacktrace_stackframeinfo_add. */
929 :
930 3100599 : stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);
931 :
932 : /* Return a wrapped classinfo for static methods. */
933 :
934 3100618 : if (m->flags & ACC_STATIC)
935 811017 : return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
936 : else
937 2289601 : return NULL;
938 : }
939 :
940 :
941 : /* codegen_finish_native_call **************************************************
942 :
943 : Removes the stuff required for a native (JNI) function call.
944 : Additionally it checks for an exception and, if one occurred, gets the
945 : exception object and clears the pointer.
946 :
947 : *******************************************************************************/
948 :
949 3100273 : java_object_t *codegen_finish_native_call(u1 *sp, u1 *pv)
950 : {
951 : stackframeinfo_t *sfi;
952 : java_handle_t *e;
953 : java_object_t *o;
954 : codeinfo *code;
955 : int32_t framesize;
956 :
957 :
958 : // Get information from method header.
959 3100273 : code = code_get_codeinfo_for_pv(pv);
960 3100251 : assert(code != NULL);
961 :
962 3100251 : framesize = md_stacktrace_get_framesize(code);
963 :
964 : // Get the methodinfo.
965 : #if defined(ENABLE_HANDLES) || !defined(NDEBUG)
966 3100353 : methodinfo *m = code->m;
967 3100353 : assert(m != NULL);
968 : #endif
969 :
970 : /* calculate needed values */
971 :
972 3100353 : FrameInfo FI(sp,framesize);
973 :
974 3100298 : uint8_t *datasp = FI.get_datasp();
975 : #if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
976 3100337 : uint64_t *ret_regs = FI.get_ret_regs();
977 : #endif
978 :
979 : /* get data structures from stack */
980 :
981 3100344 : sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
982 :
983 : /* Remove current stackframeinfo from chain. */
984 :
985 3100344 : stacktrace_stackframeinfo_remove(sfi);
986 :
987 : #if defined(ENABLE_HANDLES)
988 : /* unwrap the return value from the local reference table */
989 : /* AFTER: removing the stackframeinfo */
990 : /* BEFORE: releasing the local reference table */
991 :
992 : localref_native_exit(m, ret_regs);
993 : #endif
994 :
995 : /* get and unwrap the exception */
996 : /* AFTER: removing the stackframe info */
997 : /* BEFORE: releasing the local reference table */
998 :
999 3100375 : e = exceptions_get_and_clear_exception();
1000 3100428 : o = LLNI_UNWRAP(e);
1001 :
1002 : #if defined(ENABLE_JNI)
1003 : /* release JNI local references table for this thread */
1004 :
1005 3100428 : localref_frame_pop_all();
1006 3100467 : localref_table_remove();
1007 : #endif
1008 :
1009 : #if !defined(NDEBUG)
1010 : # if defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
1011 : /* print the call-trace if necessary */
1012 : /* AFTER: unwrapping the return value */
1013 :
1014 3100472 : if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
1015 0 : trace_java_call_exit(m, ret_regs);
1016 : # endif
1017 : #endif
1018 :
1019 3100475 : return o;
1020 : }
1021 :
1022 :
1023 : /* codegen_reg_of_var **********************************************************
1024 :
1025 : This function determines the register to which the result of an
1026 : operation should go when it is ultimately intended to store the
1027 : result in pseudoregister v. If v is assigned to an actual
1028 : register, this register will be returned. Otherwise (when v is
1029 : spilled) this function returns tempregnum. If not already done,
1030 : regoff and flags are set in the stack location.
1031 :
1032 : *******************************************************************************/
1033 :
1034 1411671 : s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
1035 : {
1036 1411671 : if (!(v->flags & INMEMORY))
1037 1355594 : return v->vv.regoff;
1038 :
1039 56077 : return tempregnum;
1040 : }
1041 :
1042 :
1043 : /* codegen_reg_of_dst **********************************************************
1044 :
1045 : This function determines the register to which the result of an
1046 : operation should go when it is ultimately intended to store the
1047 : result in iptr->dst.var. If dst.var is assigned to an actual
1048 : register, this register will be returned. Otherwise (when it is
1049 : spilled) this function returns tempregnum. If not already done,
1050 : regoff and flags are set in the stack location.
1051 :
1052 : *******************************************************************************/
1053 :
1054 1316291 : s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
1055 : {
1056 1316291 : return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
1057 : }
1058 :
1059 : /**
1060 : * Fix up register locations in the case where control is transferred to an
1061 : * exception handler block via normal control flow (no exception).
1062 : */
1063 9964 : static void fixup_exc_handler_interface(jitdata *jd, basicblock *bptr)
1064 : {
1065 : // Exception handlers have exactly 1 in-slot
1066 9964 : assert(bptr->indepth == 1);
1067 9964 : varinfo *var = VAR(bptr->invars[0]);
1068 9964 : int32_t d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1069 9964 : emit_load(jd, NULL, var, d);
1070 : // Copy the interface variable to ITMP1 (XPTR) because that's where
1071 : // the handler expects it.
1072 9964 : emit_imove(jd->cd, d, REG_ITMP1_XPTR);
1073 9964 : }
1074 :
1075 : /**
1076 : * Generates machine code.
1077 : */
1078 86399 : bool codegen_emit(jitdata *jd)
1079 : {
1080 : varinfo* var;
1081 86399 : builtintable_entry* bte = 0;
1082 : methoddesc* md;
1083 : int32_t s1, s2, /*s3,*/ d;
1084 : #if !defined(__I386__)
1085 : int32_t fieldtype;
1086 : int32_t disp;
1087 : #endif
1088 : int i;
1089 :
1090 : // Get required compiler data.
1091 : //methodinfo* m = jd->m;
1092 86399 : codeinfo* code = jd->code;
1093 86399 : codegendata* cd = jd->cd;
1094 86399 : registerdata* rd = jd->rd;
1095 : #if defined(ENABLE_SSA)
1096 : lsradata* ls = jd->ls;
1097 : bool last_cmd_was_goto = false;
1098 : #endif
1099 :
1100 : // Space to save used callee saved registers.
1101 86399 : int32_t savedregs_num = 0;
1102 86399 : savedregs_num += (INT_SAV_CNT - rd->savintreguse);
1103 86399 : savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
1104 :
1105 : // Calculate size of stackframe.
1106 86399 : cd->stackframesize = rd->memuse + savedregs_num;
1107 :
1108 : // Space to save the return address.
1109 : #if STACKFRAME_RA_TOP_OF_FRAME
1110 : # if STACKFRAME_LEAFMETHODS_RA_REGISTER
1111 : if (!code_is_leafmethod(code))
1112 : # endif
1113 : cd->stackframesize += 1;
1114 : #endif
1115 :
1116 : // Space to save argument of monitor_enter.
1117 86399 : if (checksync && code_is_synchronized(code))
1118 : #if STACKFRAME_SYNC_NEEDS_TWO_SLOTS
1119 : /* On some architectures the stack position for the argument cannot
1120 : be shared with the place used to save the return register values
1121 : across monitor_exit, since both values reside in the same register. */
1122 : cd->stackframesize += 2;
1123 : #else
1124 3217 : cd->stackframesize += 1;
1125 : #endif
1126 :
1127 : // Keep stack of non-leaf functions 16-byte aligned for calls into
1128 : // native code.
1129 86399 : if (!code_is_leafmethod(code) || JITDATA_HAS_FLAG_VERBOSECALL(jd))
1130 : #if STACKFRMAE_RA_BETWEEN_FRAMES
1131 76287 : ALIGN_ODD(cd->stackframesize);
1132 : #else
1133 : ALIGN_EVEN(cd->stackframesize);
1134 : #endif
1135 :
1136 : #if defined(SPECIALMEMUSE)
1137 : // On architectures having a linkage area, we can get rid of the whole
1138 : // stackframe in leaf functions without saved registers.
1139 : if (code_is_leafmethod(code) && (cd->stackframesize == LA_SIZE_IN_POINTERS))
1140 : cd->stackframesize = 0;
1141 : #endif
1142 :
1143 : /*
1144 : * SECTION 1: Method header generation.
1145 : */
1146 :
1147 : // The method header was reduced to the bare minimum of one pointer
1148 : // to the codeinfo structure, which in turn contains all runtime
1149 : // information. However, this section together with the methodheader.h
1150 : // file will be kept alive for historical reasons. It might come in
1151 : // handy at some point.
1152 :
1153 86399 : (void) dseg_add_unique_address(cd, code); ///< CodeinfoPointer
1154 :
1155 : // XXX, REMOVEME: We still need it for exception handling in assembler.
1156 : // XXX ARM: (void) dseg_add_unique_s4(cd, cd->stackframesize);
1157 : #if defined(__I386__)
1158 : int align_off = (cd->stackframesize != 0) ? 4 : 0;
1159 : (void) dseg_add_unique_s4(cd, cd->stackframesize * 8 + align_off); /* FrameSize */
1160 : #else
1161 86399 : (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize */
1162 : #endif
1163 86399 : (void) dseg_add_unique_s4(cd, code_is_leafmethod(code) ? 1 : 0);
1164 86399 : (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
1165 86399 : (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
1166 :
1167 : /*
1168 : * SECTION 2: Method prolog generation.
1169 : */
1170 :
1171 : #if defined(ENABLE_PROFILING)
1172 : // Generate method profiling code.
1173 : if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1174 :
1175 : // Count method frequency.
1176 : emit_profile_method(cd, code);
1177 :
1178 : // Start CPU cycle counting.
1179 : emit_profile_cycle_start(cd, code);
1180 : }
1181 : #endif
1182 :
1183 : // Emit code for the method prolog.
1184 86399 : codegen_emit_prolog(jd);
1185 :
1186 : // Emit code to call monitorenter function.
1187 86399 : if (checksync && code_is_synchronized(code))
1188 3217 : emit_monitor_enter(jd, rd->memuse * 8);
1189 :
1190 : #if !defined(NDEBUG)
1191 : // Call trace function.
1192 86399 : if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1193 0 : emit_verbosecall_enter(jd);
1194 : #endif
1195 :
1196 : #if defined(ENABLE_SSA)
1197 : // With SSA the header is basicblock 0, insert phi moves if necessary.
1198 : if (ls != NULL)
1199 : codegen_emit_phi_moves(jd, ls->basicblocks[0]);
1200 : #endif
1201 :
1202 : // Create replacement points.
1203 : REPLACEMENT_POINTS_INIT(cd, jd);
1204 :
1205 : /*
1206 : * SECTION 3: ICMD code generation.
1207 : */
1208 :
1209 : // Walk through all basic blocks.
1210 492080 : for (basicblock* bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
1211 :
1212 405681 : bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
1213 :
1214 : // Is this basic block reached?
1215 405681 : if (bptr->state < basicblock::REACHED)
1216 86401 : continue;
1217 :
1218 : // Branch resolving.
1219 319280 : codegen_resolve_branchrefs(cd, bptr);
1220 :
1221 : // Handle replacement points.
1222 : REPLACEMENT_POINT_BLOCK_START(cd, bptr);
1223 :
1224 : #if defined(ENABLE_REPLACEMENT) && defined(__I386__)
1225 : // Generate countdown trap code.
1226 : methodinfo* m = jd->m;
1227 : if (bptr->bitflags & BBFLAG_REPLACEMENT) {
1228 : if (cd->replacementpoint[-1].flags & rplpoint::FLAG_COUNTDOWN) {
1229 : MCODECHECK(32);
1230 : emit_trap_countdown(cd, &(m->hitcountdown));
1231 : }
1232 : }
1233 : #endif
1234 :
1235 : #if defined(ENABLE_PROFILING)
1236 : // Generate basicblock profiling code.
1237 : if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1238 :
1239 : // Count basicblock frequency.
1240 : emit_profile_basicblock(cd, code, bptr);
1241 :
1242 : // If this is an exception handler, start profiling again.
1243 : if (bptr->type == basicblock::TYPE_EXH)
1244 : emit_profile_cycle_start(cd, code);
1245 : }
1246 : #endif
1247 :
1248 : // Copy interface registers to their destination.
1249 319280 : int32_t indepth = bptr->indepth;
1250 : // XXX Check if this is true for all archs.
1251 319280 : MCODECHECK(64+indepth); // All
1252 319280 : MCODECHECK(128+indepth); // PPC64
1253 319280 : MCODECHECK(512); // I386, X86_64, S390
1254 : #if defined(ENABLE_SSA)
1255 : // XXX Check if this is correct and add a proper comment!
1256 : if (ls != NULL) {
1257 : last_cmd_was_goto = false;
1258 : } else {
1259 : #elif defined(ENABLE_LSRA)
1260 : if (opt_lsra) {
1261 : while (indepth > 0) {
1262 : indepth--;
1263 : var = VAR(bptr->invars[indepth]);
1264 : if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1265 : if (!IS_INMEMORY(src->flags))
1266 : d = var->vv.regoff;
1267 : else
1268 : d = REG_ITMP1_XPTR;
1269 : // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1270 : // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1271 : emit_imove(cd, REG_ITMP1_XPTR, d);
1272 : emit_store(jd, NULL, var, d);
1273 : }
1274 : }
1275 : } else {
1276 : #endif
1277 669872 : while (indepth > 0) {
1278 31312 : indepth--;
1279 31312 : var = VAR(bptr->invars[indepth]);
1280 41276 : if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1281 9964 : d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1282 : // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1283 : // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1284 9964 : emit_imove(cd, REG_ITMP1_XPTR, d);
1285 9964 : emit_store(jd, NULL, var, d);
1286 : }
1287 : else {
1288 21348 : assert((var->flags & INOUT));
1289 : }
1290 : }
1291 : #if defined(ENABLE_SSA) || defined(ENABLE_LSRA)
1292 : }
1293 : #endif
1294 :
1295 : // Walk through all instructions.
1296 319280 : int32_t len = bptr->icount;
1297 319280 : uint16_t currentline = 0;
1298 4737866 : for (instruction* iptr = bptr->iinstr; len > 0; len--, iptr++) {
1299 :
1300 : // Add line number.
1301 4418586 : if (iptr->line != currentline) {
1302 828758 : linenumbertable_list_entry_add(cd, iptr->line);
1303 828758 : currentline = iptr->line;
1304 : }
1305 :
1306 : // An instruction usually needs < 64 words.
1307 : // XXX Check if this is true for all archs.
1308 4418586 : MCODECHECK(64); // All
1309 4418586 : MCODECHECK(128); // PPC64
1310 4418586 : MCODECHECK(1024); // I386, X86_64, S390 /* 1kB should be enough */
1311 :
1312 : // The big switch.
1313 4418586 : switch (iptr->opc) {
1314 :
1315 : case ICMD_NOP: /* ... ==> ... */
1316 : case ICMD_POP: /* ..., value ==> ... */
1317 : case ICMD_POP2: /* ..., value, value ==> ... */
1318 810254 : break;
1319 :
1320 : case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
1321 :
1322 0 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1323 0 : emit_nullpointer_check(cd, iptr, s1);
1324 0 : break;
1325 :
1326 : case ICMD_BREAKPOINT: /* ... ==> ... */
1327 : /* sx.val.anyptr = Breakpoint */
1328 :
1329 0 : patcher_add_patch_ref(jd, PATCHER_breakpoint, iptr->sx.val.anyptr, 0);
1330 0 : PATCHER_NOPS;
1331 0 : break;
1332 :
1333 : #if defined(ENABLE_SSA)
1334 : case ICMD_GETEXCEPTION:
1335 :
1336 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1337 : emit_imove(cd, REG_ITMP1, d);
1338 : emit_store_dst(jd, iptr, d);
1339 : break;
1340 : #endif
1341 :
1342 : /* inline operations **********************************************/
1343 :
1344 : case ICMD_INLINE_START:
1345 :
1346 : REPLACEMENT_POINT_INLINE_START(cd, iptr);
1347 0 : break;
1348 :
1349 : case ICMD_INLINE_BODY:
1350 :
1351 : REPLACEMENT_POINT_INLINE_BODY(cd, iptr);
1352 0 : linenumbertable_list_entry_add_inline_start(cd, iptr);
1353 0 : linenumbertable_list_entry_add(cd, iptr->line);
1354 0 : break;
1355 :
1356 : case ICMD_INLINE_END:
1357 :
1358 0 : linenumbertable_list_entry_add_inline_end(cd, iptr);
1359 0 : linenumbertable_list_entry_add(cd, iptr->line);
1360 0 : break;
1361 :
1362 :
1363 : /* constant operations ********************************************/
1364 :
1365 : case ICMD_ICONST: /* ... ==> ..., constant */
1366 :
1367 654581 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1368 654581 : ICONST(d, iptr->sx.val.i);
1369 654581 : emit_store_dst(jd, iptr, d);
1370 654581 : break;
1371 :
1372 : case ICMD_LCONST: /* ... ==> ..., constant */
1373 :
1374 5286 : d = codegen_reg_of_dst(jd, iptr, REG_LTMP12);
1375 5286 : LCONST(d, iptr->sx.val.l);
1376 5286 : emit_store_dst(jd, iptr, d);
1377 5286 : break;
1378 :
1379 :
1380 : /* load/store/copy/move operations ********************************/
1381 :
1382 : case ICMD_COPY:
1383 : case ICMD_MOVE:
1384 : case ICMD_ILOAD: /* ... ==> ..., content of local variable */
1385 : case ICMD_LLOAD: /* s1 = local variable */
1386 : case ICMD_FLOAD:
1387 : case ICMD_DLOAD:
1388 : case ICMD_ALOAD:
1389 : case ICMD_ISTORE: /* ..., value ==> ... */
1390 : case ICMD_LSTORE:
1391 : case ICMD_FSTORE:
1392 : case ICMD_DSTORE:
1393 :
1394 1196889 : emit_copy(jd, iptr);
1395 1196889 : break;
1396 :
1397 : case ICMD_ASTORE:
1398 :
1399 59060 : if (!(iptr->flags.bits & INS_FLAG_RETADDR))
1400 59043 : emit_copy(jd, iptr);
1401 59060 : break;
1402 :
1403 :
1404 : /* integer operations *********************************************/
1405 :
1406 : case ICMD_FCONST: /* ... ==> ..., constant */
1407 : case ICMD_DCONST: /* ... ==> ..., constant */
1408 : case ICMD_ACONST: /* ... ==> ..., constant */
1409 : case ICMD_INEG: /* ..., value ==> ..., - value */
1410 : case ICMD_LNEG: /* ..., value ==> ..., - value */
1411 : case ICMD_I2L: /* ..., value ==> ..., value */
1412 : case ICMD_L2I: /* ..., value ==> ..., value */
1413 : case ICMD_INT2BYTE: /* ..., value ==> ..., value */
1414 : case ICMD_INT2CHAR: /* ..., value ==> ..., value */
1415 : case ICMD_INT2SHORT: /* ..., value ==> ..., value */
1416 : case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1417 : case ICMD_IINC:
1418 : case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
1419 : /* sx.val.i = constant */
1420 : case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1421 : case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
1422 : /* sx.val.l = constant */
1423 : case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1424 : case ICMD_ISUBCONST: /* ..., value ==> ..., value + constant */
1425 : /* sx.val.i = constant */
1426 : case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1427 : case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
1428 : /* sx.val.l = constant */
1429 : case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1430 : case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
1431 : /* sx.val.i = constant */
1432 : case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1433 : /* sx.val.i = constant */
1434 : case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1435 : case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
1436 : /* sx.val.l = constant */
1437 : case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1438 : /* sx.val.l = constant */
1439 : case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1440 : case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1441 : case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
1442 : /* sx.val.i = constant */
1443 : case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
1444 : /* sx.val.i = constant */
1445 : case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1446 : case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1447 : case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
1448 : /* sx.val.i = constant */
1449 : case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
1450 : /* sx.val.l = constant */
1451 : case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1452 : case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
1453 : /* sx.val.i = constant */
1454 : case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1455 : case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
1456 : /* sx.val.i = constant */
1457 : case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1458 : case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
1459 : /* sx.val.i = constant */
1460 : case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1461 : case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
1462 : /* sx.val.i = constant */
1463 : case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1464 : case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
1465 : /* sx.val.i = constant */
1466 : case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1467 : case ICMD_LUSHRCONST: /* ..., value ==> ..., value >>> constant */
1468 : /* sx.val.l = constant */
1469 : case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1470 : case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
1471 : /* sx.val.i = constant */
1472 : case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1473 : case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
1474 : /* sx.val.l = constant */
1475 : case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1476 : case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
1477 : /* sx.val.i = constant */
1478 : case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1479 : case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
1480 : /* sx.val.l = constant */
1481 : case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1482 : case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
1483 : /* sx.val.i = constant */
1484 : case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1485 : case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
1486 : /* sx.val.l = constant */
1487 :
1488 : // Generate architecture specific instructions.
1489 215359 : codegen_emit_instruction(jd, iptr);
1490 215359 : break;
1491 :
1492 :
1493 : /* floating operations ********************************************/
1494 :
1495 : #if !defined(ENABLE_SOFTFLOAT)
1496 : case ICMD_FNEG: /* ..., value ==> ..., - value */
1497 : case ICMD_DNEG:
1498 : case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1499 : case ICMD_DADD:
1500 : case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1501 : case ICMD_DSUB:
1502 : case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1503 : case ICMD_DMUL:
1504 : case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1505 : case ICMD_DDIV:
1506 : case ICMD_FREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1507 : case ICMD_DREM:
1508 : case ICMD_I2F: /* ..., value ==> ..., (float) value */
1509 : case ICMD_I2D: /* ..., value ==> ..., (double) value */
1510 : case ICMD_L2F: /* ..., value ==> ..., (float) value */
1511 : case ICMD_L2D: /* ..., value ==> ..., (double) value */
1512 : case ICMD_F2I: /* ..., value ==> ..., (int) value */
1513 : case ICMD_D2I:
1514 : case ICMD_F2L: /* ..., value ==> ..., (long) value */
1515 : case ICMD_D2L:
1516 : case ICMD_F2D: /* ..., value ==> ..., (double) value */
1517 : case ICMD_D2F: /* ..., value ==> ..., (float) value */
1518 : case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
1519 : case ICMD_DCMPL: /* == => 0, < => 1, > => -1 */
1520 : case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
1521 : case ICMD_DCMPG: /* == => 0, < => 1, > => -1 */
1522 :
1523 : // Generate architecture specific instructions.
1524 5303 : codegen_emit_instruction(jd, iptr);
1525 5303 : break;
1526 : #endif /* !defined(ENABLE_SOFTFLOAT) */
1527 :
1528 :
1529 : /* memory operations **********************************************/
1530 :
1531 : case ICMD_ARRAYLENGTH:/* ..., arrayref ==> ..., length */
1532 :
1533 9670 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1534 9670 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1535 : /* implicit null-pointer check */
1536 : // XXX PPC64: Here we had an explicit null-pointer check
1537 : // which I think was obsolete, please confirm. Otherwise:
1538 : // emit_nullpointer_check(cd, iptr, s1);
1539 9670 : M_ILD(d, s1, OFFSET(java_array_t, size));
1540 9670 : emit_store_dst(jd, iptr, d);
1541 9670 : break;
1542 :
1543 : case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
1544 : case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
1545 : case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
1546 : case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
1547 : case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
1548 : case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
1549 : case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
1550 : case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
1551 : case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
1552 : case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
1553 : case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
1554 : case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
1555 : case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
1556 : case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
1557 : case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
1558 : case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
1559 : case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
1560 : case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
1561 : case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
1562 : case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
1563 : case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
1564 : case ICMD_FASTORECONST: /* ..., arrayref, index ==> ... */
1565 : case ICMD_DASTORECONST: /* ..., arrayref, index ==> ... */
1566 : case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
1567 : case ICMD_GETFIELD: /* ... ==> ..., value */
1568 : case ICMD_PUTFIELD: /* ..., value ==> ... */
1569 : case ICMD_PUTFIELDCONST: /* ..., objectref ==> ... */
1570 : /* val = value (in current instruction) */
1571 : case ICMD_PUTSTATICCONST: /* ... ==> ... */
1572 : /* val = value (in current instruction) */
1573 :
1574 : // Generate architecture specific instructions.
1575 759348 : codegen_emit_instruction(jd, iptr);
1576 759348 : break;
1577 :
1578 : case ICMD_GETSTATIC: /* ... ==> ..., value */
1579 :
1580 : #if defined(__I386__)
1581 : // Generate architecture specific instructions.
1582 : codegen_emit_instruction(jd, iptr);
1583 : break;
1584 : #else
1585 : {
1586 : fieldinfo* fi;
1587 : //patchref_t* pr;
1588 48504 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1589 18189 : unresolved_field* uf = iptr->sx.s23.s3.uf;
1590 18189 : fieldtype = uf->fieldref->parseddesc.fd->type;
1591 18189 : disp = dseg_add_unique_address(cd, 0);
1592 :
1593 : //pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1594 18189 : patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1595 :
1596 18189 : fi = NULL; /* Silence compiler warning */
1597 : }
1598 : else {
1599 30315 : fi = iptr->sx.s23.s3.fmiref->p.field;
1600 30315 : fieldtype = fi->type;
1601 30315 : disp = dseg_add_address(cd, fi->value);
1602 :
1603 30315 : if (!class_is_or_almost_initialized(fi->clazz)) {
1604 : PROFILE_CYCLE_STOP;
1605 745 : patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1606 : PROFILE_CYCLE_START;
1607 : }
1608 :
1609 : //pr = NULL; /* Silence compiler warning */
1610 : }
1611 :
1612 : // XXX X86_64: Here We had this:
1613 : /* This approach is much faster than moving the field
1614 : address inline into a register. */
1615 :
1616 48504 : M_ALD_DSEG(REG_ITMP1, disp);
1617 :
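            : 			// Note: with ENABLE_SOFTFLOAT, float and double values are kept
            : 			// in integer registers, so they share the int/long load paths below.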
1618 48504 : switch (fieldtype) {
1619 : case TYPE_ADR:
1620 40208 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1621 40208 : M_ALD(d, REG_ITMP1, 0);
1622 40208 : break;
1623 : case TYPE_INT:
1624 : #if defined(ENABLE_SOFTFLOAT)
1625 : case TYPE_FLT:
1626 : #endif
1627 6077 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1628 6077 : M_ILD(d, REG_ITMP1, 0);
1629 6077 : break;
1630 : case TYPE_LNG:
1631 : #if defined(ENABLE_SOFTFLOAT)
1632 : case TYPE_DBL:
1633 : #endif
1634 191 : d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
1635 191 : M_LLD(d, REG_ITMP1, 0);
1636 191 : break;
1637 : #if !defined(ENABLE_SOFTFLOAT)
1638 : case TYPE_FLT:
1639 2017 : d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1640 2017 : M_FLD(d, REG_ITMP1, 0);
1641 2017 : break;
1642 : case TYPE_DBL:
1643 11 : d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1644 11 : M_DLD(d, REG_ITMP1, 0);
1645 11 : break;
1646 : #endif
1647 : default:
1648 : // Silence compiler warning.
1649 0 : d = 0;
1650 : }
1651 48504 : emit_store_dst(jd, iptr, d);
1652 48504 : break;
1653 : }
1654 : #endif
1655 :
1656 : case ICMD_PUTSTATIC: /* ..., value ==> ... */
1657 :
1658 : #if defined(__I386__)
1659 : // Generate architecture specific instructions.
1660 : codegen_emit_instruction(jd, iptr);
1661 : break;
1662 : #else
1663 : {
1664 : fieldinfo* fi;
1665 : #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1666 21810 : patchref_t* pr = NULL;
1667 : #endif
1668 :
1669 21810 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1670 1332 : unresolved_field* uf = iptr->sx.s23.s3.uf;
1671 1332 : fieldtype = uf->fieldref->parseddesc.fd->type;
1672 1332 : disp = dseg_add_unique_address(cd, 0);
1673 :
1674 : #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1675 : pr =
1676 : #endif
1677 1332 : patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1678 :
1679 1332 : fi = NULL; /* Silence compiler warning */
1680 : }
1681 : else {
1682 20478 : fi = iptr->sx.s23.s3.fmiref->p.field;
1683 20478 : fieldtype = fi->type;
1684 20478 : disp = dseg_add_address(cd, fi->value);
1685 :
1686 20478 : if (!class_is_or_almost_initialized(fi->clazz)) {
1687 : PROFILE_CYCLE_STOP;
1688 0 : patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1689 : PROFILE_CYCLE_START;
1690 : }
1691 : }
1692 :
1693                 : 			// XXX X86_64: Here we had this:
1694 : /* This approach is much faster than moving the field
1695 : address inline into a register. */
1696 :
1697 21810 : M_ALD_DSEG(REG_ITMP1, disp);
1698 :
1699 21810 : switch (fieldtype) {
1700 : case TYPE_ADR:
1701 19085 : s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1702 19085 : M_AST(s1, REG_ITMP1, 0);
1703 19085 : break;
1704 : case TYPE_INT:
1705 : #if defined(ENABLE_SOFTFLOAT)
1706 : case TYPE_FLT:
1707 : #endif
1708 1534 : s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1709 1534 : M_IST(s1, REG_ITMP1, 0);
1710 1534 : break;
1711 : case TYPE_LNG:
1712 : #if defined(ENABLE_SOFTFLOAT)
1713 : case TYPE_DBL:
1714 : #endif
1715 173 : s1 = emit_load_s1(jd, iptr, REG_LTMP23);
1716 173 : M_LST(s1, REG_ITMP1, 0);
1717 173 : break;
1718 : #if !defined(ENABLE_SOFTFLOAT)
1719 : case TYPE_FLT:
1720 1011 : s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1721 1011 : M_FST(s1, REG_ITMP1, 0);
1722 1011 : break;
1723 : case TYPE_DBL:
1724 7 : s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1725 7 : M_DST(s1, REG_ITMP1, 0);
1726 : break;
1727 : #endif
1728 : }
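            : 			// Note: architectures with a patchable memory barrier emit it
            : 			// right after the store, passing either the patcher reference
            : 			// (unresolved field) or the field info (resolved field).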
1729 : #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1730 21810 : codegen_emit_patchable_barrier(iptr, cd, pr, fi);
1731 : #endif
1732 21810 : break;
1733 : }
1734 : #endif
1735 :
1736 : /* branch operations **********************************************/
1737 :
1738 : case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
1739 :
1740                 : 			// We might leave this method, so stop profiling.
1741 : PROFILE_CYCLE_STOP;
1742 :
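            : 			// Note: the exception object is moved into the dedicated
            : 			// exception pointer register before the architecture-specific
            : 			// throw code is emitted.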
1743 18423 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1744 : // XXX Sparc64: We use REG_ITMP2_XPTR here, fix me!
1745 18423 : emit_imove(cd, s1, REG_ITMP1_XPTR);
1746 :
1747 : #ifdef ENABLE_VERIFIER
1748 18423 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1749 15042 : unresolved_class *uc = iptr->sx.s23.s2.uc;
1750 15042 : patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1751 : }
1752 : #endif /* ENABLE_VERIFIER */
1753 :
1754 : // Generate architecture specific instructions.
1755 18423 : codegen_emit_instruction(jd, iptr);
1756 18423 : ALIGNCODENOP;
1757 18423 : break;
1758 :
1759 : case ICMD_GOTO: /* ... ==> ... */
1760 : case ICMD_RET: /* ... ==> ... */
1761 :
1762 : #if defined(ENABLE_SSA)
1763                 : 			// In case of a goto, phi moves have to be inserted
1764                 : 			// before the jump.
1765 : if (ls != NULL) {
1766 : last_cmd_was_goto = true;
1767 : codegen_emit_phi_moves(jd, bptr);
1768 : }
1769 : #endif
1770 34753 : if (iptr->dst.block->type == basicblock::TYPE_EXH)
1771 0 : fixup_exc_handler_interface(jd, iptr->dst.block);
1772 34753 : emit_br(cd, iptr->dst.block);
1773 34753 : ALIGNCODENOP;
1774 34753 : break;
1775 :
1776 : case ICMD_JSR: /* ... ==> ... */
1777 :
1778 17 : assert(iptr->sx.s23.s3.jsrtarget.block->type != basicblock::TYPE_EXH);
1779 17 : emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
1780 17 : ALIGNCODENOP;
1781 17 : break;
1782 :
1783 : case ICMD_IFNULL: /* ..., value ==> ... */
1784 : case ICMD_IFNONNULL:
1785 :
1786 28803 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1787 28803 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
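            : 			// Note: branch directly on the register where the architecture
            : 			// supports it, otherwise test the value and branch on the
            : 			// condition register.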
1788 : #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1789 : emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
1790 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1791 28803 : M_TEST(s1);
1792 28803 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
1793 : #else
1794 : # error Unable to generate code for this configuration!
1795 : #endif
1796 28803 : break;
1797 :
1798 : case ICMD_IFEQ: /* ..., value ==> ... */
1799 : case ICMD_IFNE:
1800 : case ICMD_IFLT:
1801 : case ICMD_IFLE:
1802 : case ICMD_IFGT:
1803 : case ICMD_IFGE:
1804 :
1805                 : 			// XXX Sparc64: int compares must not branch on the
1806                 : 			// register directly, because the register content is
1807                 : 			// not guaranteed to be 32-bit clean. Fix this!
1808 :
1809 57418 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1810 :
1811 : #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1812 : if (iptr->sx.val.i == 0) {
1813 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1814 : emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, s1, BRANCH_OPT_NONE);
1815 : } else {
1816 : // Generate architecture specific instructions.
1817 : codegen_emit_instruction(jd, iptr);
1818 : }
1819 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1820 57418 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1821 57418 : emit_icmp_imm(cd, s1, iptr->sx.val.i);
1822 57418 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
1823 : #else
1824 : # error Unable to generate code for this configuration!
1825 : #endif
1826 57418 : break;
1827 :
1828 : case ICMD_IF_LEQ: /* ..., value ==> ... */
1829 : case ICMD_IF_LNE:
1830 : case ICMD_IF_LLT:
1831 : case ICMD_IF_LGE:
1832 : case ICMD_IF_LGT:
1833 : case ICMD_IF_LLE:
1834 :
1835 295 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1836 :
1837 : // Generate architecture specific instructions.
1838 295 : codegen_emit_instruction(jd, iptr);
1839 295 : break;
1840 :
1841 : case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
1842 : case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
1843 :
1844 2575 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1845 :
1846 2575 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1847 2575 : s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1848 : #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1849 : switch (iptr->opc) {
1850 : case ICMD_IF_ACMPEQ:
1851 : emit_beq(cd, iptr->dst.block, s1, s2);
1852 : break;
1853 : case ICMD_IF_ACMPNE:
1854 : emit_bne(cd, iptr->dst.block, s1, s2);
1855 : break;
1856 : default:
1857 : break;
1858 : }
1859 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1860 2575 : M_ACMP(s1, s2);
1861 2575 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
1862 : #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1863 : M_CMPEQ(s1, s2, REG_ITMP1);
1864 : switch (iptr->opc) {
1865 : case ICMD_IF_ACMPEQ:
1866 : emit_bnez(cd, iptr->dst.block, REG_ITMP1);
1867 : break;
1868 : case ICMD_IF_ACMPNE:
1869 : emit_beqz(cd, iptr->dst.block, REG_ITMP1);
1870 : break;
1871 : default:
1872 : break;
1873 : }
1874 : #else
1875 : # error Unable to generate code for this configuration!
1876 : #endif
1877 2575 : break;
1878 :
1879 : case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
1880 : case ICMD_IF_ICMPNE: /* op1 = target JavaVM pc */
1881 :
1882 4632 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1883 :
1884 : #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1885 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1886 : s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1887 : switch (iptr->opc) {
1888 : case ICMD_IF_ICMPEQ:
1889 : emit_beq(cd, iptr->dst.block, s1, s2);
1890 : break;
1891 : case ICMD_IF_ICMPNE:
1892 : emit_bne(cd, iptr->dst.block, s1, s2);
1893 : break;
1894 : }
1895 : break;
1896 : #else
1897 : /* fall-through */
1898 : #endif
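            : 			// Note: on all other architectures ICMD_IF_ICMPEQ/NE fall
            : 			// through and are handled together with the ordering compares below.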
1899 :
1900 : case ICMD_IF_ICMPLT: /* ..., value, value ==> ... */
1901 : case ICMD_IF_ICMPGT: /* op1 = target JavaVM pc */
1902 : case ICMD_IF_ICMPLE:
1903 : case ICMD_IF_ICMPGE:
1904 :
1905 16840 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1906 :
1907 16840 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1908 16840 : s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1909 : #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1910 : # if defined(__I386__) || defined(__X86_64__)
1911 : // XXX Fix this soon!!!
1912 16840 : M_ICMP(s2, s1);
1913 : # else
1914 : M_ICMP(s1, s2);
1915 : # endif
1916 16840 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
1917 : #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1918 : // Generate architecture specific instructions.
1919 : codegen_emit_instruction(jd, iptr);
1920 : #else
1921 : # error Unable to generate code for this configuration!
1922 : #endif
1923 16840 : break;
1924 :
1925 : case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
1926 : case ICMD_IF_LCMPNE: /* op1 = target JavaVM pc */
1927 : case ICMD_IF_LCMPLT:
1928 : case ICMD_IF_LCMPGT:
1929 : case ICMD_IF_LCMPLE:
1930 : case ICMD_IF_LCMPGE:
1931 :
1932 124 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1933 :
1934 : // Generate architecture specific instructions.
1935 124 : codegen_emit_instruction(jd, iptr);
1936 124 : break;
1937 :
1938 : case ICMD_RETURN: /* ... ==> ... */
1939 :
1940 : REPLACEMENT_POINT_RETURN(cd, iptr);
1941 44342 : goto nowperformreturn;
1942 :
1943 : case ICMD_ARETURN: /* ..., retvalue ==> ... */
1944 :
1945 : REPLACEMENT_POINT_RETURN(cd, iptr);
1946 38748 : s1 = emit_load_s1(jd, iptr, REG_RESULT);
1947 : // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1948 38748 : emit_imove(cd, s1, REG_RESULT);
1949 :
1950 : #ifdef ENABLE_VERIFIER
1951 38748 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1952 : PROFILE_CYCLE_STOP;
1953 3305 : unresolved_class *uc = iptr->sx.s23.s2.uc;
1954 3305 : patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1955 : PROFILE_CYCLE_START;
1956 : }
1957 : #endif /* ENABLE_VERIFIER */
1958 38748 : goto nowperformreturn;
1959 :
1960 : case ICMD_IRETURN: /* ..., retvalue ==> ... */
1961 : #if defined(ENABLE_SOFTFLOAT)
1962 : case ICMD_FRETURN:
1963 : #endif
1964 :
1965 : REPLACEMENT_POINT_RETURN(cd, iptr);
1966 19768 : s1 = emit_load_s1(jd, iptr, REG_RESULT);
1967 : // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1968 19768 : emit_imove(cd, s1, REG_RESULT);
1969 19768 : goto nowperformreturn;
1970 :
1971 : case ICMD_LRETURN: /* ..., retvalue ==> ... */
1972 : #if defined(ENABLE_SOFTFLOAT)
1973 : case ICMD_DRETURN:
1974 : #endif
1975 :
1976 : REPLACEMENT_POINT_RETURN(cd, iptr);
1977 246 : s1 = emit_load_s1(jd, iptr, REG_LRESULT);
1978 : // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1979 246 : emit_lmove(cd, s1, REG_LRESULT);
1980 246 : goto nowperformreturn;
1981 :
1982 : #if !defined(ENABLE_SOFTFLOAT)
1983 : case ICMD_FRETURN: /* ..., retvalue ==> ... */
1984 :
1985 : REPLACEMENT_POINT_RETURN(cd, iptr);
1986 154 : s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1987 : #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1988 : M_CAST_F2I(s1, REG_RESULT);
1989 : #else
1990 154 : emit_fmove(cd, s1, REG_FRESULT);
1991 : #endif
1992 154 : goto nowperformreturn;
1993 :
1994 : case ICMD_DRETURN: /* ..., retvalue ==> ... */
1995 :
1996 : REPLACEMENT_POINT_RETURN(cd, iptr);
1997 9 : s1 = emit_load_s1(jd, iptr, REG_FRESULT);
1998 : #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
1999 : M_CAST_D2L(s1, REG_LRESULT);
2000 : #else
2001 9 : emit_dmove(cd, s1, REG_FRESULT);
2002 : #endif
2003 : goto nowperformreturn;
2004 : #endif
2005 :
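            : 	// Note: common return path shared by all *RETURN opcodes above:
            : 	// trace exit, monitorexit for synchronized methods, profiling stop,
            : 	// and the method epilog.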
2006 : nowperformreturn:
2007 : #if !defined(NDEBUG)
2008 : // Call trace function.
2009 103267 : if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
2010 0 : emit_verbosecall_exit(jd);
2011 : #endif
2012 :
2013 : // Emit code to call monitorexit function.
2014 103267 : if (checksync && code_is_synchronized(code)) {
2015 4574 : emit_monitor_exit(jd, rd->memuse * 8);
2016 : }
2017 :
2018 : // Generate method profiling code.
2019 : PROFILE_CYCLE_STOP;
2020 :
2021 : // Emit code for the method epilog.
2022 103267 : codegen_emit_epilog(jd);
2023 103267 : ALIGNCODENOP;
2024 103267 : break;
2025 :
2026 : case ICMD_BUILTIN: /* ..., [arg1, [arg2 ...]] ==> ... */
2027 :
2028 : REPLACEMENT_POINT_FORGC_BUILTIN(cd, iptr);
2029 :
2030 77258 : bte = iptr->sx.s23.s3.bte;
2031 77258 : md = bte->md;
2032 :
2033 : #if defined(ENABLE_ESCAPE_REASON) && defined(__I386__)
2034 : if (bte->fp == BUILTIN_escape_reason_new) {
2035 : void set_escape_reasons(void *);
2036 : M_ASUB_IMM(8, REG_SP);
2037 : M_MOV_IMM(iptr->escape_reasons, REG_ITMP1);
2038 : M_AST(EDX, REG_SP, 4);
2039 : M_AST(REG_ITMP1, REG_SP, 0);
2040 : M_MOV_IMM(set_escape_reasons, REG_ITMP1);
2041 : M_CALL(REG_ITMP1);
2042 : M_ALD(EDX, REG_SP, 4);
2043 : M_AADD_IMM(8, REG_SP);
2044 : }
2045 : #endif
2046 :
2047 : // Emit the fast-path if available.
2048 77258 : if (bte->emit_fastpath != NULL) {
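            : 				// Note: the fast-path emitter leaves a success flag in
            : 				// register d; a non-zero value means the builtin was handled
            : 				// inline and the slow-path call below is skipped.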
2049 : void (*emit_fastpath)(jitdata* jd, instruction* iptr, int d);
2050 8011 : emit_fastpath = (void (*)(jitdata* jd, instruction* iptr, int d)) bte->emit_fastpath;
2051 :
2052 8011 : assert(md->returntype.type == TYPE_VOID);
2053 8011 : d = REG_ITMP1;
2054 :
2055 : // Actually call the fast-path emitter.
2056 8011 : emit_fastpath(jd, iptr, d);
2057 :
2058 : // If fast-path succeeded, jump to the end of the builtin
2059 : // invocation.
2060 : // XXX Actually the slow-path block below should be moved
2061 : // out of the instruction stream and the jump below should be
2062 : // inverted.
2063 : #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2064 : os::abort("codegen_emit: Implement jump over slow-path for this configuration.");
2065 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2066 8011 : M_TEST(d);
2067 8011 : emit_label_bne(cd, BRANCH_LABEL_10);
2068 : #else
2069 : # error Unable to generate code for this configuration!
2070 : #endif
2071 : }
2072 :
2073 77258 : goto gen_method;
2074 :
2075 : case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
2076 : case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
2077 : case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
2078 : case ICMD_INVOKEINTERFACE:
2079 :
2080 : REPLACEMENT_POINT_INVOKE(cd, iptr);
2081 :
2082 282144 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2083 64232 : unresolved_method* um = iptr->sx.s23.s3.um;
2084 64232 : md = um->methodref->parseddesc.md;
2085 : }
2086 : else {
2087 217912 : methodinfo* lm = iptr->sx.s23.s3.fmiref->p.method;
2088 217912 : md = lm->parseddesc;
2089 : }
2090 :
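            : 	// Note: common call path; ICMD_BUILTIN and all INVOKE* opcodes end
            : 	// up here, with md describing the callee's signature.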
2091 : gen_method:
2092 359402 : i = md->paramcount;
2093 :
2094 : // XXX Check this again!
2095 359402 : MCODECHECK((i << 1) + 64); // PPC
2096 :
2097 : // Copy arguments to registers or stack location.
2098 937960 : for (i = i - 1; i >= 0; i--) {
2099 578558 : var = VAR(iptr->sx.s23.s2.args[i]);
2100 578558 : d = md->params[i].regoff;
2101 :
2102 : // Already pre-allocated?
2103 578558 : if (var->flags & PREALLOC)
2104 310574 : continue;
2105 :
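            : 				// Note: arguments passed in registers are moved with the
            : 				// type-specific move helpers; arguments passed in memory are
            : 				// stored to the stack at the ABI-assigned offset d.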
2106 267984 : if (!md->params[i].inmemory) {
2107 266196 : switch (var->type) {
2108 : case TYPE_ADR:
2109 : case TYPE_INT:
2110 : #if defined(ENABLE_SOFTFLOAT)
2111 : case TYPE_FLT:
2112 : #endif
2113 264586 : s1 = emit_load(jd, iptr, var, d);
2114 264586 : emit_imove(cd, s1, d);
2115 264586 : break;
2116 :
2117 : case TYPE_LNG:
2118 : #if defined(ENABLE_SOFTFLOAT)
2119 : case TYPE_DBL:
2120 : #endif
2121 305 : s1 = emit_load(jd, iptr, var, d);
2122 305 : emit_lmove(cd, s1, d);
2123 305 : break;
2124 :
2125 : #if !defined(ENABLE_SOFTFLOAT)
2126 : case TYPE_FLT:
2127 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2128 1070 : s1 = emit_load(jd, iptr, var, d);
2129 1070 : emit_fmove(cd, s1, d);
2130 : #else
2131 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2132 : M_CAST_F2I(s1, d);
2133 : #endif
2134 1070 : break;
2135 :
2136 : case TYPE_DBL:
2137 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2138 235 : s1 = emit_load(jd, iptr, var, d);
2139 235 : emit_dmove(cd, s1, d);
2140 : #else
2141 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2142 : M_CAST_D2L(s1, d);
2143 : #endif
2144 235 : break;
2145 : #endif
2146 : default:
2147 0 : assert(false);
2148 : break;
2149 : }
2150 : }
2151 : else {
2152 1788 : switch (var->type) {
2153 : case TYPE_ADR:
2154 1400 : s1 = emit_load(jd, iptr, var, REG_ITMP1);
2155 : // XXX Sparc64: Here this actually was:
2156 : // M_STX(s1, REG_SP, JITSTACK + d);
2157 1400 : M_AST(s1, REG_SP, d);
2158 1400 : break;
2159 :
2160 : case TYPE_INT:
2161 : #if defined(ENABLE_SOFTFLOAT)
2162 : case TYPE_FLT:
2163 : #endif
2164 : #if SIZEOF_VOID_P == 4
2165 : s1 = emit_load(jd, iptr, var, REG_ITMP1);
2166 : M_IST(s1, REG_SP, d);
2167 : break;
2168 : #else
2169 : /* fall-through */
2170 : #endif
2171 :
2172 : case TYPE_LNG:
2173 : #if defined(ENABLE_SOFTFLOAT)
2174 : case TYPE_DBL:
2175 : #endif
2176 343 : s1 = emit_load(jd, iptr, var, REG_LTMP12);
2177 : // XXX Sparc64: Here this actually was:
2178 : // M_STX(s1, REG_SP, JITSTACK + d);
2179 343 : M_LST(s1, REG_SP, d);
2180 343 : break;
2181 :
2182 : #if !defined(ENABLE_SOFTFLOAT)
2183 : case TYPE_FLT:
2184 23 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2185 23 : M_FST(s1, REG_SP, d);
2186 23 : break;
2187 :
2188 : case TYPE_DBL:
2189 22 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2190 : // XXX Sparc64: Here this actually was:
2191 : // M_DST(s1, REG_SP, JITSTACK + d);
2192 22 : M_DST(s1, REG_SP, d);
2193 22 : break;
2194 : #endif
2195 : default:
2196 0 : assert(false);
2197 : break;
2198 : }
2199 : }
2200 : }
2201 :
2202 : // Generate method profiling code.
2203 : PROFILE_CYCLE_STOP;
2204 :
2205 : // Generate architecture specific instructions.
2206 359402 : codegen_emit_instruction(jd, iptr);
2207 :
2208 : // Generate method profiling code.
2209 : PROFILE_CYCLE_START;
2210 :
2211 : // Store size of call code in replacement point.
2212 : REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr);
2213 : REPLACEMENT_POINT_FORGC_BUILTIN_RETURN(cd, iptr);
2214 :
2215 : // Recompute the procedure vector (PV).
2216 359402 : emit_recompute_pv(cd);
2217 :
2218 : // Store return value.
2219 : #if defined(ENABLE_SSA)
2220 : if ((ls == NULL) /* || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) */ ||
2221 : (ls->lifetime[iptr->dst.varindex].type != jitdata::UNUSED))
2222 : /* a "living" stackslot */
2223 : #endif
2224 359402 : switch (md->returntype.type) {
2225 : case TYPE_INT:
2226 : case TYPE_ADR:
2227 : #if defined(ENABLE_SOFTFLOAT)
2228 : case TYPE_FLT:
2229 : #endif
2230 239883 : s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2231 : // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2232 239883 : emit_imove(cd, REG_RESULT, s1);
2233 239883 : emit_store_dst(jd, iptr, s1);
2234 239883 : break;
2235 :
2236 : case TYPE_LNG:
2237 : #if defined(ENABLE_SOFTFLOAT)
2238 : case TYPE_DBL:
2239 : #endif
2240 476 : s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
2241 : // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2242 476 : emit_lmove(cd, REG_LRESULT, s1);
2243 476 : emit_store_dst(jd, iptr, s1);
2244 476 : break;
2245 :
2246 : #if !defined(ENABLE_SOFTFLOAT)
2247 : case TYPE_FLT:
2248 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2249 1198 : s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2250 1198 : emit_fmove(cd, REG_FRESULT, s1);
2251 : #else
2252 : s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2253 : M_CAST_I2F(REG_RESULT, s1);
2254 : #endif
2255 1198 : emit_store_dst(jd, iptr, s1);
2256 1198 : break;
2257 :
2258 : case TYPE_DBL:
2259 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2260 51 : s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2261 51 : emit_dmove(cd, REG_FRESULT, s1);
2262 : #else
2263 : s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2264 : M_CAST_L2D(REG_LRESULT, s1);
2265 : #endif
2266 51 : emit_store_dst(jd, iptr, s1);
2267 51 : break;
2268 : #endif
2269 :
2270 : case TYPE_VOID:
2271 117794 : break;
2272 : default:
2273 0 : assert(false);
2274 : break;
2275 : }
2276 :
2277 : // If we are emitting a fast-path block, this is the label for
2278 : // successful fast-path execution.
2279 359402 : if ((iptr->opc == ICMD_BUILTIN) && (bte->emit_fastpath != NULL)) {
2280 8011 : emit_label(cd, BRANCH_LABEL_10);
2281 : }
2282 :
2283 359402 : break;
2284 :
2285 : case ICMD_TABLESWITCH: /* ..., index ==> ... */
2286 :
2287 : // Generate architecture specific instructions.
2288 72 : codegen_emit_instruction(jd, iptr);
2289 72 : break;
2290 :
2291 : case ICMD_LOOKUPSWITCH: /* ..., key ==> ... */
2292 :
2293 60 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2294 60 : i = iptr->sx.s23.s2.lookupcount;
2295 :
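            : 			// Note: a lookupswitch is emitted as a linear chain of
            : 			// compare-and-branch pairs, one per table entry, followed by an
            : 			// unconditional branch to the default target; the MCODECHECKs
            : 			// below are per-architecture size estimates for that chain.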
2296 : // XXX Again we need to check this
2297 60 : MCODECHECK((i<<2)+8); // Alpha, ARM, i386, MIPS, Sparc64
2298 60 : MCODECHECK((i<<3)+8); // PPC64
2299 60 : MCODECHECK(8 + ((7 + 6) * i) + 5); // X86_64, S390
2300 :
2301 : // Compare keys.
2302 540 : for (lookup_target_t* lookup = iptr->dst.lookup; i > 0; ++lookup, --i) {
2303 : #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2304 480 : emit_icmp_imm(cd, s1, lookup->value);
2305 480 : emit_beq(cd, lookup->target.block);
2306 : #elif SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2307 : ICONST(REG_ITMP2, lookup->value);
2308 : emit_beq(cd, lookup->target.block, s1, REG_ITMP2);
2309 : #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2310 : emit_icmpeq_imm(cd, s1, lookup->value, REG_ITMP2);
2311 : emit_bnez(cd, lookup->target.block, REG_ITMP2);
2312 : #else
2313 : # error Unable to generate code for this configuration!
2314 : #endif
2315 : }
2316 :
2317 : // Default branch.
2318 60 : emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
2319 60 : ALIGNCODENOP;
2320 60 : break;
2321 :
2322 : case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
2323 : case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
2324 : case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
2325 :
2326 : // Generate architecture specific instructions.
2327 10473 : codegen_emit_instruction(jd, iptr);
2328 10473 : break;
2329 :
2330 : default:
2331 : exceptions_throw_internalerror("Unknown ICMD %d during code generation",
2332 0 : iptr->opc);
2333 0 : return false;
2334 :
2335 : } // the big switch
2336 :
2337 : } // for all instructions
2338 :
2339 : #if defined(ENABLE_SSA)
2340                 : 	// Because of edge splitting, a block containing phi moves can only
2341                 : 	// end in a goto, never in any other jump or branch instruction.
2342 : if (ls != NULL) {
2343 : if (!last_cmd_was_goto)
2344 : codegen_emit_phi_moves(jd, bptr);
2345 : }
2346 : #endif
2347 :
2348 : #if defined(__I386__) || defined(__MIPS__) || defined(__S390__) || defined(__SPARC_64__) || defined(__X86_64__)
2349 : // XXX Again!!!
2350 : /* XXX require a lower number? */
2351 319280 : MCODECHECK(64); // I386, MIPS, Sparc64
2352 319280 : MCODECHECK(512); // S390, X86_64
2353 :
2354 : /* XXX We can remove that when we don't use UD2 anymore on i386
2355 : and x86_64. */
2356 :
2357                 : 	/* At the end of a basic block we may have to append some nops,
2358                 : 	   because the patcher stub calling code might be longer than the
2359                 : 	   actual instruction, so that code patching does not unintentionally
2360                 : 	   change the following block. */
2361 :
2362 319280 : if (cd->mcodeptr < cd->lastmcodeptr) {
2363 0 : while (cd->mcodeptr < cd->lastmcodeptr) {
2364 0 : M_NOP;
2365 : }
2366 : }
2367 : #endif
2368 :
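            : 		// Note: if the next block is an exception handler, emit the fixup
            : 		// code that establishes the handler's entry interface (presumably
            : 		// moving the exception object to where the handler expects it).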
2369 319280 : if (bptr->next && bptr->next->type == basicblock::TYPE_EXH)
2370 9964 : fixup_exc_handler_interface(jd, bptr->next);
2371 :
2372 : } // for all basic blocks
2373 :
2374 : // Generate traps.
2375 86399 : emit_patcher_traps(jd);
2376 :
2377 : // Everything's ok.
2378 86399 : return true;
2379 : }
2380 :
2381 :
2382 : /* codegen_emit_phi_moves ****************************************************
2383 :
2384 : Emits phi moves at the end of the basicblock.
2385 :
2386 : *******************************************************************************/
2387 :
2388 : #if defined(ENABLE_SSA)
2389 : void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
2390 : {
2391 : int lt_d,lt_s,i;
2392 : lsradata *ls;
2393 : codegendata *cd;
2394 : varinfo *s, *d;
2395 : instruction tmp_i;
2396 :
2397 : cd = jd->cd;
2398 : ls = jd->ls;
2399 :
2400 : MCODECHECK(512);
2401 :
2402                 : 	/* Moves from phi functions with the highest indices have to be */
2403                 : 	/* inserted first, since this is the order used for             */
2404                 : 	/* conflict resolution.                                         */
2405 :
2406 : for(i = ls->num_phi_moves[bptr->nr] - 1; i >= 0 ; i--) {
2407 : lt_d = ls->phi_moves[bptr->nr][i][0];
2408 : lt_s = ls->phi_moves[bptr->nr][i][1];
2409 : #if defined(SSA_DEBUG_VERBOSE)
2410 : if (compileverbose)
2411 : printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
2412 : #endif
2413 : if (lt_s == jitdata::UNUSED) {
2414 : #if defined(SSA_DEBUG_VERBOSE)
2415 : if (compileverbose)
2416 : printf(" ... not processed \n");
2417 : #endif
2418 : continue;
2419 : }
2420 :
2421 : d = VAR(ls->lifetime[lt_d].v_index);
2422 : s = VAR(ls->lifetime[lt_s].v_index);
2423 :
2424 :
2425 : if (d->type == Type(-1)) {
2426 : #if defined(SSA_DEBUG_VERBOSE)
2427 : if (compileverbose)
2428                 : 				printf("...returning - phi lifetimes were joined\n");
2429 : #endif
2430 : continue;
2431 : }
2432 :
2433 : if (s->type == Type(-1)) {
2434 : #if defined(SSA_DEBUG_VERBOSE)
2435 : if (compileverbose)
2436                 : 				printf("...returning - phi lifetimes were joined\n");
2437 : #endif
2438 : continue;
2439 : }
2440 :
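            : 		/* Build a synthetic NOP instruction that merely carries the     */
            : 		/* source and destination variable indices; emit_copy generates  */
            : 		/* the actual move.                                               */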
2441 : tmp_i.opc = ICMD_NOP;
2442 : tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
2443 : tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
2444 : emit_copy(jd, &tmp_i);
2445 :
2446 : #if defined(SSA_DEBUG_VERBOSE)
2447 : if (compileverbose) {
2448 : if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
2449 : /* mem -> mem */
2450 : printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2451 : }
2452 : else if (IS_INMEMORY(s->flags)) {
2453 : /* mem -> reg */
2454 : printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2455 : }
2456 : else if (IS_INMEMORY(d->flags)) {
2457 : /* reg -> mem */
2458 : printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2459 : }
2460 : else {
2461 : /* reg -> reg */
2462 : printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2463 : }
2464 : printf("\n");
2465 : }
2466 : #endif /* defined(SSA_DEBUG_VERBOSE) */
2467 : }
2468 : }
2469 : #endif /* defined(ENABLE_SSA) */
2470 :
2471 :
2472 : /* REMOVEME When we have exception handling in C. */
2473 :
2474 0 : void *md_asm_codegen_get_pv_from_pc(void *ra)
2475 : {
2476 0 : return md_codegen_get_pv_from_pc(ra);
2477 : }
2478 :
2479 :
2480 : /*
2481 : * These are local overrides for various environment variables in Emacs.
2482 : * Please do not remove this and leave it at the end of the file, where
2483 : * Emacs will automagically detect them.
2484 : * ---------------------------------------------------------------------
2485 : * Local variables:
2486 : * mode: c++
2487 : * indent-tabs-mode: t
2488 : * c-basic-offset: 4
2489 : * tab-width: 4
2490 : * End:
2491 : * vim:noexpandtab:sw=4:ts=4:
2492 : */