Line data Source code
1 : /* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
2 :
3 : Copyright (C) 1996-2013
4 : CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 : Copyright (C) 2009 Theobroma Systems Ltd.
6 :
7 : This file is part of CACAO.
8 :
9 : This program is free software; you can redistribute it and/or
10 : modify it under the terms of the GNU General Public License as
11 : published by the Free Software Foundation; either version 2, or (at
12 : your option) any later version.
13 :
14 : This program is distributed in the hope that it will be useful, but
15 : WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 : General Public License for more details.
18 :
19 : You should have received a copy of the GNU General Public License
20 : along with this program; if not, write to the Free Software
21 : Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22 : 02110-1301, USA.
23 :
24 : All functions assume the following code area / data area layout:
25 :
26 : +-----------+
27 : | |
28 : | code area | code area grows to higher addresses
29 : | |
30 : +-----------+ <-- start of procedure
31 : | |
32 : | data area | data area grows to lower addresses
33 : | |
34 : +-----------+
35 :
36 : The functions first write into a temporary code/data area allocated by
37 : "codegen_init". "codegen_finish" copies the code and data area into permanent
38 : memory. All functions writing values into the data area return the offset
39 : relative the begin of the code area (start of procedure).
40 :
41 : */
42 :
43 :
44 : #include "config.h"
45 :
46 : #include <cassert>
47 : #include <cstring>
48 :
49 : #include "vm/types.hpp"
50 :
51 : #include "codegen.hpp"
52 : #include "md.hpp"
53 : #include "md-abi.hpp"
54 :
55 : #include "mm/codememory.hpp"
56 : #include "mm/memory.hpp"
57 :
58 : #include "toolbox/avl.hpp"
59 : #include "toolbox/list.hpp"
60 : #include "toolbox/logging.hpp"
61 :
62 : #include "native/llni.hpp"
63 : #include "native/localref.hpp"
64 : #include "native/native.hpp"
65 :
66 : #include "vm/descriptor.hpp"
67 : #include "vm/exceptions.hpp"
68 : #include "vm/field.hpp"
69 : #include "vm/options.hpp"
70 : #include "vm/statistics.hpp"
71 :
72 : #include "vm/jit/abi.hpp"
73 : #include "vm/jit/code.hpp"
74 : #include "vm/jit/codegen-common.hpp"
75 :
76 : #include "vm/jit/builtin.hpp"
77 : #include "vm/jit/dseg.hpp"
78 : #include "vm/jit/disass.hpp"
79 : #include "vm/jit/exceptiontable.hpp"
80 : #include "vm/jit/emit-common.hpp"
81 : #include "vm/jit/jit.hpp"
82 : #include "vm/jit/linenumbertable.hpp"
83 : #include "vm/jit/methodheader.hpp"
84 : #include "vm/jit/methodtree.hpp"
85 : #include "vm/jit/patcher-common.hpp"
86 : #include "vm/jit/replace.hpp"
87 : #include "vm/jit/show.hpp"
88 : #include "vm/jit/stacktrace.hpp"
89 : #include "vm/jit/stubs.hpp"
90 : #include "vm/jit/trace.hpp"
91 :
92 : #include "vm/jit/optimizing/profile.hpp"
93 :
94 : #if defined(ENABLE_SSA)
95 : # include "vm/jit/optimizing/lsra.hpp"
96 : # include "vm/jit/optimizing/ssa.hpp"
97 : #elif defined(ENABLE_LSRA)
98 : # include "vm/jit/allocator/lsra.hpp"
99 : #endif
100 :
101 : #if defined(ENABLE_INTRP)
102 : #include "vm/jit/intrp/intrp.h"
103 : #endif
104 :
105 :
// Statistics counters; these macros expand to real counters only when
// statistics support (ENABLE_STATISTICS) is compiled in.
STAT_REGISTER_VAR(int,count_branches_unresolved,0,"unresolved branches","unresolved branches")
STAT_DECLARE_GROUP(function_call_stat)
STAT_REGISTER_GROUP_VAR(u8,count_calls_java_to_native,0,"calls java to native","java-to-native calls",function_call_stat)

STAT_REGISTER_GROUP(memory_stat,"mem. stat.","Memory usage")
STAT_REGISTER_SUM_SUBGROUP(code_data_stat,"code data","Code and data usage",memory_stat)
STAT_REGISTER_GROUP_VAR(int,count_code_len,0,"code len","code length",code_data_stat)
STAT_REGISTER_GROUP_VAR(int,count_data_len,0,"data len","data length",code_data_stat)

// Forward declaration; only pointers to methodinfo are used below.
struct methodinfo;

using namespace cacao;
118 :
119 :
120 : /* codegen_init ****************************************************************
121 :
122 : TODO
123 :
124 : *******************************************************************************/
125 :
/* One-time initialization of the code generator: pre-generates the
   AbstractMethodError stub used for unimplemented abstract methods. */
void codegen_init(void)
{
	AbstractMethodErrorStub::generate();
}
130 :
131 :
132 : /* codegen_setup ***************************************************************
133 :
134 : Allocates and initialises code area, data area and references.
135 :
136 : *******************************************************************************/
137 :
138 105324 : void codegen_setup(jitdata *jd)
139 : {
140 : //methodinfo *m;
141 : codegendata *cd;
142 :
143 : /* get required compiler data */
144 :
145 : //m = jd->m;
146 105324 : cd = jd->cd;
147 :
148 : /* initialize members */
149 :
150 : // Set flags as requested.
151 105324 : if (opt_AlwaysEmitLongBranches) {
152 0 : cd->flags = CODEGENDATA_FLAG_LONGBRANCHES;
153 : }
154 : else {
155 105324 : cd->flags = 0;
156 : }
157 :
158 105324 : cd->mcodebase = (u1*) DumpMemory::allocate(MCODEINITSIZE);
159 105324 : cd->mcodeend = cd->mcodebase + MCODEINITSIZE;
160 105324 : cd->mcodesize = MCODEINITSIZE;
161 :
162 : /* initialize mcode variables */
163 :
164 105324 : cd->mcodeptr = cd->mcodebase;
165 105324 : cd->lastmcodeptr = cd->mcodebase;
166 :
167 : #if defined(ENABLE_INTRP)
168 : /* native dynamic superinstructions variables */
169 :
170 : if (opt_intrp) {
171 : cd->ncodebase = (u1*) DumpMemory::allocate(NCODEINITSIZE);
172 : cd->ncodesize = NCODEINITSIZE;
173 :
174 : /* initialize ncode variables */
175 :
176 : cd->ncodeptr = cd->ncodebase;
177 :
178 : cd->lastinstwithoutdispatch = ~0; /* no inst without dispatch */
179 : cd->superstarts = NULL;
180 : }
181 : #endif
182 :
183 105324 : cd->dseg = NULL;
184 105324 : cd->dseglen = 0;
185 :
186 105324 : cd->jumpreferences = NULL;
187 :
188 : #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
189 105324 : cd->datareferences = NULL;
190 : #endif
191 :
192 105324 : cd->brancheslabel = new DumpList<branch_label_ref_t*>();
193 105324 : cd->linenumbers = new DumpList<Linenumber>();
194 105324 : }
195 :
196 :
197 : /* codegen_reset ***************************************************************
198 :
199 : Resets the codegen data structure so we can recompile the method.
200 :
201 : *******************************************************************************/
202 :
203 0 : static void codegen_reset(jitdata *jd)
204 : {
205 : codeinfo *code;
206 : codegendata *cd;
207 : basicblock *bptr;
208 :
209 : /* get required compiler data */
210 :
211 0 : code = jd->code;
212 0 : cd = jd->cd;
213 :
214 : /* reset error flag */
215 :
216 0 : cd->flags &= ~CODEGENDATA_FLAG_ERROR;
217 :
218 : /* reset some members, we reuse the code memory already allocated
219 : as this should have almost the correct size */
220 :
221 0 : cd->mcodeptr = cd->mcodebase;
222 0 : cd->lastmcodeptr = cd->mcodebase;
223 :
224 0 : cd->dseg = NULL;
225 0 : cd->dseglen = 0;
226 :
227 0 : cd->jumpreferences = NULL;
228 :
229 : #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
230 0 : cd->datareferences = NULL;
231 : #endif
232 :
233 0 : cd->brancheslabel = new DumpList<branch_label_ref_t*>();
234 0 : cd->linenumbers = new DumpList<Linenumber>();
235 :
236 : /* We need to clear the mpc and the branch references from all
237 : basic blocks as they will definitely change. */
238 :
239 0 : for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
240 0 : bptr->mpc = -1;
241 0 : bptr->branchrefs = NULL;
242 : }
243 :
244 : /* We need to clear all the patcher references from the codeinfo
245 : since they all will be regenerated */
246 :
247 0 : patcher_list_reset(code);
248 :
249 : #if defined(ENABLE_REPLACEMENT)
250 : code->rplpoints = NULL;
251 : code->rplpointcount = 0;
252 : code->regalloc = NULL;
253 : code->regalloccount = 0;
254 : code->globalcount = 0;
255 : #endif
256 0 : }
257 :
258 :
259 : /* codegen_generate ************************************************************
260 :
261 : Generates the code for the currently compiled method.
262 :
263 : *******************************************************************************/
264 :
265 86399 : bool codegen_generate(jitdata *jd)
266 : {
267 : codegendata *cd;
268 :
269 : /* get required compiler data */
270 :
271 86399 : cd = jd->cd;
272 :
273 : /* call the machine-dependent code generation function */
274 :
275 86399 : if (!codegen_emit(jd))
276 0 : return false;
277 :
278 : /* check for an error */
279 :
280 86399 : if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
281 : /* check for long-branches flag, if it is set we recompile the
282 : method */
283 :
284 : #if !defined(NDEBUG)
285 0 : if (compileverbose)
286 0 : log_message_method("Re-generating code: ", jd->m);
287 : #endif
288 :
289 : /* XXX maybe we should tag long-branches-methods for recompilation */
290 :
291 0 : if (CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)) {
292 : /* we have to reset the codegendata structure first */
293 :
294 0 : codegen_reset(jd);
295 :
296 : /* and restart the compiler run */
297 :
298 0 : if (!codegen_emit(jd))
299 0 : return false;
300 : }
301 : else {
302 0 : os::abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
303 : }
304 :
305 : #if !defined(NDEBUG)
306 0 : if (compileverbose)
307 0 : log_message_method("Re-generating code done: ", jd->m);
308 : #endif
309 : }
310 :
311 : /* reallocate the memory and finish the code generation */
312 :
313 86399 : codegen_finish(jd);
314 :
315 : /* everything's ok */
316 :
317 86399 : return true;
318 : }
319 :
320 :
321 : /* codegen_close ***************************************************************
322 :
323 : TODO
324 :
325 : *******************************************************************************/
326 :
/* Shuts down the code generator.  Currently a no-op. */
void codegen_close(void)
{
	/* TODO: release avl tree on i386 and x86_64 */
}
331 :
332 :
333 : /* codegen_increase ************************************************************
334 :
335 : Doubles code area.
336 :
337 : *******************************************************************************/
338 :
339 11 : void codegen_increase(codegendata *cd)
340 : {
341 : u1 *oldmcodebase;
342 :
343 : /* save old mcodebase pointer */
344 :
345 11 : oldmcodebase = cd->mcodebase;
346 :
347 : /* reallocate to new, doubled memory */
348 :
349 : cd->mcodebase = (u1*) DumpMemory::reallocate(cd->mcodebase,
350 : cd->mcodesize,
351 11 : cd->mcodesize * 2);
352 11 : cd->mcodesize *= 2;
353 11 : cd->mcodeend = cd->mcodebase + cd->mcodesize;
354 :
355 : /* set new mcodeptr */
356 :
357 11 : cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
358 :
359 : #if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(ENABLE_INTRP) \
360 : || defined(__SPARC_64__)
361 : /* adjust the pointer to the last patcher position */
362 :
363 11 : if (cd->lastmcodeptr != NULL)
364 11 : cd->lastmcodeptr = cd->mcodebase + (cd->lastmcodeptr - oldmcodebase);
365 : #endif
366 11 : }
367 :
368 :
369 : /* codegen_ncode_increase ******************************************************
370 :
371 : Doubles code area.
372 :
373 : *******************************************************************************/
374 :
#if defined(ENABLE_INTRP)
u1 *codegen_ncode_increase(codegendata *cd, u1 *ncodeptr)
{
	/* Remember the old base so the passed-in pointer can be rebased. */

	u1 *base_before = cd->ncodebase;

	/* Reallocate to new, doubled memory. */

	cd->ncodebase = DMREALLOC(cd->ncodebase,
	                          u1,
	                          cd->ncodesize,
	                          cd->ncodesize * 2);
	cd->ncodesize *= 2;

	/* Translate the given pointer into the new area and return it. */

	return cd->ncodebase + (ncodeptr - base_before);
}
#endif
397 :
398 :
399 : /* codegen_add_branch_ref ******************************************************
400 :
401 : Prepends an branch to the list.
402 :
403 : *******************************************************************************/
404 :
405 128968 : void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
406 : {
407 : branchref *br;
408 : s4 branchmpc;
409 :
410 : STATISTICS(count_branches_unresolved++);
411 :
412 : /* calculate the mpc of the branch instruction */
413 :
414 128968 : branchmpc = cd->mcodeptr - cd->mcodebase;
415 :
416 128968 : br = (branchref*) DumpMemory::allocate(sizeof(branchref));
417 :
418 128968 : br->branchmpc = branchmpc;
419 128968 : br->condition = condition;
420 128968 : br->reg = reg;
421 128968 : br->options = options;
422 128968 : br->next = target->branchrefs;
423 :
424 128968 : target->branchrefs = br;
425 128968 : }
426 :
427 :
428 : /* codegen_resolve_branchrefs **************************************************
429 :
430 : Resolves and patches the branch references of a given basic block.
431 :
432 : *******************************************************************************/
433 :
434 319280 : void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
435 : {
436 : branchref *br;
437 : u1 *mcodeptr;
438 :
439 : /* Save the mcodeptr because in the branch emitting functions
440 : we generate code somewhere inside already generated code,
441 : but we're still in the actual code generation phase. */
442 :
443 319280 : mcodeptr = cd->mcodeptr;
444 :
445 : /* just to make sure */
446 :
447 319280 : assert(bptr->mpc >= 0);
448 :
449 448248 : for (br = bptr->branchrefs; br != NULL; br = br->next) {
450 : /* temporary set the mcodeptr */
451 :
452 128968 : cd->mcodeptr = cd->mcodebase + br->branchmpc;
453 :
454 : /* emit_bccz and emit_branch emit the correct code, even if we
455 : pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
456 :
457 128968 : emit_bccz(cd, bptr, br->condition, br->reg, br->options);
458 : }
459 :
460 : /* restore mcodeptr */
461 :
462 319280 : cd->mcodeptr = mcodeptr;
463 319280 : }
464 :
465 :
466 : /* codegen_branch_label_add ****************************************************
467 :
468 : Append an branch to the label-branch list.
469 :
470 : *******************************************************************************/
471 :
472 29858 : void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
473 : {
474 : // Calculate the current mpc.
475 29858 : int32_t mpc = cd->mcodeptr - cd->mcodebase;
476 :
477 29858 : branch_label_ref_t* br = (branch_label_ref_t*) DumpMemory::allocate(sizeof(branch_label_ref_t));
478 :
479 29858 : br->mpc = mpc;
480 29858 : br->label = label;
481 29858 : br->condition = condition;
482 29858 : br->reg = reg;
483 29858 : br->options = options;
484 :
485 : // Add the branch to the list.
486 29858 : cd->brancheslabel->push_back(br);
487 29858 : }
488 :
489 :
490 : /* codegen_set_replacement_point_notrap ****************************************
491 :
492 : Record the position of a non-trappable replacement point.
493 :
494 : *******************************************************************************/
495 :
#if defined(ENABLE_REPLACEMENT)
/* Records the position of a non-trappable replacement point and
   advances to the next one.  In debug builds the expected point type
   is cross-checked. */
#if !defined(NDEBUG)
void codegen_set_replacement_point_notrap(codegendata *cd, s4 type)
#else
void codegen_set_replacement_point_notrap(codegendata *cd)
#endif
{
	assert(cd->replacementpoint);
	assert(cd->replacementpoint->type == type);
	assert(cd->replacementpoint->flags & rplpoint::FLAG_NOTRAP);

	/* Store the current code offset; it is turned into an absolute
	   address during codegen_finish (replacement point resolving). */

	cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);

	cd->replacementpoint++;
}
#endif /* defined(ENABLE_REPLACEMENT) */
512 :
513 :
514 : /* codegen_set_replacement_point ***********************************************
515 :
516 : Record the position of a trappable replacement point.
517 :
518 : *******************************************************************************/
519 :
#if defined(ENABLE_REPLACEMENT)
/* Records the position of a trappable replacement point and advances
   to the next one.  In debug builds the expected point type is
   cross-checked. */
#if !defined(NDEBUG)
void codegen_set_replacement_point(codegendata *cd, s4 type)
#else
void codegen_set_replacement_point(codegendata *cd)
#endif
{
	assert(cd->replacementpoint);
	assert(cd->replacementpoint->type == type);
	assert(!(cd->replacementpoint->flags & rplpoint::FLAG_NOTRAP));

	/* Store the current code offset; it is turned into an absolute
	   address during codegen_finish (replacement point resolving). */

	cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);

	cd->replacementpoint++;

#if !defined(NDEBUG)
	/* XXX actually we should use an own REPLACEMENT_NOPS here! */
	if (opt_TestReplacement)
		PATCHER_NOPS;
#endif

	/* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */

	/* Reserve room for the patcher call that may be placed here. */

	cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
}
#endif /* defined(ENABLE_REPLACEMENT) */
546 :
547 :
548 : /* codegen_finish **************************************************************
549 :
550 : Finishes the code generation. A new memory, large enough for both
551 : data and code, is allocated and data and code are copied together
552 : to their final layout, unresolved jumps are resolved, ...
553 :
554 : *******************************************************************************/
555 :
void codegen_finish(jitdata *jd)
{
	s4       mcodelen;
#if defined(ENABLE_INTRP)
	s4       ncodelen;
#endif
	s4       alignedmcodelen;
	jumpref *jr;
	u1      *epoint;
	s4       alignedlen;

	/* Get required compiler data. */

	codeinfo*     code = jd->code;
	codegendata*  cd   = jd->cd;
	registerdata* rd   = jd->rd;

	/* prevent compiler warning */

#if defined(ENABLE_INTRP)
	ncodelen = 0;
#endif

	/* calculate the code length */

	mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);

	STATISTICS(count_code_len += mcodelen);
	STATISTICS(count_data_len += cd->dseglen);

	alignedmcodelen = MEMORY_ALIGN(mcodelen, MAX_ALIGN);

#if defined(ENABLE_INTRP)
	if (opt_intrp)
		ncodelen = cd->ncodeptr - cd->ncodebase;
	else {
		ncodelen = 0; /* avoid compiler warning */
	}
#endif

	/* Align the data segment as well; the final memory holds
	   [data segment | aligned code (| ncode)]. */

	cd->dseglen = MEMORY_ALIGN(cd->dseglen, MAX_ALIGN);
	alignedlen = alignedmcodelen + cd->dseglen;

#if defined(ENABLE_INTRP)
	if (opt_intrp) {
		alignedlen += ncodelen;
	}
#endif

	/* allocate new memory */

	code->mcodelength = mcodelen + cd->dseglen;
	code->mcode       = CNEW(u1, alignedlen);

	/* Set the entrypoint of the method: the data segment grows to
	   lower addresses, so the entrypoint sits right after it. */

	assert(code->entrypoint == NULL);
	code->entrypoint = epoint = (code->mcode + cd->dseglen);

	/* fill the data segment (code->entrypoint must already be set!) */

	dseg_finish(jd);

	/* copy code to the new location */

	MCOPY((void *) code->entrypoint, cd->mcodebase, u1, mcodelen);

#if defined(ENABLE_INTRP)
	/* relocate native dynamic superinstruction code (if any) */

	if (opt_intrp) {
		cd->mcodebase = code->entrypoint;

		if (ncodelen > 0) {
			u1 *ncodebase = code->mcode + cd->dseglen + alignedmcodelen;

			MCOPY((void *) ncodebase, cd->ncodebase, u1, ncodelen);

			/* flush the instruction and data caches */

			md_cacheflush(ncodebase, ncodelen);

			/* set some cd variables for dynamic_super_rerwite */

			cd->ncodebase = ncodebase;

		} else {
			cd->ncodebase = NULL;
		}

		dynamic_super_rewrite(cd);
	}
#endif

	/* Fill runtime information about generated code. */

	code->stackframesize     = cd->stackframesize;
	code->synchronizedoffset = rd->memuse * 8;
	code->savedintcount      = INT_SAV_CNT - rd->savintreguse;
	code->savedfltcount      = FLT_SAV_CNT - rd->savfltreguse;

	/* Create the exception table. */

	exceptiontable_create(jd);

	/* Create the linenumber table. */

	code->linenumbertable = new LinenumberTable(jd);

	/* Jump table resolving: patch each table slot with the absolute
	   address of its target block. */

	for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
		*((functionptr *) ((ptrint) epoint + jr->tablepos)) =
			(functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);

	/* patcher resolving */

	patcher_resolve(jd->code);

#if defined(ENABLE_REPLACEMENT)
	/* Replacement point resolving: turn the recorded code offsets
	   into absolute addresses. */
	{
		int i;
		rplpoint *rp;

		rp = code->rplpoints;
		for (i=0; i<code->rplpointcount; ++i, ++rp) {
			rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
		}
	}
#endif /* defined(ENABLE_REPLACEMENT) */

	/* Insert method into methodtree to find the entrypoint. */

	methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);

#if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
	/* resolve data segment references */

	dseg_resolve_datareferences(jd);
#endif

	/* flush the instruction and data caches */

	md_cacheflush(code->mcode, code->mcodelength);
}
702 :
namespace {
/**
 * Outsource stack adjustment logic to reduce in-code `#if defined`s.
 *
 * Each architecture-specific variant maps the native-stub stack
 * pointer and frame size onto the locations of the data SP, the Java
 * SP and the saved argument/return-value registers (see the stackframe
 * layout sketch in the codegen_start_native_call header comment).
 *
 * @note should be moved to a backend code unit.
 */
#if defined(__AARCH64__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
	uint8_t *get_javasp() const { return sp + framesize; }
	uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
};
#elif defined(__ALPHA__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
	uint8_t *get_javasp() const { return sp + framesize; }
	uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
};
#elif defined(__ARM__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
	uint8_t *get_javasp() const { return sp + framesize; }
	uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
};
#elif defined(__I386__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize; }
	/* On i386 the return address sits between data SP and Java SP. */
	uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
	uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 2 * SIZEOF_VOID_P); }
};
#elif defined(__MIPS__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	/* MIPS always uses 8 bytes to store the RA */
	uint8_t *get_datasp() const { return sp + framesize - 8; }
	uint8_t *get_javasp() const { return sp + framesize; }
	uint64_t *get_arg_regs() const {
# if SIZEOF_VOID_P == 8
		return (uint64_t *) sp;
# else
		return (uint64_t *) (sp + 5 * 8);
# endif
	}
	uint64_t *get_ret_regs() const {
# if SIZEOF_VOID_P == 8
		return (uint64_t *) sp;
# else
		return (uint64_t *) (sp + 1 * 8);
# endif
	}
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
};
#elif defined(__S390__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize - 8; }
	uint8_t *get_javasp() const { return sp + framesize; }
	uint64_t *get_arg_regs() const { return (uint64_t *) (sp + 96); }
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 96); }
};
#elif defined(__POWERPC__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize; }
	uint8_t *get_javasp() const { return sp + framesize; }
	uint64_t *get_arg_regs() const {
		return (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
	}
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const {
		return (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
	}
};
#elif defined(__POWERPC64__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize; }
	uint8_t *get_javasp() const { return sp + framesize; }
	uint64_t *get_arg_regs() const {
		return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
	}
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const {
		return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
	}
};
#elif defined(__X86_64__)
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
	uint8_t *get_datasp() const { return sp + framesize; }
	/* On x86_64 the return address sits between data SP and Java SP. */
	uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
	uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
	uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
	uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
};
#else
// dummy
struct FrameInfo {
	u1 *sp;
	int32_t framesize;
	FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {
		/* XXX is was unable to do this port for SPARC64, sorry. (-michi) */
		/* XXX maybe we need to pass the RA as argument there */
		os::abort("codegen_start_native_call: unsupported architecture");
	}
	uint8_t *get_datasp() const { return NULL; }
	uint8_t *get_javasp() const { return NULL; }
	uint64_t *get_arg_regs() const { return NULL; }
	uint64_t *get_arg_stack() const { return NULL; }
	uint64_t *get_ret_regs() const { return NULL; }
};
#endif

} // end anonymous namespace
848 :
849 : /* codegen_start_native_call ***************************************************
850 :
851 : Prepares the stuff required for a native (JNI) function call:
852 :
853 : - adds a stackframe info structure to the chain, for stacktraces
854 : - prepares the local references table on the stack
855 :
856 : The layout of the native stub stackframe should look like this:
857 :
858 : +---------------------------+ <- java SP (of parent Java function)
859 : | return address |
860 : +---------------------------+ <- data SP
861 : | |
862 : | stackframe info structure |
863 : | |
864 : +---------------------------+
865 : | |
866 : | local references table |
867 : | |
868 : +---------------------------+
869 : | |
870 : | saved registers (if any) |
871 : | |
872 : +---------------------------+
873 : | |
874 : | arguments (if any) |
875 : | |
876 : +---------------------------+ <- current SP (native stub)
877 :
878 : *******************************************************************************/
879 :
java_handle_t *codegen_start_native_call(u1 *sp, u1 *pv)
{
	assert(sp);
	assert(pv);

	stackframeinfo_t *sfi;
	localref_table   *lrt;
	codeinfo         *code;
	methodinfo       *m;
	int32_t           framesize;

	STATISTICS(count_calls_java_to_native++);

	// Get information from method header.
	code = code_get_codeinfo_for_pv(pv);
	assert(code != NULL);

	// The frame must at least hold the stackframeinfo and the local
	// references table (see the layout sketch above).
	framesize = md_stacktrace_get_framesize(code);
	assert(framesize >= (int32_t) (sizeof(stackframeinfo_t) + sizeof(localref_table)));

	// Get the methodinfo.
	m = code_get_methodinfo_for_pv(pv);
	assert(m);

	/* calculate needed values */

	FrameInfo FI(sp,framesize);

	uint8_t *datasp = FI.get_datasp();
	//uint8_t *javasp = FI.get_javasp();
#if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
	uint64_t *arg_regs = FI.get_arg_regs();
	uint64_t *arg_stack = FI.get_arg_stack();
#endif

	/* get data structures from stack */

	sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
	lrt = (localref_table *)   (datasp - sizeof(stackframeinfo_t) -
	                            sizeof(localref_table));

#if defined(ENABLE_JNI)
	/* add current JNI local references table to this thread */

	localref_table_add(lrt);
#endif

#if !defined(NDEBUG)
# if defined(__AARCH64__) || defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
	/* print the call-trace if necessary */
	/* BEFORE: filling the local reference table */

	if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
		trace_java_call_enter(m, arg_regs, arg_stack);
# endif
#endif

#if defined(ENABLE_HANDLES)
	/* place all references into the local reference table */
	/* BEFORE: creating stackframeinfo */

	localref_native_enter(m, arg_regs, arg_stack);
#endif

	/* Add a stackframeinfo for this native method. We don't have RA
	   and XPC here. These are determined in
	   stacktrace_stackframeinfo_add. */

	stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);

	/* Return a wrapped classinfo for static methods. */

	if (m->flags & ACC_STATIC)
		return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
	else
		return NULL;
}
957 :
958 :
959 : /* codegen_finish_native_call **************************************************
960 :
961 : Removes the stuff required for a native (JNI) function call.
962 : Additionally it checks for an exceptions and in case, get the
963 : exception object and clear the pointer.
964 :
965 : *******************************************************************************/
966 :
java_object_t *codegen_finish_native_call(u1 *sp, u1 *pv)
{
	assert(sp);
	assert(pv);

	stackframeinfo_t *sfi;
	java_handle_t    *e;
	java_object_t    *o;
	codeinfo         *code;
	int32_t           framesize;


	// Get information from method header.
	code = code_get_codeinfo_for_pv(pv);
	assert(code != NULL);

	framesize = md_stacktrace_get_framesize(code);

	// Get the methodinfo.
#if defined(ENABLE_HANDLES) || !defined(NDEBUG)
	methodinfo *m = code->m;
	assert(m != NULL);
#endif

	/* calculate needed values */

	FrameInfo FI(sp,framesize);

	uint8_t *datasp = FI.get_datasp();
#if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
	uint64_t *ret_regs = FI.get_ret_regs();
#endif

	/* get data structures from stack */

	sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));

	/* Remove current stackframeinfo from chain. */

	stacktrace_stackframeinfo_remove(sfi);

#if defined(ENABLE_HANDLES)
	/* unwrap the return value from the local reference table */
	/* AFTER: removing the stackframeinfo */
	/* BEFORE: releasing the local reference table */

	localref_native_exit(m, ret_regs);
#endif

	/* get and unwrap the exception */
	/* AFTER: removing the stackframe info */
	/* BEFORE: releasing the local reference table */

	e = exceptions_get_and_clear_exception();
	o = LLNI_UNWRAP(e);

#if defined(ENABLE_JNI)
	/* release JNI local references table for this thread */

	localref_frame_pop_all();
	localref_table_remove();
#endif

#if !defined(NDEBUG)
# if defined(__AARCH64__) || defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
	/* print the call-trace if necessary */
	/* AFTER: unwrapping the return value */

	if (opt_TraceJavaCalls || opt_TraceBuiltinCalls)
		trace_java_call_exit(m, ret_regs);
# endif
#endif

	/* Return the pending exception (or NULL) to the native stub. */

	return o;
}
1042 :
1043 :
1044 : /* codegen_reg_of_var **********************************************************
1045 :
1046 : This function determines a register, to which the result of an
1047 : operation should go, when it is ultimatively intended to store the
1048 : result in pseudoregister v. If v is assigned to an actual
1049 : register, this register will be returned. Otherwise (when v is
1050 : spilled) this function returns tempregnum. If not already done,
1051 : regoff and flags are set in the stack location.
1052 :
1053 : *******************************************************************************/
1054 :
1055 1411671 : s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
1056 : {
1057 1411671 : if (!(v->flags & INMEMORY))
1058 1355594 : return v->vv.regoff;
1059 :
1060 56077 : return tempregnum;
1061 : }
1062 :
1063 :
1064 : /* codegen_reg_of_dst **********************************************************
1065 :
1066 : This function determines a register, to which the result of an
1067 : operation should go, when it is ultimatively intended to store the
1068 : result in iptr->dst.var. If dst.var is assigned to an actual
1069 : register, this register will be returned. Otherwise (when it is
1070 : spilled) this function returns tempregnum. If not already done,
1071 : regoff and flags are set in the stack location.
1072 :
1073 : *******************************************************************************/
1074 :
1075 1316291 : s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
1076 : {
1077 1316291 : return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
1078 : }
1079 :
1080 : /**
1081 : * Fix up register locations in the case where control is transferred to an
1082 : * exception handler block via normal control flow (no exception).
1083 : */
1084 9964 : static void fixup_exc_handler_interface(jitdata *jd, basicblock *bptr)
1085 : {
1086 : // Exception handlers have exactly 1 in-slot
1087 9964 : assert(bptr->indepth == 1);
1088 9964 : varinfo *var = VAR(bptr->invars[0]);
1089 9964 : int32_t d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1090 9964 : emit_load(jd, NULL, var, d);
1091 : // Copy the interface variable to ITMP1 (XPTR) because that's where
1092 : // the handler expects it.
1093 9964 : emit_imove(jd->cd, d, REG_ITMP1_XPTR);
1094 9964 : }
1095 :
1096 : /**
1097 : * Generates machine code.
1098 : */
1099 86399 : bool codegen_emit(jitdata *jd)
1100 : {
1101 : varinfo* var;
1102 86399 : builtintable_entry* bte = 0;
1103 : methoddesc* md;
1104 : int32_t s1, s2, /*s3,*/ d;
1105 : #if !defined(__I386__)
1106 : int32_t fieldtype;
1107 : int32_t disp;
1108 : #endif
1109 : int i;
1110 :
1111 : // Get required compiler data.
1112 : //methodinfo* m = jd->m;
1113 86399 : codeinfo* code = jd->code;
1114 86399 : codegendata* cd = jd->cd;
1115 86399 : registerdata* rd = jd->rd;
1116 : #if defined(ENABLE_SSA)
1117 : lsradata* ls = jd->ls;
1118 : bool last_cmd_was_goto = false;
1119 : #endif
1120 :
1121 : // Space to save used callee saved registers.
1122 86399 : int32_t savedregs_num = 0;
1123 86399 : savedregs_num += (INT_SAV_CNT - rd->savintreguse);
1124 86399 : savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
1125 :
1126 : // Calculate size of stackframe.
1127 86399 : cd->stackframesize = rd->memuse + savedregs_num;
1128 :
1129 : // Space to save the return address.
1130 : #if STACKFRAME_RA_TOP_OF_FRAME
1131 : # if STACKFRAME_LEAFMETHODS_RA_REGISTER
1132 : if (!code_is_leafmethod(code))
1133 : # endif
1134 : cd->stackframesize += 1;
1135 : #endif
1136 :
1137 : // Space to save argument of monitor_enter.
1138 86399 : if (checksync && code_is_synchronized(code))
1139 : #if STACKFRAME_SYNC_NEEDS_TWO_SLOTS
1140 : /* On some architectures the stack position for the argument can
1141 : not be shared with place to save the return register values to
1142 : survive monitor_exit since both values reside in the same register. */
1143 : cd->stackframesize += 2;
1144 : #else
1145 3217 : cd->stackframesize += 1;
1146 : #endif
1147 :
1148 : // Keep stack of non-leaf functions 16-byte aligned for calls into
1149 : // native code.
1150 86399 : if (!code_is_leafmethod(code) || JITDATA_HAS_FLAG_VERBOSECALL(jd))
1151 : #if STACKFRAME_RA_BETWEEN_FRAMES
1152 76287 : ALIGN_ODD(cd->stackframesize);
1153 : #else
1154 : ALIGN_EVEN(cd->stackframesize);
1155 : #endif
1156 :
1157 : #if defined(SPECIALMEMUSE)
1158 : // On architectures having a linkage area, we can get rid of the whole
1159 : // stackframe in leaf functions without saved registers.
1160 : if (code_is_leafmethod(code) && (cd->stackframesize == LA_SIZE_IN_POINTERS))
1161 : cd->stackframesize = 0;
1162 : #endif
1163 :
1164 : /*
1165 : * SECTION 1: Method header generation.
1166 : */
1167 :
1168 : // The method header was reduced to the bare minimum of one pointer
1169 : // to the codeinfo structure, which in turn contains all runtime
1170 : // information. However this section together with the methodheader.h
1171 : // file will be kept alive for historical reasons. It might come in
1172 : // handy at some point.
1173 :
1174 86399 : (void) dseg_add_unique_address(cd, code); ///< CodeinfoPointer
1175 :
1176 : // XXX, REMOVEME: We still need it for exception handling in assembler.
1177 : // XXX ARM: (void) dseg_add_unique_s4(cd, cd->stackframesize);
1178 : #if defined(__I386__)
1179 : int align_off = (cd->stackframesize != 0) ? 4 : 0;
1180 : (void) dseg_add_unique_s4(cd, cd->stackframesize * 8 + align_off); /* FrameSize */
1181 : #else
1182 86399 : (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize */
1183 : #endif
1184 86399 : (void) dseg_add_unique_s4(cd, code_is_leafmethod(code) ? 1 : 0);
1185 86399 : (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
1186 86399 : (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
1187 :
1188 : /*
1189 : * SECTION 2: Method prolog generation.
1190 : */
1191 :
1192 : #if defined(ENABLE_PROFILING)
1193 : // Generate method profiling code.
1194 : if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1195 :
1196 : // Count method frequency.
1197 : emit_profile_method(cd, code);
1198 :
1199 : // Start CPU cycle counting.
1200 : emit_profile_cycle_start(cd, code);
1201 : }
1202 : #endif
1203 :
1204 : // Emit code for the method prolog.
1205 86399 : codegen_emit_prolog(jd);
1206 :
1207 : // Emit code to call monitorenter function.
1208 86399 : if (checksync && code_is_synchronized(code))
1209 3217 : emit_monitor_enter(jd, rd->memuse * 8);
1210 :
1211 : #if !defined(NDEBUG)
1212 : // Call trace function.
1213 86399 : if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
1214 0 : emit_verbosecall_enter(jd);
1215 : #endif
1216 :
1217 : #if defined(ENABLE_SSA)
1218 : // With SSA the header is basicblock 0, insert phi moves if necessary.
1219 : if (ls != NULL)
1220 : codegen_emit_phi_moves(jd, ls->basicblocks[0]);
1221 : #endif
1222 :
1223 : // Create replacement points.
1224 : REPLACEMENT_POINTS_INIT(cd, jd);
1225 :
1226 : /*
1227 : * SECTION 3: ICMD code generation.
1228 : */
1229 :
1230 : // Walk through all basic blocks.
1231 492080 : for (basicblock* bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
1232 :
1233 405681 : bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
1234 :
1235 : // Is this basic block reached?
1236 405681 : if (bptr->state < basicblock::REACHED)
1237 86401 : continue;
1238 :
1239 : // Branch resolving.
1240 319280 : codegen_resolve_branchrefs(cd, bptr);
1241 :
1242 : // Handle replacement points.
1243 : REPLACEMENT_POINT_BLOCK_START(cd, bptr);
1244 :
1245 : #if defined(ENABLE_REPLACEMENT) && defined(__I386__)
1246 : // Generate countdown trap code.
1247 : methodinfo* m = jd->m;
1248 : if (bptr->bitflags & BBFLAG_REPLACEMENT) {
1249 : if (cd->replacementpoint[-1].flags & rplpoint::FLAG_COUNTDOWN) {
1250 : MCODECHECK(32);
1251 : emit_trap_countdown(cd, &(m->hitcountdown));
1252 : }
1253 : }
1254 : #endif
1255 :
1256 : #if defined(ENABLE_PROFILING)
1257 : // Generate basicblock profiling code.
1258 : if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1259 :
1260 : // Count basicblock frequency.
1261 : emit_profile_basicblock(cd, code, bptr);
1262 :
1263 : // If this is an exception handler, start profiling again.
1264 : if (bptr->type == basicblock::TYPE_EXH)
1265 : emit_profile_cycle_start(cd, code);
1266 : }
1267 : #endif
1268 :
1269 : // Copy interface registers to their destination.
1270 319280 : int32_t indepth = bptr->indepth;
1271 : // XXX Check if this is true for all archs.
1272 319280 : MCODECHECK(64+indepth); // All
1273 319279 : MCODECHECK(128+indepth); // PPC64
1274 319279 : MCODECHECK(512); // I386, X86_64, S390
1275 : #if defined(ENABLE_SSA)
1276 : // XXX Check if this is correct and add a propper comment!
1277 : if (ls != NULL) {
1278 : last_cmd_was_goto = false;
1279 : } else {
1280 : #elif defined(ENABLE_LSRA)
1281 : if (opt_lsra) {
1282 : while (indepth > 0) {
1283 : indepth--;
1284 : var = VAR(bptr->invars[indepth]);
1285 : if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1286 : if (!IS_INMEMORY(src->flags))
1287 : d = var->vv.regoff;
1288 : else
1289 : d = REG_ITMP1_XPTR;
1290 : // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1291 : // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1292 : emit_imove(cd, REG_ITMP1_XPTR, d);
1293 : emit_store(jd, NULL, var, d);
1294 : }
1295 : }
1296 : } else {
1297 : #endif
1298 669870 : while (indepth > 0) {
1299 31312 : indepth--;
1300 31312 : var = VAR(bptr->invars[indepth]);
1301 41276 : if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1302 9964 : d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1303 : // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1304 : // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1305 9964 : emit_imove(cd, REG_ITMP1_XPTR, d);
1306 9964 : emit_store(jd, NULL, var, d);
1307 : }
1308 : else {
1309 21348 : assert((var->flags & INOUT));
1310 : }
1311 : }
1312 : #if defined(ENABLE_SSA) || defined(ENABLE_LSRA)
1313 : }
1314 : #endif
1315 :
1316 : // Walk through all instructions.
1317 319279 : int32_t len = bptr->icount;
1318 319279 : uint16_t currentline = 0;
1319 4737864 : for (instruction* iptr = bptr->iinstr; len > 0; len--, iptr++) {
1320 :
1321 : // Add line number.
1322 4418584 : if (iptr->line != currentline) {
1323 828758 : linenumbertable_list_entry_add(cd, iptr->line);
1324 828758 : currentline = iptr->line;
1325 : }
1326 :
1327 : // An instruction usually needs < 64 words.
1328 : // XXX Check if this is true for all archs.
1329 4418584 : MCODECHECK(64); // All
1330 4418585 : MCODECHECK(128); // PPC64
1331 4418585 : MCODECHECK(1024); // I386, X86_64, S390 /* 1kB should be enough */
1332 :
1333 : // The big switch.
1334 4418585 : switch (iptr->opc) {
1335 :
1336 : case ICMD_NOP: /* ... ==> ... */
1337 : case ICMD_POP: /* ..., value ==> ... */
1338 : case ICMD_POP2: /* ..., value, value ==> ... */
1339 810253 : break;
1340 :
1341 : case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
1342 :
1343 0 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1344 0 : emit_nullpointer_check(cd, iptr, s1);
1345 0 : break;
1346 :
1347 : case ICMD_BREAKPOINT: /* ... ==> ... */
1348 : /* sx.val.anyptr = Breakpoint */
1349 :
1350 0 : patcher_add_patch_ref(jd, PATCHER_breakpoint, iptr->sx.val.anyptr, 0);
1351 0 : PATCHER_NOPS;
1352 0 : break;
1353 :
1354 : #if defined(ENABLE_SSA)
1355 : case ICMD_GETEXCEPTION:
1356 :
1357 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1358 : emit_imove(cd, REG_ITMP1, d);
1359 : emit_store_dst(jd, iptr, d);
1360 : break;
1361 : #endif
1362 :
1363 : /* inline operations **********************************************/
1364 :
1365 : case ICMD_INLINE_START:
1366 :
1367 : REPLACEMENT_POINT_INLINE_START(cd, iptr);
1368 0 : break;
1369 :
1370 : case ICMD_INLINE_BODY:
1371 :
1372 : REPLACEMENT_POINT_INLINE_BODY(cd, iptr);
1373 0 : linenumbertable_list_entry_add_inline_start(cd, iptr);
1374 0 : linenumbertable_list_entry_add(cd, iptr->line);
1375 0 : break;
1376 :
1377 : case ICMD_INLINE_END:
1378 :
1379 0 : linenumbertable_list_entry_add_inline_end(cd, iptr);
1380 0 : linenumbertable_list_entry_add(cd, iptr->line);
1381 0 : break;
1382 :
1383 :
1384 : /* constant operations ********************************************/
1385 :
1386 : case ICMD_ICONST: /* ... ==> ..., constant */
1387 :
1388 654581 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1389 654581 : ICONST(d, iptr->sx.val.i);
1390 654581 : emit_store_dst(jd, iptr, d);
1391 654581 : break;
1392 :
1393 : case ICMD_LCONST: /* ... ==> ..., constant */
1394 :
1395 5286 : d = codegen_reg_of_dst(jd, iptr, REG_LTMP12);
1396 5286 : LCONST(d, iptr->sx.val.l);
1397 5286 : emit_store_dst(jd, iptr, d);
1398 5286 : break;
1399 :
1400 :
1401 : /* load/store/copy/move operations ********************************/
1402 :
1403 : case ICMD_COPY:
1404 : case ICMD_MOVE:
1405 : case ICMD_ILOAD: /* ... ==> ..., content of local variable */
1406 : case ICMD_LLOAD: /* s1 = local variable */
1407 : case ICMD_FLOAD:
1408 : case ICMD_DLOAD:
1409 : case ICMD_ALOAD:
1410 : case ICMD_ISTORE: /* ..., value ==> ... */
1411 : case ICMD_LSTORE:
1412 : case ICMD_FSTORE:
1413 : case ICMD_DSTORE:
1414 :
1415 1196889 : emit_copy(jd, iptr);
1416 1196889 : break;
1417 :
1418 : case ICMD_ASTORE:
1419 :
1420 59060 : if (!(iptr->flags.bits & INS_FLAG_RETADDR))
1421 59043 : emit_copy(jd, iptr);
1422 59060 : break;
1423 :
1424 :
1425 : /* integer operations *********************************************/
1426 :
1427 : case ICMD_FCONST: /* ... ==> ..., constant */
1428 : case ICMD_DCONST: /* ... ==> ..., constant */
1429 : case ICMD_ACONST: /* ... ==> ..., constant */
1430 : case ICMD_INEG: /* ..., value ==> ..., - value */
1431 : case ICMD_LNEG: /* ..., value ==> ..., - value */
1432 : case ICMD_I2L: /* ..., value ==> ..., value */
1433 : case ICMD_L2I: /* ..., value ==> ..., value */
1434 : case ICMD_INT2BYTE: /* ..., value ==> ..., value */
1435 : case ICMD_INT2CHAR: /* ..., value ==> ..., value */
1436 : case ICMD_INT2SHORT: /* ..., value ==> ..., value */
1437 : case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1438 : case ICMD_IINC:
1439 : case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
1440 : /* sx.val.i = constant */
1441 : case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1442 : case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
1443 : /* sx.val.l = constant */
1444 : case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1445 : case ICMD_ISUBCONST: /* ..., value ==> ..., value + constant */
1446 : /* sx.val.i = constant */
1447 : case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1448 : case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
1449 : /* sx.val.l = constant */
1450 : case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1451 : case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
1452 : /* sx.val.i = constant */
1453 : case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1454 : /* sx.val.i = constant */
1455 : case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1456 : case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
1457 : /* sx.val.l = constant */
1458 : case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1459 : /* sx.val.l = constant */
1460 : case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1461 : case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1462 : case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
1463 : /* sx.val.i = constant */
1464 : case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
1465 : /* sx.val.i = constant */
1466 : case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1467 : case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1468 : case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
1469 : /* sx.val.i = constant */
1470 : case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
1471 : /* sx.val.l = constant */
1472 : case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1473 : case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
1474 : /* sx.val.i = constant */
1475 : case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1476 : case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
1477 : /* sx.val.i = constant */
1478 : case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1479 : case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
1480 : /* sx.val.i = constant */
1481 : case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1482 : case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
1483 : /* sx.val.i = constant */
1484 : case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1485 : case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
1486 : /* sx.val.i = constant */
1487 : case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1488 : case ICMD_LUSHRCONST: /* ..., value ==> ..., value >>> constant */
1489 : /* sx.val.l = constant */
1490 : case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1491 : case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
1492 : /* sx.val.i = constant */
1493 : case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1494 : case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
1495 : /* sx.val.l = constant */
1496 : case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1497 : case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
1498 : /* sx.val.i = constant */
1499 : case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1500 : case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
1501 : /* sx.val.l = constant */
1502 : case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1503 : case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
1504 : /* sx.val.i = constant */
1505 : case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1506 : case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
1507 : /* sx.val.l = constant */
1508 :
1509 : // Generate architecture specific instructions.
1510 215359 : codegen_emit_instruction(jd, iptr);
1511 215359 : break;
1512 :
1513 :
1514 : /* floating operations ********************************************/
1515 :
1516 : #if !defined(ENABLE_SOFTFLOAT)
1517 : case ICMD_FNEG: /* ..., value ==> ..., - value */
1518 : case ICMD_DNEG:
1519 : case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1520 : case ICMD_DADD:
1521 : case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1522 : case ICMD_DSUB:
1523 : case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1524 : case ICMD_DMUL:
1525 : case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1526 : case ICMD_DDIV:
1527 : case ICMD_FREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1528 : case ICMD_DREM:
1529 : case ICMD_I2F: /* ..., value ==> ..., (float) value */
1530 : case ICMD_I2D: /* ..., value ==> ..., (double) value */
1531 : case ICMD_L2F: /* ..., value ==> ..., (float) value */
1532 : case ICMD_L2D: /* ..., value ==> ..., (double) value */
1533 : case ICMD_F2I: /* ..., value ==> ..., (int) value */
1534 : case ICMD_D2I:
1535 : case ICMD_F2L: /* ..., value ==> ..., (long) value */
1536 : case ICMD_D2L:
1537 : case ICMD_F2D: /* ..., value ==> ..., (double) value */
1538 : case ICMD_D2F: /* ..., value ==> ..., (float) value */
1539 : case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
1540 : case ICMD_DCMPL: /* == => 0, < => 1, > => -1 */
1541 : case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
1542 : case ICMD_DCMPG: /* == => 0, < => 1, > => -1 */
1543 :
1544 : // Generate architecture specific instructions.
1545 5303 : codegen_emit_instruction(jd, iptr);
1546 5303 : break;
1547 : #endif /* !defined(ENABLE_SOFTFLOAT) */
1548 :
1549 :
1550 : /* memory operations **********************************************/
1551 :
1552 : case ICMD_ARRAYLENGTH:/* ..., arrayref ==> ..., length */
1553 :
1554 9670 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1555 9670 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1556 : /* implicit null-pointer check */
1557 : // XXX PPC64: Here we had an explicit null-pointer check
1558 : // which I think was obsolete, please confirm. Otherwise:
1559 : // emit_nullpointer_check(cd, iptr, s1);
1560 9670 : M_ILD(d, s1, OFFSET(java_array_t, size));
1561 9670 : emit_store_dst(jd, iptr, d);
1562 9670 : break;
1563 :
1564 : case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
1565 : case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
1566 : case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
1567 : case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
1568 : case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
1569 : case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
1570 : case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
1571 : case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
1572 : case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
1573 : case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
1574 : case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
1575 : case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
1576 : case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
1577 : case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
1578 : case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
1579 : case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
1580 : case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
1581 : case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
1582 : case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
1583 : case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
1584 : case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
1585 : case ICMD_FASTORECONST: /* ..., arrayref, index ==> ... */
1586 : case ICMD_DASTORECONST: /* ..., arrayref, index ==> ... */
1587 : case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
1588 : case ICMD_GETFIELD: /* ... ==> ..., value */
1589 : case ICMD_PUTFIELD: /* ..., value ==> ... */
1590 : case ICMD_PUTFIELDCONST: /* ..., objectref ==> ... */
1591 : /* val = value (in current instruction) */
1592 : case ICMD_PUTSTATICCONST: /* ... ==> ... */
1593 : /* val = value (in current instruction) */
1594 :
1595 : // Generate architecture specific instructions.
1596 759348 : codegen_emit_instruction(jd, iptr);
1597 759348 : break;
1598 :
1599 : case ICMD_GETSTATIC: /* ... ==> ..., value */
1600 :
1601 : #if defined(__I386__)
1602 : // Generate architecture specific instructions.
1603 : codegen_emit_instruction(jd, iptr);
1604 : break;
1605 : #else
1606 : {
1607 : fieldinfo* fi;
1608 : //patchref_t* pr;
1609 48504 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1610 18190 : unresolved_field* uf = iptr->sx.s23.s3.uf;
1611 18190 : fieldtype = uf->fieldref->parseddesc.fd->type;
1612 18190 : disp = dseg_add_unique_address(cd, 0);
1613 :
1614 : //pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1615 18190 : patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1616 :
1617 18190 : fi = NULL; /* Silence compiler warning */
1618 : }
1619 : else {
1620 30314 : fi = iptr->sx.s23.s3.fmiref->p.field;
1621 30314 : fieldtype = fi->type;
1622 30314 : disp = dseg_add_address(cd, fi->value);
1623 :
1624 30314 : if (!class_is_or_almost_initialized(fi->clazz)) {
1625 : PROFILE_CYCLE_STOP;
1626 745 : patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1627 : PROFILE_CYCLE_START;
1628 : }
1629 :
1630 : //pr = NULL; /* Silence compiler warning */
1631 : }
1632 :
1633 : // XXX X86_64: Here We had this:
1634 : /* This approach is much faster than moving the field
1635 : address inline into a register. */
1636 :
1637 48504 : M_ALD_DSEG(REG_ITMP1, disp);
1638 :
1639 48504 : switch (fieldtype) {
1640 : case TYPE_ADR:
1641 40208 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1642 40208 : M_ALD(d, REG_ITMP1, 0);
1643 40208 : break;
1644 : case TYPE_INT:
1645 : #if defined(ENABLE_SOFTFLOAT)
1646 : case TYPE_FLT:
1647 : #endif
1648 6077 : d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1649 6077 : M_ILD(d, REG_ITMP1, 0);
1650 6077 : break;
1651 : case TYPE_LNG:
1652 : #if defined(ENABLE_SOFTFLOAT)
1653 : case TYPE_DBL:
1654 : #endif
1655 191 : d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
1656 191 : M_LLD(d, REG_ITMP1, 0);
1657 191 : break;
1658 : #if !defined(ENABLE_SOFTFLOAT)
1659 : case TYPE_FLT:
1660 2017 : d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1661 2017 : M_FLD(d, REG_ITMP1, 0);
1662 2017 : break;
1663 : case TYPE_DBL:
1664 11 : d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1665 11 : M_DLD(d, REG_ITMP1, 0);
1666 11 : break;
1667 : #endif
1668 : default:
1669 : // Silence compiler warning.
1670 0 : d = 0;
1671 : }
1672 48504 : emit_store_dst(jd, iptr, d);
1673 48504 : break;
1674 : }
1675 : #endif
1676 :
1677 : case ICMD_PUTSTATIC: /* ..., value ==> ... */
1678 :
1679 : #if defined(__I386__)
1680 : // Generate architecture specific instructions.
1681 : codegen_emit_instruction(jd, iptr);
1682 : break;
1683 : #else
1684 : {
1685 : fieldinfo* fi;
1686 : #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1687 21810 : patchref_t* pr = NULL;
1688 : #endif
1689 :
1690 21810 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1691 1332 : unresolved_field* uf = iptr->sx.s23.s3.uf;
1692 1332 : fieldtype = uf->fieldref->parseddesc.fd->type;
1693 1332 : disp = dseg_add_unique_address(cd, 0);
1694 :
1695 : #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1696 : pr =
1697 : #endif
1698 1332 : patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1699 :
1700 1332 : fi = NULL; /* Silence compiler warning */
1701 : }
1702 : else {
1703 20478 : fi = iptr->sx.s23.s3.fmiref->p.field;
1704 20478 : fieldtype = fi->type;
1705 20478 : disp = dseg_add_address(cd, fi->value);
1706 :
1707 20478 : if (!class_is_or_almost_initialized(fi->clazz)) {
1708 : PROFILE_CYCLE_STOP;
1709 0 : patcher_add_patch_ref(jd, PATCHER_initialize_class, fi->clazz, 0);
1710 : PROFILE_CYCLE_START;
1711 : }
1712 : }
1713 :
1714 : // XXX X86_64: Here We had this:
1715 : /* This approach is much faster than moving the field
1716 : address inline into a register. */
1717 :
1718 21810 : M_ALD_DSEG(REG_ITMP1, disp);
1719 :
1720 21810 : switch (fieldtype) {
1721 : case TYPE_ADR:
1722 19085 : s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1723 19085 : M_AST(s1, REG_ITMP1, 0);
1724 19085 : break;
1725 : case TYPE_INT:
1726 : #if defined(ENABLE_SOFTFLOAT)
1727 : case TYPE_FLT:
1728 : #endif
1729 1534 : s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1730 1534 : M_IST(s1, REG_ITMP1, 0);
1731 1534 : break;
1732 : case TYPE_LNG:
1733 : #if defined(ENABLE_SOFTFLOAT)
1734 : case TYPE_DBL:
1735 : #endif
1736 173 : s1 = emit_load_s1(jd, iptr, REG_LTMP23);
1737 173 : M_LST(s1, REG_ITMP1, 0);
1738 173 : break;
1739 : #if !defined(ENABLE_SOFTFLOAT)
1740 : case TYPE_FLT:
1741 1011 : s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1742 1011 : M_FST(s1, REG_ITMP1, 0);
1743 1011 : break;
1744 : case TYPE_DBL:
1745 7 : s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1746 7 : M_DST(s1, REG_ITMP1, 0);
1747 : break;
1748 : #endif
1749 : }
1750 : #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1751 21810 : codegen_emit_patchable_barrier(iptr, cd, pr, fi);
1752 : #endif
1753 21810 : break;
1754 : }
1755 : #endif
1756 :
1757 : /* branch operations **********************************************/
1758 :
1759 : case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
1760 :
1761 : // We might leave this method, stop profiling.
1762 : PROFILE_CYCLE_STOP;
1763 :
1764 18423 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1765 : // XXX Sparc64: We use REG_ITMP2_XPTR here, fix me!
1766 18423 : emit_imove(cd, s1, REG_ITMP1_XPTR);
1767 :
1768 : #ifdef ENABLE_VERIFIER
1769 18423 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1770 15042 : unresolved_class *uc = iptr->sx.s23.s2.uc;
1771 15042 : patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1772 : }
1773 : #endif /* ENABLE_VERIFIER */
1774 :
1775 : // Generate architecture specific instructions.
1776 18423 : codegen_emit_instruction(jd, iptr);
1777 18423 : ALIGNCODENOP;
1778 18423 : break;
1779 :
1780 : case ICMD_GOTO: /* ... ==> ... */
1781 : case ICMD_RET: /* ... ==> ... */
1782 :
1783 : #if defined(ENABLE_SSA)
1784 : // In case of a goto, phimoves have to be inserted
1785 : // before the jump.
1786 : if (ls != NULL) {
1787 : last_cmd_was_goto = true;
1788 : codegen_emit_phi_moves(jd, bptr);
1789 : }
1790 : #endif
1791 34753 : if (iptr->dst.block->type == basicblock::TYPE_EXH)
1792 0 : fixup_exc_handler_interface(jd, iptr->dst.block);
1793 34753 : emit_br(cd, iptr->dst.block);
1794 34753 : ALIGNCODENOP;
1795 34753 : break;
1796 :
1797 : case ICMD_JSR: /* ... ==> ... */
1798 :
1799 17 : assert(iptr->sx.s23.s3.jsrtarget.block->type != basicblock::TYPE_EXH);
1800 17 : emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
1801 17 : ALIGNCODENOP;
1802 17 : break;
1803 :
1804 : case ICMD_IFNULL: /* ..., value ==> ... */
1805 : case ICMD_IFNONNULL:
1806 :
1807 28803 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1808 28803 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1809 : #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1810 : emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
1811 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1812 28803 : M_TEST(s1);
1813 28803 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
1814 : #else
1815 : # error Unable to generate code for this configuration!
1816 : #endif
1817 28803 : break;
1818 :
1819 : case ICMD_IFEQ: /* ..., value ==> ... */
1820 : case ICMD_IFNE:
1821 : case ICMD_IFLT:
1822 : case ICMD_IFLE:
1823 : case ICMD_IFGT:
1824 : case ICMD_IFGE:
1825 :
1826 : // XXX Sparc64: int compares must not branch on the
1827 : // register directly. Reason is, that register content is
1828 : // not 32-bit clean. Fix this!
1829 :
1830 57418 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1831 :
1832 : #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1833 : if (iptr->sx.val.i == 0) {
1834 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1835 : emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, s1, BRANCH_OPT_NONE);
1836 : } else {
1837 : // Generate architecture specific instructions.
1838 : codegen_emit_instruction(jd, iptr);
1839 : }
1840 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1841 57418 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1842 57418 : emit_icmp_imm(cd, s1, iptr->sx.val.i);
1843 57418 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
1844 : #else
1845 : # error Unable to generate code for this configuration!
1846 : #endif
1847 57418 : break;
1848 :
1849 : case ICMD_IF_LEQ: /* ..., value ==> ... */
1850 : case ICMD_IF_LNE:
1851 : case ICMD_IF_LLT:
1852 : case ICMD_IF_LGE:
1853 : case ICMD_IF_LGT:
1854 : case ICMD_IF_LLE:
1855 :
1856 295 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1857 :
1858 : // Generate architecture specific instructions.
1859 295 : codegen_emit_instruction(jd, iptr);
1860 295 : break;
1861 :
1862 : case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
1863 : case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
1864 :
1865 2575 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1866 :
1867 2575 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1868 2575 : s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1869 : #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1870 : switch (iptr->opc) {
1871 : case ICMD_IF_ACMPEQ:
1872 : emit_beq(cd, iptr->dst.block, s1, s2);
1873 : break;
1874 : case ICMD_IF_ACMPNE:
1875 : emit_bne(cd, iptr->dst.block, s1, s2);
1876 : break;
1877 : default:
1878 : break;
1879 : }
1880 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1881 2575 : M_ACMP(s1, s2);
1882 2575 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
1883 : #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1884 : M_CMPEQ(s1, s2, REG_ITMP1);
1885 : switch (iptr->opc) {
1886 : case ICMD_IF_ACMPEQ:
1887 : emit_bnez(cd, iptr->dst.block, REG_ITMP1);
1888 : break;
1889 : case ICMD_IF_ACMPNE:
1890 : emit_beqz(cd, iptr->dst.block, REG_ITMP1);
1891 : break;
1892 : default:
1893 : break;
1894 : }
1895 : #else
1896 : # error Unable to generate code for this configuration!
1897 : #endif
1898 2575 : break;
1899 :
1900 : case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
1901 : case ICMD_IF_ICMPNE: /* op1 = target JavaVM pc */
1902 :
1903 4632 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1904 :
1905 : #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1906 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1907 : s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1908 : switch (iptr->opc) {
1909 : case ICMD_IF_ICMPEQ:
1910 : emit_beq(cd, iptr->dst.block, s1, s2);
1911 : break;
1912 : case ICMD_IF_ICMPNE:
1913 : emit_bne(cd, iptr->dst.block, s1, s2);
1914 : break;
1915 : }
1916 : break;
1917 : #else
1918 : /* fall-through */
1919 : #endif
1920 :
1921 : case ICMD_IF_ICMPLT: /* ..., value, value ==> ... */
1922 : case ICMD_IF_ICMPGT: /* op1 = target JavaVM pc */
1923 : case ICMD_IF_ICMPLE:
1924 : case ICMD_IF_ICMPGE:
1925 :
1926 16840 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1927 :
1928 16840 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1929 16840 : s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1930 : #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1931 : # if defined(__I386__) || defined(__X86_64__)
1932 : // XXX Fix this soon!!!
1933 16840 : M_ICMP(s2, s1);
1934 : # else
1935 : M_ICMP(s1, s2);
1936 : # endif
1937 16840 : emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
1938 : #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1939 : // Generate architecture specific instructions.
1940 : codegen_emit_instruction(jd, iptr);
1941 : #else
1942 : # error Unable to generate code for this configuration!
1943 : #endif
1944 16840 : break;
1945 :
1946 : case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
1947 : case ICMD_IF_LCMPNE: /* op1 = target JavaVM pc */
1948 : case ICMD_IF_LCMPLT:
1949 : case ICMD_IF_LCMPGT:
1950 : case ICMD_IF_LCMPLE:
1951 : case ICMD_IF_LCMPGE:
1952 :
1953 124 : assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1954 :
1955 : // Generate architecture specific instructions.
1956 124 : codegen_emit_instruction(jd, iptr);
1957 124 : break;
1958 :
1959 : case ICMD_RETURN: /* ... ==> ... */
1960 :
1961 : REPLACEMENT_POINT_RETURN(cd, iptr);
1962 44342 : goto nowperformreturn;
1963 :
1964 : case ICMD_ARETURN: /* ..., retvalue ==> ... */
1965 :
1966 : REPLACEMENT_POINT_RETURN(cd, iptr);
1967 38748 : s1 = emit_load_s1(jd, iptr, REG_RESULT);
1968 : // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1969 38748 : emit_imove(cd, s1, REG_RESULT);
1970 :
1971 : #ifdef ENABLE_VERIFIER
1972 38748 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1973 : PROFILE_CYCLE_STOP;
1974 3305 : unresolved_class *uc = iptr->sx.s23.s2.uc;
1975 3305 : patcher_add_patch_ref(jd, PATCHER_resolve_class, uc, 0);
1976 : PROFILE_CYCLE_START;
1977 : }
1978 : #endif /* ENABLE_VERIFIER */
1979 38748 : goto nowperformreturn;
1980 :
1981 : case ICMD_IRETURN: /* ..., retvalue ==> ... */
1982 : #if defined(ENABLE_SOFTFLOAT)
1983 : case ICMD_FRETURN:
1984 : #endif
1985 :
1986 : REPLACEMENT_POINT_RETURN(cd, iptr);
1987 19768 : s1 = emit_load_s1(jd, iptr, REG_RESULT);
1988 : // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1989 19768 : emit_imove(cd, s1, REG_RESULT);
1990 19768 : goto nowperformreturn;
1991 :
1992 : case ICMD_LRETURN: /* ..., retvalue ==> ... */
1993 : #if defined(ENABLE_SOFTFLOAT)
1994 : case ICMD_DRETURN:
1995 : #endif
1996 :
1997 : REPLACEMENT_POINT_RETURN(cd, iptr);
1998 246 : s1 = emit_load_s1(jd, iptr, REG_LRESULT);
1999 : // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
2000 246 : emit_lmove(cd, s1, REG_LRESULT);
2001 246 : goto nowperformreturn;
2002 :
2003 : #if !defined(ENABLE_SOFTFLOAT)
2004 : case ICMD_FRETURN: /* ..., retvalue ==> ... */
2005 :
2006 : REPLACEMENT_POINT_RETURN(cd, iptr);
2007 154 : s1 = emit_load_s1(jd, iptr, REG_FRESULT);
2008 : #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2009 : M_CAST_F2I(s1, REG_RESULT);
2010 : #else
2011 154 : emit_fmove(cd, s1, REG_FRESULT);
2012 : #endif
2013 154 : goto nowperformreturn;
2014 :
2015 : case ICMD_DRETURN: /* ..., retvalue ==> ... */
2016 :
2017 : REPLACEMENT_POINT_RETURN(cd, iptr);
2018 9 : s1 = emit_load_s1(jd, iptr, REG_FRESULT);
2019 : #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2020 : M_CAST_D2L(s1, REG_LRESULT);
2021 : #else
2022 9 : emit_dmove(cd, s1, REG_FRESULT);
2023 : #endif
2024 : goto nowperformreturn;
2025 : #endif
2026 :
2027 : nowperformreturn:
2028 : #if !defined(NDEBUG)
2029 : // Call trace function.
2030 103267 : if (JITDATA_HAS_FLAG_VERBOSECALL(jd))
2031 0 : emit_verbosecall_exit(jd);
2032 : #endif
2033 :
2034 : // Emit code to call monitorexit function.
2035 103267 : if (checksync && code_is_synchronized(code)) {
2036 4574 : emit_monitor_exit(jd, rd->memuse * 8);
2037 : }
2038 :
2039 : // Generate method profiling code.
2040 : PROFILE_CYCLE_STOP;
2041 :
2042 : // Emit code for the method epilog.
2043 103267 : codegen_emit_epilog(jd);
2044 103267 : ALIGNCODENOP;
2045 103267 : break;
2046 :
2047 : case ICMD_BUILTIN: /* ..., [arg1, [arg2 ...]] ==> ... */
2048 :
2049 : REPLACEMENT_POINT_FORGC_BUILTIN(cd, iptr);
2050 :
2051 77258 : bte = iptr->sx.s23.s3.bte;
2052 77258 : md = bte->md;
2053 :
2054 : #if defined(ENABLE_ESCAPE_REASON) && defined(__I386__)
2055 : if (bte->fp == BUILTIN_escape_reason_new) {
2056 : void set_escape_reasons(void *);
2057 : M_ASUB_IMM(8, REG_SP);
2058 : M_MOV_IMM(iptr->escape_reasons, REG_ITMP1);
2059 : M_AST(EDX, REG_SP, 4);
2060 : M_AST(REG_ITMP1, REG_SP, 0);
2061 : M_MOV_IMM(set_escape_reasons, REG_ITMP1);
2062 : M_CALL(REG_ITMP1);
2063 : M_ALD(EDX, REG_SP, 4);
2064 : M_AADD_IMM(8, REG_SP);
2065 : }
2066 : #endif
2067 :
2068 : // Emit the fast-path if available.
2069 77258 : if (bte->emit_fastpath != NULL) {
2070 : void (*emit_fastpath)(jitdata* jd, instruction* iptr, int d);
2071 8011 : emit_fastpath = (void (*)(jitdata* jd, instruction* iptr, int d)) bte->emit_fastpath;
2072 :
2073 8011 : assert(md->returntype.type == TYPE_VOID);
2074 8011 : d = REG_ITMP1;
2075 :
2076 : // Actually call the fast-path emitter.
2077 8011 : emit_fastpath(jd, iptr, d);
2078 :
2079 : // If fast-path succeeded, jump to the end of the builtin
2080 : // invocation.
2081 : // XXX Actually the slow-path block below should be moved
2082 : // out of the instruction stream and the jump below should be
2083 : // inverted.
2084 : #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2085 : os::abort("codegen_emit: Implement jump over slow-path for this configuration.");
2086 : #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2087 8011 : M_TEST(d);
2088 8011 : emit_label_bne(cd, BRANCH_LABEL_10);
2089 : #else
2090 : # error Unable to generate code for this configuration!
2091 : #endif
2092 : }
2093 :
2094 77258 : goto gen_method;
2095 :
2096 : case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
2097 : case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
2098 : case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
2099 : case ICMD_INVOKEINTERFACE:
2100 :
2101 : REPLACEMENT_POINT_INVOKE(cd, iptr);
2102 :
2103 282144 : if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2104 64232 : unresolved_method* um = iptr->sx.s23.s3.um;
2105 64232 : md = um->methodref->parseddesc.md;
2106 : }
2107 : else {
2108 217912 : methodinfo* lm = iptr->sx.s23.s3.fmiref->p.method;
2109 217912 : md = lm->parseddesc;
2110 : }
2111 :
2112 : gen_method:
2113 359402 : i = md->paramcount;
2114 :
2115 : // XXX Check this again!
2116 359402 : MCODECHECK((i << 1) + 64); // PPC
2117 :
2118 : // Copy arguments to registers or stack location.
2119 937960 : for (i = i - 1; i >= 0; i--) {
2120 578558 : var = VAR(iptr->sx.s23.s2.args[i]);
2121 578558 : d = md->params[i].regoff;
2122 :
2123 : // Already pre-allocated?
2124 578558 : if (var->flags & PREALLOC)
2125 310574 : continue;
2126 :
2127 267984 : if (!md->params[i].inmemory) {
2128 266196 : switch (var->type) {
2129 : case TYPE_ADR:
2130 : case TYPE_INT:
2131 : #if defined(ENABLE_SOFTFLOAT)
2132 : case TYPE_FLT:
2133 : #endif
2134 264586 : s1 = emit_load(jd, iptr, var, d);
2135 264586 : emit_imove(cd, s1, d);
2136 264586 : break;
2137 :
2138 : case TYPE_LNG:
2139 : #if defined(ENABLE_SOFTFLOAT)
2140 : case TYPE_DBL:
2141 : #endif
2142 305 : s1 = emit_load(jd, iptr, var, d);
2143 305 : emit_lmove(cd, s1, d);
2144 305 : break;
2145 :
2146 : #if !defined(ENABLE_SOFTFLOAT)
2147 : case TYPE_FLT:
2148 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2149 1070 : s1 = emit_load(jd, iptr, var, d);
2150 1070 : emit_fmove(cd, s1, d);
2151 : #else
2152 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2153 : M_CAST_F2I(s1, d);
2154 : #endif
2155 1070 : break;
2156 :
2157 : case TYPE_DBL:
2158 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2159 235 : s1 = emit_load(jd, iptr, var, d);
2160 235 : emit_dmove(cd, s1, d);
2161 : #else
2162 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2163 : M_CAST_D2L(s1, d);
2164 : #endif
2165 235 : break;
2166 : #endif
2167 : default:
2168 0 : assert(false);
2169 : break;
2170 : }
2171 : }
2172 : else {
2173 1788 : switch (var->type) {
2174 : case TYPE_ADR:
2175 1400 : s1 = emit_load(jd, iptr, var, REG_ITMP1);
2176 : // XXX Sparc64: Here this actually was:
2177 : // M_STX(s1, REG_SP, JITSTACK + d);
2178 1400 : M_AST(s1, REG_SP, d);
2179 1400 : break;
2180 :
2181 : case TYPE_INT:
2182 : #if defined(ENABLE_SOFTFLOAT)
2183 : case TYPE_FLT:
2184 : #endif
2185 : #if SIZEOF_VOID_P == 4
2186 : s1 = emit_load(jd, iptr, var, REG_ITMP1);
2187 : M_IST(s1, REG_SP, d);
2188 : break;
2189 : #else
2190 : /* fall-through */
2191 : #endif
2192 :
2193 : case TYPE_LNG:
2194 : #if defined(ENABLE_SOFTFLOAT)
2195 : case TYPE_DBL:
2196 : #endif
2197 343 : s1 = emit_load(jd, iptr, var, REG_LTMP12);
2198 : // XXX Sparc64: Here this actually was:
2199 : // M_STX(s1, REG_SP, JITSTACK + d);
2200 343 : M_LST(s1, REG_SP, d);
2201 343 : break;
2202 :
2203 : #if !defined(ENABLE_SOFTFLOAT)
2204 : case TYPE_FLT:
2205 23 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2206 23 : M_FST(s1, REG_SP, d);
2207 23 : break;
2208 :
2209 : case TYPE_DBL:
2210 22 : s1 = emit_load(jd, iptr, var, REG_FTMP1);
2211 : // XXX Sparc64: Here this actually was:
2212 : // M_DST(s1, REG_SP, JITSTACK + d);
2213 22 : M_DST(s1, REG_SP, d);
2214 22 : break;
2215 : #endif
2216 : default:
2217 0 : assert(false);
2218 : break;
2219 : }
2220 : }
2221 : }
2222 :
2223 : // Generate method profiling code.
2224 : PROFILE_CYCLE_STOP;
2225 :
2226 : // Generate architecture specific instructions.
2227 359402 : codegen_emit_instruction(jd, iptr);
2228 :
2229 : // Generate method profiling code.
2230 : PROFILE_CYCLE_START;
2231 :
2232 : // Store size of call code in replacement point.
2233 : REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr);
2234 : REPLACEMENT_POINT_FORGC_BUILTIN_RETURN(cd, iptr);
2235 :
2236 : // Recompute the procedure vector (PV).
2237 359402 : emit_recompute_pv(cd);
2238 :
2239 : // Store return value.
2240 : #if defined(ENABLE_SSA)
2241 : if ((ls == NULL) /* || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) */ ||
2242 : (ls->lifetime[iptr->dst.varindex].type != jitdata::UNUSED))
2243 : /* a "living" stackslot */
2244 : #endif
2245 359402 : switch (md->returntype.type) {
2246 : case TYPE_INT:
2247 : case TYPE_ADR:
2248 : #if defined(ENABLE_SOFTFLOAT)
2249 : case TYPE_FLT:
2250 : #endif
2251 239883 : s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2252 : // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2253 239883 : emit_imove(cd, REG_RESULT, s1);
2254 239883 : emit_store_dst(jd, iptr, s1);
2255 239883 : break;
2256 :
2257 : case TYPE_LNG:
2258 : #if defined(ENABLE_SOFTFLOAT)
2259 : case TYPE_DBL:
2260 : #endif
2261 476 : s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
2262 : // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2263 476 : emit_lmove(cd, REG_LRESULT, s1);
2264 476 : emit_store_dst(jd, iptr, s1);
2265 476 : break;
2266 :
2267 : #if !defined(ENABLE_SOFTFLOAT)
2268 : case TYPE_FLT:
2269 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2270 1198 : s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2271 1198 : emit_fmove(cd, REG_FRESULT, s1);
2272 : #else
2273 : s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2274 : M_CAST_I2F(REG_RESULT, s1);
2275 : #endif
2276 1198 : emit_store_dst(jd, iptr, s1);
2277 1198 : break;
2278 :
2279 : case TYPE_DBL:
2280 : #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2281 51 : s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2282 51 : emit_dmove(cd, REG_FRESULT, s1);
2283 : #else
2284 : s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2285 : M_CAST_L2D(REG_LRESULT, s1);
2286 : #endif
2287 51 : emit_store_dst(jd, iptr, s1);
2288 51 : break;
2289 : #endif
2290 :
2291 : case TYPE_VOID:
2292 117794 : break;
2293 : default:
2294 0 : assert(false);
2295 : break;
2296 : }
2297 :
2298 : // If we are emitting a fast-path block, this is the label for
2299 : // successful fast-path execution.
2300 359402 : if ((iptr->opc == ICMD_BUILTIN) && (bte->emit_fastpath != NULL)) {
2301 8011 : emit_label(cd, BRANCH_LABEL_10);
2302 : }
2303 :
2304 359402 : break;
2305 :
2306 : case ICMD_TABLESWITCH: /* ..., index ==> ... */
2307 :
2308 : // Generate architecture specific instructions.
2309 72 : codegen_emit_instruction(jd, iptr);
2310 72 : break;
2311 :
2312 : case ICMD_LOOKUPSWITCH: /* ..., key ==> ... */
2313 :
2314 60 : s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2315 60 : i = iptr->sx.s23.s2.lookupcount;
2316 :
2317 : // XXX Again we need to check this
2318 60 : MCODECHECK((i<<2)+8); // Alpha, ARM, i386, MIPS, Sparc64
2319 60 : MCODECHECK((i<<3)+8); // PPC64
2320 60 : MCODECHECK(8 + ((7 + 6) * i) + 5); // X86_64, S390
2321 :
2322 : // Compare keys.
2323 540 : for (lookup_target_t* lookup = iptr->dst.lookup; i > 0; ++lookup, --i) {
2324 : #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2325 480 : emit_icmp_imm(cd, s1, lookup->value);
2326 480 : emit_beq(cd, lookup->target.block);
2327 : #elif SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2328 : ICONST(REG_ITMP2, lookup->value);
2329 : emit_beq(cd, lookup->target.block, s1, REG_ITMP2);
2330 : #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2331 : emit_icmpeq_imm(cd, s1, lookup->value, REG_ITMP2);
2332 : emit_bnez(cd, lookup->target.block, REG_ITMP2);
2333 : #else
2334 : # error Unable to generate code for this configuration!
2335 : #endif
2336 : }
2337 :
2338 : // Default branch.
2339 60 : emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
2340 60 : ALIGNCODENOP;
2341 60 : break;
2342 :
2343 : case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
2344 : case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
2345 : case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
2346 :
2347 : // Generate architecture specific instructions.
2348 10473 : codegen_emit_instruction(jd, iptr);
2349 10473 : break;
2350 :
2351 : default:
2352 : exceptions_throw_internalerror("Unknown ICMD %d during code generation",
2353 0 : iptr->opc);
2354 0 : return false;
2355 :
2356 : } // the big switch
2357 :
2358 : } // for all instructions
2359 :
2360 : #if defined(ENABLE_SSA)
2361 : // By edge splitting, in blocks with phi moves there can only
2362 : // be a goto as last command, no other jump/branch command.
2363 : if (ls != NULL) {
2364 : if (!last_cmd_was_goto)
2365 : codegen_emit_phi_moves(jd, bptr);
2366 : }
2367 : #endif
2368 :
2369 : #if defined(__I386__) || defined(__MIPS__) || defined(__S390__) || defined(__SPARC_64__) || defined(__X86_64__)
2370 : // XXX Again!!!
2371 : /* XXX require a lower number? */
2372 319280 : MCODECHECK(64); // I386, MIPS, Sparc64
2373 319280 : MCODECHECK(512); // S390, X86_64
2374 :
2375 : /* XXX We can remove that when we don't use UD2 anymore on i386
2376 : and x86_64. */
2377 :
2378 : /* At the end of a basic block we may have to append some nops,
2379 : because the patcher stub calling code might be longer than the
2380 : actual instruction. So codepatching does not change the
2381 : following block unintentionally. */
2382 :
2383 319280 : if (cd->mcodeptr < cd->lastmcodeptr) {
2384 0 : while (cd->mcodeptr < cd->lastmcodeptr) {
2385 0 : M_NOP;
2386 : }
2387 : }
2388 : #endif
2389 :
2390 319280 : if (bptr->next && bptr->next->type == basicblock::TYPE_EXH)
2391 9964 : fixup_exc_handler_interface(jd, bptr->next);
2392 :
2393 : } // for all basic blocks
2394 :
2395 : // Generate traps.
2396 86399 : emit_patcher_traps(jd);
2397 :
2398 : // Everything's ok.
2399 86399 : return true;
2400 : }
2401 :
2402 :
2403 : /* codegen_emit_phi_moves ****************************************************
2404 :
2405 : Emits phi moves at the end of the basicblock.
2406 :
2407 : *******************************************************************************/
2408 :
2409 : #if defined(ENABLE_SSA)
2410 : void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr) // emit the SSA phi moves pending at the end of basic block bptr
2411 : {
2412 : int lt_d,lt_s,i; // lt_d/lt_s: destination/source lifetime indices; i: phi-move counter
2413 : lsradata *ls;
2414 : codegendata *cd;
2415 : varinfo *s, *d; // source and destination variable info for one move
2416 : instruction tmp_i; // pseudo-instruction handed to emit_copy() below
2417 :
2418 : cd = jd->cd;
2419 : ls = jd->ls;
2420 :
2421 : MCODECHECK(512); // make sure the code buffer can hold all moves emitted below
2422 :
2423 : /* Moves from phi functions with highest indices have to be */
2424 : /* inserted first, since this is the order as is used for */
2425 : /* conflict resolution */
2426 :
2427 : for(i = ls->num_phi_moves[bptr->nr] - 1; i >= 0 ; i--) { // iterate this block's recorded phi moves in reverse
2428 : lt_d = ls->phi_moves[bptr->nr][i][0]; // [0] = destination lifetime
2429 : lt_s = ls->phi_moves[bptr->nr][i][1]; // [1] = source lifetime
2430 : #if defined(SSA_DEBUG_VERBOSE)
2431 : if (compileverbose)
2432 : printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
2433 : #endif
2434 : if (lt_s == jitdata::UNUSED) { // no source lifetime -> nothing to move for this entry
2435 : #if defined(SSA_DEBUG_VERBOSE)
2436 : if (compileverbose)
2437 : printf(" ... not processed \n");
2438 : #endif
2439 : continue;
2440 : }
2441 :
2442 : d = VAR(ls->lifetime[lt_d].v_index);
2443 : s = VAR(ls->lifetime[lt_s].v_index);
2444 :
2445 :
2446 : if (d->type == Type(-1)) { // Type(-1): destination lifetime was joined, move is redundant
2447 : #if defined(SSA_DEBUG_VERBOSE)
2448 : if (compileverbose)
2449 : printf("...returning - phi lifetimes where joined\n");
2450 : #endif
2451 : continue;
2452 : }
2453 :
2454 : if (s->type == Type(-1)) { // same check on the source side
2455 : #if defined(SSA_DEBUG_VERBOSE)
2456 : if (compileverbose)
2457 : printf("...returning - phi lifetimes where joined\n");
2458 : #endif
2459 : continue;
2460 : }
2461 :
2462 : tmp_i.opc = ICMD_NOP; // synthesize a NOP carrying src/dst varindices so emit_copy() generates the move
2463 : tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
2464 : tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
2465 : emit_copy(jd, &tmp_i);
2466 :
2467 : #if defined(SSA_DEBUG_VERBOSE)
2468 : if (compileverbose) { // trace the move kind (register vs. memory on each side)
2469 : if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
2470 : /* mem -> mem */
2471 : printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2472 : }
2473 : else if (IS_INMEMORY(s->flags)) {
2474 : /* mem -> reg */
2475 : printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
2476 : }
2477 : else if (IS_INMEMORY(d->flags)) {
2478 : /* reg -> mem */
2479 : printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2480 : }
2481 : else {
2482 : /* reg -> reg */
2483 : printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
2484 : }
2485 : printf("\n");
2486 : }
2487 : #endif /* defined(SSA_DEBUG_VERBOSE) */
2488 : }
2489 : }
2490 : #endif /* defined(ENABLE_SSA) */
2491 :
2492 :
2493 : /*
2494 : * These are local overrides for various environment variables in Emacs.
2495 : * Please do not remove this and leave it at the end of the file, where
2496 : * Emacs will automagically detect them.
2497 : * ---------------------------------------------------------------------
2498 : * Local variables:
2499 : * mode: c++
2500 : * indent-tabs-mode: t
2501 : * c-basic-offset: 4
2502 : * tab-width: 4
2503 : * End:
2504 : * vim:noexpandtab:sw=4:ts=4:
2505 : */
|