CACAO — src/vm/jit/codegen-common.cpp (Doxygen source listing; the per-line numbers embedded in the text below are artifacts of the documentation extraction, and lines that carried hyperlinks were dropped by it).
1 /* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
2 
3  Copyright (C) 1996-2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5  Copyright (C) 2009 Theobroma Systems Ltd.
6 
7  This file is part of CACAO.
8 
9  This program is free software; you can redistribute it and/or
10  modify it under the terms of the GNU General Public License as
11  published by the Free Software Foundation; either version 2, or (at
12  your option) any later version.
13 
14  This program is distributed in the hope that it will be useful, but
15  WITHOUT ANY WARRANTY; without even the implied warranty of
16  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  General Public License for more details.
18 
19  You should have received a copy of the GNU General Public License
20  along with this program; if not, write to the Free Software
21  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22  02110-1301, USA.
23 
24  All functions assume the following code area / data area layout:
25 
26  +-----------+
27  | |
28  | code area | code area grows to higher addresses
29  | |
30  +-----------+ <-- start of procedure
31  | |
32  | data area | data area grows to lower addresses
33  | |
34  +-----------+
35 
36  The functions first write into a temporary code/data area allocated by
37  "codegen_init". "codegen_finish" copies the code and data area into permanent
38  memory. All functions writing values into the data area return the offset
39  relative the begin of the code area (start of procedure).
40 
41 */
42 
43 
44 #include "config.h"
45 
46 #include <cassert>
47 #include <cstring>
48 
49 #include "vm/types.hpp"
50 
51 #include "codegen.hpp"
52 #include "md.hpp"
53 #include "md-abi.hpp"
54 
55 #include "mm/codememory.hpp"
56 #include "mm/memory.hpp"
57 
58 #include "toolbox/avl.hpp"
59 #include "toolbox/list.hpp"
60 #include "toolbox/logging.hpp"
61 
62 #include "native/llni.hpp"
63 #include "native/localref.hpp"
64 #include "native/native.hpp"
65 
66 #include "vm/descriptor.hpp"
67 #include "vm/exceptions.hpp"
68 #include "vm/field.hpp"
69 #include "vm/options.hpp"
70 #include "vm/statistics.hpp"
71 
72 #include "vm/jit/abi.hpp"
73 #include "vm/jit/code.hpp"
75 
76 #include "vm/jit/builtin.hpp"
77 #include "vm/jit/dseg.hpp"
78 #include "vm/jit/disass.hpp"
80 #include "vm/jit/emit-common.hpp"
81 #include "vm/jit/jit.hpp"
83 #include "vm/jit/methodheader.hpp"
84 #include "vm/jit/methodtree.hpp"
86 #include "vm/jit/replace.hpp"
87 #include "vm/jit/show.hpp"
88 #include "vm/jit/stacktrace.hpp"
89 #include "vm/jit/stubs.hpp"
90 #include "vm/jit/trace.hpp"
91 
93 
94 #if defined(ENABLE_SSA)
96 # include "vm/jit/optimizing/ssa.hpp"
97 #elif defined(ENABLE_LSRA)
98 # include "vm/jit/allocator/lsra.hpp"
99 #endif
100 
101 #if defined(ENABLE_INTRP)
102 #include "vm/jit/intrp/intrp.h"
103 #endif
104 
105 #include "toolbox/logging.hpp"
106 
107 #define DEBUG_NAME "codegen"
108 
109 
110 STAT_REGISTER_VAR(int,count_branches_unresolved,0,"unresolved branches","unresolved branches")
111 STAT_DECLARE_GROUP(function_call_stat)
112 STAT_REGISTER_GROUP_VAR(u8,count_calls_java_to_native,0,"calls java to native","java-to-native calls",function_call_stat)
113 
114 STAT_REGISTER_GROUP(memory_stat,"mem. stat.","Memory usage")
115 STAT_REGISTER_SUM_SUBGROUP(code_data_stat,"code data","Code and data usage",memory_stat)
116 STAT_REGISTER_GROUP_VAR(int,count_code_len,0,"code len","code length",code_data_stat)
117 STAT_REGISTER_GROUP_VAR(int,count_data_len,0,"data len","data length",code_data_stat)
118 
119 struct methodinfo;
120 
121 using namespace cacao;
122 
123 
124 /* codegen_init ****************************************************************
125 
126  TODO
127 
128 *******************************************************************************/
129 
// One-time, global initialization of the code generator.
// NOTE(review): the extraction this listing came from dropped original line
// 132 -- the entire function body -- so what codegen_init actually does is
// not visible here. Recover the body from the pristine sources before
// modifying this function.
130 void codegen_init(void)
131 {
133 }
134 
135 
136 /* codegen_setup ***************************************************************
137 
138  Allocates and initialises code area, data area and references.
139 
140 *******************************************************************************/
141 
143 {
144  //methodinfo *m;
145  codegendata *cd;
146 
147  /* get required compiler data */
148 
149  //m = jd->m;
150  cd = jd->cd;
151 
152  /* initialize members */
153 
154  // Set flags as requested.
157  }
158  else {
159  cd->flags = 0;
160  }
161 
163  cd->mcodeend = cd->mcodebase + MCODEINITSIZE;
164  cd->mcodesize = MCODEINITSIZE;
165 
166  /* initialize mcode variables */
167 
168  cd->mcodeptr = cd->mcodebase;
169  cd->lastmcodeptr = cd->mcodebase;
170 
171 #if defined(ENABLE_INTRP)
172  /* native dynamic superinstructions variables */
173 
174  if (opt_intrp) {
175  cd->ncodebase = (u1*) DumpMemory::allocate(NCODEINITSIZE);
176  cd->ncodesize = NCODEINITSIZE;
177 
178  /* initialize ncode variables */
179 
180  cd->ncodeptr = cd->ncodebase;
181 
182  cd->lastinstwithoutdispatch = ~0; /* no inst without dispatch */
183  cd->superstarts = NULL;
184  }
185 #endif
186 
187  cd->dseg = NULL;
188  cd->dseglen = 0;
189 
190  cd->jumpreferences = NULL;
191 
192 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
193  cd->datareferences = NULL;
194 #endif
195 
197  cd->linenumbers = new DumpList<Linenumber>();
198 }
199 
200 
201 /* codegen_reset ***************************************************************
202 
203  Resets the codegen data structure so we can recompile the method.
204 
205 *******************************************************************************/
206 
207 static void codegen_reset(jitdata *jd)
208 {
209  codeinfo *code;
210  codegendata *cd;
211  basicblock *bptr;
212 
213  /* get required compiler data */
214 
215  code = jd->code;
216  cd = jd->cd;
217 
218  /* reset error flag */
219 
221 
222  /* reset some members, we reuse the code memory already allocated
223  as this should have almost the correct size */
224 
225  cd->mcodeptr = cd->mcodebase;
226  cd->lastmcodeptr = cd->mcodebase;
227 
228  cd->dseg = NULL;
229  cd->dseglen = 0;
230 
231  cd->jumpreferences = NULL;
232 
233 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
234  cd->datareferences = NULL;
235 #endif
236 
238  cd->linenumbers = new DumpList<Linenumber>();
239 
240  /* We need to clear the mpc and the branch references from all
241  basic blocks as they will definitely change. */
242 
243  for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
244  bptr->mpc = -1;
245  bptr->branchrefs = NULL;
246  }
247 
248  /* We need to clear all the patcher references from the codeinfo
249  since they all will be regenerated */
250 
251  patcher_list_reset(code);
252 
253 #if defined(ENABLE_REPLACEMENT)
254  code->rplpoints = NULL;
255  code->rplpointcount = 0;
256  code->regalloc = NULL;
257  code->regalloccount = 0;
258  code->globalcount = 0;
259 #endif
260 }
261 
262 
263 /* codegen_generate ************************************************************
264 
265  Generates the code for the currently compiled method.
266 
267 *******************************************************************************/
268 
270 {
271  codegendata *cd;
272 
273  /* get required compiler data */
274 
275  cd = jd->cd;
276 
277  /* call the machine-dependent code generation function */
278 
279  if (!codegen_emit(jd))
280  return false;
281 
282  /* check for an error */
283 
284  if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
285  /* check for long-branches flag, if it is set we recompile the
286  method */
287 
288 #if !defined(NDEBUG)
289  if (compileverbose)
290  log_message_method("Re-generating code: ", jd->m);
291 #endif
292 
293  /* XXX maybe we should tag long-branches-methods for recompilation */
294 
296  /* we have to reset the codegendata structure first */
297 
298  codegen_reset(jd);
299 
300  /* and restart the compiler run */
301 
302  if (!codegen_emit(jd))
303  return false;
304  }
305  else {
306  os::abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
307  }
308 
309 #if !defined(NDEBUG)
310  if (compileverbose)
311  log_message_method("Re-generating code done: ", jd->m);
312 #endif
313  }
314 
315  /* reallocate the memory and finish the code generation */
316 
317  codegen_finish(jd);
318 
319  /* everything's ok */
320 
321  return true;
322 }
323 
324 
/* codegen_close ***************************************************************

   Global, one-time shutdown hook of the code generator. Currently a no-op.

*******************************************************************************/

void codegen_close(void)
{
	/* TODO: release avl tree on i386 and x86_64 */
}
335 
336 
337 /* codegen_increase ************************************************************
338 
339  Doubles code area.
340 
341 *******************************************************************************/
342 
344 {
345  u1 *oldmcodebase;
346 
347  /* save old mcodebase pointer */
348 
349  oldmcodebase = cd->mcodebase;
350 
351  /* reallocate to new, doubled memory */
352 
354  cd->mcodesize,
355  cd->mcodesize * 2);
356  cd->mcodesize *= 2;
357  cd->mcodeend = cd->mcodebase + cd->mcodesize;
358 
359  /* set new mcodeptr */
360 
361  cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
362 
363 #if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(ENABLE_INTRP) \
364  || defined(__SPARC_64__)
365  /* adjust the pointer to the last patcher position */
366 
367  if (cd->lastmcodeptr != NULL)
368  cd->lastmcodeptr = cd->mcodebase + (cd->lastmcodeptr - oldmcodebase);
369 #endif
370 }
371 
372 
/* codegen_ncode_increase ******************************************************

   Doubles the native dynamic-superinstruction code area (interpreter
   builds only) and returns the given pointer rebased into the new area.

*******************************************************************************/

#if defined(ENABLE_INTRP)
u1 *codegen_ncode_increase(codegendata *cd, u1 *ncodeptr)
{
	/* save old ncodebase pointer so the caller's pointer can be rebased */

	u1 *oldncodebase = cd->ncodebase;

	/* reallocate to new, doubled memory */

	cd->ncodebase = DMREALLOC(cd->ncodebase,
							  u1,
							  cd->ncodesize,
							  cd->ncodesize * 2);
	cd->ncodesize *= 2;

	/* return the new ncodeptr */

	return (cd->ncodebase + (ncodeptr - oldncodebase));
}
#endif
401 
402 
403 /* codegen_add_branch_ref ******************************************************
404 
405  Prepends an branch to the list.
406 
407 *******************************************************************************/
408 
409 void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
410 {
411  branchref *br;
412  s4 branchmpc;
413 
414  STATISTICS(count_branches_unresolved++);
415 
416  /* calculate the mpc of the branch instruction */
417 
418  branchmpc = cd->mcodeptr - cd->mcodebase;
419 
420  br = (branchref*) DumpMemory::allocate(sizeof(branchref));
421 
422  br->branchmpc = branchmpc;
423  br->condition = condition;
424  br->reg = reg;
425  br->options = options;
426  br->next = target->branchrefs;
427 
428  target->branchrefs = br;
429 }
430 
431 
432 /* codegen_resolve_branchrefs **************************************************
433 
434  Resolves and patches the branch references of a given basic block.
435 
436 *******************************************************************************/
437 
439 {
440  branchref *br;
441  u1 *mcodeptr;
442 
443  /* Save the mcodeptr because in the branch emitting functions
444  we generate code somewhere inside already generated code,
445  but we're still in the actual code generation phase. */
446 
447  mcodeptr = cd->mcodeptr;
448 
449  /* just to make sure */
450 
451  assert(bptr->mpc >= 0);
452 
453  for (br = bptr->branchrefs; br != NULL; br = br->next) {
454  /* temporary set the mcodeptr */
455 
456  cd->mcodeptr = cd->mcodebase + br->branchmpc;
457 
458  /* emit_bccz and emit_branch emit the correct code, even if we
459  pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
460 
461  emit_bccz(cd, bptr, br->condition, br->reg, br->options);
462  }
463 
464  /* restore mcodeptr */
465 
466  cd->mcodeptr = mcodeptr;
467 }
468 
469 
470 /* codegen_branch_label_add ****************************************************
471 
472  Append an branch to the label-branch list.
473 
474 *******************************************************************************/
475 
476 void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
477 {
478  // Calculate the current mpc.
479  int32_t mpc = cd->mcodeptr - cd->mcodebase;
480 
482 
483  br->mpc = mpc;
484  br->label = label;
485  br->condition = condition;
486  br->reg = reg;
487  br->options = options;
488 
489  // Add the branch to the list.
490  cd->brancheslabel->push_back(br);
491 }
492 
493 
/* codegen_set_replacement_point ***********************************************

   Record the position of a trappable replacement point: the current code
   offset is stored in the pre-created rplpoint (converted to an absolute
   pc later by codegen_finish) and the rplpoint cursor is advanced.

*******************************************************************************/

#if defined(ENABLE_REPLACEMENT)
void codegen_set_replacement_point(codegendata *cd)
{
	assert(cd->replacementpoint);

	/* store the offset; codegen_finish rebases it onto the entrypoint */

	cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);

#if (defined(__AARCH64__) || defined(__X86_64__)) && defined(ENABLE_COUNTDOWN_TRAPS)
	// Generate countdown trap code.
	methodinfo *m = cd->replacementpoint->method;
	if (cd->replacementpoint->flags & rplpoint::FLAG_COUNTDOWN) {
		// XXX Probably 32 bytes aren't enough for every architecture
		MCODECHECK(32);
		emit_trap_countdown(cd, &(m->hitcountdown));
	}
#endif

	cd->replacementpoint++;

	/* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */

	// NOTE(review): the final statement of this function was dropped by the
	// listing extraction; restored from the upstream CACAO sources (keeps
	// patcher code from overlapping the replacement point) -- verify.
	cd->lastmcodeptr = cd->mcodeptr + PATCHER_CALL_SIZE;
}
#endif /* defined(ENABLE_REPLACEMENT) */
524 
525 
/* codegen_create_replacement_point ********************************************

   Create a replacement point.

   IN:
       jd...............current jitdata
       rp...............pre-allocated (uninitialized) replacement point
       iptr.............current instruction
       *pra.............current rplalloc pointer
       javalocals.......the javalocals at the current point
       stackvars........the stack variables at the current point
       stackdepth.......the stack depth at the current point
       paramcount.......number of parameters at the start of stackvars
       flags............flags for the replacement point

   OUT:
       *pra.............points to the next free rplalloc

*******************************************************************************/

#if defined(ENABLE_REPLACEMENT)
static void codegen_create_replacement_point(
	jitdata     *jd,
	rplpoint    *rp,
	instruction *iptr,
	rplalloc   **pra,
	s4          *javalocals,
	s4          *stackvars,
	s4           stackdepth,
	s4           paramcount,
	u1           flags = 0)
{
	rplalloc *ra = *pra;

	rp->method   = jd->m;
	rp->pc       = NULL;        /* set by codegen */
	rp->regalloc = ra;
	rp->flags    = flags;
	rp->id       = iptr->flags.bits >> INS_FLAG_ID_SHIFT;

	/* store local allocation info of javalocals */

	if (javalocals) {
		for (s4 i = 0; i < rp->method->maxlocals; ++i) {
			s4 varindex = javalocals[i];
			if (varindex == jitdata::UNUSED)
				continue;

			ra->index = i;
			if (varindex >= 0) {
				/* a regular variable: record its register/memory slot */
				varinfo *v = VAR(varindex);
				ra->inmemory = v->flags & (INMEMORY);
				ra->regoff   = v->vv.regoff;
				ra->type     = v->type;
			} else {
				/* a negative index encodes a returnAddress */
				ra->regoff   = RETADDR_FROM_JAVALOCAL(varindex);
				ra->type     = TYPE_RET;
				ra->inmemory = false;
			}
			ra++;
		}
	}

	/* store allocation info of java stack vars */

	for (s4 stack_index = 0; stack_index < stackdepth; stack_index++) {
		s4 varindex = stackvars[stack_index];
		varinfo *v  = VAR(varindex);
		ra->inmemory = v->flags & (INMEMORY);
		ra->index    = (stack_index < paramcount) ? RPLALLOC_PARAM : RPLALLOC_STACK;
		ra->type     = v->type;
		/* XXX how to handle locals on the stack containing returnAddresses? */
		if (v->type == TYPE_RET) {
			assert(varindex >= jd->localcount);
			ra->regoff = v->vv.retaddr->nr;
		} else {
			ra->regoff = v->vv.regoff;
		}
		ra++;
	}

	/* total number of allocations */

	rp->regalloccount = ra - rp->regalloc;

	*pra = ra;

	LOG("created replacement point " << rp << nl);
}
#endif /* defined(ENABLE_REPLACEMENT) */
616 
617 
618 /* codegen_create_replacement_points *******************************************
619 
620  Create the replacement points for the given code.
621 
622  IN:
623  jd...............current jitdata, must not have any replacement points
624 
625  OUT:
626  code->rplpoints.........set to the list of replacement points
627  code->rplpointcount.....number of replacement points
628  code->allocation........list of allocation info
629  code->allocationcount...total length of allocation info list
630  code->globalcount.......number of global allocations at the
631  start of code->allocation
632 
633 *******************************************************************************/
634 
635 #if defined(ENABLE_REPLACEMENT)
636 
637 #define CLEAR_javalocals(array, method) \
638  do { \
639  for (int i=0; i<(method)->maxlocals; ++i) \
640  (array)[i] = jitdata::UNUSED; \
641  } while (0)
642 
643 #define COPY_OR_CLEAR_javalocals(dest, array, method) \
644  do { \
645  if ((array) != NULL) \
646  MCOPY((dest), (array), s4, (method)->maxlocals); \
647  else \
648  CLEAR_javalocals((dest), (method)); \
649  } while (0)
650 
651 #define COUNT_javalocals(array, method, counter) \
652  do { \
653  for (int i=0; i<(method)->maxlocals; ++i) \
654  if ((array)[i] != jitdata::UNUSED) \
655  (counter)++; \
656  } while (0)
657 
// Creates all replacement points (and their register-allocation records) for
// the method in two passes: first count points/allocs, then allocate the
// arrays and fill them. Results are stored in jd->code.
//
// NOTE(review): the extraction this listing came from dropped original lines
// 730, 749 and 855 (a switch case and the two guard conditions for the
// "after side-effecting instructions" points); the code below is therefore
// incomplete and is annotated in place rather than rewritten. Recover the
// dropped lines from the pristine sources before editing.
658 void codegen_create_replacement_points(jitdata *jd)
659 {
660  LOG("create replacement points for method '" << jd->m->name << "'" << nl);
661 
662  /* get required compiler data */
663 
664  codeinfo *code = jd->code;
665  registerdata *rd = jd->rd;
666 
667  /* assert that we wont overwrite already allocated data */
668 
669  assert(code);
670  assert(code->m);
671  assert(code->rplpoints == NULL);
672  assert(code->rplpointcount == 0);
673  assert(code->regalloc == NULL);
674  assert(code->regalloccount == 0);
675  assert(code->globalcount == 0);
676 
677  /* iterate over the basic block list to find replacement points */
678 
679  int count = 0; /* count of replacement points */
680  int alloccount = 0; /* count of rplallocs */
681 
682  s4 *live_javalocals = (s4*) DumpMemory::allocate(sizeof(s4) * jd->maxlocals);
683 
// --- counting pass -------------------------------------------------------
684  for (basicblock *bptr = jd->basicblocks; bptr; bptr = bptr->next) {
685 
686  /* skip dead code */
687 
688  if (bptr->state < basicblock::FINISHED) {
689  continue;
690  }
691 
692  /* get info about this block */
693 
694  methodinfo *method = bptr->method;
695 
696  /* initialize javalocals at the start of this block */
697 
698  COPY_OR_CLEAR_javalocals(live_javalocals, bptr->javalocals, method);
699 
700  /* create replacement points at method entry and control-flow merge
701  points */
702 
703  if (bptr == jd->basicblocks ||
704  (bptr->predecessorcount > 1 && JITDATA_HAS_FLAG_DEOPTIMIZE(jd))) {
705  count++;
706  alloccount += bptr->indepth;
707  COUNT_javalocals(bptr->javalocals, bptr->method, alloccount);
708  }
709 
710  /* iterate over the instructions */
711 
712  instruction *iptr = bptr->iinstr;
713  instruction *iend = iptr + bptr->icount;
714 
715  for (; iptr != iend; ++iptr) {
716  /* record effects on javalocals */
717 
718  switch (iptr->opc) {
719 #if defined(ENABLE_GC_CACAO)
720  case ICMD_BUILTIN:
721  count++;
722  COUNT_javalocals(live_javalocals, method, alloccount);
723  alloccount += iptr->s1.argcount;
724  break;
725 #endif
726 
727  case ICMD_INVOKESTATIC:
728  case ICMD_INVOKESPECIAL:
729  case ICMD_INVOKEVIRTUAL:
// NOTE(review): original line 730 was dropped here; by the parallel second
// pass below it was `case ICMD_INVOKEINTERFACE:` -- verify.
731  count++;
732  COUNT_javalocals(live_javalocals, method, alloccount);
733  alloccount += iptr->s1.argcount;
734  break;
735 
736  case ICMD_ISTORE:
737  case ICMD_LSTORE:
738  case ICMD_FSTORE:
739  case ICMD_DSTORE:
740  case ICMD_ASTORE:
741  stack_javalocals_store(iptr, live_javalocals);
742  break;
743  default:
744  break;
745  }
746 
747  /* count javalocals and stackvars after side-effecting instructions */
748 
// NOTE(review): original line 749 -- the `if (...) {` guard closed at line
// 753 -- was dropped by the extraction and is not reconstructible from this
// view; recover it from the pristine sources.
750  count++;
751  alloccount += iptr->stackdepth_after;
752  COUNT_javalocals(live_javalocals, method, alloccount);
753  }
754  } /* end instruction loop */
755  } /* end basicblock loop */
756 
757  /* if no points were found, there's nothing to do */
758 
759  if (count == 0) {
760  return;
761  }
762 
763  /* allocate replacement point array and allocation array */
764 
765  rplpoint *rplpoints = MNEW(rplpoint, count);
766  rplalloc *allocs = MNEW(rplalloc, alloccount);
767  rplalloc *ra = allocs;
768 
769  /* initialize replacement point structs */
770 
771  rplpoint *rp = rplpoints;
772 
// --- filling pass (mirrors the counting pass above) ----------------------
773  for (basicblock *bptr = jd->basicblocks; bptr; bptr = bptr->next) {
774  /* skip dead code */
775 
776  if (bptr->state < basicblock::FINISHED) {
777  continue;
778  }
779 
780  /* get info about this block */
781 
782  methodinfo *method = bptr->method;
783 
784  /* initialize javalocals at the start of this block */
785 
786  COPY_OR_CLEAR_javalocals(live_javalocals, bptr->javalocals, method);
787 
788  /* create replacement points at method entry and control-flow merge
789  points */
790 
791  if (bptr == jd->basicblocks ||
792  (bptr->predecessorcount > 1 && JITDATA_HAS_FLAG_DEOPTIMIZE(jd))) {
793  u1 flags = 0;
794  /* create countdown traps at targets of backward branches and at the
795  method entry */
796  // XXX for now we only create them at the method entry, until it is
797  // possible to jump into optimized code at backward branch targets.
798 
799  if (bptr == jd->basicblocks && JITDATA_HAS_FLAG_COUNTDOWN(jd)) {
800  flags |= rplpoint::FLAG_TRAPPABLE;
801  flags |= rplpoint::FLAG_COUNTDOWN;
802  }
803 
804  codegen_create_replacement_point(jd, rp++, bptr->iinstr, &ra,
805  live_javalocals, bptr->invars, bptr->indepth, 0, flags);
806  }
807 
808  /* iterate over the instructions */
809 
810  instruction *iptr = bptr->iinstr;
811  instruction *iend = iptr + bptr->icount;
812 
813  for (; iptr != iend; ++iptr) {
814  /* record effects on javalocals */
815 
816  switch (iptr->opc) {
817 #if defined(ENABLE_GC_CACAO)
818  case ICMD_BUILTIN: {
819  methoddesc *md = iptr->sx.s23.s3.bte->md;
820 
// NOTE(review): `&va` on the next line looks like a typo for `&ra` (compare
// the ICMD_INVOKE* case below); this branch only compiles with
// ENABLE_GC_CACAO -- verify and fix against the pristine sources.
821  codegen_create_replacement_point(jd, rp++,
822  iptr, &va, live_javalocals, iptr->sx.s23.s2.args,
823  iptr->s1.argcount,
824  md->paramcount);
825  break;
826  }
827 #endif
828 
829  case ICMD_INVOKESTATIC:
830  case ICMD_INVOKESPECIAL:
831  case ICMD_INVOKEVIRTUAL:
832  case ICMD_INVOKEINTERFACE: {
833  methoddesc *md;
834  INSTRUCTION_GET_METHODDESC(iptr, md);
835 
836  codegen_create_replacement_point(jd, rp++,
837  iptr, &ra, live_javalocals, iptr->sx.s23.s2.args,
838  iptr->s1.argcount,
839  md->paramcount);
840  break;
841  }
842 
843  case ICMD_ISTORE:
844  case ICMD_LSTORE:
845  case ICMD_FSTORE:
846  case ICMD_DSTORE:
847  case ICMD_ASTORE:
848  stack_javalocals_store(iptr, live_javalocals);
849  break;
850  default:
851  break;
852  }
853 
854  /* create replacement points after side-effecting instructions */
855 
// NOTE(review): original line 855 -- the `if (...) {` guard closed at line
// 860, matching the one lost at line 749 -- was dropped by the extraction;
// recover it from the pristine sources.
856  codegen_create_replacement_point(jd, rp++, &iptr[1], &ra,
857  live_javalocals, iptr->stack_after,
858  iptr->stackdepth_after, 0, 0);
859  }
860  } /* end instruction loop */
861  } /* end basicblock loop */
862 
863  assert((rp - rplpoints) == count);
864  assert((ra - allocs) == alloccount);
865 
866  /* store the data in the codeinfo */
867 
868  code->rplpoints = rplpoints;
869  code->rplpointcount = count;
870  code->regalloc = allocs;
871  code->regalloccount = alloccount;
872  code->globalcount = 0;
873  code->memuse = rd->memuse;
874 }
875 
876 #endif /* defined(ENABLE_REPLACEMENT) */
877 
878 
879 /* codegen_finish **************************************************************
880 
881  Finishes the code generation. A new memory, large enough for both
882  data and code, is allocated and data and code are copied together
883  to their final layout, unresolved jumps are resolved, ...
884 
885 *******************************************************************************/
886 
888 {
889  s4 mcodelen;
890 #if defined(ENABLE_INTRP)
891  s4 ncodelen;
892 #endif
893  s4 alignedmcodelen;
894  jumpref *jr;
895  u1 *epoint;
896  s4 alignedlen;
897 
898  /* Get required compiler data. */
899 
900  codeinfo* code = jd->code;
901  codegendata* cd = jd->cd;
902  registerdata* rd = jd->rd;
903 
904  /* prevent compiler warning */
905 
906 #if defined(ENABLE_INTRP)
907  ncodelen = 0;
908 #endif
909 
910  /* calculate the code length */
911 
912  mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
913 
914  STATISTICS(count_code_len += mcodelen);
915  STATISTICS(count_data_len += cd->dseglen);
916 
917  alignedmcodelen = MEMORY_ALIGN(mcodelen, MAX_ALIGN);
918 
919 #if defined(ENABLE_INTRP)
920  if (opt_intrp)
921  ncodelen = cd->ncodeptr - cd->ncodebase;
922  else {
923  ncodelen = 0; /* avoid compiler warning */
924  }
925 #endif
926 
928  alignedlen = alignedmcodelen + cd->dseglen;
929 
930 #if defined(ENABLE_INTRP)
931  if (opt_intrp) {
932  alignedlen += ncodelen;
933  }
934 #endif
935 
936  /* allocate new memory */
937 
938  code->mcodelength = mcodelen + cd->dseglen;
939  code->mcode = CNEW(u1, alignedlen);
940 
941  /* set the entrypoint of the method */
942 
943  assert(code->entrypoint == NULL);
944  code->entrypoint = epoint = (code->mcode + cd->dseglen);
945 
946  /* fill the data segment (code->entrypoint must already be set!) */
947 
948  dseg_finish(jd);
949 
950  /* copy code to the new location */
951 
952  MCOPY((void *) code->entrypoint, cd->mcodebase, u1, mcodelen);
953 
954 #if defined(ENABLE_INTRP)
955  /* relocate native dynamic superinstruction code (if any) */
956 
957  if (opt_intrp) {
958  cd->mcodebase = code->entrypoint;
959 
960  if (ncodelen > 0) {
961  u1 *ncodebase = code->mcode + cd->dseglen + alignedmcodelen;
962 
963  MCOPY((void *) ncodebase, cd->ncodebase, u1, ncodelen);
964 
965  /* flush the instruction and data caches */
966 
967  md_cacheflush(ncodebase, ncodelen);
968 
969  /* set some cd variables for dynamic_super_rerwite */
970 
971  cd->ncodebase = ncodebase;
972 
973  } else {
974  cd->ncodebase = NULL;
975  }
976 
978  }
979 #endif
980 
981  /* Fill runtime information about generated code. */
982 
983  code->stackframesize = cd->stackframesize;
984  code->synchronizedoffset = rd->memuse * 8;
985  code->savedintcount = INT_SAV_CNT - rd->savintreguse;
986  code->savedfltcount = FLT_SAV_CNT - rd->savfltreguse;
987 
988  /* Create the exception table. */
989 
991 
992  /* Create the linenumber table. */
993 
994  code->linenumbertable = new LinenumberTable(jd);
995 
996  /* jump table resolving */
997 
998  for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
999  *((functionptr *) ((ptrint) epoint + jr->tablepos)) =
1000  (functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
1001 
1002  /* patcher resolving */
1003 
1004  patcher_resolve(jd->code);
1005 
1006 #if defined(ENABLE_REPLACEMENT)
1007  /* replacement point resolving */
1008  {
1009  int i;
1010  rplpoint *rp;
1011 
1012  rp = code->rplpoints;
1013  for (i=0; i<code->rplpointcount; ++i, ++rp) {
1014  rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
1015  }
1016  }
1017 #endif
1018 
1019  /* Insert method into methodtree to find the entrypoint. */
1020 
1021  methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);
1022 
1023 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
1024  /* resolve data segment references */
1025 
1026  dseg_resolve_datareferences(jd);
1027 #endif
1028 
1029  /* flush the instruction and data caches */
1030 
1031  md_cacheflush(code->mcode, code->mcodelength);
1032 }
1033 
1034 namespace {
1035 /**
1036  * Outsource stack adjustment logic to reduce in-code `#if defined`s.
1037  *
1038  * @note should be moved to a backend code unit.
1039  */
1040 #if defined(__AARCH64__)
1041 struct FrameInfo {
1042  u1 *sp;
1043  int32_t framesize;
1044  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1045  uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
1046  uint8_t *get_javasp() const { return sp + framesize; }
1047  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
1048  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1049  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
1050 };
1051 #elif defined(__ALPHA__)
1052 struct FrameInfo {
1053  u1 *sp;
1054  int32_t framesize;
1055  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1056  uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
1057  uint8_t *get_javasp() const { return sp + framesize; }
1058  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
1059  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1060  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
1061 };
1062 #elif defined(__ARM__)
1063 struct FrameInfo {
1064  u1 *sp;
1065  int32_t framesize;
1066  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1067  uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
1068  uint8_t *get_javasp() const { return sp + framesize; }
1069  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
1070  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1071  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
1072 };
1073 #elif defined(__I386__)
1074 struct FrameInfo {
1075  u1 *sp;
1076  int32_t framesize;
1077  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1078  uint8_t *get_datasp() const { return sp + framesize; }
1079  uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
1080  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
1081  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1082  uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 2 * SIZEOF_VOID_P); }
1083 };
1084 #elif defined(__MIPS__)
1085 struct FrameInfo {
1086  u1 *sp;
1087  int32_t framesize;
1088  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1089  /* MIPS always uses 8 bytes to store the RA */
1090  uint8_t *get_datasp() const { return sp + framesize - 8; }
1091  uint8_t *get_javasp() const { return sp + framesize; }
1092  uint64_t *get_arg_regs() const {
1093 # if SIZEOF_VOID_P == 8
1094  return (uint64_t *) sp;
1095 # else
1096  return (uint64_t *) (sp + 5 * 8);
1097 # endif
1098  }
1099  uint64_t *get_ret_regs() const {
1100 # if SIZEOF_VOID_P == 8
1101  return (uint64_t *) sp;
1102 # else
1103  return (uint64_t *) (sp + 1 * 8);
1104 # endif
1105  }
1106  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1107 };
1108 #elif defined(__S390__)
1109 struct FrameInfo {
1110  u1 *sp;
1111  int32_t framesize;
1112  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1113  uint8_t *get_datasp() const { return sp + framesize - 8; }
1114  uint8_t *get_javasp() const { return sp + framesize; }
1115  uint64_t *get_arg_regs() const { return (uint64_t *) (sp + 96); }
1116  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1117  uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 96); }
1118 };
1119 #elif defined(__POWERPC__)
1120 struct FrameInfo {
1121  u1 *sp;
1122  int32_t framesize;
1123  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1124  uint8_t *get_datasp() const { return sp + framesize; }
1125  uint8_t *get_javasp() const { return sp + framesize; }
1126  uint64_t *get_arg_regs() const {
1127  return (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
1128  }
1129  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1130  uint64_t *get_ret_regs() const {
1131  return (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
1132  }
1133 };
1134 #elif defined(__POWERPC64__)
1135 struct FrameInfo {
1136  u1 *sp;
1137  int32_t framesize;
1138  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1139  uint8_t *get_datasp() const { return sp + framesize; }
1140  uint8_t *get_javasp() const { return sp + framesize; }
1141  uint64_t *get_arg_regs() const {
1142  return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
1143  }
1144  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1145  uint64_t *get_ret_regs() const {
1146  return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
1147  }
1148 };
1149 #elif defined(__X86_64__)
1150 struct FrameInfo {
1151  u1 *sp;
1152  int32_t framesize;
1153  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
1154  uint8_t *get_datasp() const { return sp + framesize; }
1155  uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
1156  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
1157  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
1158  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
1159 };
1160 #else
1161 // dummy
1162 struct FrameInfo {
1163  u1 *sp;
1164  int32_t framesize;
1165  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {
1166  /* XXX is was unable to do this port for SPARC64, sorry. (-michi) */
1167  /* XXX maybe we need to pass the RA as argument there */
1168  os::abort("codegen_start_native_call: unsupported architecture");
1169  }
1170  uint8_t *get_datasp() const { return NULL; }
1171  uint8_t *get_javasp() const { return NULL; }
1172  uint64_t *get_arg_regs() const { return NULL; }
1173  uint64_t *get_arg_stack() const { return NULL; }
1174  uint64_t *get_ret_regs() const { return NULL; }
1175 };
1176 #endif
1177 
1178 } // end anonymous namespace
1179 
1180 /* codegen_start_native_call ***************************************************
1181 
1182  Prepares the stuff required for a native (JNI) function call:
1183 
1184  - adds a stackframe info structure to the chain, for stacktraces
1185  - prepares the local references table on the stack
1186 
1187  The layout of the native stub stackframe should look like this:
1188 
1189  +---------------------------+ <- java SP (of parent Java function)
1190  | return address |
1191  +---------------------------+ <- data SP
1192  | |
1193  | stackframe info structure |
1194  | |
1195  +---------------------------+
1196  | |
1197  | local references table |
1198  | |
1199  +---------------------------+
1200  | |
1201  | saved registers (if any) |
1202  | |
1203  +---------------------------+
1204  | |
1205  | arguments (if any) |
1206  | |
1207  +---------------------------+ <- current SP (native stub)
1208 
1209 *******************************************************************************/
1210 
{
	assert(sp);
	assert(pv);

	stackframeinfo_t *sfi;
	localref_table *lrt;
	codeinfo *code;
	methodinfo *m;
	int32_t framesize;

	STATISTICS(count_calls_java_to_native++);

	// Get information from method header.
	code = code_get_codeinfo_for_pv(pv);
	assert(code != NULL);

	// The frame must at least hold the stackframe info and the local
	// reference table that are carved out below the data SP.
	framesize = md_stacktrace_get_framesize(code);
	assert(framesize >= (int32_t) (sizeof(stackframeinfo_t) + sizeof(localref_table)));

	// Get the methodinfo.
	// NOTE(review): the assignment of `m` (presumably m = code->m) appears
	// to have been lost in extraction; `m` is asserted and used below --
	// confirm against version control.
	assert(m);

	/* calculate needed values */

	// Architecture-specific accessor for the native stub's frame layout.
	FrameInfo FI(sp,framesize);

	uint8_t *datasp = FI.get_datasp();
	//uint8_t *javasp = FI.get_javasp();
#if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
	uint64_t *arg_regs = FI.get_arg_regs();
	uint64_t *arg_stack = FI.get_arg_stack();
#endif

	/* get data structures from stack */

	// The stackframe info sits directly below the data SP, the local
	// reference table directly below that (see layout comment above).
	sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
	lrt = (localref_table *) (datasp - sizeof(stackframeinfo_t) -
							  sizeof(localref_table));

#if defined(ENABLE_JNI)
	/* add current JNI local references table to this thread */

	localref_table_add(lrt);
#endif

#if !defined(NDEBUG)
# if defined(__AARCH64__) || defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
	/* print the call-trace if necessary */
	/* BEFORE: filling the local reference table */

	// NOTE(review): a guarding condition preceding this call appears to
	// have been elided in extraction -- confirm against version control.
	trace_java_call_enter(m, arg_regs, arg_stack);
# endif
#endif

#if defined(ENABLE_HANDLES)
	/* place all references into the local reference table */
	/* BEFORE: creating stackframeinfo */

	localref_native_enter(m, arg_regs, arg_stack);
#endif

	/* Add a stackframeinfo for this native method. We don't have RA
	   and XPC here. These are determined in
	   stacktrace_stackframeinfo_add. */

	stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);

	/* Return a wrapped classinfo for static methods. */

	if (m->flags & ACC_STATIC)
		return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
	else
		return NULL;
}
1288 
1289 
1290 /* codegen_finish_native_call **************************************************
1291 
1292  Removes the stuff required for a native (JNI) function call.
  Additionally it checks for a pending exception and, if one is set,
  gets the exception object and clears the pointer.
1295 
1296 *******************************************************************************/
1297 
{
	assert(sp);
	assert(pv);

	stackframeinfo_t *sfi;
	java_handle_t *e;
	java_object_t *o;
	codeinfo *code;
	int32_t framesize;


	// Get information from method header.
	code = code_get_codeinfo_for_pv(pv);
	assert(code != NULL);

	framesize = md_stacktrace_get_framesize(code);

	// Get the methodinfo.
	// Only needed for handle unwrapping and call tracing.
#if defined(ENABLE_HANDLES) || !defined(NDEBUG)
	methodinfo *m = code->m;
	assert(m != NULL);
#endif

	/* calculate needed values */

	// Architecture-specific accessor for the native stub's frame layout.
	FrameInfo FI(sp,framesize);

	uint8_t *datasp = FI.get_datasp();
#if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
	uint64_t *ret_regs = FI.get_ret_regs();
#endif

	/* get data structures from stack */

	// The stackframe info sits directly below the data SP.
	sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));

	/* Remove current stackframeinfo from chain. */

	// NOTE(review): the call that removes `sfi` from the chain appears to
	// have been elided in extraction -- confirm against version control.

#if defined(ENABLE_HANDLES)
	/* unwrap the return value from the local reference table */
	/* AFTER: removing the stackframeinfo */
	/* BEFORE: releasing the local reference table */

	localref_native_exit(m, ret_regs);
#endif

	/* get and unwrap the exception */
	/* AFTER: removing the stackframe info */
	/* BEFORE: releasing the local reference table */

	// NOTE(review): the assignment of `e` (fetching and clearing the
	// pending exception) appears to have been elided in extraction --
	// confirm against version control.
	o = LLNI_UNWRAP(e);

#if defined(ENABLE_JNI)
	/* release JNI local references table for this thread */

	// NOTE(review): the release call itself appears to have been elided
	// in extraction -- confirm against version control.
#endif

#if !defined(NDEBUG)
# if defined(__AARCH64__) || defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
	/* print the call-trace if necessary */
	/* AFTER: unwrapping the return value */

	// NOTE(review): a guarding condition preceding this call appears to
	// have been elided in extraction -- confirm against version control.
	trace_java_call_exit(m, ret_regs);
# endif
#endif

	return o;
}
1373 
1374 
1375 /* codegen_reg_of_var **********************************************************
1376 
1377  This function determines a register, to which the result of an
  operation should go, when it is ultimately intended to store the
1379  result in pseudoregister v. If v is assigned to an actual
1380  register, this register will be returned. Otherwise (when v is
1381  spilled) this function returns tempregnum. If not already done,
1382  regoff and flags are set in the stack location.
1383 
1384 *******************************************************************************/
1385 
1386 s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
1387 {
1388  if (!(v->flags & INMEMORY))
1389  return v->vv.regoff;
1390 
1391  return tempregnum;
1392 }
1393 
1394 
1395 /* codegen_reg_of_dst **********************************************************
1396 
1397  This function determines a register, to which the result of an
  operation should go, when it is ultimately intended to store the
1399  result in iptr->dst.var. If dst.var is assigned to an actual
1400  register, this register will be returned. Otherwise (when it is
1401  spilled) this function returns tempregnum. If not already done,
1402  regoff and flags are set in the stack location.
1403 
1404 *******************************************************************************/
1405 
1406 s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
1407 {
1408  return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
1409 }
1410 
1411 /**
1412  * Fix up register locations in the case where control is transferred to an
1413  * exception handler block via normal control flow (no exception).
1414  */
{
	// Exception handlers have exactly 1 in-slot
	assert(bptr->indepth == 1);
	varinfo *var = VAR(bptr->invars[0]);
	// Load the handler's single in-slot into its register, using
	// REG_ITMP1_XPTR as scratch when the variable is spilled.
	int32_t d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
	emit_load(jd, NULL, var, d);
	// Copy the interface variable to ITMP1 (XPTR) because that's where
	// the handler expects it.
	emit_imove(jd->cd, d, REG_ITMP1_XPTR);
}
1426 
1427 /**
1428  * Generates machine code.
1429  */
1431 {
1432  varinfo* var;
1433  builtintable_entry* bte = 0;
1434  methoddesc* md;
1435  int32_t s1, s2, /*s3,*/ d;
1436 #if !defined(__I386__)
1437  int32_t fieldtype;
1438  int32_t disp;
1439 #endif
1440  int i;
1441 
1442  // Get required compiler data.
1443  //methodinfo* m = jd->m;
1444  codeinfo* code = jd->code;
1445  codegendata* cd = jd->cd;
1446  registerdata* rd = jd->rd;
1447 #if defined(ENABLE_SSA)
1448  lsradata* ls = jd->ls;
1449  bool last_cmd_was_goto = false;
1450 #endif
1451 
1452  // Space to save used callee saved registers.
1453  int32_t savedregs_num = 0;
1454  savedregs_num += (INT_SAV_CNT - rd->savintreguse);
1455  savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
1456 
1457  // Calculate size of stackframe.
1458  cd->stackframesize = rd->memuse + savedregs_num;
1459 
1460  // Space to save the return address.
1461 #if STACKFRAME_RA_TOP_OF_FRAME
1462 # if STACKFRAME_LEAFMETHODS_RA_REGISTER
1463  if (!code_is_leafmethod(code))
1464 # endif
1465  cd->stackframesize += 1;
1466 #endif
1467 
1468  // Space to save argument of monitor_enter.
1469  if (checksync && code_is_synchronized(code))
1471  /* On some architectures the stack position for the argument can
1472  not be shared with place to save the return register values to
1473  survive monitor_exit since both values reside in the same register. */
1474  cd->stackframesize += 2;
1475 #else
1476  cd->stackframesize += 1;
1477 #endif
1478 
1479  // Keep stack of non-leaf functions 16-byte aligned for calls into
1480  // native code.
1484 #else
1486 #endif
1487 
1488 #if defined(SPECIALMEMUSE)
1489  // On architectures having a linkage area, we can get rid of the whole
1490  // stackframe in leaf functions without saved registers.
1492  cd->stackframesize = 0;
1493 #endif
1494 
1495  /*
1496  * SECTION 1: Method header generation.
1497  */
1498 
1499  // The method header was reduced to the bare minimum of one pointer
1500  // to the codeinfo structure, which in turn contains all runtime
1501  // information. However this section together with the methodheader.h
1502  // file will be kept alive for historical reasons. It might come in
1503  // handy at some point.
1504 
1505  (void) dseg_add_unique_address(cd, code); ///< CodeinfoPointer
1506 
1507  // XXX, REMOVEME: We still need it for exception handling in assembler.
1508  // XXX ARM: (void) dseg_add_unique_s4(cd, cd->stackframesize);
1509 #if defined(__I386__)
1510  int align_off = (cd->stackframesize != 0) ? 4 : 0;
1511  (void) dseg_add_unique_s4(cd, cd->stackframesize * 8 + align_off); /* FrameSize */
1512 #else
1513  (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize */
1514 #endif
1515  (void) dseg_add_unique_s4(cd, code_is_leafmethod(code) ? 1 : 0);
1516  (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
1517  (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
1518 
1519  /*
1520  * SECTION 2: Method prolog generation.
1521  */
1522 
1523 #if defined(ENABLE_PROFILING)
1524  // Generate method profiling code.
1525  if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1526 
1527  // Count method frequency.
1528  emit_profile_method(cd, code);
1529 
1530  // Start CPU cycle counting.
1531  emit_profile_cycle_start(cd, code);
1532  }
1533 #endif
1534 
1535  // Emit code for the method prolog.
1536  codegen_emit_prolog(jd);
1537 
1538  // Emit code to call monitorenter function.
1539  if (checksync && code_is_synchronized(code))
1540  emit_monitor_enter(jd, rd->memuse * 8);
1541 
1542 #if !defined(NDEBUG)
1543  // Call trace function.
1546 #endif
1547 
1548 #if defined(ENABLE_SSA)
1549  // With SSA the header is basicblock 0, insert phi moves if necessary.
1550  if (ls != NULL)
1551  codegen_emit_phi_moves(jd, ls->basicblocks[0]);
1552 #endif
1553 
1554 #if defined(ENABLE_REPLACEMENT)
1555  // Create replacement points.
1556  codegen_create_replacement_points(jd);
1557  cd->replacementpoint = jd->code->rplpoints;
1558 #endif
1559 
1560  /*
1561  * SECTION 3: ICMD code generation.
1562  */
1563 
1564  // Walk through all basic blocks.
1565  for (basicblock* bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
1566 
1567  bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
1568 
1569  // Is this basic block reached?
1570  if (bptr->state < basicblock::REACHED)
1571  continue;
1572 
1573  // Branch resolving.
1574  codegen_resolve_branchrefs(cd, bptr);
1575 
1576 #if defined(ENABLE_REPLACEMENT)
1577  // Set a replacement point at the start of this block if necessary.
1578  if (bptr == jd->basicblocks ||
1579  (bptr->predecessorcount > 1 && JITDATA_HAS_FLAG_DEOPTIMIZE(jd))) {
1580  codegen_set_replacement_point(cd);
1581  }
1582 #endif
1583 
1584 #if defined(ENABLE_PROFILING)
1585  // Generate basicblock profiling code.
1586  if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1587 
1588  // Count basicblock frequency.
1589  emit_profile_basicblock(cd, code, bptr);
1590 
1591  // If this is an exception handler, start profiling again.
1592  if (bptr->type == basicblock::TYPE_EXH)
1593  emit_profile_cycle_start(cd, code);
1594  }
1595 #endif
1596 
1597  // Copy interface registers to their destination.
1598  int32_t indepth = bptr->indepth;
1599  // XXX Check if this is true for all archs.
1600  MCODECHECK(64+indepth); // All
1601  MCODECHECK(128+indepth); // PPC64
1602  MCODECHECK(512); // I386, X86_64, S390
1603 #if defined(ENABLE_SSA)
1604  // XXX Check if this is correct and add a propper comment!
1605  if (ls != NULL) {
1606  last_cmd_was_goto = false;
1607  } else {
1608 #elif defined(ENABLE_LSRA)
1609  if (opt_lsra) {
1610  while (indepth > 0) {
1611  indepth--;
1612  var = VAR(bptr->invars[indepth]);
1613  if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1614  if (!IS_INMEMORY(src->flags))
1615  d = var->vv.regoff;
1616  else
1617  d = REG_ITMP1_XPTR;
1618  // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1619  // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1620  emit_imove(cd, REG_ITMP1_XPTR, d);
1621  emit_store(jd, NULL, var, d);
1622  }
1623  }
1624  } else {
1625 #endif
1626  while (indepth > 0) {
1627  indepth--;
1628  var = VAR(bptr->invars[indepth]);
1629  if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1630  d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1631  // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1632  // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1633  emit_imove(cd, REG_ITMP1_XPTR, d);
1634  emit_store(jd, NULL, var, d);
1635  }
1636  else {
1637  assert((var->flags & INOUT));
1638  }
1639  }
1640 #if defined(ENABLE_SSA) || defined(ENABLE_LSRA)
1641  }
1642 #endif
1643 
1644  // Walk through all instructions.
1645  int32_t len = bptr->icount;
1646  uint16_t currentline = 0;
1647  for (instruction* iptr = bptr->iinstr; len > 0; len--, iptr++) {
1648 
1649  // Add line number.
1650  if (iptr->line != currentline) {
1652  currentline = iptr->line;
1653  }
1654 
1655  // An instruction usually needs < 64 words.
1656  // XXX Check if this is true for all archs.
1657  MCODECHECK(64); // All
1658  MCODECHECK(128); // PPC64
1659  MCODECHECK(1024); // I386, X86_64, S390 /* 1kB should be enough */
1660 
1661  // The big switch.
1662  switch (iptr->opc) {
1663 
1664  case ICMD_NOP: /* ... ==> ... */
1665  case ICMD_POP: /* ..., value ==> ... */
1666  case ICMD_POP2: /* ..., value, value ==> ... */
1667  break;
1668 
1669  case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
1670 
1671  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1672  emit_nullpointer_check(cd, iptr, s1);
1673  break;
1674 
1675  case ICMD_BREAKPOINT: /* ... ==> ... */
1676  /* sx.val.anyptr = Breakpoint */
1677 
1679  PATCHER_NOPS;
1680  break;
1681 
1682 #if defined(ENABLE_SSA)
1683  case ICMD_GETEXCEPTION:
1684 
1685  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1686  emit_imove(cd, REG_ITMP1, d);
1687  emit_store_dst(jd, iptr, d);
1688  break;
1689 #endif
1690 
1691  /* inline operations **********************************************/
1692 
1693  case ICMD_INLINE_BODY:
1694 
1697  break;
1698 
1699  case ICMD_INLINE_END:
1700 
1703  break;
1704 
1705 
1706  /* constant operations ********************************************/
1707 
1708  case ICMD_ICONST: /* ... ==> ..., constant */
1709 
1710  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1711  ICONST(d, iptr->sx.val.i);
1712  emit_store_dst(jd, iptr, d);
1713  break;
1714 
1715  case ICMD_LCONST: /* ... ==> ..., constant */
1716 
1717  d = codegen_reg_of_dst(jd, iptr, REG_LTMP12);
1718  LCONST(d, iptr->sx.val.l);
1719  emit_store_dst(jd, iptr, d);
1720  break;
1721 
1722 
1723  /* load/store/copy/move operations ********************************/
1724 
1725  case ICMD_COPY:
1726  case ICMD_MOVE:
1727  case ICMD_ILOAD: /* ... ==> ..., content of local variable */
1728  case ICMD_LLOAD: /* s1 = local variable */
1729  case ICMD_FLOAD:
1730  case ICMD_DLOAD:
1731  case ICMD_ALOAD:
1732  case ICMD_ISTORE: /* ..., value ==> ... */
1733  case ICMD_LSTORE:
1734  case ICMD_FSTORE:
1735  case ICMD_DSTORE:
1736 
1737  emit_copy(jd, iptr);
1738  break;
1739 
1740  case ICMD_ASTORE:
1741 
1742  if (!(iptr->flags.bits & INS_FLAG_RETADDR))
1743  emit_copy(jd, iptr);
1744  break;
1745 
1746 
1747  /* integer operations *********************************************/
1748 
1749  case ICMD_FCONST: /* ... ==> ..., constant */
1750  case ICMD_DCONST: /* ... ==> ..., constant */
1751  case ICMD_ACONST: /* ... ==> ..., constant */
1752  case ICMD_INEG: /* ..., value ==> ..., - value */
1753  case ICMD_LNEG: /* ..., value ==> ..., - value */
1754  case ICMD_I2L: /* ..., value ==> ..., value */
1755  case ICMD_L2I: /* ..., value ==> ..., value */
1756  case ICMD_INT2BYTE: /* ..., value ==> ..., value */
1757  case ICMD_INT2CHAR: /* ..., value ==> ..., value */
1758  case ICMD_INT2SHORT: /* ..., value ==> ..., value */
1759  case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1760  case ICMD_IINC:
1761  case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
1762  /* sx.val.i = constant */
1763  case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1764  case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
1765  /* sx.val.l = constant */
1766  case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1767  case ICMD_ISUBCONST: /* ..., value ==> ..., value + constant */
1768  /* sx.val.i = constant */
1769  case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1770  case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
1771  /* sx.val.l = constant */
1772  case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1773  case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
1774  /* sx.val.i = constant */
1775  case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1776  /* sx.val.i = constant */
1777  case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1778  case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
1779  /* sx.val.l = constant */
1780  case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1781  /* sx.val.l = constant */
1782  case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1783  case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1784  case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
1785  /* sx.val.i = constant */
1786  case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
1787  /* sx.val.i = constant */
1788  case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1789  case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1790  case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
1791  /* sx.val.i = constant */
1792  case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
1793  /* sx.val.l = constant */
1794  case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1795  case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
1796  /* sx.val.i = constant */
1797  case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1798  case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
1799  /* sx.val.i = constant */
1800  case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1801  case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
1802  /* sx.val.i = constant */
1803  case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1804  case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
1805  /* sx.val.i = constant */
1806  case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1807  case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
1808  /* sx.val.i = constant */
1809  case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1810  case ICMD_LUSHRCONST: /* ..., value ==> ..., value >>> constant */
1811  /* sx.val.l = constant */
1812  case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1813  case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
1814  /* sx.val.i = constant */
1815  case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1816  case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
1817  /* sx.val.l = constant */
1818  case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1819  case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
1820  /* sx.val.i = constant */
1821  case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1822  case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
1823  /* sx.val.l = constant */
1824  case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1825  case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
1826  /* sx.val.i = constant */
1827  case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1828  case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
1829  /* sx.val.l = constant */
1830 
1831  // Generate architecture specific instructions.
1832  codegen_emit_instruction(jd, iptr);
1833  break;
1834 
1835 
1836  /* floating operations ********************************************/
1837 
1838 #if !defined(ENABLE_SOFTFLOAT)
1839  case ICMD_FNEG: /* ..., value ==> ..., - value */
1840  case ICMD_DNEG:
1841  case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1842  case ICMD_DADD:
1843  case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1844  case ICMD_DSUB:
1845  case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1846  case ICMD_DMUL:
1847  case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1848  case ICMD_DDIV:
1849  case ICMD_FREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1850  case ICMD_DREM:
1851  case ICMD_I2F: /* ..., value ==> ..., (float) value */
1852  case ICMD_I2D: /* ..., value ==> ..., (double) value */
1853  case ICMD_L2F: /* ..., value ==> ..., (float) value */
1854  case ICMD_L2D: /* ..., value ==> ..., (double) value */
1855  case ICMD_F2I: /* ..., value ==> ..., (int) value */
1856  case ICMD_D2I:
1857  case ICMD_F2L: /* ..., value ==> ..., (long) value */
1858  case ICMD_D2L:
1859  case ICMD_F2D: /* ..., value ==> ..., (double) value */
1860  case ICMD_D2F: /* ..., value ==> ..., (float) value */
1861  case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
1862  case ICMD_DCMPL: /* == => 0, < => 1, > => -1 */
1863  case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
1864  case ICMD_DCMPG: /* == => 0, < => 1, > => -1 */
1865 
1866  // Generate architecture specific instructions.
1867  codegen_emit_instruction(jd, iptr);
1868  break;
1869 #endif /* !defined(ENABLE_SOFTFLOAT) */
1870 
1871 
1872  /* memory operations **********************************************/
1873 
1874  case ICMD_ARRAYLENGTH:/* ..., arrayref ==> ..., length */
1875 
1876  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1877  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1878  /* implicit null-pointer check */
1879  // XXX PPC64: Here we had an explicit null-pointer check
1880  // which I think was obsolete, please confirm. Otherwise:
1881  // emit_nullpointer_check(cd, iptr, s1);
1882  M_ILD(d, s1, OFFSET(java_array_t, size));
1883  emit_store_dst(jd, iptr, d);
1884  break;
1885 
1886  case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
1887  case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
1888  case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
1889  case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
1890  case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
1891  case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
1892  case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
1893  case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
1894  case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
1895  case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
1896  case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
1897  case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
1898  case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
1899  case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
1900  case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
1901  case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
1902  case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
1903  case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
1904  case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
1905  case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
1906  case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
1907  case ICMD_FASTORECONST: /* ..., arrayref, index ==> ... */
1908  case ICMD_DASTORECONST: /* ..., arrayref, index ==> ... */
1909  case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
1910  case ICMD_GETFIELD: /* ... ==> ..., value */
1911  case ICMD_PUTFIELD: /* ..., value ==> ... */
1912  case ICMD_PUTFIELDCONST: /* ..., objectref ==> ... */
1913  /* val = value (in current instruction) */
1914  case ICMD_PUTSTATICCONST: /* ... ==> ... */
1915  /* val = value (in current instruction) */
1916 
1917  // Generate architecture specific instructions.
1918  codegen_emit_instruction(jd, iptr);
1919  break;
1920 
1921  case ICMD_GETSTATIC: /* ... ==> ..., value */
1922 
1923 #if defined(__I386__)
1924  // Generate architecture specific instructions.
1925  codegen_emit_instruction(jd, iptr);
1926  break;
1927 #else
1928  {
1929  fieldinfo* fi;
1930  //patchref_t* pr;
1931  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1932  unresolved_field* uf = iptr->sx.s23.s3.uf;
1933  fieldtype = uf->fieldref->parseddesc.fd->type;
1934  disp = dseg_add_unique_address(cd, 0);
1935 
1936  //pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1938 
1939  fi = NULL; /* Silence compiler warning */
1940  }
1941  else {
1942  fi = iptr->sx.s23.s3.fmiref->p.field;
1943  fieldtype = fi->type;
1944  disp = dseg_add_address(cd, fi->value);
1945 
1950  }
1951 
1952  //pr = NULL; /* Silence compiler warning */
1953  }
1954 
1955  // XXX X86_64: Here We had this:
1956  /* This approach is much faster than moving the field
1957  address inline into a register. */
1958 
1959  M_ALD_DSEG(REG_ITMP1, disp);
1960 
1961  switch (fieldtype) {
1962  case TYPE_ADR:
1963  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1964  M_ALD(d, REG_ITMP1, 0);
1965  break;
1966  case TYPE_INT:
1967 #if defined(ENABLE_SOFTFLOAT)
1968  case TYPE_FLT:
1969 #endif
1970  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1971  M_ILD(d, REG_ITMP1, 0);
1972  break;
1973  case TYPE_LNG:
1974 #if defined(ENABLE_SOFTFLOAT)
1975  case TYPE_DBL:
1976 #endif
1977  d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
1978  M_LLD(d, REG_ITMP1, 0);
1979  break;
1980 #if !defined(ENABLE_SOFTFLOAT)
1981  case TYPE_FLT:
1982  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1983  M_FLD(d, REG_ITMP1, 0);
1984  break;
1985  case TYPE_DBL:
1986  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1987  M_DLD(d, REG_ITMP1, 0);
1988  break;
1989 #endif
1990  default:
1991  // Silence compiler warning.
1992  d = 0;
1993  }
1994  emit_store_dst(jd, iptr, d);
1995  break;
1996  }
1997 #endif
1998 
1999  case ICMD_PUTSTATIC: /* ..., value ==> ... */
2000 
2001 #if defined(__I386__)
2002  // Generate architecture specific instructions.
2003  codegen_emit_instruction(jd, iptr);
2004  break;
2005 #else
2006  {
2007  fieldinfo* fi;
2008 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
2009  patchref_t* pr = NULL;
2010 #endif
2011 
2012  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2013  unresolved_field* uf = iptr->sx.s23.s3.uf;
2014  fieldtype = uf->fieldref->parseddesc.fd->type;
2015  disp = dseg_add_unique_address(cd, 0);
2016 
2017 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
2018  pr =
2019 #endif
2021 
2022  fi = NULL; /* Silence compiler warning */
2023  }
2024  else {
2025  fi = iptr->sx.s23.s3.fmiref->p.field;
2026  fieldtype = fi->type;
2027  disp = dseg_add_address(cd, fi->value);
2028 
2033  }
2034  }
2035 
2036  // XXX X86_64: Here We had this:
2037  /* This approach is much faster than moving the field
2038  address inline into a register. */
2039 
2040  M_ALD_DSEG(REG_ITMP1, disp);
2041 
2042  switch (fieldtype) {
2043  case TYPE_ADR:
2044  s1 = emit_load_s1(jd, iptr, REG_ITMP2);
2045  M_AST(s1, REG_ITMP1, 0);
2046  break;
2047  case TYPE_INT:
2048 #if defined(ENABLE_SOFTFLOAT)
2049  case TYPE_FLT:
2050 #endif
2051  s1 = emit_load_s1(jd, iptr, REG_ITMP2);
2052  M_IST(s1, REG_ITMP1, 0);
2053  break;
2054  case TYPE_LNG:
2055 #if defined(ENABLE_SOFTFLOAT)
2056  case TYPE_DBL:
2057 #endif
2058  s1 = emit_load_s1(jd, iptr, REG_LTMP23);
2059  M_LST(s1, REG_ITMP1, 0);
2060  break;
2061 #if !defined(ENABLE_SOFTFLOAT)
2062  case TYPE_FLT:
2063  s1 = emit_load_s1(jd, iptr, REG_FTMP2);
2064  M_FST(s1, REG_ITMP1, 0);
2065  break;
2066  case TYPE_DBL:
2067  s1 = emit_load_s1(jd, iptr, REG_FTMP2);
2068  M_DST(s1, REG_ITMP1, 0);
2069  break;
2070 #endif
2071  }
2072 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
2073  codegen_emit_patchable_barrier(iptr, cd, pr, fi);
2074 #endif
2075  break;
2076  }
2077 #endif
2078 
2079  /* branch operations **********************************************/
2080 
2081  case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
2082 
2083  // We might leave this method, stop profiling.
2085 
2086  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2087  // XXX Sparc64: We use REG_ITMP2_XPTR here, fix me!
2088  emit_imove(cd, s1, REG_ITMP1_XPTR);
2089 
2090 #ifdef ENABLE_VERIFIER
2091  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2092  unresolved_class *uc = iptr->sx.s23.s2.uc;
2094  }
2095 #endif /* ENABLE_VERIFIER */
2096 
2097  // Generate architecture specific instructions.
2098  codegen_emit_instruction(jd, iptr);
2099  ALIGNCODENOP;
2100  break;
2101 
2102  case ICMD_GOTO: /* ... ==> ... */
2103  case ICMD_RET: /* ... ==> ... */
2104 
2105 #if defined(ENABLE_SSA)
2106  // In case of a goto, phimoves have to be inserted
2107  // before the jump.
2108  if (ls != NULL) {
2109  last_cmd_was_goto = true;
2110  codegen_emit_phi_moves(jd, bptr);
2111  }
2112 #endif
2113  if (iptr->dst.block->type == basicblock::TYPE_EXH)
2115  emit_br(cd, iptr->dst.block);
2116  ALIGNCODENOP;
2117  break;
2118 
2119  case ICMD_JSR: /* ... ==> ... */
2120 
2121  assert(iptr->sx.s23.s3.jsrtarget.block->type != basicblock::TYPE_EXH);
2122  emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
2123  ALIGNCODENOP;
2124  break;
2125 
2126  case ICMD_IFNULL: /* ..., value ==> ... */
2127  case ICMD_IFNONNULL:
2128 
2129  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
2130  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2131 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2132  emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
2133 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2134  M_TEST(s1);
2135  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
2136 #else
2137 # error Unable to generate code for this configuration!
2138 #endif
2139  break;
2140 
2141  case ICMD_IFEQ: /* ..., value ==> ... */
2142  case ICMD_IFNE:
2143  case ICMD_IFLT:
2144  case ICMD_IFLE:
2145  case ICMD_IFGT:
2146  case ICMD_IFGE:
2147 
2148  // XXX Sparc64: int compares must not branch on the
2149  // register directly. Reason is, that register content is
2150  // not 32-bit clean. Fix this!
2151 
2152  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
2153 
2154 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2155  if (iptr->sx.val.i == 0) {
2156  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2157  emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, s1, BRANCH_OPT_NONE);
2158  } else {
2159  // Generate architecture specific instructions.
2160  codegen_emit_instruction(jd, iptr);
2161  }
2162 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2163  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2164  emit_icmp_imm(cd, s1, iptr->sx.val.i);
2165  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
2166 #else
2167 # error Unable to generate code for this configuration!
2168 #endif
2169  break;
2170 
2171  case ICMD_IF_LEQ: /* ..., value ==> ... */
2172  case ICMD_IF_LNE:
2173  case ICMD_IF_LLT:
2174  case ICMD_IF_LGE:
2175  case ICMD_IF_LGT:
2176  case ICMD_IF_LLE:
2177 
2178  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
2179 
2180  // Generate architecture specific instructions.
2181  codegen_emit_instruction(jd, iptr);
2182  break;
2183 
2184  case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
2185  case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
2186 
2187  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
2188 
2189  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2190  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
2191 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2192  switch (iptr->opc) {
2193  case ICMD_IF_ACMPEQ:
2194  emit_beq(cd, iptr->dst.block, s1, s2);
2195  break;
2196  case ICMD_IF_ACMPNE:
2197  emit_bne(cd, iptr->dst.block, s1, s2);
2198  break;
2199  default:
2200  break;
2201  }
2202 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2203  M_ACMP(s1, s2);
2204  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
2205 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2206  M_CMPEQ(s1, s2, REG_ITMP1);
2207  switch (iptr->opc) {
2208  case ICMD_IF_ACMPEQ:
2209  emit_bnez(cd, iptr->dst.block, REG_ITMP1);
2210  break;
2211  case ICMD_IF_ACMPNE:
2212  emit_beqz(cd, iptr->dst.block, REG_ITMP1);
2213  break;
2214  default:
2215  break;
2216  }
2217 #else
2218 # error Unable to generate code for this configuration!
2219 #endif
2220  break;
2221 
2222  case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
2223  case ICMD_IF_ICMPNE: /* op1 = target JavaVM pc */
2224 
2225  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
2226 
2227 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2228  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2229  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
2230  switch (iptr->opc) {
2231  case ICMD_IF_ICMPEQ:
2232  emit_beq(cd, iptr->dst.block, s1, s2);
2233  break;
2234  case ICMD_IF_ICMPNE:
2235  emit_bne(cd, iptr->dst.block, s1, s2);
2236  break;
2237  }
2238  break;
2239 #else
2240  /* fall-through */
2241 #endif
2242 
2243  case ICMD_IF_ICMPLT: /* ..., value, value ==> ... */
2244  case ICMD_IF_ICMPGT: /* op1 = target JavaVM pc */
2245  case ICMD_IF_ICMPLE:
2246  case ICMD_IF_ICMPGE:
2247 
2248  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
2249 
2250  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2251  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
2252 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2253 # if defined(__I386__) || defined(__X86_64__)
2254  // XXX Fix this soon!!!
2255  M_ICMP(s2, s1);
2256 # else
2257  M_ICMP(s1, s2);
2258 # endif
2259  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
2260 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2261  // Generate architecture specific instructions.
2262  codegen_emit_instruction(jd, iptr);
2263 #else
2264 # error Unable to generate code for this configuration!
2265 #endif
2266  break;
2267 
2268  case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
2269  case ICMD_IF_LCMPNE: /* op1 = target JavaVM pc */
2270  case ICMD_IF_LCMPLT:
2271  case ICMD_IF_LCMPGT:
2272  case ICMD_IF_LCMPLE:
2273  case ICMD_IF_LCMPGE:
2274 
2275  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
2276 
2277  // Generate architecture specific instructions.
2278  codegen_emit_instruction(jd, iptr);
2279  break;
2280 
2281  case ICMD_RETURN: /* ... ==> ... */
2282 
2283  goto nowperformreturn;
2284 
2285  case ICMD_ARETURN: /* ..., retvalue ==> ... */
2286 
2287  s1 = emit_load_s1(jd, iptr, REG_RESULT);
2288  // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
2289  emit_imove(cd, s1, REG_RESULT);
2290 
2291 #ifdef ENABLE_VERIFIER
2292  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2294  unresolved_class *uc = iptr->sx.s23.s2.uc;
2297  }
2298 #endif /* ENABLE_VERIFIER */
2299  goto nowperformreturn;
2300 
2301  case ICMD_IRETURN: /* ..., retvalue ==> ... */
2302 #if defined(ENABLE_SOFTFLOAT)
2303  case ICMD_FRETURN:
2304 #endif
2305 
2306  s1 = emit_load_s1(jd, iptr, REG_RESULT);
2307  // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
2308  emit_imove(cd, s1, REG_RESULT);
2309  goto nowperformreturn;
2310 
2311  case ICMD_LRETURN: /* ..., retvalue ==> ... */
2312 #if defined(ENABLE_SOFTFLOAT)
2313  case ICMD_DRETURN:
2314 #endif
2315 
2316  s1 = emit_load_s1(jd, iptr, REG_LRESULT);
2317  // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
2318  emit_lmove(cd, s1, REG_LRESULT);
2319  goto nowperformreturn;
2320 
2321 #if !defined(ENABLE_SOFTFLOAT)
2322  case ICMD_FRETURN: /* ..., retvalue ==> ... */
2323 
2324  s1 = emit_load_s1(jd, iptr, REG_FRESULT);
2325 #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2326  M_CAST_F2I(s1, REG_RESULT);
2327 #else
2328  emit_fmove(cd, s1, REG_FRESULT);
2329 #endif
2330  goto nowperformreturn;
2331 
2332  case ICMD_DRETURN: /* ..., retvalue ==> ... */
2333 
2334  s1 = emit_load_s1(jd, iptr, REG_FRESULT);
2335 #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2336  M_CAST_D2L(s1, REG_LRESULT);
2337 #else
2338  emit_dmove(cd, s1, REG_FRESULT);
2339 #endif
2340  goto nowperformreturn;
2341 #endif
2342 
2343 nowperformreturn:
2344 #if !defined(NDEBUG)
2345  // Call trace function.
2348 #endif
2349 
2350  // Emit code to call monitorexit function.
2351  if (checksync && code_is_synchronized(code)) {
2352  emit_monitor_exit(jd, rd->memuse * 8);
2353  }
2354 
2355  // Generate method profiling code.
2357 
2358  // Emit code for the method epilog.
2359  codegen_emit_epilog(jd);
2360  ALIGNCODENOP;
2361  break;
2362 
2363  case ICMD_BUILTIN: /* ..., [arg1, [arg2 ...]] ==> ... */
2364 
2365  bte = iptr->sx.s23.s3.bte;
2366  md = bte->md;
2367 
2368 #if defined(ENABLE_ESCAPE_REASON) && defined(__I386__)
2369  if (bte->fp == BUILTIN_escape_reason_new) {
2370  void set_escape_reasons(void *);
2371  M_ASUB_IMM(8, REG_SP);
2372  M_MOV_IMM(iptr->escape_reasons, REG_ITMP1);
2373  M_AST(EDX, REG_SP, 4);
2374  M_AST(REG_ITMP1, REG_SP, 0);
2375  M_MOV_IMM(set_escape_reasons, REG_ITMP1);
2376  M_CALL(REG_ITMP1);
2377  M_ALD(EDX, REG_SP, 4);
2378  M_AADD_IMM(8, REG_SP);
2379  }
2380 #endif
2381 
2382  // Emit the fast-path if available.
2383  if (bte->emit_fastpath != NULL) {
2384  void (*emit_fastpath)(jitdata* jd, instruction* iptr, int d);
2385  emit_fastpath = (void (*)(jitdata* jd, instruction* iptr, int d)) bte->emit_fastpath;
2386 
2387  assert(md->returntype.type == TYPE_VOID);
2388  d = REG_ITMP1;
2389 
2390  // Actually call the fast-path emitter.
2391  emit_fastpath(jd, iptr, d);
2392 
2393  // If fast-path succeeded, jump to the end of the builtin
2394  // invocation.
2395  // XXX Actually the slow-path block below should be moved
2396  // out of the instruction stream and the jump below should be
2397  // inverted.
2398 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2399  os::abort("codegen_emit: Implement jump over slow-path for this configuration.");
2400 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2401  M_TEST(d);
2402  emit_label_bne(cd, BRANCH_LABEL_10);
2403 #else
2404 # error Unable to generate code for this configuration!
2405 #endif
2406  }
2407 
2408  goto gen_method;
2409 
2410  case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
2411  case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
2412  case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
2413  case ICMD_INVOKEINTERFACE:
2414 
2415 #if defined(ENABLE_REPLACEMENT)
2416  codegen_set_replacement_point(cd);
2417 #endif
2418 
2419  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2420  unresolved_method* um = iptr->sx.s23.s3.um;
2421  md = um->methodref->parseddesc.md;
2422  }
2423  else {
2424  methodinfo* lm = iptr->sx.s23.s3.fmiref->p.method;
2425  md = lm->parseddesc;
2426  }
2427 
2428 gen_method:
2429  i = md->paramcount;
2430 
2431  // XXX Check this again!
2432  MCODECHECK((i << 1) + 64); // PPC
2433 
2434  // Copy arguments to registers or stack location.
2435  for (i = i - 1; i >= 0; i--) {
2436  var = VAR(iptr->sx.s23.s2.args[i]);
2437  d = md->params[i].regoff;
2438 
2439  // Already pre-allocated?
2440  if (var->flags & PREALLOC)
2441  continue;
2442 
2443  if (!md->params[i].inmemory) {
2444  switch (var->type) {
2445  case TYPE_ADR:
2446  case TYPE_INT:
2447 #if defined(ENABLE_SOFTFLOAT)
2448  case TYPE_FLT:
2449 #endif
2450  s1 = emit_load(jd, iptr, var, d);
2451  emit_imove(cd, s1, d);
2452  break;
2453 
2454  case TYPE_LNG:
2455 #if defined(ENABLE_SOFTFLOAT)
2456  case TYPE_DBL:
2457 #endif
2458  s1 = emit_load(jd, iptr, var, d);
2459  emit_lmove(cd, s1, d);
2460  break;
2461 
2462 #if !defined(ENABLE_SOFTFLOAT)
2463  case TYPE_FLT:
2464 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2465  s1 = emit_load(jd, iptr, var, d);
2466  emit_fmove(cd, s1, d);
2467 #else
2468  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2469  M_CAST_F2I(s1, d);
2470 #endif
2471  break;
2472 
2473  case TYPE_DBL:
2474 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2475  s1 = emit_load(jd, iptr, var, d);
2476  emit_dmove(cd, s1, d);
2477 #else
2478  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2479  M_CAST_D2L(s1, d);
2480 #endif
2481  break;
2482 #endif
2483  default:
2484  assert(false);
2485  break;
2486  }
2487  }
2488  else {
2489  switch (var->type) {
2490  case TYPE_ADR:
2491  s1 = emit_load(jd, iptr, var, REG_ITMP1);
2492  // XXX Sparc64: Here this actually was:
2493  // M_STX(s1, REG_SP, JITSTACK + d);
2494  M_AST(s1, REG_SP, d);
2495  break;
2496 
2497  case TYPE_INT:
2498 #if defined(ENABLE_SOFTFLOAT)
2499  case TYPE_FLT:
2500 #endif
2501 #if SIZEOF_VOID_P == 4
2502  s1 = emit_load(jd, iptr, var, REG_ITMP1);
2503  M_IST(s1, REG_SP, d);
2504  break;
2505 #else
2506  /* fall-through */
2507 #endif
2508 
2509  case TYPE_LNG:
2510 #if defined(ENABLE_SOFTFLOAT)
2511  case TYPE_DBL:
2512 #endif
2513  s1 = emit_load(jd, iptr, var, REG_LTMP12);
2514  // XXX Sparc64: Here this actually was:
2515  // M_STX(s1, REG_SP, JITSTACK + d);
2516  M_LST(s1, REG_SP, d);
2517  break;
2518 
2519 #if !defined(ENABLE_SOFTFLOAT)
2520  case TYPE_FLT:
2521  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2522  M_FST(s1, REG_SP, d);
2523  break;
2524 
2525  case TYPE_DBL:
2526  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2527  // XXX Sparc64: Here this actually was:
2528  // M_DST(s1, REG_SP, JITSTACK + d);
2529  M_DST(s1, REG_SP, d);
2530  break;
2531 #endif
2532  default:
2533  assert(false);
2534  break;
2535  }
2536  }
2537  }
2538 
2539  // Generate method profiling code.
2541 
2542  // Generate architecture specific instructions.
2543  codegen_emit_instruction(jd, iptr);
2544 
2545  // Generate method profiling code.
2547 
2548 #if defined(ENABLE_REPLACEMENT)
2549  // Store size of call code in replacement point.
2550  if (iptr->opc != ICMD_BUILTIN) {
2551  // Store size of call code in replacement point.
2552  cd->replacementpoint[-1].callsize = (cd->mcodeptr - cd->mcodebase)
2553  - (ptrint) cd->replacementpoint[-1].pc;
2554  }
2555 #endif
2556 
2557  // Recompute the procedure vector (PV).
2558  emit_recompute_pv(cd);
2559 
2560  // Store return value.
2561 #if defined(ENABLE_SSA)
2562  if ((ls == NULL) /* || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) */ ||
2563  (ls->lifetime[iptr->dst.varindex].type != jitdata::UNUSED))
2564  /* a "living" stackslot */
2565 #endif
2566  switch (md->returntype.type) {
2567  case TYPE_INT:
2568  case TYPE_ADR:
2569 #if defined(ENABLE_SOFTFLOAT)
2570  case TYPE_FLT:
2571 #endif
2572  s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2573  // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2574  emit_imove(cd, REG_RESULT, s1);
2575  emit_store_dst(jd, iptr, s1);
2576  break;
2577 
2578  case TYPE_LNG:
2579 #if defined(ENABLE_SOFTFLOAT)
2580  case TYPE_DBL:
2581 #endif
2582  s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
2583  // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2584  emit_lmove(cd, REG_LRESULT, s1);
2585  emit_store_dst(jd, iptr, s1);
2586  break;
2587 
2588 #if !defined(ENABLE_SOFTFLOAT)
2589  case TYPE_FLT:
2590 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2591  s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2592  emit_fmove(cd, REG_FRESULT, s1);
2593 #else
2594  s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2595  M_CAST_I2F(REG_RESULT, s1);
2596 #endif
2597  emit_store_dst(jd, iptr, s1);
2598  break;
2599 
2600  case TYPE_DBL:
2601 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2602  s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2603  emit_dmove(cd, REG_FRESULT, s1);
2604 #else
2605  s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2606  M_CAST_L2D(REG_LRESULT, s1);
2607 #endif
2608  emit_store_dst(jd, iptr, s1);
2609  break;
2610 #endif
2611 
2612  case TYPE_VOID:
2613  break;
2614  default:
2615  assert(false);
2616  break;
2617  }
2618 
2619  // If we are emitting a fast-path block, this is the label for
2620  // successful fast-path execution.
2621  if ((iptr->opc == ICMD_BUILTIN) && (bte->emit_fastpath != NULL)) {
2623  }
2624 
2625  break;
2626 
2627  case ICMD_TABLESWITCH: /* ..., index ==> ... */
2628 
2629  // Generate architecture specific instructions.
2630  codegen_emit_instruction(jd, iptr);
2631  break;
2632 
2633  case ICMD_LOOKUPSWITCH: /* ..., key ==> ... */
2634 
2635  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2636  i = iptr->sx.s23.s2.lookupcount;
2637 
2638  // XXX Again we need to check this
2639  MCODECHECK((i<<2)+8); // Alpha, ARM, i386, MIPS, Sparc64
2640  MCODECHECK((i<<3)+8); // PPC64
2641  MCODECHECK(8 + ((7 + 6) * i) + 5); // X86_64, S390
2642 
2643  // Compare keys.
2644  for (lookup_target_t* lookup = iptr->dst.lookup; i > 0; ++lookup, --i) {
2645 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2646  emit_icmp_imm(cd, s1, lookup->value);
2647  emit_beq(cd, lookup->target.block);
2648 #elif SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2649  ICONST(REG_ITMP2, lookup->value);
2650  emit_beq(cd, lookup->target.block, s1, REG_ITMP2);
2651 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2652  emit_icmpeq_imm(cd, s1, lookup->value, REG_ITMP2);
2653  emit_bnez(cd, lookup->target.block, REG_ITMP2);
2654 #else
2655 # error Unable to generate code for this configuration!
2656 #endif
2657  }
2658 
2659  // Default branch.
2660  emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
2661  ALIGNCODENOP;
2662  break;
2663 
2664  case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
2665  case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
2666  case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
2667 
2668  // Generate architecture specific instructions.
2669  codegen_emit_instruction(jd, iptr);
2670  break;
2671 
2672  default:
2673  exceptions_throw_internalerror("Unknown ICMD %d during code generation",
2674  iptr->opc);
2675  return false;
2676 
2677  } // the big switch
2678 
2679 #if defined(ENABLE_REPLACEMENT)
2681  codegen_set_replacement_point(cd);
2682  }
2683 #endif
2684 
2685  } // for all instructions
2686 
2687 #if defined(ENABLE_SSA)
2688  // By edge splitting, in blocks with phi moves there can only
2689  // be a goto as last command, no other jump/branch command.
2690  if (ls != NULL) {
2691  if (!last_cmd_was_goto)
2692  codegen_emit_phi_moves(jd, bptr);
2693  }
2694 #endif
2695 
2696 #if defined(__I386__) || defined(__MIPS__) || defined(__S390__) || defined(__SPARC_64__) || defined(__X86_64__)
2697  // XXX Again!!!
2698  /* XXX require a lower number? */
2699  MCODECHECK(64); // I386, MIPS, Sparc64
2700  MCODECHECK(512); // S390, X86_64
2701 
2702  /* XXX We can remove that when we don't use UD2 anymore on i386
2703  and x86_64. */
2704 
2705  /* At the end of a basic block we may have to append some nops,
2706  because the patcher stub calling code might be longer than the
2707  actual instruction. So codepatching does not change the
2708  following block unintentionally. */
2709 
2710  if (cd->mcodeptr < cd->lastmcodeptr) {
2711  while (cd->mcodeptr < cd->lastmcodeptr) {
2712  M_NOP;
2713  }
2714  }
2715 #endif
2716 
2717  if (bptr->next && bptr->next->type == basicblock::TYPE_EXH)
2718  fixup_exc_handler_interface(jd, bptr->next);
2719 
2720  } // for all basic blocks
2721 
2722  // Generate traps.
2723  emit_patcher_traps(jd);
2724 
2725  // Everything's ok.
2726  return true;
2727 }
2728 
2729 
2730 /* codegen_emit_phi_moves ****************************************************
2731 
2732  Emits phi moves at the end of the basicblock.
2733 
2734 *******************************************************************************/
2735 
#if defined(ENABLE_SSA)
/* codegen_emit_phi_moves: emit the phi-function moves pending at the
   end of basicblock BPTR.  Moves are emitted from the highest phi-move
   index downwards, because that is the order the SSA conflict
   resolution assumed when it recorded them. */
void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
{
	codegendata *cd = jd->cd;   /* needed implicitly by MCODECHECK */
	lsradata    *ls = jd->ls;

	MCODECHECK(512);

	for (int idx = ls->num_phi_moves[bptr->nr] - 1; idx >= 0; idx--) {
		int dst_lt = ls->phi_moves[bptr->nr][idx][0];
		int src_lt = ls->phi_moves[bptr->nr][idx][1];
#if defined(SSA_DEBUG_VERBOSE)
		if (compileverbose)
			printf("BB %3i Move %3i <- %3i ", bptr->nr, dst_lt, src_lt);
#endif
		/* A move without a source lifetime needs no code. */
		if (src_lt == jitdata::UNUSED) {
#if defined(SSA_DEBUG_VERBOSE)
			if (compileverbose)
				printf(" ... not processed \n");
#endif
			continue;
		}

		varinfo *d = VAR(ls->lifetime[dst_lt].v_index);
		varinfo *s = VAR(ls->lifetime[src_lt].v_index);

		/* Skip moves whose phi lifetimes were already joined on
		   either side (marked by an invalid type). */
		if (d->type == Type(-1) || s->type == Type(-1)) {
#if defined(SSA_DEBUG_VERBOSE)
			if (compileverbose)
				printf("...returning - phi lifetimes where joined\n");
#endif
			continue;
		}

		/* Build a dummy NOP instruction carrying only the variable
		   indices, so the generic copy emitter can do the move. */
		instruction tmp_i;
		tmp_i.opc          = ICMD_NOP;
		tmp_i.s1.varindex  = ls->lifetime[src_lt].v_index;
		tmp_i.dst.varindex = ls->lifetime[dst_lt].v_index;
		emit_copy(jd, &tmp_i);

#if defined(SSA_DEBUG_VERBOSE)
		if (compileverbose) {
			if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
				/* mem -> mem */
				printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
			}
			else if (IS_INMEMORY(s->flags)) {
				/* mem -> reg */
				printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
			}
			else if (IS_INMEMORY(d->flags)) {
				/* reg -> mem */
				printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
			}
			else {
				/* reg -> reg */
				printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
			}
			printf("\n");
		}
#endif /* defined(SSA_DEBUG_VERBOSE) */
	}
}
#endif /* defined(ENABLE_SSA) */
2818 
2819 
2820 /*
2821  * These are local overrides for various environment variables in Emacs.
2822  * Please do not remove this and leave it at the end of the file, where
2823  * Emacs will automagically detect them.
2824  * ---------------------------------------------------------------------
2825  * Local variables:
2826  * mode: c++
2827  * indent-tabs-mode: t
2828  * c-basic-offset: 4
2829  * tab-width: 4
2830  * End:
2831  * vim:noexpandtab:sw=4:ts=4:
2832  */
void codegen_emit_instruction(jitdata *jd, instruction *iptr)
Generates machine code for one ICMD.
Definition: codegen.cpp:218
#define REG_SP
Definition: md-abi.hpp:55
val_operand_t val
#define CODEGENDATA_FLAG_ERROR
Utf8String name
Definition: method.hpp:71
#define pv
Definition: md-asm.hpp:65
basicblock * block
static bool instruction_has_side_effects(const instruction *iptr)
union varinfo::@19 vv
basicblock * target
s4 emit_load_s1(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:63
static void codegen_reset(jitdata *jd)
#define M_ALD(a, b, disp)
Definition: codegen.hpp:351
void localref_native_exit(methodinfo *m, uint64_t *return_regs)
#define STATISTICS(x)
Wrapper for statistics only code.
Definition: statistics.hpp:975
DumpList< Linenumber > * linenumbers
List of line numbers.
#define BRANCH_OPT_NONE
basicblock * basicblocks
Definition: jit.hpp:141
#define STACKFRAME_SYNC_NEEDS_TWO_SLOTS
Definition: arch.hpp:114
void codegen_close(void)
Definition: jit.hpp:126
void emit_monitor_exit(jitdata *jd, int32_t syncslot_offset)
Generates synchronization code to leave a monitor.
Definition: emit.cpp:589
#define M_LST(a, b, disp)
Definition: codegen.hpp:355
#define ra
Definition: md-asm.hpp:62
Definition: stack.hpp:46
paramdesc * params
Definition: descriptor.hpp:164
#define STAT_REGISTER_SUM_SUBGROUP(var, name, description, group)
Register a statistics summary group.
Definition: statistics.hpp:973
void linenumbertable_list_entry_add(codegendata *cd, int32_t linenumber)
#define JITDATA_HAS_FLAG_VERBOSECALL(jd)
Definition: jit.hpp:234
methoddesc * md
Definition: builtin.hpp:71
int *** phi_moves
Definition: lsra.hpp:197
#define M_ILD(a, b, disp)
Definition: codegen.hpp:353
methodinfo * code_get_methodinfo_for_pv(void *pv)
Definition: code.cpp:150
s4 dseg_add_unique_address(codegendata *cd, void *value)
Definition: dseg.cpp:525
s4 localcount
Definition: jit.hpp:152
#define M_IST(a, b, disp)
Definition: codegen.hpp:357
Linenumber table of a Java method.
void dynamic_super_rewrite(codegendata *cd)
#define ICONST(d, c)
Definition: codegen.hpp:53
#define DMREALLOC(ptr, type, num1, num2)
Definition: dumpmemory.hpp:372
uint8_t savedfltcount
Definition: code.hpp:90
#define PATCHER_CALL_SIZE
Definition: codegen.hpp:68
void emit_monitor_enter(jitdata *jd, int32_t syncslot_offset)
Generates synchronization code to enter a monitor.
Definition: emit.cpp:531
s4 maxlocals
Definition: jit.hpp:162
s4 * invars
Definition: jit.hpp:330
void log_message_method(const char *msg, methodinfo *m)
Definition: logging.cpp:275
basicblock * next
Definition: jit.hpp:344
void emit_trap_countdown(codegendata *cd, s4 *counter)
Definition: emit.cpp:482
int32_t stackframesize
Definition: code.hpp:87
jumpref * next
#define M_AADD_IMM(a, b, c)
Definition: codegen.hpp:277
#define M_CAST_L2D(a, Fb)
Definition: codegen.hpp:763
static void fixup_exc_handler_interface(jitdata *jd, basicblock *bptr)
Fix up register locations in the case where control is transferred to an exception handler block via ...
s4 dseg_add_address(codegendata *cd, void *value)
Definition: dseg.cpp:542
codeinfo * code
Definition: jit.hpp:128
int opt_TraceJavaCalls
Definition: options.cpp:215
#define REG_FRESULT
Definition: md-abi.hpp:61
s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
#define ALIGN_EVEN(a)
Definition: global.hpp:69
int32_t argcount
Definition: instruction.hpp:64
void emit_bcc(codegendata *cd, basicblock *target, s4 condition, u4 options)
void localref_table_remove()
Definition: localref.cpp:168
int v_index
Definition: lsra.hpp:97
bool opt_intrp
Definition: options.cpp:55
#define M_CMPEQ(a, b, c)
Definition: codegen.hpp:280
s4 mpc
Definition: jit.hpp:352
codegendata * cd
Definition: jit.hpp:129
int32_t varindex
Definition: instruction.hpp:63
#define JITDATA_HAS_FLAG_INSTRUMENT(jd)
Definition: jit.hpp:210
#define PATCHER_resolve_class
#define BRANCH_LABEL_10
Definition: emit-common.hpp:56
#define M_ACMP(a, b)
Definition: codegen.hpp:366
static void emit_lmove(codegendata *cd, int s, int d)
Generates a long-move from register s to d.
#define M_CAST_D2L(Fa, b)
Definition: codegen.hpp:780
typedef void(JNICALL *jvmtiEventSingleStep)(jvmtiEnv *jvmti_env
#define REG_ITMP1_XPTR
Definition: md-abi.hpp:50
struct lifetime * lifetime
Definition: lsra.hpp:168
methoddesc * md
Definition: references.hpp:75
static void usage()
Definition: VMjdwp.cpp:377
int savintreguse
Definition: reg.hpp:88
#define JITDATA_HAS_FLAG_DEOPTIMIZE(jd)
Definition: jit.hpp:225
#define M_FST(a, b, disp)
Definition: codegen.hpp:363
void localref_native_enter(methodinfo *m, uint64_t *argument_regs, uint64_t *argument_stack)
Definition: localref.cpp:439
patchref_t * patcher_add_patch_ref(jitdata *jd, functionptr patcher, void *ref, s4 disp)
void patcher_resolve(codeinfo *code)
Resolve all patchers in the current JIT run.
void codegen_init(void)
#define M_FLD(a, b, disp)
Definition: codegen.hpp:360
uint8_t u1
Definition: types.hpp:40
void trace_java_call_exit(methodinfo *m, uint64_t *return_regs)
Definition: trace.cpp:240
void codegen_increase(codegendata *cd)
Type type
Definition: reg.hpp:44
#define M_TEST(a)
Definition: codegen.hpp:365
#define PATCHER_breakpoint
s4 mcodelength
Definition: code.hpp:84
java_object_t * codegen_finish_native_call(u1 *sp, u1 *pv)
branchref * branchrefs
Definition: jit.hpp:342
#define CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)
#define JITDATA_HAS_FLAG_COUNTDOWN(jd)
Definition: jit.hpp:222
lookup_target_t * lookup
#define CODEGENDATA_FLAG_LONGBRANCHES
JNIEnv jthread jobject jclass jlong size
Definition: jvmti.h:387
#define VAR(i)
Definition: jit.hpp:259
Definition: reg.hpp:43
void emit_recompute_pv(codegendata *cd)
Emit code to recompute the procedure vector.
Definition: emit.cpp:519
#define PATCHER_get_putstatic
static int code_is_leafmethod(codeinfo *code)
Definition: code.hpp:150
void md_cacheflush(u1 *addr, s4 nbytes)
Definition: md.c:49
#define ALIGNCODENOP
Definition: codegen.hpp:47
s4 regoff
Definition: reg.hpp:47
void(* functionptr)(void)
Definition: global.hpp:39
methodinfo * m
Definition: code.hpp:74
void emit_verbosecall_enter(jitdata *jd)
Definition: emit.cpp:649
#define INT_SAV_CNT
Definition: md-abi.hpp:75
java_handle_t * codegen_start_native_call(u1 *sp, u1 *pv)
int * num_phi_moves
Definition: lsra.hpp:196
#define MCOPY(dest, src, type, num)
Definition: memory.hpp:103
int opt_TraceBuiltinCalls
Definition: options.cpp:208
#define CODEGENDATA_HAS_FLAG_ERROR(cd)
#define STACKFRAME_RA_BETWEEN_FRAMES
Definition: arch.hpp:101
#define LLNI_classinfo_wrap(classinfo)
Definition: llni.hpp:110
#define M_MOV_IMM(d, i)
Definition: codegen.hpp:448
s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
u1 * mcode
Definition: code.hpp:82
void trace_java_call_enter(methodinfo *m, uint64_t *arg_regs, uint64_t *stack)
Definition: trace.cpp:149
void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
#define MAX_ALIGN
Definition: global.hpp:102
#define PROFILE_CYCLE_STOP
Definition: profile.hpp:58
#define LA_SIZE
Definition: md-abi.hpp:93
void emit_patcher_traps(jitdata *jd)
JNIEnv jthread jmethodID method
Definition: jvmti.h:207
dst_operand_t dst
#define REG_LRESULT
flags_operand_t flags
#define PA_SIZE
Definition: md-abi.hpp:147
bool compileverbose
Definition: options.cpp:82
uint16_t u2
Definition: types.hpp:43
constant_FMIref * fieldref
Definition: resolve.hpp:88
void localref_frame_pop_all(void)
Definition: localref.cpp:254
classinfo * clazz
Definition: method.hpp:80
#define OFFSET(s, el)
Definition: memory.hpp:90
This file contains the statistics framework.
jumpref * jumpreferences
uint64_t u8
Definition: types.hpp:49
s4 emit_load_s2(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:82
bool checksync
Definition: options.cpp:90
static codeinfo * code_get_codeinfo_for_pv(void *pv)
Definition: code.hpp:222
#define STAT_REGISTER_GROUP_VAR(type, var, init, name, description, group)
Register an statistics variable and add it to a group.
Definition: statistics.hpp:967
imm_union * value
Definition: field.hpp:67
static void * reallocate(void *src, size_t len1, size_t len2)
Stupid realloc implementation for dump memory.
Definition: dumpmemory.cpp:57
Type
Types used internally by JITTED code.
Definition: global.hpp:117
void exceptions_throw_internalerror(const char *message,...)
Definition: exceptions.cpp:805
#define RETADDR_FROM_JAVALOCAL(jl)
Definition: jit.hpp:424
void linenumbertable_list_entry_add_inline_start(codegendata *cd, instruction *iptr)
#define MEMORY_ALIGN(pos, size)
Definition: memory.hpp:37
s4 indepth
Definition: jit.hpp:332
void dseg_finish(jitdata *jd)
Definition: dseg.cpp:46
void patcher_list_reset(codeinfo *code)
#define M_ASUB_IMM(a, b, c)
Definition: codegen.hpp:278
typedesc * fd
Definition: references.hpp:74
#define exceptions_get_and_clear_exception
Definition: md-asm.hpp:98
#define REG_FTMP2
Definition: md-abi.hpp:68
#define STAT_REGISTER_GROUP(var, name, description)
Register a statistics group.
Definition: statistics.hpp:971
MIIterator i
s4 emit_load(jitdata *jd, instruction *iptr, varinfo *src, s4 tempreg)
Definition: emit.cpp:66
typedesc returntype
Definition: descriptor.hpp:166
basicblock ** basicblocks
Definition: lsra.hpp:189
int32_t s4
Definition: types.hpp:45
void stacktrace_stackframeinfo_add(stackframeinfo_t *sfi, void *pv, void *sp, void *ra, void *xpc)
Definition: stacktrace.cpp:84
s4 dseg_add_unique_s4(codegendata *cd, s4 value)
Definition: dseg.cpp:229
void emit_store(jitdata *jd, instruction *iptr, varinfo *dst, s4 d)
Definition: emit.cpp:113
DumpList< branch_label_ref_t * > * brancheslabel
classinfo * clazz
Definition: field.hpp:55
registerdata * rd
Definition: jit.hpp:130
void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
#define M_AST(a, b, disp)
Definition: codegen.hpp:356
union instruction::@12 sx
struct localref_table localref_table
Definition: localref.hpp:31
static void abort()
Definition: os.hpp:196
#define MCODEINITSIZE
int type
Definition: lsra.hpp:98
int savfltreguse
Definition: reg.hpp:91
void stack_javalocals_store(instruction *iptr, s4 *javalocals)
Definition: stack.cpp:4724
int32_t varindex
#define M_CAST_I2F(a, Fb)
Definition: codegen.hpp:755
#define LLNI_UNWRAP(hdl)
Definition: llni.hpp:52
#define REG_LTMP23
bool inmemory
Definition: descriptor.hpp:151
#define INSTRUCTION_GET_METHODDESC(iptr, md)
#define REG_ITMP2
Definition: md-abi.hpp:47
#define MNEW(type, num)
Definition: memory.hpp:96
void emit_icmp_imm(codegendata *cd, int reg, int32_t value)
Emits code comparing a single register.
Definition: emit.cpp:243
MIIterator e
void emit_store_dst(jitdata *jd, instruction *iptr, s4 d)
functionptr emit_fastpath
Definition: builtin.hpp:72
void emit_copy(jitdata *jd, instruction *iptr)
Definition: emit.cpp:153
s1_operand_t s1
#define LOG(STMT)
Analogous to DEBUG.
Definition: logging.hpp:91
uint32_t u4
Definition: types.hpp:46
static void generate()
Definition: stubs.cpp:262
#define EDX
Definition: arch.hpp:53
void codegen_setup(jitdata *jd)
#define FLT_SAV_CNT
Definition: md-abi.hpp:82
void exceptiontable_create(jitdata *jd)
#define M_ICMP(a, b)
Definition: codegen.hpp:367
constant_FMIref * methodref
Definition: resolve.hpp:97
methoddesc * parseddesc
Definition: method.hpp:78
#define sp
Definition: md-asm.hpp:81
void emit_icmpeq_imm(codegendata *cd, int reg, int32_t value, int d)
Emits code comparing one integer register to an immediate value.
Definition: emit.cpp:253
int memuse
Definition: reg.hpp:84
#define REG_FTMP1
Definition: md-abi.hpp:67
#define VAROP(v)
Definition: jit.hpp:258
#define ALIGN_ODD(a)
Definition: global.hpp:70
Definition: builtin.hpp:60
int32_t synchronizedoffset
Definition: code.hpp:88
methodinfo * m
Definition: jit.hpp:127
void stacktrace_stackframeinfo_remove(stackframeinfo_t *sfi)
Definition: stacktrace.cpp:204
static bool IS_INMEMORY(s4 flags)
Definition: stack.hpp:51
s4 type
Definition: field.hpp:60
void codegen_emit_epilog(jitdata *jd)
Generates machine code for the method epilog.
Definition: codegen.cpp:174
LinenumberTable * linenumbertable
Definition: code.hpp:93
s4 flags
Definition: reg.hpp:45
#define STAT_DECLARE_GROUP(var)
Declare an external group (or subgroup).
Definition: statistics.hpp:970
void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
#define M_ALD_DSEG(a, disp)
Definition: codegen.hpp:352
static void * allocate(size_t size)
Definition: dumpmemory.hpp:251
s4 nr
Definition: jit.hpp:319
int8_t s1
Definition: types.hpp:39
#define LCONST(d, c)
Definition: codegen.hpp:54
void methodtree_insert(void *startpc, void *endpc)
Definition: methodtree.cpp:148
static bool class_is_or_almost_initialized(classinfo *c)
Definition: class.hpp:433
basicblock * retaddr
Definition: reg.hpp:52
static int code_is_synchronized(codeinfo *code)
Definition: code.hpp:172
int16_t s2
Definition: types.hpp:42
#define PATCHER_initialize_class
#define M_DST(a, b, disp)
Definition: codegen.hpp:362
#define INSTRUCTION_IS_UNRESOLVED(iptr)
struct instruction::@12::@13 s23
#define M_CAST_F2I(Fa, b)
Definition: codegen.hpp:772
void codegen_emit_prolog(jitdata *jd)
Generates machine code for the method prolog.
Definition: codegen.cpp:73
#define M_LLD(a, b, disp)
Definition: codegen.hpp:350
const parseddesc_t parseddesc
Definition: references.hpp:105
static void emit_fmove(codegendata *cd, int s, int d)
Generates a float-move from register s to d.
BeginInst * target
#define MCODECHECK(icnt)
Definition: codegen.hpp:40
branchref * next
functionptr fp
Definition: builtin.hpp:63
bool codegen_generate(jitdata *jd)
#define REG_LTMP12
void emit_label(codegendata *cd, s4 label)
void linenumbertable_list_entry_add_inline_end(codegendata *cd, instruction *iptr)
s4 flags
Definition: method.hpp:70
void emit_bccz(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
uintptr_t ptrint
Definition: types.hpp:54
#define M_NOP
Definition: codegen.hpp:344
static void emit_dmove(codegendata *cd, int s, int d)
Generates an double-move from register s to d.
static void emit_imove(codegendata *cd, int s, int d)
Generates an integer-move from register s to d.
void localref_table_add(localref_table *lrt)
Definition: localref.cpp:142
#define M_DLD(a, b, disp)
Definition: codegen.hpp:359
#define M_CALL(a)
Definition: codegen.hpp:290
Nl nl
Definition: OStream.cpp:56
#define STAT_REGISTER_VAR(type, var, init, name, description)
Register an external statistics variable.
Definition: statistics.hpp:966
void emit_verbosecall_exit(jitdata *jd)
Definition: emit.cpp:790
uint32_t regoff
Definition: descriptor.hpp:153
void emit_br(codegendata *cd, basicblock *target)
dsegentry * dseg
Type type
Definition: jit.hpp:322
const char const void jint length
Definition: jvmti.h:352
bool opt_AlwaysEmitLongBranches
Definition: options.cpp:163
#define PATCHER_NOPS
Definition: codegen.hpp:70
#define printf(...)
Definition: ssa2.cpp:40
void codegen_emit_patchable_barrier(instruction *iptr, codegendata *cd, patchref_t *pr, fieldinfo *fi)
Generates a memory barrier to be used after volatile writes.
Definition: codegen.cpp:197
uint8_t savedintcount
Definition: code.hpp:89
#define REG_RESULT
Definition: md-abi.hpp:33
#define LA_SIZE_IN_POINTERS
Definition: md-abi.hpp:95
bool codegen_emit(jitdata *jd)
Generates machine code.
void emit_nullpointer_check(codegendata *cd, instruction *iptr, s4 reg)
Definition: emit.cpp:431
static int32_t md_stacktrace_get_framesize(codeinfo *code)
Returns the size (in bytes) of the current stackframe, specified by the passed codeinfo structure...
Definition: md.hpp:57
#define REG_ITMP1
Definition: md-abi.hpp:46
u1 * entrypoint
Definition: code.hpp:83
#define PROFILE_CYCLE_START
Definition: profile.hpp:57
methodinfo * method
void codegen_finish(jitdata *jd)
#define INS_FLAG_ID_SHIFT
#define NCODEINITSIZE
#define CNEW(type, num)
Definition: codememory.hpp:34