CACAO
codegen-common.cpp
Go to the documentation of this file.
1 /* src/vm/jit/codegen-common.cpp - architecture independent code generator stuff
2 
3  Copyright (C) 1996-2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5  Copyright (C) 2009 Theobroma Systems Ltd.
6 
7  This file is part of CACAO.
8 
9  This program is free software; you can redistribute it and/or
10  modify it under the terms of the GNU General Public License as
11  published by the Free Software Foundation; either version 2, or (at
12  your option) any later version.
13 
14  This program is distributed in the hope that it will be useful, but
15  WITHOUT ANY WARRANTY; without even the implied warranty of
16  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  General Public License for more details.
18 
19  You should have received a copy of the GNU General Public License
20  along with this program; if not, write to the Free Software
21  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
22  02110-1301, USA.
23 
24  All functions assume the following code area / data area layout:
25 
26  +-----------+
27  | |
28  | code area | code area grows to higher addresses
29  | |
30  +-----------+ <-- start of procedure
31  | |
32  | data area | data area grows to lower addresses
33  | |
34  +-----------+
35 
36  The functions first write into a temporary code/data area allocated by
37  "codegen_init". "codegen_finish" copies the code and data area into permanent
38  memory. All functions writing values into the data area return the offset
 39  relative to the beginning of the code area (start of procedure).
40 
41 */
42 
43 
44 #include "config.h"
45 
46 #include <cassert>
47 #include <cstring>
48 
49 #include "vm/types.hpp"
50 
51 #include "codegen.hpp"
52 #include "md.hpp"
53 #include "md-abi.hpp"
54 
55 #include "mm/codememory.hpp"
56 #include "mm/memory.hpp"
57 
58 #include "toolbox/avl.hpp"
59 #include "toolbox/list.hpp"
60 #include "toolbox/logging.hpp"
61 
62 #include "native/llni.hpp"
63 #include "native/localref.hpp"
64 #include "native/native.hpp"
65 
66 #include "vm/descriptor.hpp"
67 #include "vm/exceptions.hpp"
68 #include "vm/field.hpp"
69 #include "vm/options.hpp"
70 #include "vm/statistics.hpp"
71 
72 #include "vm/jit/abi.hpp"
73 #include "vm/jit/code.hpp"
75 
76 #include "vm/jit/builtin.hpp"
77 #include "vm/jit/dseg.hpp"
78 #include "vm/jit/disass.hpp"
80 #include "vm/jit/emit-common.hpp"
81 #include "vm/jit/jit.hpp"
83 #include "vm/jit/methodheader.hpp"
84 #include "vm/jit/methodtree.hpp"
86 #include "vm/jit/replace.hpp"
87 #include "vm/jit/show.hpp"
88 #include "vm/jit/stacktrace.hpp"
89 #include "vm/jit/stubs.hpp"
90 #include "vm/jit/trace.hpp"
91 
93 
94 #if defined(ENABLE_SSA)
96 # include "vm/jit/optimizing/ssa.hpp"
97 #elif defined(ENABLE_LSRA)
98 # include "vm/jit/allocator/lsra.hpp"
99 #endif
100 
101 #if defined(ENABLE_INTRP)
102 #include "vm/jit/intrp/intrp.h"
103 #endif
104 
105 
/* Statistics counters and groups used by this translation unit.
   These expand to nothing unless statistics support is compiled in. */
 106 STAT_REGISTER_VAR(int,count_branches_unresolved,0,"unresolved branches","unresolved branches")
 107 STAT_DECLARE_GROUP(function_call_stat)
 108 STAT_REGISTER_GROUP_VAR(u8,count_calls_java_to_native,0,"calls java to native","java-to-native calls",function_call_stat)
 109 
 110 STAT_REGISTER_GROUP(memory_stat,"mem. stat.","Memory usage")
 111 STAT_REGISTER_SUM_SUBGROUP(code_data_stat,"code data","Code and data usage",memory_stat)
 112 STAT_REGISTER_GROUP_VAR(int,count_code_len,0,"code len","code length",code_data_stat)
 113 STAT_REGISTER_GROUP_VAR(int,count_data_len,0,"data len","data length",code_data_stat)
 114 
/* Forward declaration only; the full methodinfo is defined elsewhere. */
 115 struct methodinfo;
 116 
 117 using namespace cacao;
119 
120 /* codegen_init ****************************************************************
121 
122  TODO
123 
124 *******************************************************************************/
125 
/* One-time global initialization of the code generator.
   NOTE(review): body line 128 is missing from this extraction; as far
   as visible here the function is a no-op — confirm against the
   repository. */
 126 void codegen_init(void)
 127 {
 129 }
130 
131 
132 /* codegen_setup ***************************************************************
133 
134  Allocates and initialises code area, data area and references.
135 
136 *******************************************************************************/
137 
/* codegen_setup: allocates and initialises the code area, data area and
   reference lists inside jd->cd for one compiler run.
   NOTE(review): this extraction is missing the function signature
   (line 138, presumably "void codegen_setup(jitdata *jd)"), the
   flag-selection condition (lines 151-152) and the mcodebase
   allocation (line 158) — verify against the repository. */
 139 {
 140  //methodinfo *m;
 141  codegendata *cd;
 142 
 143  /* get required compiler data */
 144 
 145  //m = jd->m;
 146  cd = jd->cd;
 147 
 148  /* initialize members */
 149 
 150  // Set flags as requested.
 153  }
 154  else {
 155  cd->flags = 0;
 156  }
 157 
/* mcodeend marks one-past-the-end of the temporary code buffer. */
 159  cd->mcodeend = cd->mcodebase + MCODEINITSIZE;
 160  cd->mcodesize = MCODEINITSIZE;
 161 
 162  /* initialize mcode variables */
 163 
 164  cd->mcodeptr = cd->mcodebase;
 165  cd->lastmcodeptr = cd->mcodebase;
 166 
 167 #if defined(ENABLE_INTRP)
 168  /* native dynamic superinstructions variables */
 169 
 170  if (opt_intrp) {
 171  cd->ncodebase = (u1*) DumpMemory::allocate(NCODEINITSIZE);
 172  cd->ncodesize = NCODEINITSIZE;
 173 
 174  /* initialize ncode variables */
 175 
 176  cd->ncodeptr = cd->ncodebase;
 177 
 178  cd->lastinstwithoutdispatch = ~0; /* no inst without dispatch */
 179  cd->superstarts = NULL;
 180  }
 181 #endif
 182 
/* Data segment and reference lists start out empty. */
 183  cd->dseg = NULL;
 184  cd->dseglen = 0;
 185 
 186  cd->jumpreferences = NULL;
 187 
 188 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
 189  cd->datareferences = NULL;
 190 #endif
 191 
 193  cd->linenumbers = new DumpList<Linenumber>();
 194 }
195 
196 
197 /* codegen_reset ***************************************************************
198 
199  Resets the codegen data structure so we can recompile the method.
200 
201 *******************************************************************************/
202 
/* codegen_reset: rewinds the codegen data structure so the method can be
   recompiled (e.g. after the long-branches flag was set), reusing the
   already-allocated code buffer.
   NOTE(review): lines 216 (presumably clearing the error flag) and 233
   are missing from this extraction — confirm against the repository. */
 203 static void codegen_reset(jitdata *jd)
 204 {
 205  codeinfo *code;
 206  codegendata *cd;
 207  basicblock *bptr;
 208 
 209  /* get required compiler data */
 210 
 211  code = jd->code;
 212  cd = jd->cd;
 213 
 214  /* reset error flag */
 215 
 217 
 218  /* reset some members, we reuse the code memory already allocated
 219  as this should have almost the correct size */
 220 
 221  cd->mcodeptr = cd->mcodebase;
 222  cd->lastmcodeptr = cd->mcodebase;
 223 
 224  cd->dseg = NULL;
 225  cd->dseglen = 0;
 226 
 227  cd->jumpreferences = NULL;
 228 
 229 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
 230  cd->datareferences = NULL;
 231 #endif
 232 
 234  cd->linenumbers = new DumpList<Linenumber>();
 235 
 236  /* We need to clear the mpc and the branch references from all
 237  basic blocks as they will definitely change. */
 238 
 239  for (bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
 240  bptr->mpc = -1;
 241  bptr->branchrefs = NULL;
 242  }
 243 
 244  /* We need to clear all the patcher references from the codeinfo
 245  since they all will be regenerated */
 246 
 247  patcher_list_reset(code);
 248 
 249 #if defined(ENABLE_REPLACEMENT)
/* Replacement points are regenerated on the next codegen pass. */
 250  code->rplpoints = NULL;
 251  code->rplpointcount = 0;
 252  code->regalloc = NULL;
 253  code->regalloccount = 0;
 254  code->globalcount = 0;
 255 #endif
 256 }
257 
258 
259 /* codegen_generate ************************************************************
260 
261  Generates the code for the currently compiled method.
262 
263 *******************************************************************************/
264 
/* codegen_generate: drives machine-code generation for the current
   method; on a recoverable error (long branches) it resets the codegen
   state and retries once, otherwise it aborts.  Returns false if
   codegen_emit fails, true on success.
   NOTE(review): the function signature (line 265, presumably
   "bool codegen_generate(jitdata *jd)") and the long-branches flag test
   (line 291) are missing from this extraction — the dangling "}" at
   original line 309 closes the outer error-handling block. */
 266 {
 267  codegendata *cd;
 268 
 269  /* get required compiler data */
 270 
 271  cd = jd->cd;
 272 
 273  /* call the machine-dependent code generation function */
 274 
 275  if (!codegen_emit(jd))
 276  return false;
 277 
 278  /* check for an error */
 279 
 280  if (CODEGENDATA_HAS_FLAG_ERROR(cd)) {
 281  /* check for long-branches flag, if it is set we recompile the
 282  method */
 283 
 284 #if !defined(NDEBUG)
 285  if (compileverbose)
 286  log_message_method("Re-generating code: ", jd->m);
 287 #endif
 288 
 289  /* XXX maybe we should tag long-branches-methods for recompilation */
 290 
 292  /* we have to reset the codegendata structure first */
 293 
 294  codegen_reset(jd);
 295 
 296  /* and restart the compiler run */
 297 
 298  if (!codegen_emit(jd))
 299  return false;
 300  }
 301  else {
/* An error without the long-branches flag is unrecoverable. */
 302  os::abort("codegen_generate: unknown error occurred during codegen_emit: flags=%x\n", cd->flags);
 303  }
 304 
 305 #if !defined(NDEBUG)
 306  if (compileverbose)
 307  log_message_method("Re-generating code done: ", jd->m);
 308 #endif
 309  }
 310 
 311  /* reallocate the memory and finish the code generation */
 312 
 313  codegen_finish(jd);
 314 
 315  /* everything's ok */
 316 
 317  return true;
 318 }
319 
320 
321 /* codegen_close ***************************************************************
322 
323  TODO
324 
325 *******************************************************************************/
326 
/* Tear down global code-generator state at VM shutdown.
   Currently a no-op. */
void codegen_close(void)
{
	// TODO: release the AVL tree on i386 and x86_64.
}
331 
332 
333 /* codegen_increase ************************************************************
334 
335  Doubles code area.
336 
337 *******************************************************************************/
338 
/* codegen_increase: doubles the temporary machine-code buffer and rebases
   all pointers into it (mcodeptr, and on some targets lastmcodeptr).
   NOTE(review): the function signature (line 339) and the start of the
   DMREALLOC call (line 349) are missing from this extraction — confirm
   against the repository. */
 340 {
 341  u1 *oldmcodebase;
 342 
 343  /* save old mcodebase pointer */
 344 
 345  oldmcodebase = cd->mcodebase;
 346 
 347  /* reallocate to new, doubled memory */
 348 
 350  cd->mcodesize,
 351  cd->mcodesize * 2);
 352  cd->mcodesize *= 2;
 353  cd->mcodeend = cd->mcodebase + cd->mcodesize;
 354 
 355  /* set new mcodeptr */
 356 
 357  cd->mcodeptr = cd->mcodebase + (cd->mcodeptr - oldmcodebase);
 358 
 359 #if defined(__I386__) || defined(__MIPS__) || defined(__X86_64__) || defined(ENABLE_INTRP) \
 360  || defined(__SPARC_64__)
 361  /* adjust the pointer to the last patcher position */
 362 
 363  if (cd->lastmcodeptr != NULL)
 364  cd->lastmcodeptr = cd->mcodebase + (cd->lastmcodeptr - oldmcodebase);
 365 #endif
 366 }
367 
368 
369 /* codegen_ncode_increase ******************************************************
370 
371  Doubles code area.
372 
373 *******************************************************************************/
374 
#if defined(ENABLE_INTRP)
/* Doubles the native dynamic-superinstruction (ncode) buffer.
 *
 * Reallocates cd->ncodebase to twice its current size and returns the
 * caller's ncodeptr rebased into the new buffer.
 */
u1 *codegen_ncode_increase(codegendata *cd, u1 *ncodeptr)
{
	/* Remember where the old buffer started so the caller's pointer
	   can be rebased after reallocation. */

	u1 *base_before = cd->ncodebase;

	/* Grow to double size. */

	cd->ncodebase = DMREALLOC(cd->ncodebase, u1, cd->ncodesize, cd->ncodesize * 2);
	cd->ncodesize *= 2;

	/* Rebase the caller's pointer into the new buffer. */

	return cd->ncodebase + (ncodeptr - base_before);
}
#endif
397 
398 
399 /* codegen_add_branch_ref ******************************************************
400 
 401  Prepends a branch to the target block's list of unresolved branches.
402 
403 *******************************************************************************/
404 
405 void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
406 {
407  branchref *br;
408  s4 branchmpc;
409 
410  STATISTICS(count_branches_unresolved++);
411 
412  /* calculate the mpc of the branch instruction */
413 
414  branchmpc = cd->mcodeptr - cd->mcodebase;
415 
416  br = (branchref*) DumpMemory::allocate(sizeof(branchref));
417 
418  br->branchmpc = branchmpc;
419  br->condition = condition;
420  br->reg = reg;
421  br->options = options;
422  br->next = target->branchrefs;
423 
424  target->branchrefs = br;
425 }
426 
427 
428 /* codegen_resolve_branchrefs **************************************************
429 
430  Resolves and patches the branch references of a given basic block.
431 
432 *******************************************************************************/
433 
/* codegen_resolve_branchrefs: patches every pending branch that targets
   the given basic block, now that its mpc is known.
   NOTE(review): the function signature (line 434, presumably
   "void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)")
   is missing from this extraction. */
 435 {
 436  branchref *br;
 437  u1 *mcodeptr;
 438 
 439  /* Save the mcodeptr because in the branch emitting functions
 440  we generate code somewhere inside already generated code,
 441  but we're still in the actual code generation phase. */
 442 
 443  mcodeptr = cd->mcodeptr;
 444 
 445  /* just to make sure */
 446 
 447  assert(bptr->mpc >= 0);
 448 
 449  for (br = bptr->branchrefs; br != NULL; br = br->next) {
 450  /* temporary set the mcodeptr */
 451 
 452  cd->mcodeptr = cd->mcodebase + br->branchmpc;
 453 
 454  /* emit_bccz and emit_branch emit the correct code, even if we
 455  pass condition == BRANCH_UNCONDITIONAL or reg == -1. */
 456 
 457  emit_bccz(cd, bptr, br->condition, br->reg, br->options);
 458  }
 459 
 460  /* restore mcodeptr */
 461 
 462  cd->mcodeptr = mcodeptr;
 463 }
464 
465 
466 /* codegen_branch_label_add ****************************************************
467 
 468  Appends a branch to the label-branch list.
469 
470 *******************************************************************************/
471 
/* codegen_branch_label_add: records a branch to a label (rather than a
   basic block) for later resolution via cd->brancheslabel.
   NOTE(review): line 477 — presumably the DumpMemory allocation of the
   branch_label_ref_t "br" — is missing from this extraction. */
 472 void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
 473 {
 474  // Calculate the current mpc.
 475  int32_t mpc = cd->mcodeptr - cd->mcodebase;
 476 
 478 
 479  br->mpc = mpc;
 480  br->label = label;
 481  br->condition = condition;
 482  br->reg = reg;
 483  br->options = options;
 484 
 485  // Add the branch to the list.
 486  cd->brancheslabel->push_back(br);
 487 }
488 
489 
490 /* codegen_set_replacement_point_notrap ****************************************
491 
492  Record the position of a non-trappable replacement point.
493 
494 *******************************************************************************/
495 
/* Records the pc of the current non-trappable replacement point as an
   offset into the code area, then advances to the next replacement
   point.  The "type" parameter exists only in debug builds, where it is
   checked against the pre-computed replacement point. */
 496 #if defined(ENABLE_REPLACEMENT)
 497 #if !defined(NDEBUG)
 498 void codegen_set_replacement_point_notrap(codegendata *cd, s4 type)
 499 #else
 500 void codegen_set_replacement_point_notrap(codegendata *cd)
 501 #endif
 502 {
 503  assert(cd->replacementpoint);
 504  assert(cd->replacementpoint->type == type);
 505  assert(cd->replacementpoint->flags & rplpoint::FLAG_NOTRAP);
 506 
/* pc is stored as an offset for now; codegen_finish rebases it to an
   absolute address. */
 507  cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
 508 
 509  cd->replacementpoint++;
 510 }
 511 #endif /* defined(ENABLE_REPLACEMENT) */
512 
513 
514 /* codegen_set_replacement_point ***********************************************
515 
516  Record the position of a trappable replacement point.
517 
518 *******************************************************************************/
519 
/* Records the pc of the current trappable replacement point (stored as a
   code-area offset, rebased later by codegen_finish) and advances to the
   next one.  In debug builds with opt_TestReplacement, patcher NOPs are
   emitted so the point can be exercised.
   NOTE(review): line 543 is missing from this extraction — likely an
   update of cd->lastmcodeptr; confirm against the repository. */
 520 #if defined(ENABLE_REPLACEMENT)
 521 #if !defined(NDEBUG)
 522 void codegen_set_replacement_point(codegendata *cd, s4 type)
 523 #else
 524 void codegen_set_replacement_point(codegendata *cd)
 525 #endif
 526 {
 527  assert(cd->replacementpoint);
 528  assert(cd->replacementpoint->type == type);
 529  assert(!(cd->replacementpoint->flags & rplpoint::FLAG_NOTRAP));
 530 
 531  cd->replacementpoint->pc = (u1*) (ptrint) (cd->mcodeptr - cd->mcodebase);
 532 
 533  cd->replacementpoint++;
 534 
 535 #if !defined(NDEBUG)
 536  /* XXX actually we should use an own REPLACEMENT_NOPS here! */
 537  if (opt_TestReplacement)
 538  PATCHER_NOPS;
 539 #endif
 540 
 541  /* XXX assert(cd->lastmcodeptr <= cd->mcodeptr); */
 542 
 544 }
 545 #endif /* defined(ENABLE_REPLACEMENT) */
546 
547 
548 /* codegen_finish **************************************************************
549 
550  Finishes the code generation. A new memory, large enough for both
551  data and code, is allocated and data and code are copied together
552  to their final layout, unresolved jumps are resolved, ...
553 
554 *******************************************************************************/
555 
/* codegen_finish: allocates the final memory for data+code, copies both
   into their permanent layout, resolves jump tables, patchers and
   replacement points, registers the method in the method tree and
   flushes the caches.
   NOTE(review): this extraction is missing the function signature
   (line 556, presumably "void codegen_finish(jitdata *jd)") and lines
   596, 646 and 659 (659 presumably creates code->exceptiontable) —
   confirm against the repository. */
 557 {
 558  s4 mcodelen;
 559 #if defined(ENABLE_INTRP)
 560  s4 ncodelen;
 561 #endif
 562  s4 alignedmcodelen;
 563  jumpref *jr;
 564  u1 *epoint;
 565  s4 alignedlen;
 566 
 567  /* Get required compiler data. */
 568 
 569  codeinfo* code = jd->code;
 570  codegendata* cd = jd->cd;
 571  registerdata* rd = jd->rd;
 572 
 573  /* prevent compiler warning */
 574 
 575 #if defined(ENABLE_INTRP)
 576  ncodelen = 0;
 577 #endif
 578 
 579  /* calculate the code length */
 580 
 581  mcodelen = (s4) (cd->mcodeptr - cd->mcodebase);
 582 
 583  STATISTICS(count_code_len += mcodelen);
 584  STATISTICS(count_data_len += cd->dseglen);
 585 
 586  alignedmcodelen = MEMORY_ALIGN(mcodelen, MAX_ALIGN);
 587 
 588 #if defined(ENABLE_INTRP)
 589  if (opt_intrp)
 590  ncodelen = cd->ncodeptr - cd->ncodebase;
 591  else {
 592  ncodelen = 0; /* avoid compiler warning */
 593  }
 594 #endif
 595 
 597  alignedlen = alignedmcodelen + cd->dseglen;
 598 
 599 #if defined(ENABLE_INTRP)
 600  if (opt_intrp) {
 601  alignedlen += ncodelen;
 602  }
 603 #endif
 604 
 605  /* allocate new memory */
 606 
 607  code->mcodelength = mcodelen + cd->dseglen;
 608  code->mcode = CNEW(u1, alignedlen);
 609 
 610  /* set the entrypoint of the method */
 611 
/* The data segment lives below the entrypoint (see the layout comment
   at the top of this file), hence the dseglen offset. */
 612  assert(code->entrypoint == NULL);
 613  code->entrypoint = epoint = (code->mcode + cd->dseglen);
 614 
 615  /* fill the data segment (code->entrypoint must already be set!) */
 616 
 617  dseg_finish(jd);
 618 
 619  /* copy code to the new location */
 620 
 621  MCOPY((void *) code->entrypoint, cd->mcodebase, u1, mcodelen);
 622 
 623 #if defined(ENABLE_INTRP)
 624  /* relocate native dynamic superinstruction code (if any) */
 625 
 626  if (opt_intrp) {
 627  cd->mcodebase = code->entrypoint;
 628 
 629  if (ncodelen > 0) {
 630  u1 *ncodebase = code->mcode + cd->dseglen + alignedmcodelen;
 631 
 632  MCOPY((void *) ncodebase, cd->ncodebase, u1, ncodelen);
 633 
 634  /* flush the instruction and data caches */
 635 
 636  md_cacheflush(ncodebase, ncodelen);
 637 
 638  /* set some cd variables for dynamic_super_rewrite */
 639 
 640  cd->ncodebase = ncodebase;
 641 
 642  } else {
 643  cd->ncodebase = NULL;
 644  }
 645 
 647  }
 648 #endif
 649 
 650  /* Fill runtime information about generated code. */
 651 
 652  code->stackframesize = cd->stackframesize;
 653  code->synchronizedoffset = rd->memuse * 8;
 654  code->savedintcount = INT_SAV_CNT - rd->savintreguse;
 655  code->savedfltcount = FLT_SAV_CNT - rd->savfltreguse;
 656 
 657  /* Create the exception table. */
 658 
 660 
 661  /* Create the linenumber table. */
 662 
 663  code->linenumbertable = new LinenumberTable(jd);
 664 
 665  /* jump table resolving */
 666 
/* Each jump-table slot holds a code-area offset; turn it into an
   absolute address relative to the entrypoint. */
 667  for (jr = cd->jumpreferences; jr != NULL; jr = jr->next)
 668  *((functionptr *) ((ptrint) epoint + jr->tablepos)) =
 669  (functionptr) ((ptrint) epoint + (ptrint) jr->target->mpc);
 670 
 671  /* patcher resolving */
 672 
 673  patcher_resolve(jd->code);
 674 
 675 #if defined(ENABLE_REPLACEMENT)
 676  /* replacement point resolving */
 677  {
 678  int i;
 679  rplpoint *rp;
 680 
/* rp->pc was stored as a code-area offset; rebase it to an absolute
   address now that the entrypoint is known. */
 681  rp = code->rplpoints;
 682  for (i=0; i<code->rplpointcount; ++i, ++rp) {
 683  rp->pc = (u1*) ((ptrint) epoint + (ptrint) rp->pc);
 684  }
 685  }
 686 #endif /* defined(ENABLE_REPLACEMENT) */
 687 
 688  /* Insert method into methodtree to find the entrypoint. */
 689 
 690  methodtree_insert(code->entrypoint, code->entrypoint + mcodelen);
 691 
 692 #if defined(__I386__) || defined(__X86_64__) || defined(__XDSPCORE__) || defined(ENABLE_INTRP)
 693  /* resolve data segment references */
 694 
 695  dseg_resolve_datareferences(jd);
 696 #endif
 697 
 698  /* flush the instruction and data caches */
 699 
 700  md_cacheflush(code->mcode, code->mcodelength);
 701 }
702 
/* Per-architecture accessors for the native-stub stack frame layout
   (see the diagram above codegen_start_native_call): data SP, java SP,
   and the locations of argument/return register save areas. */
 703 namespace {
 704 /**
 705  * Outsource stack adjustment logic to reduce in-code `#if defined`s.
 706  *
 707  * @note should be moved to a backend code unit.
 708  */
 709 #if defined(__AARCH64__)
 710 struct FrameInfo {
 711  u1 *sp;
 712  int32_t framesize;
 713  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 714  uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
 715  uint8_t *get_javasp() const { return sp + framesize; }
 716  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
 717  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 718  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
 719 };
 720 #elif defined(__ALPHA__)
 721 struct FrameInfo {
 722  u1 *sp;
 723  int32_t framesize;
 724  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 725  uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
 726  uint8_t *get_javasp() const { return sp + framesize; }
 727  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
 728  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 729  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
 730 };
 731 #elif defined(__ARM__)
 732 struct FrameInfo {
 733  u1 *sp;
 734  int32_t framesize;
 735  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 736  uint8_t *get_datasp() const { return sp + framesize - SIZEOF_VOID_P; }
 737  uint8_t *get_javasp() const { return sp + framesize; }
 738  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
 739  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 740  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
 741 };
 742 #elif defined(__I386__)
 743 struct FrameInfo {
 744  u1 *sp;
 745  int32_t framesize;
 746  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 747  uint8_t *get_datasp() const { return sp + framesize; }
 748  uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
 749  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
 750  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 751  uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 2 * SIZEOF_VOID_P); }
 752 };
 753 #elif defined(__MIPS__)
 754 struct FrameInfo {
 755  u1 *sp;
 756  int32_t framesize;
 757  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 758  /* MIPS always uses 8 bytes to store the RA */
 759  uint8_t *get_datasp() const { return sp + framesize - 8; }
 760  uint8_t *get_javasp() const { return sp + framesize; }
 761  uint64_t *get_arg_regs() const {
 762 # if SIZEOF_VOID_P == 8
 763  return (uint64_t *) sp;
 764 # else
 765  return (uint64_t *) (sp + 5 * 8);
 766 # endif
 767  }
 768  uint64_t *get_ret_regs() const {
 769 # if SIZEOF_VOID_P == 8
 770  return (uint64_t *) sp;
 771 # else
 772  return (uint64_t *) (sp + 1 * 8);
 773 # endif
 774  }
 775  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 776 };
 777 #elif defined(__S390__)
 778 struct FrameInfo {
 779  u1 *sp;
 780  int32_t framesize;
 781  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 782  uint8_t *get_datasp() const { return sp + framesize - 8; }
 783  uint8_t *get_javasp() const { return sp + framesize; }
 784  uint64_t *get_arg_regs() const { return (uint64_t *) (sp + 96); }
 785  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 786  uint64_t *get_ret_regs() const { return (uint64_t *) (sp + 96); }
 787 };
 788 #elif defined(__POWERPC__)
 789 struct FrameInfo {
 790  u1 *sp;
 791  int32_t framesize;
 792  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 793  uint8_t *get_datasp() const { return sp + framesize; }
 794  uint8_t *get_javasp() const { return sp + framesize; }
 795  uint64_t *get_arg_regs() const {
 796  return (uint64_t *) (sp + LA_SIZE + 4 * SIZEOF_VOID_P);
 797  }
 798  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 799  uint64_t *get_ret_regs() const {
 800  return (uint64_t *) (sp + LA_SIZE + 2 * SIZEOF_VOID_P);
 801  }
 802 };
 803 #elif defined(__POWERPC64__)
 804 struct FrameInfo {
 805  u1 *sp;
 806  int32_t framesize;
 807  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 808  uint8_t *get_datasp() const { return sp + framesize; }
 809  uint8_t *get_javasp() const { return sp + framesize; }
 810  uint64_t *get_arg_regs() const {
 811  return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 4 * SIZEOF_VOID_P);
 812  }
 813  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 814  uint64_t *get_ret_regs() const {
 815  return (uint64_t *) (sp + PA_SIZE + LA_SIZE + 2 * SIZEOF_VOID_P);
 816  }
 817 };
 818 #elif defined(__X86_64__)
 819 struct FrameInfo {
 820  u1 *sp;
 821  int32_t framesize;
 822  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {}
 823  uint8_t *get_datasp() const { return sp + framesize; }
 824  uint8_t *get_javasp() const { return sp + framesize + SIZEOF_VOID_P; }
 825  uint64_t *get_arg_regs() const { return (uint64_t *) sp; }
 826  uint64_t *get_arg_stack() const { return (uint64_t *) get_javasp(); }
 827  uint64_t *get_ret_regs() const { return (uint64_t *) sp; }
 828 };
 829 #else
 830 // dummy
/* Fallback for unported architectures: aborts in the constructor. */
 831 struct FrameInfo {
 832  u1 *sp;
 833  int32_t framesize;
 834  FrameInfo(u1 *sp, int32_t framesize) : sp(sp), framesize(framesize) {
 835  /* XXX is was unable to do this port for SPARC64, sorry. (-michi) */
 836  /* XXX maybe we need to pass the RA as argument there */
 837  os::abort("codegen_start_native_call: unsupported architecture");
 838  }
 839  uint8_t *get_datasp() const { return NULL; }
 840  uint8_t *get_javasp() const { return NULL; }
 841  uint64_t *get_arg_regs() const { return NULL; }
 842  uint64_t *get_arg_stack() const { return NULL; }
 843  uint64_t *get_ret_regs() const { return NULL; }
 844 };
 845 #endif
 846 
 847 } // end anonymous namespace
848 
849 /* codegen_start_native_call ***************************************************
850 
851  Prepares the stuff required for a native (JNI) function call:
852 
853  - adds a stackframe info structure to the chain, for stacktraces
854  - prepares the local references table on the stack
855 
856  The layout of the native stub stackframe should look like this:
857 
858  +---------------------------+ <- java SP (of parent Java function)
859  | return address |
860  +---------------------------+ <- data SP
861  | |
862  | stackframe info structure |
863  | |
864  +---------------------------+
865  | |
866  | local references table |
867  | |
868  +---------------------------+
869  | |
870  | saved registers (if any) |
871  | |
872  +---------------------------+
873  | |
874  | arguments (if any) |
875  | |
876  +---------------------------+ <- current SP (native stub)
877 
878 *******************************************************************************/
879 
/* codegen_start_native_call: called from a native stub before the actual
   native (JNI) function.  Adds a stackframeinfo to the chain, registers
   the local-references table, optionally traces the call, and for static
   methods returns the wrapped classinfo (NULL otherwise).
   NOTE(review): this extraction is missing the function signature
   (line 880, presumably "java_handle_t *codegen_start_native_call(u1 *sp,
   u1 *pv)"), the assignment of "m" (line 901, presumably "m = code->m;")
   and the trace condition (line 932, presumably a check of the
   call-trace option) — confirm against the repository. */
 881 {
 882  assert(sp);
 883  assert(pv);
 884 
 885  stackframeinfo_t *sfi;
 886  localref_table *lrt;
 887  codeinfo *code;
 888  methodinfo *m;
 889  int32_t framesize;
 890 
 891  STATISTICS(count_calls_java_to_native++);
 892 
 893  // Get information from method header.
 894  code = code_get_codeinfo_for_pv(pv);
 895  assert(code != NULL);
 896 
 897  framesize = md_stacktrace_get_framesize(code);
 898  assert(framesize >= (int32_t) (sizeof(stackframeinfo_t) + sizeof(localref_table)));
 899 
 900  // Get the methodinfo.
 902  assert(m);
 903 
 904  /* calculate needed values */
 905 
 906  FrameInfo FI(sp,framesize);
 907 
 908  uint8_t *datasp = FI.get_datasp();
 909  //uint8_t *javasp = FI.get_javasp();
 910 #if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
 911  uint64_t *arg_regs = FI.get_arg_regs();
 912  uint64_t *arg_stack = FI.get_arg_stack();
 913 #endif
 914 
 915  /* get data structures from stack */
 916 
/* sfi sits directly below the data SP, the localref table below sfi
   (see the frame-layout diagram above). */
 917  sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
 918  lrt = (localref_table *) (datasp - sizeof(stackframeinfo_t) -
 919  sizeof(localref_table));
 920 
 921 #if defined(ENABLE_JNI)
 922  /* add current JNI local references table to this thread */
 923 
 924  localref_table_add(lrt);
 925 #endif
 926 
 927 #if !defined(NDEBUG)
 928 # if defined(__AARCH64__) || defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
 929  /* print the call-trace if necessary */
 930  /* BEFORE: filling the local reference table */
 931 
 933  trace_java_call_enter(m, arg_regs, arg_stack);
 934 # endif
 935 #endif
 936 
 937 #if defined(ENABLE_HANDLES)
 938  /* place all references into the local reference table */
 939  /* BEFORE: creating stackframeinfo */
 940 
 941  localref_native_enter(m, arg_regs, arg_stack);
 942 #endif
 943 
 944  /* Add a stackframeinfo for this native method. We don't have RA
 945  and XPC here. These are determined in
 946  stacktrace_stackframeinfo_add. */
 947 
 948  stacktrace_stackframeinfo_add(sfi, pv, sp, NULL, NULL);
 949 
 950  /* Return a wrapped classinfo for static methods. */
 951 
 952  if (m->flags & ACC_STATIC)
 953  return (java_handle_t *) LLNI_classinfo_wrap(m->clazz);
 954  else
 955  return NULL;
 956 }
957 
958 
959 /* codegen_finish_native_call **************************************************
960 
961  Removes the stuff required for a native (JNI) function call.
962  Additionally it checks for an exceptions and in case, get the
963  exception object and clear the pointer.
964 
965 *******************************************************************************/
966 
/* codegen_finish_native_call: called from a native stub after the native
   (JNI) function returned.  Unwinds the stackframeinfo, unwraps the
   return value, fetches-and-clears any pending exception and returns it
   as a raw java_object_t* (NULL if none).
   NOTE(review): this extraction is missing the function signature
   (line 967, presumably "java_object_t *codegen_finish_native_call(u1 *sp,
   u1 *pv)"), the stackframeinfo removal call (line 1006), the
   exception fetch assigning "e" (line 1020) and the localref-table
   release (lines 1026-1027) — confirm against the repository. */
 968 {
 969  assert(sp);
 970  assert(pv);
 971 
 972  stackframeinfo_t *sfi;
 973  java_handle_t *e;
 974  java_object_t *o;
 975  codeinfo *code;
 976  int32_t framesize;
 977 
 978 
 979  // Get information from method header.
 980  code = code_get_codeinfo_for_pv(pv);
 981  assert(code != NULL);
 982 
 983  framesize = md_stacktrace_get_framesize(code);
 984 
 985  // Get the methodinfo.
 986 #if defined(ENABLE_HANDLES) || !defined(NDEBUG)
 987  methodinfo *m = code->m;
 988  assert(m != NULL);
 989 #endif
 990 
 991  /* calculate needed values */
 992 
 993  FrameInfo FI(sp,framesize);
 994 
 995  uint8_t *datasp = FI.get_datasp();
 996 #if defined(ENABLE_HANDLES) || ( !defined(NDEBUG) && !defined(__ARM__) )
 997  uint64_t *ret_regs = FI.get_ret_regs();
 998 #endif
 999 
 1000  /* get data structures from stack */
 1001 
 1002  sfi = (stackframeinfo_t *) (datasp - sizeof(stackframeinfo_t));
 1003 
 1004  /* Remove current stackframeinfo from chain. */
 1005 
 1007 
 1008 #if defined(ENABLE_HANDLES)
 1009  /* unwrap the return value from the local reference table */
 1010  /* AFTER: removing the stackframeinfo */
 1011  /* BEFORE: releasing the local reference table */
 1012 
 1013  localref_native_exit(m, ret_regs);
 1014 #endif
 1015 
 1016  /* get and unwrap the exception */
 1017  /* AFTER: removing the stackframe info */
 1018  /* BEFORE: releasing the local reference table */
 1019 
 1021  o = LLNI_UNWRAP(e);
 1022 
 1023 #if defined(ENABLE_JNI)
 1024  /* release JNI local references table for this thread */
 1025 
 1028 #endif
 1029 
 1030 #if !defined(NDEBUG)
 1031 # if defined(__AARCH64__) || defined(__ALPHA__) || defined(__I386__) || defined(__MIPS__) || defined(__POWERPC__) || defined(__POWERPC64__) || defined(__S390__) || defined(__X86_64__)
 1032  /* print the call-trace if necessary */
 1033  /* AFTER: unwrapping the return value */
 1034 
 1036  trace_java_call_exit(m, ret_regs);
 1037 # endif
 1038 #endif
 1039 
 1040  return o;
 1041 }
1042 
1043 
1044 /* codegen_reg_of_var **********************************************************
1045 
1046  This function determines a register, to which the result of an
1047  operation should go, when it is ultimatively intended to store the
1048  result in pseudoregister v. If v is assigned to an actual
1049  register, this register will be returned. Otherwise (when v is
1050  spilled) this function returns tempregnum. If not already done,
1051  regoff and flags are set in the stack location.
1052 
1053 *******************************************************************************/
1054 
1055 s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
1056 {
1057  if (!(v->flags & INMEMORY))
1058  return v->vv.regoff;
1059 
1060  return tempregnum;
1061 }
1062 
1063 
1064 /* codegen_reg_of_dst **********************************************************
1065 
1066  This function determines a register, to which the result of an
1067  operation should go, when it is ultimatively intended to store the
1068  result in iptr->dst.var. If dst.var is assigned to an actual
1069  register, this register will be returned. Otherwise (when it is
1070  spilled) this function returns tempregnum. If not already done,
1071  regoff and flags are set in the stack location.
1072 
1073 *******************************************************************************/
1074 
1075 s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
1076 {
1077  return codegen_reg_of_var(iptr->opc, VAROP(iptr->dst), tempregnum);
1078 }
1079 
1080 /**
1081  * Fix up register locations in the case where control is transferred to an
1082  * exception handler block via normal control flow (no exception).
1083  */
/* Loads the handler's single in-slot variable and moves it into
   REG_ITMP1_XPTR, the register where exception handlers expect the
   exception pointer when entered via fall-through control flow.
   NOTE(review): the function signature (line 1084) is missing from this
   extraction — confirm name and parameters (jd, bptr) against the
   repository. */
 1085 {
 1086  // Exception handlers have exactly 1 in-slot
 1087  assert(bptr->indepth == 1);
 1088  varinfo *var = VAR(bptr->invars[0]);
 1089  int32_t d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
 1090  emit_load(jd, NULL, var, d);
 1091  // Copy the interface variable to ITMP1 (XPTR) because that's where
 1092  // the handler expects it.
 1093  emit_imove(jd->cd, d, REG_ITMP1_XPTR);
 1094 }
1095 
1096 /**
1097  * Generates machine code.
1098  */
1100 {
1101  varinfo* var;
1102  builtintable_entry* bte = 0;
1103  methoddesc* md;
1104  int32_t s1, s2, /*s3,*/ d;
1105 #if !defined(__I386__)
1106  int32_t fieldtype;
1107  int32_t disp;
1108 #endif
1109  int i;
1110 
1111  // Get required compiler data.
1112  //methodinfo* m = jd->m;
1113  codeinfo* code = jd->code;
1114  codegendata* cd = jd->cd;
1115  registerdata* rd = jd->rd;
1116 #if defined(ENABLE_SSA)
1117  lsradata* ls = jd->ls;
1118  bool last_cmd_was_goto = false;
1119 #endif
1120 
1121  // Space to save used callee saved registers.
1122  int32_t savedregs_num = 0;
1123  savedregs_num += (INT_SAV_CNT - rd->savintreguse);
1124  savedregs_num += (FLT_SAV_CNT - rd->savfltreguse);
1125 
1126  // Calculate size of stackframe.
1127  cd->stackframesize = rd->memuse + savedregs_num;
1128 
1129  // Space to save the return address.
1130 #if STACKFRAME_RA_TOP_OF_FRAME
1131 # if STACKFRAME_LEAFMETHODS_RA_REGISTER
1132  if (!code_is_leafmethod(code))
1133 # endif
1134  cd->stackframesize += 1;
1135 #endif
1136 
1137  // Space to save argument of monitor_enter.
1138  if (checksync && code_is_synchronized(code))
1140  /* On some architectures the stack position for the argument can
1141  not be shared with place to save the return register values to
1142  survive monitor_exit since both values reside in the same register. */
1143  cd->stackframesize += 2;
1144 #else
1145  cd->stackframesize += 1;
1146 #endif
1147 
1148  // Keep stack of non-leaf functions 16-byte aligned for calls into
1149  // native code.
1153 #else
1155 #endif
1156 
1157 #if defined(SPECIALMEMUSE)
1158  // On architectures having a linkage area, we can get rid of the whole
1159  // stackframe in leaf functions without saved registers.
1161  cd->stackframesize = 0;
1162 #endif
1163 
1164  /*
1165  * SECTION 1: Method header generation.
1166  */
1167 
1168  // The method header was reduced to the bare minimum of one pointer
1169  // to the codeinfo structure, which in turn contains all runtime
1170  // information. However this section together with the methodheader.h
1171  // file will be kept alive for historical reasons. It might come in
1172  // handy at some point.
1173 
1174  (void) dseg_add_unique_address(cd, code); ///< CodeinfoPointer
1175 
1176  // XXX, REMOVEME: We still need it for exception handling in assembler.
1177  // XXX ARM: (void) dseg_add_unique_s4(cd, cd->stackframesize);
1178 #if defined(__I386__)
1179  int align_off = (cd->stackframesize != 0) ? 4 : 0;
1180  (void) dseg_add_unique_s4(cd, cd->stackframesize * 8 + align_off); /* FrameSize */
1181 #else
1182  (void) dseg_add_unique_s4(cd, cd->stackframesize * 8); /* FrameSize */
1183 #endif
1184  (void) dseg_add_unique_s4(cd, code_is_leafmethod(code) ? 1 : 0);
1185  (void) dseg_add_unique_s4(cd, INT_SAV_CNT - rd->savintreguse); /* IntSave */
1186  (void) dseg_add_unique_s4(cd, FLT_SAV_CNT - rd->savfltreguse); /* FltSave */
1187 
1188  /*
1189  * SECTION 2: Method prolog generation.
1190  */
1191 
1192 #if defined(ENABLE_PROFILING)
1193  // Generate method profiling code.
1194  if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1195 
1196  // Count method frequency.
1197  emit_profile_method(cd, code);
1198 
1199  // Start CPU cycle counting.
1200  emit_profile_cycle_start(cd, code);
1201  }
1202 #endif
1203 
1204  // Emit code for the method prolog.
1205  codegen_emit_prolog(jd);
1206 
1207  // Emit code to call monitorenter function.
1208  if (checksync && code_is_synchronized(code))
1209  emit_monitor_enter(jd, rd->memuse * 8);
1210 
1211 #if !defined(NDEBUG)
1212  // Call trace function.
1215 #endif
1216 
1217 #if defined(ENABLE_SSA)
1218  // With SSA the header is basicblock 0, insert phi moves if necessary.
1219  if (ls != NULL)
1220  codegen_emit_phi_moves(jd, ls->basicblocks[0]);
1221 #endif
1222 
1223  // Create replacement points.
1224  REPLACEMENT_POINTS_INIT(cd, jd);
1225 
1226  /*
1227  * SECTION 3: ICMD code generation.
1228  */
1229 
1230  // Walk through all basic blocks.
1231  for (basicblock* bptr = jd->basicblocks; bptr != NULL; bptr = bptr->next) {
1232 
1233  bptr->mpc = (s4) (cd->mcodeptr - cd->mcodebase);
1234 
1235  // Is this basic block reached?
1236  if (bptr->state < basicblock::REACHED)
1237  continue;
1238 
1239  // Branch resolving.
1240  codegen_resolve_branchrefs(cd, bptr);
1241 
1242  // Handle replacement points.
1244 
1245 #if defined(ENABLE_REPLACEMENT) && defined(__I386__)
1246  // Generate countdown trap code.
1247  methodinfo* m = jd->m;
1248  if (bptr->bitflags & BBFLAG_REPLACEMENT) {
1249  if (cd->replacementpoint[-1].flags & rplpoint::FLAG_COUNTDOWN) {
1250  MCODECHECK(32);
1251  emit_trap_countdown(cd, &(m->hitcountdown));
1252  }
1253  }
1254 #endif
1255 
1256 #if defined(ENABLE_PROFILING)
1257  // Generate basicblock profiling code.
1258  if (JITDATA_HAS_FLAG_INSTRUMENT(jd)) {
1259 
1260  // Count basicblock frequency.
1261  emit_profile_basicblock(cd, code, bptr);
1262 
1263  // If this is an exception handler, start profiling again.
1264  if (bptr->type == basicblock::TYPE_EXH)
1265  emit_profile_cycle_start(cd, code);
1266  }
1267 #endif
1268 
1269  // Copy interface registers to their destination.
1270  int32_t indepth = bptr->indepth;
1271  // XXX Check if this is true for all archs.
1272  MCODECHECK(64+indepth); // All
1273  MCODECHECK(128+indepth); // PPC64
1274  MCODECHECK(512); // I386, X86_64, S390
1275 #if defined(ENABLE_SSA)
 1276  // XXX Check if this is correct and add a proper comment!
1277  if (ls != NULL) {
1278  last_cmd_was_goto = false;
1279  } else {
1280 #elif defined(ENABLE_LSRA)
1281  if (opt_lsra) {
1282  while (indepth > 0) {
1283  indepth--;
1284  var = VAR(bptr->invars[indepth]);
1285  if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1286  if (!IS_INMEMORY(src->flags))
1287  d = var->vv.regoff;
1288  else
1289  d = REG_ITMP1_XPTR;
1290  // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1291  // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1292  emit_imove(cd, REG_ITMP1_XPTR, d);
1293  emit_store(jd, NULL, var, d);
1294  }
1295  }
1296  } else {
1297 #endif
1298  while (indepth > 0) {
1299  indepth--;
1300  var = VAR(bptr->invars[indepth]);
1301  if ((indepth == bptr->indepth-1) && (bptr->type == basicblock::TYPE_EXH)) {
1302  d = codegen_reg_of_var(0, var, REG_ITMP1_XPTR);
1303  // XXX Sparc64: Here we use REG_ITMP2_XPTR, fix this!
1304  // XXX S390: Here we use REG_ITMP3_XPTR, fix this!
1305  emit_imove(cd, REG_ITMP1_XPTR, d);
1306  emit_store(jd, NULL, var, d);
1307  }
1308  else {
1309  assert((var->flags & INOUT));
1310  }
1311  }
1312 #if defined(ENABLE_SSA) || defined(ENABLE_LSRA)
1313  }
1314 #endif
1315 
1316  // Walk through all instructions.
1317  int32_t len = bptr->icount;
1318  uint16_t currentline = 0;
1319  for (instruction* iptr = bptr->iinstr; len > 0; len--, iptr++) {
1320 
1321  // Add line number.
1322  if (iptr->line != currentline) {
1323  linenumbertable_list_entry_add(cd, iptr->line);
1324  currentline = iptr->line;
1325  }
1326 
1327  // An instruction usually needs < 64 words.
1328  // XXX Check if this is true for all archs.
1329  MCODECHECK(64); // All
1330  MCODECHECK(128); // PPC64
1331  MCODECHECK(1024); // I386, X86_64, S390 /* 1kB should be enough */
1332 
1333  // The big switch.
1334  switch (iptr->opc) {
1335 
1336  case ICMD_NOP: /* ... ==> ... */
1337  case ICMD_POP: /* ..., value ==> ... */
1338  case ICMD_POP2: /* ..., value, value ==> ... */
1339  break;
1340 
1341  case ICMD_CHECKNULL: /* ..., objectref ==> ..., objectref */
1342 
1343  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1344  emit_nullpointer_check(cd, iptr, s1);
1345  break;
1346 
1347  case ICMD_BREAKPOINT: /* ... ==> ... */
1348  /* sx.val.anyptr = Breakpoint */
1349 
1350  patcher_add_patch_ref(jd, PATCHER_breakpoint, iptr->sx.val.anyptr, 0);
1351  PATCHER_NOPS;
1352  break;
1353 
1354 #if defined(ENABLE_SSA)
1355  case ICMD_GETEXCEPTION:
1356 
1357  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1358  emit_imove(cd, REG_ITMP1, d);
1359  emit_store_dst(jd, iptr, d);
1360  break;
1361 #endif
1362 
1363  /* inline operations **********************************************/
1364 
1365  case ICMD_INLINE_START:
1366 
1368  break;
1369 
1370  case ICMD_INLINE_BODY:
1371 
1374  linenumbertable_list_entry_add(cd, iptr->line);
1375  break;
1376 
1377  case ICMD_INLINE_END:
1378 
1380  linenumbertable_list_entry_add(cd, iptr->line);
1381  break;
1382 
1383 
1384  /* constant operations ********************************************/
1385 
1386  case ICMD_ICONST: /* ... ==> ..., constant */
1387 
1388  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
1389  ICONST(d, iptr->sx.val.i);
1390  emit_store_dst(jd, iptr, d);
1391  break;
1392 
1393  case ICMD_LCONST: /* ... ==> ..., constant */
1394 
1395  d = codegen_reg_of_dst(jd, iptr, REG_LTMP12);
1396  LCONST(d, iptr->sx.val.l);
1397  emit_store_dst(jd, iptr, d);
1398  break;
1399 
1400 
1401  /* load/store/copy/move operations ********************************/
1402 
1403  case ICMD_COPY:
1404  case ICMD_MOVE:
1405  case ICMD_ILOAD: /* ... ==> ..., content of local variable */
1406  case ICMD_LLOAD: /* s1 = local variable */
1407  case ICMD_FLOAD:
1408  case ICMD_DLOAD:
1409  case ICMD_ALOAD:
1410  case ICMD_ISTORE: /* ..., value ==> ... */
1411  case ICMD_LSTORE:
1412  case ICMD_FSTORE:
1413  case ICMD_DSTORE:
1414 
1415  emit_copy(jd, iptr);
1416  break;
1417 
1418  case ICMD_ASTORE:
1419 
1420  if (!(iptr->flags.bits & INS_FLAG_RETADDR))
1421  emit_copy(jd, iptr);
1422  break;
1423 
1424 
1425  /* integer operations *********************************************/
1426 
1427  case ICMD_FCONST: /* ... ==> ..., constant */
1428  case ICMD_DCONST: /* ... ==> ..., constant */
1429  case ICMD_ACONST: /* ... ==> ..., constant */
1430  case ICMD_INEG: /* ..., value ==> ..., - value */
1431  case ICMD_LNEG: /* ..., value ==> ..., - value */
1432  case ICMD_I2L: /* ..., value ==> ..., value */
1433  case ICMD_L2I: /* ..., value ==> ..., value */
1434  case ICMD_INT2BYTE: /* ..., value ==> ..., value */
1435  case ICMD_INT2CHAR: /* ..., value ==> ..., value */
1436  case ICMD_INT2SHORT: /* ..., value ==> ..., value */
1437  case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1438  case ICMD_IINC:
1439  case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
1440  /* sx.val.i = constant */
1441  case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1442  case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
1443  /* sx.val.l = constant */
1444  case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1445  case ICMD_ISUBCONST: /* ..., value ==> ..., value + constant */
1446  /* sx.val.i = constant */
1447  case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1448  case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
1449  /* sx.val.l = constant */
1450  case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1451  case ICMD_IMULCONST: /* ..., value ==> ..., value * constant */
1452  /* sx.val.i = constant */
1453  case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1454  /* sx.val.i = constant */
1455  case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1456  case ICMD_LMULCONST: /* ..., value ==> ..., value * constant */
1457  /* sx.val.l = constant */
1458  case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
1459  /* sx.val.l = constant */
1460  case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1461  case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1462  case ICMD_IDIVPOW2: /* ..., value ==> ..., value >> constant */
1463  /* sx.val.i = constant */
1464  case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
1465  /* sx.val.i = constant */
1466  case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1467  case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1468  case ICMD_LDIVPOW2: /* ..., value ==> ..., value >> constant */
1469  /* sx.val.i = constant */
1470  case ICMD_LREMPOW2: /* ..., value ==> ..., value % constant */
1471  /* sx.val.l = constant */
1472  case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1473  case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
1474  /* sx.val.i = constant */
1475  case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1476  case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
1477  /* sx.val.i = constant */
1478  case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1479  case ICMD_IUSHRCONST: /* ..., value ==> ..., value >>> constant */
1480  /* sx.val.i = constant */
1481  case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
1482  case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
1483  /* sx.val.i = constant */
1484  case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
1485  case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
1486  /* sx.val.i = constant */
1487  case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >>> val2 */
1488  case ICMD_LUSHRCONST: /* ..., value ==> ..., value >>> constant */
1489  /* sx.val.l = constant */
1490  case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1491  case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
1492  /* sx.val.i = constant */
1493  case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
1494  case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
1495  /* sx.val.l = constant */
1496  case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1497  case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
1498  /* sx.val.i = constant */
1499  case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
1500  case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
1501  /* sx.val.l = constant */
1502  case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1503  case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
1504  /* sx.val.i = constant */
1505  case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
1506  case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
1507  /* sx.val.l = constant */
1508 
1509  // Generate architecture specific instructions.
1510  codegen_emit_instruction(jd, iptr);
1511  break;
1512 
1513 
1514  /* floating operations ********************************************/
1515 
1516 #if !defined(ENABLE_SOFTFLOAT)
1517  case ICMD_FNEG: /* ..., value ==> ..., - value */
1518  case ICMD_DNEG:
1519  case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
1520  case ICMD_DADD:
1521  case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
1522  case ICMD_DSUB:
1523  case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
1524  case ICMD_DMUL:
1525  case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
1526  case ICMD_DDIV:
1527  case ICMD_FREM: /* ..., val1, val2 ==> ..., val1 % val2 */
1528  case ICMD_DREM:
1529  case ICMD_I2F: /* ..., value ==> ..., (float) value */
1530  case ICMD_I2D: /* ..., value ==> ..., (double) value */
1531  case ICMD_L2F: /* ..., value ==> ..., (float) value */
1532  case ICMD_L2D: /* ..., value ==> ..., (double) value */
1533  case ICMD_F2I: /* ..., value ==> ..., (int) value */
1534  case ICMD_D2I:
1535  case ICMD_F2L: /* ..., value ==> ..., (long) value */
1536  case ICMD_D2L:
1537  case ICMD_F2D: /* ..., value ==> ..., (double) value */
1538  case ICMD_D2F: /* ..., value ==> ..., (float) value */
1539  case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
1540  case ICMD_DCMPL: /* == => 0, < => 1, > => -1 */
1541  case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
1542  case ICMD_DCMPG: /* == => 0, < => 1, > => -1 */
1543 
1544  // Generate architecture specific instructions.
1545  codegen_emit_instruction(jd, iptr);
1546  break;
1547 #endif /* !defined(ENABLE_SOFTFLOAT) */
1548 
1549 
1550  /* memory operations **********************************************/
1551 
1552  case ICMD_ARRAYLENGTH:/* ..., arrayref ==> ..., length */
1553 
1554  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1555  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1556  /* implicit null-pointer check */
1557  // XXX PPC64: Here we had an explicit null-pointer check
1558  // which I think was obsolete, please confirm. Otherwise:
1559  // emit_nullpointer_check(cd, iptr, s1);
1560  M_ILD(d, s1, OFFSET(java_array_t, size));
1561  emit_store_dst(jd, iptr, d);
1562  break;
1563 
1564  case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
1565  case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
1566  case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
1567  case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
1568  case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
1569  case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
1570  case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
1571  case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
1572  case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
1573  case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
1574  case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
1575  case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
1576  case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
1577  case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
1578  case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
1579  case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
1580  case ICMD_BASTORECONST: /* ..., arrayref, index ==> ... */
1581  case ICMD_CASTORECONST: /* ..., arrayref, index ==> ... */
1582  case ICMD_SASTORECONST: /* ..., arrayref, index ==> ... */
1583  case ICMD_IASTORECONST: /* ..., arrayref, index ==> ... */
1584  case ICMD_LASTORECONST: /* ..., arrayref, index ==> ... */
1585  case ICMD_FASTORECONST: /* ..., arrayref, index ==> ... */
1586  case ICMD_DASTORECONST: /* ..., arrayref, index ==> ... */
1587  case ICMD_AASTORECONST: /* ..., arrayref, index ==> ... */
1588  case ICMD_GETFIELD: /* ... ==> ..., value */
1589  case ICMD_PUTFIELD: /* ..., value ==> ... */
1590  case ICMD_PUTFIELDCONST: /* ..., objectref ==> ... */
1591  /* val = value (in current instruction) */
1592  case ICMD_PUTSTATICCONST: /* ... ==> ... */
1593  /* val = value (in current instruction) */
1594 
1595  // Generate architecture specific instructions.
1596  codegen_emit_instruction(jd, iptr);
1597  break;
1598 
1599  case ICMD_GETSTATIC: /* ... ==> ..., value */
1600 
1601 #if defined(__I386__)
1602  // Generate architecture specific instructions.
1603  codegen_emit_instruction(jd, iptr);
1604  break;
1605 #else
1606  {
1607  fieldinfo* fi;
1608  //patchref_t* pr;
1609  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1610  unresolved_field* uf = iptr->sx.s23.s3.uf;
1611  fieldtype = uf->fieldref->parseddesc.fd->type;
1612  disp = dseg_add_unique_address(cd, 0);
1613 
1614  //pr = patcher_add_patch_ref(jd, PATCHER_get_putstatic, uf, disp);
1616 
1617  fi = NULL; /* Silence compiler warning */
1618  }
1619  else {
1620  fi = iptr->sx.s23.s3.fmiref->p.field;
1621  fieldtype = fi->type;
1622  disp = dseg_add_address(cd, fi->value);
1623 
1628  }
1629 
1630  //pr = NULL; /* Silence compiler warning */
1631  }
1632 
1633  // XXX X86_64: Here We had this:
1634  /* This approach is much faster than moving the field
1635  address inline into a register. */
1636 
1637  M_ALD_DSEG(REG_ITMP1, disp);
1638 
1639  switch (fieldtype) {
1640  case TYPE_ADR:
1641  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1642  M_ALD(d, REG_ITMP1, 0);
1643  break;
1644  case TYPE_INT:
1645 #if defined(ENABLE_SOFTFLOAT)
1646  case TYPE_FLT:
1647 #endif
1648  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1649  M_ILD(d, REG_ITMP1, 0);
1650  break;
1651  case TYPE_LNG:
1652 #if defined(ENABLE_SOFTFLOAT)
1653  case TYPE_DBL:
1654 #endif
1655  d = codegen_reg_of_dst(jd, iptr, REG_LTMP23);
1656  M_LLD(d, REG_ITMP1, 0);
1657  break;
1658 #if !defined(ENABLE_SOFTFLOAT)
1659  case TYPE_FLT:
1660  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1661  M_FLD(d, REG_ITMP1, 0);
1662  break;
1663  case TYPE_DBL:
1664  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1665  M_DLD(d, REG_ITMP1, 0);
1666  break;
1667 #endif
1668  default:
1669  // Silence compiler warning.
1670  d = 0;
1671  }
1672  emit_store_dst(jd, iptr, d);
1673  break;
1674  }
1675 #endif
1676 
1677  case ICMD_PUTSTATIC: /* ..., value ==> ... */
1678 
1679 #if defined(__I386__)
1680  // Generate architecture specific instructions.
1681  codegen_emit_instruction(jd, iptr);
1682  break;
1683 #else
1684  {
1685  fieldinfo* fi;
1686 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1687  patchref_t* pr = NULL;
1688 #endif
1689 
1690  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1691  unresolved_field* uf = iptr->sx.s23.s3.uf;
1692  fieldtype = uf->fieldref->parseddesc.fd->type;
1693  disp = dseg_add_unique_address(cd, 0);
1694 
1695 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1696  pr =
1697 #endif
1699 
1700  fi = NULL; /* Silence compiler warning */
1701  }
1702  else {
1703  fi = iptr->sx.s23.s3.fmiref->p.field;
1704  fieldtype = fi->type;
1705  disp = dseg_add_address(cd, fi->value);
1706 
1711  }
1712  }
1713 
1714  // XXX X86_64: Here We had this:
1715  /* This approach is much faster than moving the field
1716  address inline into a register. */
1717 
1718  M_ALD_DSEG(REG_ITMP1, disp);
1719 
1720  switch (fieldtype) {
1721  case TYPE_ADR:
1722  s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1723  M_AST(s1, REG_ITMP1, 0);
1724  break;
1725  case TYPE_INT:
1726 #if defined(ENABLE_SOFTFLOAT)
1727  case TYPE_FLT:
1728 #endif
1729  s1 = emit_load_s1(jd, iptr, REG_ITMP2);
1730  M_IST(s1, REG_ITMP1, 0);
1731  break;
1732  case TYPE_LNG:
1733 #if defined(ENABLE_SOFTFLOAT)
1734  case TYPE_DBL:
1735 #endif
1736  s1 = emit_load_s1(jd, iptr, REG_LTMP23);
1737  M_LST(s1, REG_ITMP1, 0);
1738  break;
1739 #if !defined(ENABLE_SOFTFLOAT)
1740  case TYPE_FLT:
1741  s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1742  M_FST(s1, REG_ITMP1, 0);
1743  break;
1744  case TYPE_DBL:
1745  s1 = emit_load_s1(jd, iptr, REG_FTMP2);
1746  M_DST(s1, REG_ITMP1, 0);
1747  break;
1748 #endif
1749  }
1750 #if defined(USES_PATCHABLE_MEMORY_BARRIER)
1751  codegen_emit_patchable_barrier(iptr, cd, pr, fi);
1752 #endif
1753  break;
1754  }
1755 #endif
1756 
1757  /* branch operations **********************************************/
1758 
1759  case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
1760 
1761  // We might leave this method, stop profiling.
1763 
1764  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1765  // XXX Sparc64: We use REG_ITMP2_XPTR here, fix me!
1766  emit_imove(cd, s1, REG_ITMP1_XPTR);
1767 
1768 #ifdef ENABLE_VERIFIER
1769  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1770  unresolved_class *uc = iptr->sx.s23.s2.uc;
1772  }
1773 #endif /* ENABLE_VERIFIER */
1774 
1775  // Generate architecture specific instructions.
1776  codegen_emit_instruction(jd, iptr);
1777  ALIGNCODENOP;
1778  break;
1779 
1780  case ICMD_GOTO: /* ... ==> ... */
1781  case ICMD_RET: /* ... ==> ... */
1782 
1783 #if defined(ENABLE_SSA)
1784  // In case of a goto, phimoves have to be inserted
1785  // before the jump.
1786  if (ls != NULL) {
1787  last_cmd_was_goto = true;
1788  codegen_emit_phi_moves(jd, bptr);
1789  }
1790 #endif
1791  if (iptr->dst.block->type == basicblock::TYPE_EXH)
1792  fixup_exc_handler_interface(jd, iptr->dst.block);
1793  emit_br(cd, iptr->dst.block);
1794  ALIGNCODENOP;
1795  break;
1796 
1797  case ICMD_JSR: /* ... ==> ... */
1798 
1799  assert(iptr->sx.s23.s3.jsrtarget.block->type != basicblock::TYPE_EXH);
1800  emit_br(cd, iptr->sx.s23.s3.jsrtarget.block);
1801  ALIGNCODENOP;
1802  break;
1803 
1804  case ICMD_IFNULL: /* ..., value ==> ... */
1805  case ICMD_IFNONNULL:
1806 
1807  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1808  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1809 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1810  emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, s1, BRANCH_OPT_NONE);
1811 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1812  M_TEST(s1);
1813  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFNULL, BRANCH_OPT_NONE);
1814 #else
1815 # error Unable to generate code for this configuration!
1816 #endif
1817  break;
1818 
1819  case ICMD_IFEQ: /* ..., value ==> ... */
1820  case ICMD_IFNE:
1821  case ICMD_IFLT:
1822  case ICMD_IFLE:
1823  case ICMD_IFGT:
1824  case ICMD_IFGE:
1825 
1826  // XXX Sparc64: int compares must not branch on the
1827  // register directly. Reason is, that register content is
1828  // not 32-bit clean. Fix this!
1829 
1830  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1831 
1832 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1833  if (iptr->sx.val.i == 0) {
1834  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1835  emit_bccz(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, s1, BRANCH_OPT_NONE);
1836  } else {
1837  // Generate architecture specific instructions.
1838  codegen_emit_instruction(jd, iptr);
1839  }
1840 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1841  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1842  emit_icmp_imm(cd, s1, iptr->sx.val.i);
1843  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IFEQ, BRANCH_OPT_NONE);
1844 #else
1845 # error Unable to generate code for this configuration!
1846 #endif
1847  break;
1848 
1849  case ICMD_IF_LEQ: /* ..., value ==> ... */
1850  case ICMD_IF_LNE:
1851  case ICMD_IF_LLT:
1852  case ICMD_IF_LGE:
1853  case ICMD_IF_LGT:
1854  case ICMD_IF_LLE:
1855 
1856  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1857 
1858  // Generate architecture specific instructions.
1859  codegen_emit_instruction(jd, iptr);
1860  break;
1861 
1862  case ICMD_IF_ACMPEQ: /* ..., value, value ==> ... */
1863  case ICMD_IF_ACMPNE: /* op1 = target JavaVM pc */
1864 
1865  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1866 
1867  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1868  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1869 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1870  switch (iptr->opc) {
1871  case ICMD_IF_ACMPEQ:
1872  emit_beq(cd, iptr->dst.block, s1, s2);
1873  break;
1874  case ICMD_IF_ACMPNE:
1875  emit_bne(cd, iptr->dst.block, s1, s2);
1876  break;
1877  default:
1878  break;
1879  }
1880 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1881  M_ACMP(s1, s2);
1882  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ACMPEQ, BRANCH_OPT_NONE);
1883 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1884  M_CMPEQ(s1, s2, REG_ITMP1);
1885  switch (iptr->opc) {
1886  case ICMD_IF_ACMPEQ:
1887  emit_bnez(cd, iptr->dst.block, REG_ITMP1);
1888  break;
1889  case ICMD_IF_ACMPNE:
1890  emit_beqz(cd, iptr->dst.block, REG_ITMP1);
1891  break;
1892  default:
1893  break;
1894  }
1895 #else
1896 # error Unable to generate code for this configuration!
1897 #endif
1898  break;
1899 
1900  case ICMD_IF_ICMPEQ: /* ..., value, value ==> ... */
1901  case ICMD_IF_ICMPNE: /* op1 = target JavaVM pc */
1902 
1903  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1904 
1905 #if SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
1906  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1907  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1908  switch (iptr->opc) {
1909  case ICMD_IF_ICMPEQ:
1910  emit_beq(cd, iptr->dst.block, s1, s2);
1911  break;
1912  case ICMD_IF_ICMPNE:
1913  emit_bne(cd, iptr->dst.block, s1, s2);
1914  break;
1915  }
1916  break;
1917 #else
1918  /* fall-through */
1919 #endif
1920 
1921  case ICMD_IF_ICMPLT: /* ..., value, value ==> ... */
1922  case ICMD_IF_ICMPGT: /* op1 = target JavaVM pc */
1923  case ICMD_IF_ICMPLE:
1924  case ICMD_IF_ICMPGE:
1925 
1926  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1927 
1928  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1929  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1930 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
1931 # if defined(__I386__) || defined(__X86_64__)
1932  // XXX Fix this soon!!!
1933  M_ICMP(s2, s1);
1934 # else
1935  M_ICMP(s1, s2);
1936 # endif
1937  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_ICMPEQ, BRANCH_OPT_NONE);
1938 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
1939  // Generate architecture specific instructions.
1940  codegen_emit_instruction(jd, iptr);
1941 #else
1942 # error Unable to generate code for this configuration!
1943 #endif
1944  break;
1945 
1946  case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
1947  case ICMD_IF_LCMPNE: /* op1 = target JavaVM pc */
1948  case ICMD_IF_LCMPLT:
1949  case ICMD_IF_LCMPGT:
1950  case ICMD_IF_LCMPLE:
1951  case ICMD_IF_LCMPGE:
1952 
1953  assert(iptr->dst.block->type != basicblock::TYPE_EXH);
1954 
1955  // Generate architecture specific instructions.
1956  codegen_emit_instruction(jd, iptr);
1957  break;
1958 
1959  case ICMD_RETURN: /* ... ==> ... */
1960 
1961  REPLACEMENT_POINT_RETURN(cd, iptr);
1962  goto nowperformreturn;
1963 
1964  case ICMD_ARETURN: /* ..., retvalue ==> ... */
1965 
1966  REPLACEMENT_POINT_RETURN(cd, iptr);
1967  s1 = emit_load_s1(jd, iptr, REG_RESULT);
1968  // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1969  emit_imove(cd, s1, REG_RESULT);
1970 
1971 #ifdef ENABLE_VERIFIER
1972  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1974  unresolved_class *uc = iptr->sx.s23.s2.uc;
1977  }
1978 #endif /* ENABLE_VERIFIER */
1979  goto nowperformreturn;
1980 
1981  case ICMD_IRETURN: /* ..., retvalue ==> ... */
1982 #if defined(ENABLE_SOFTFLOAT)
1983  case ICMD_FRETURN:
1984 #endif
1985 
1986  REPLACEMENT_POINT_RETURN(cd, iptr);
1987  s1 = emit_load_s1(jd, iptr, REG_RESULT);
1988  // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
1989  emit_imove(cd, s1, REG_RESULT);
1990  goto nowperformreturn;
1991 
1992  case ICMD_LRETURN: /* ..., retvalue ==> ... */
1993 #if defined(ENABLE_SOFTFLOAT)
1994  case ICMD_DRETURN:
1995 #endif
1996 
1997  REPLACEMENT_POINT_RETURN(cd, iptr);
1998  s1 = emit_load_s1(jd, iptr, REG_LRESULT);
1999  // XXX Sparc64: Here this should be REG_RESULT_CALLEE!
2000  emit_lmove(cd, s1, REG_LRESULT);
2001  goto nowperformreturn;
2002 
2003 #if !defined(ENABLE_SOFTFLOAT)
2004  case ICMD_FRETURN: /* ..., retvalue ==> ... */
2005 
2006  REPLACEMENT_POINT_RETURN(cd, iptr);
2007  s1 = emit_load_s1(jd, iptr, REG_FRESULT);
2008 #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2009  M_CAST_F2I(s1, REG_RESULT);
2010 #else
2011  emit_fmove(cd, s1, REG_FRESULT);
2012 #endif
2013  goto nowperformreturn;
2014 
2015  case ICMD_DRETURN: /* ..., retvalue ==> ... */
2016 
2017  REPLACEMENT_POINT_RETURN(cd, iptr);
2018  s1 = emit_load_s1(jd, iptr, REG_FRESULT);
2019 #if defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2020  M_CAST_D2L(s1, REG_LRESULT);
2021 #else
2022  emit_dmove(cd, s1, REG_FRESULT);
2023 #endif
2024  goto nowperformreturn;
2025 #endif
2026 
2027 nowperformreturn:
2028 #if !defined(NDEBUG)
2029  // Call trace function.
2032 #endif
2033 
2034  // Emit code to call monitorexit function.
2035  if (checksync && code_is_synchronized(code)) {
2036  emit_monitor_exit(jd, rd->memuse * 8);
2037  }
2038 
2039  // Generate method profiling code.
2041 
2042  // Emit code for the method epilog.
2043  codegen_emit_epilog(jd);
2044  ALIGNCODENOP;
2045  break;
2046 
2047  case ICMD_BUILTIN: /* ..., [arg1, [arg2 ...]] ==> ... */
2048 
2050 
2051  bte = iptr->sx.s23.s3.bte;
2052  md = bte->md;
2053 
2054 #if defined(ENABLE_ESCAPE_REASON) && defined(__I386__)
2055  if (bte->fp == BUILTIN_escape_reason_new) {
2056  void set_escape_reasons(void *);
2057  M_ASUB_IMM(8, REG_SP);
2058  M_MOV_IMM(iptr->escape_reasons, REG_ITMP1);
2059  M_AST(EDX, REG_SP, 4);
2060  M_AST(REG_ITMP1, REG_SP, 0);
2061  M_MOV_IMM(set_escape_reasons, REG_ITMP1);
2062  M_CALL(REG_ITMP1);
2063  M_ALD(EDX, REG_SP, 4);
2064  M_AADD_IMM(8, REG_SP);
2065  }
2066 #endif
2067 
2068  // Emit the fast-path if available.
2069  if (bte->emit_fastpath != NULL) {
2070  void (*emit_fastpath)(jitdata* jd, instruction* iptr, int d);
2071  emit_fastpath = (void (*)(jitdata* jd, instruction* iptr, int d)) bte->emit_fastpath;
2072 
2073  assert(md->returntype.type == TYPE_VOID);
2074  d = REG_ITMP1;
2075 
2076  // Actually call the fast-path emitter.
2077  emit_fastpath(jd, iptr, d);
2078 
2079  // If fast-path succeeded, jump to the end of the builtin
2080  // invocation.
2081  // XXX Actually the slow-path block below should be moved
2082  // out of the instruction stream and the jump below should be
2083  // inverted.
2084 #if SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2085  os::abort("codegen_emit: Implement jump over slow-path for this configuration.");
2086 #elif SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2087  M_TEST(d);
2088  emit_label_bne(cd, BRANCH_LABEL_10);
2089 #else
2090 # error Unable to generate code for this configuration!
2091 #endif
2092  }
2093 
2094  goto gen_method;
2095 
2096  case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
2097  case ICMD_INVOKESPECIAL:/* ..., objectref, [arg1, [arg2 ...]] ==> ... */
2098  case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
2099  case ICMD_INVOKEINTERFACE:
2100 
2101  REPLACEMENT_POINT_INVOKE(cd, iptr);
2102 
2103  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
2104  unresolved_method* um = iptr->sx.s23.s3.um;
2105  md = um->methodref->parseddesc.md;
2106  }
2107  else {
2108  methodinfo* lm = iptr->sx.s23.s3.fmiref->p.method;
2109  md = lm->parseddesc;
2110  }
2111 
2112 gen_method:
2113  i = md->paramcount;
2114 
2115  // XXX Check this again!
2116  MCODECHECK((i << 1) + 64); // PPC
2117 
2118  // Copy arguments to registers or stack location.
2119  for (i = i - 1; i >= 0; i--) {
2120  var = VAR(iptr->sx.s23.s2.args[i]);
2121  d = md->params[i].regoff;
2122 
2123  // Already pre-allocated?
2124  if (var->flags & PREALLOC)
2125  continue;
2126 
2127  if (!md->params[i].inmemory) {
2128  switch (var->type) {
2129  case TYPE_ADR:
2130  case TYPE_INT:
2131 #if defined(ENABLE_SOFTFLOAT)
2132  case TYPE_FLT:
2133 #endif
2134  s1 = emit_load(jd, iptr, var, d);
2135  emit_imove(cd, s1, d);
2136  break;
2137 
2138  case TYPE_LNG:
2139 #if defined(ENABLE_SOFTFLOAT)
2140  case TYPE_DBL:
2141 #endif
2142  s1 = emit_load(jd, iptr, var, d);
2143  emit_lmove(cd, s1, d);
2144  break;
2145 
2146 #if !defined(ENABLE_SOFTFLOAT)
2147  case TYPE_FLT:
2148 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2149  s1 = emit_load(jd, iptr, var, d);
2150  emit_fmove(cd, s1, d);
2151 #else
2152  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2153  M_CAST_F2I(s1, d);
2154 #endif
2155  break;
2156 
2157  case TYPE_DBL:
2158 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2159  s1 = emit_load(jd, iptr, var, d);
2160  emit_dmove(cd, s1, d);
2161 #else
2162  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2163  M_CAST_D2L(s1, d);
2164 #endif
2165  break;
2166 #endif
2167  default:
2168  assert(false);
2169  break;
2170  }
2171  }
2172  else {
2173  switch (var->type) {
2174  case TYPE_ADR:
2175  s1 = emit_load(jd, iptr, var, REG_ITMP1);
2176  // XXX Sparc64: Here this actually was:
2177  // M_STX(s1, REG_SP, JITSTACK + d);
2178  M_AST(s1, REG_SP, d);
2179  break;
2180 
2181  case TYPE_INT:
2182 #if defined(ENABLE_SOFTFLOAT)
2183  case TYPE_FLT:
2184 #endif
2185 #if SIZEOF_VOID_P == 4
2186  s1 = emit_load(jd, iptr, var, REG_ITMP1);
2187  M_IST(s1, REG_SP, d);
2188  break;
2189 #else
2190  /* fall-through */
2191 #endif
2192 
2193  case TYPE_LNG:
2194 #if defined(ENABLE_SOFTFLOAT)
2195  case TYPE_DBL:
2196 #endif
2197  s1 = emit_load(jd, iptr, var, REG_LTMP12);
2198  // XXX Sparc64: Here this actually was:
2199  // M_STX(s1, REG_SP, JITSTACK + d);
2200  M_LST(s1, REG_SP, d);
2201  break;
2202 
2203 #if !defined(ENABLE_SOFTFLOAT)
2204  case TYPE_FLT:
2205  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2206  M_FST(s1, REG_SP, d);
2207  break;
2208 
2209  case TYPE_DBL:
2210  s1 = emit_load(jd, iptr, var, REG_FTMP1);
2211  // XXX Sparc64: Here this actually was:
2212  // M_DST(s1, REG_SP, JITSTACK + d);
2213  M_DST(s1, REG_SP, d);
2214  break;
2215 #endif
2216  default:
2217  assert(false);
2218  break;
2219  }
2220  }
2221  }
2222 
2223  // Generate method profiling code.
2225 
2226  // Generate architecture specific instructions.
2227  codegen_emit_instruction(jd, iptr);
2228 
2229  // Generate method profiling code.
2231 
2232  // Store size of call code in replacement point.
2235 
2236  // Recompute the procedure vector (PV).
2237  emit_recompute_pv(cd);
2238 
2239  // Store return value.
2240 #if defined(ENABLE_SSA)
2241  if ((ls == NULL) /* || (!IS_TEMPVAR_INDEX(iptr->dst.varindex)) */ ||
2242  (ls->lifetime[iptr->dst.varindex].type != jitdata::UNUSED))
2243  /* a "living" stackslot */
2244 #endif
2245  switch (md->returntype.type) {
2246  case TYPE_INT:
2247  case TYPE_ADR:
2248 #if defined(ENABLE_SOFTFLOAT)
2249  case TYPE_FLT:
2250 #endif
2251  s1 = codegen_reg_of_dst(jd, iptr, REG_RESULT);
2252  // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2253  emit_imove(cd, REG_RESULT, s1);
2254  emit_store_dst(jd, iptr, s1);
2255  break;
2256 
2257  case TYPE_LNG:
2258 #if defined(ENABLE_SOFTFLOAT)
2259  case TYPE_DBL:
2260 #endif
2261  s1 = codegen_reg_of_dst(jd, iptr, REG_LRESULT);
2262  // XXX Sparc64: This should actually be REG_RESULT_CALLER, fix this!
2263  emit_lmove(cd, REG_LRESULT, s1);
2264  emit_store_dst(jd, iptr, s1);
2265  break;
2266 
2267 #if !defined(ENABLE_SOFTFLOAT)
2268  case TYPE_FLT:
2269 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2270  s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2271  emit_fmove(cd, REG_FRESULT, s1);
2272 #else
2273  s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2274  M_CAST_I2F(REG_RESULT, s1);
2275 #endif
2276  emit_store_dst(jd, iptr, s1);
2277  break;
2278 
2279  case TYPE_DBL:
2280 #if !defined(SUPPORT_PASS_FLOATARGS_IN_INTREGS)
2281  s1 = codegen_reg_of_dst(jd, iptr, REG_FRESULT);
2282  emit_dmove(cd, REG_FRESULT, s1);
2283 #else
2284  s1 = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
2285  M_CAST_L2D(REG_LRESULT, s1);
2286 #endif
2287  emit_store_dst(jd, iptr, s1);
2288  break;
2289 #endif
2290 
2291  case TYPE_VOID:
2292  break;
2293  default:
2294  assert(false);
2295  break;
2296  }
2297 
2298  // If we are emitting a fast-path block, this is the label for
2299  // successful fast-path execution.
2300  if ((iptr->opc == ICMD_BUILTIN) && (bte->emit_fastpath != NULL)) {
2302  }
2303 
2304  break;
2305 
2306  case ICMD_TABLESWITCH: /* ..., index ==> ... */
2307 
2308  // Generate architecture specific instructions.
2309  codegen_emit_instruction(jd, iptr);
2310  break;
2311 
2312  case ICMD_LOOKUPSWITCH: /* ..., key ==> ... */
2313 
2314  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
2315  i = iptr->sx.s23.s2.lookupcount;
2316 
2317  // XXX Again we need to check this
2318  MCODECHECK((i<<2)+8); // Alpha, ARM, i386, MIPS, Sparc64
2319  MCODECHECK((i<<3)+8); // PPC64
2320  MCODECHECK(8 + ((7 + 6) * i) + 5); // X86_64, S390
2321 
2322  // Compare keys.
2323  for (lookup_target_t* lookup = iptr->dst.lookup; i > 0; ++lookup, --i) {
2324 #if SUPPORT_BRANCH_CONDITIONAL_CONDITION_REGISTER
2325  emit_icmp_imm(cd, s1, lookup->value);
2326  emit_beq(cd, lookup->target.block);
2327 #elif SUPPORT_BRANCH_CONDITIONAL_TWO_INTEGER_REGISTERS
2328  ICONST(REG_ITMP2, lookup->value);
2329  emit_beq(cd, lookup->target.block, s1, REG_ITMP2);
2330 #elif SUPPORT_BRANCH_CONDITIONAL_ONE_INTEGER_REGISTER
2331  emit_icmpeq_imm(cd, s1, lookup->value, REG_ITMP2);
2332  emit_bnez(cd, lookup->target.block, REG_ITMP2);
2333 #else
2334 # error Unable to generate code for this configuration!
2335 #endif
2336  }
2337 
2338  // Default branch.
2339  emit_br(cd, iptr->sx.s23.s3.lookupdefault.block);
2340  ALIGNCODENOP;
2341  break;
2342 
2343  case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
2344  case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
2345  case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
2346 
2347  // Generate architecture specific instructions.
2348  codegen_emit_instruction(jd, iptr);
2349  break;
2350 
2351  default:
2352  exceptions_throw_internalerror("Unknown ICMD %d during code generation",
2353  iptr->opc);
2354  return false;
2355 
2356  } // the big switch
2357 
2358  } // for all instructions
2359 
2360 #if defined(ENABLE_SSA)
2361  // By edge splitting, in blocks with phi moves there can only
2362  // be a goto as last command, no other jump/branch command.
2363  if (ls != NULL) {
2364  if (!last_cmd_was_goto)
2365  codegen_emit_phi_moves(jd, bptr);
2366  }
2367 #endif
2368 
2369 #if defined(__I386__) || defined(__MIPS__) || defined(__S390__) || defined(__SPARC_64__) || defined(__X86_64__)
2370  // XXX Again!!!
2371  /* XXX require a lower number? */
2372  MCODECHECK(64); // I386, MIPS, Sparc64
2373  MCODECHECK(512); // S390, X86_64
2374 
2375  /* XXX We can remove that when we don't use UD2 anymore on i386
2376  and x86_64. */
2377 
2378  /* At the end of a basic block we may have to append some nops,
2379  because the patcher stub calling code might be longer than the
2380  actual instruction. So codepatching does not change the
2381  following block unintentionally. */
2382 
2383  if (cd->mcodeptr < cd->lastmcodeptr) {
2384  while (cd->mcodeptr < cd->lastmcodeptr) {
2385  M_NOP;
2386  }
2387  }
2388 #endif
2389 
2390  if (bptr->next && bptr->next->type == basicblock::TYPE_EXH)
2391  fixup_exc_handler_interface(jd, bptr->next);
2392 
2393  } // for all basic blocks
2394 
2395  // Generate traps.
2396  emit_patcher_traps(jd);
2397 
2398  // Everything's ok.
2399  return true;
2400 }
2401 
2402 
2403 /* codegen_emit_phi_moves ****************************************************
2404 
2405  Emits phi moves at the end of the basicblock.
2406 
2407 *******************************************************************************/
2408 
#if defined(ENABLE_SSA)
/* codegen_emit_phi_moves: emits, at the end of basicblock bptr, the
   copies that realize the block's SSA phi functions.

   jd ..... JIT data of the method currently being compiled
   bptr ... basic block whose pending phi moves are emitted

   Only compiled when SSA-based register allocation is enabled. */
void codegen_emit_phi_moves(jitdata *jd, basicblock *bptr)
{
	int lt_d,lt_s,i;     /* destination / source lifetime indices */
	lsradata *ls;
	codegendata *cd;
	varinfo *s, *d;
	instruction tmp_i;   /* synthetic instruction handed to emit_copy */

	cd = jd->cd;
	ls = jd->ls;

	/* Ensure enough code memory is available for the moves below. */
	MCODECHECK(512);

	/* Moves from phi functions with highest indices have to be */
	/* inserted first, since this is the order as is used for */
	/* conflict resolution */

	for(i = ls->num_phi_moves[bptr->nr] - 1; i >= 0 ; i--) {
		/* phi_moves[block][i] is a (destination, source) lifetime pair. */
		lt_d = ls->phi_moves[bptr->nr][i][0];
		lt_s = ls->phi_moves[bptr->nr][i][1];
#if defined(SSA_DEBUG_VERBOSE)
		if (compileverbose)
			printf("BB %3i Move %3i <- %3i ", bptr->nr, lt_d, lt_s);
#endif
		/* An UNUSED source lifetime means there is nothing to copy. */
		if (lt_s == jitdata::UNUSED) {
#if defined(SSA_DEBUG_VERBOSE)
			if (compileverbose)
				printf(" ... not processed \n");
#endif
			continue;
		}

		d = VAR(ls->lifetime[lt_d].v_index);
		s = VAR(ls->lifetime[lt_s].v_index);


		/* Type(-1) marks phi lifetimes that were joined by the
		   allocator; no move is required for them. */
		if (d->type == Type(-1)) {
#if defined(SSA_DEBUG_VERBOSE)
			if (compileverbose)
				printf("...returning - phi lifetimes where joined\n");
#endif
			continue;
		}

		if (s->type == Type(-1)) {
#if defined(SSA_DEBUG_VERBOSE)
			if (compileverbose)
				printf("...returning - phi lifetimes where joined\n");
#endif
			continue;
		}

		/* Build a dummy NOP instruction carrying the source and
		   destination variable indices so the generic copy emitter
		   can generate the actual move code. */
		tmp_i.opc = ICMD_NOP;
		tmp_i.s1.varindex = ls->lifetime[lt_s].v_index;
		tmp_i.dst.varindex = ls->lifetime[lt_d].v_index;
		emit_copy(jd, &tmp_i);

		/* Debug trace of the move kind (memory vs. register operands). */
#if defined(SSA_DEBUG_VERBOSE)
		if (compileverbose) {
			if (IS_INMEMORY(d->flags) && IS_INMEMORY(s->flags)) {
				/* mem -> mem */
				printf("M%3i <- M%3i",d->vv.regoff,s->vv.regoff);
			}
			else if (IS_INMEMORY(s->flags)) {
				/* mem -> reg */
				printf("R%3i <- M%3i",d->vv.regoff,s->vv.regoff);
			}
			else if (IS_INMEMORY(d->flags)) {
				/* reg -> mem */
				printf("M%3i <- R%3i",d->vv.regoff,s->vv.regoff);
			}
			else {
				/* reg -> reg */
				printf("R%3i <- R%3i",d->vv.regoff,s->vv.regoff);
			}
			printf("\n");
		}
#endif /* defined(SSA_DEBUG_VERBOSE) */
	}
}
#endif /* defined(ENABLE_SSA) */
2491 
2492 
2493 /*
2494  * These are local overrides for various environment variables in Emacs.
2495  * Please do not remove this and leave it at the end of the file, where
2496  * Emacs will automagically detect them.
2497  * ---------------------------------------------------------------------
2498  * Local variables:
2499  * mode: c++
2500  * indent-tabs-mode: t
2501  * c-basic-offset: 4
2502  * tab-width: 4
2503  * End:
2504  * vim:noexpandtab:sw=4:ts=4:
2505  */
void codegen_emit_instruction(jitdata *jd, instruction *iptr)
Generates machine code for one ICMD.
Definition: codegen.cpp:217
#define REG_SP
Definition: md-abi.hpp:53
#define CODEGENDATA_FLAG_ERROR
#define pv
Definition: md-asm.hpp:65
union varinfo::@19 vv
basicblock * target
s4 emit_load_s1(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:63
static void codegen_reset(jitdata *jd)
#define M_ALD(a, b, disp)
Definition: codegen.hpp:345
void localref_native_exit(methodinfo *m, uint64_t *return_regs)
#define STATISTICS(x)
Wrapper for statistics only code.
Definition: statistics.hpp:975
DumpList< Linenumber > * linenumbers
List of line numbers.
#define REPLACEMENT_POINT_FORGC_BUILTIN(cd, iptr)
Definition: replace.hpp:59
#define REPLACEMENT_POINTS_INIT(cd, jd)
Definition: replace.hpp:51
#define BRANCH_OPT_NONE
basicblock * basicblocks
Definition: jit.hpp:141
#define STACKFRAME_SYNC_NEEDS_TWO_SLOTS
Definition: arch.hpp:106
void codegen_close(void)
Definition: jit.hpp:126
void emit_monitor_exit(jitdata *jd, int32_t syncslot_offset)
Generates synchronization code to leave a monitor.
Definition: emit.cpp:565
#define M_LST(a, b, disp)
Definition: codegen.hpp:349
Definition: stack.hpp:46
paramdesc * params
Definition: descriptor.hpp:164
#define STAT_REGISTER_SUM_SUBGROUP(var, name, description, group)
Register a statistics summary group.
Definition: statistics.hpp:973
void linenumbertable_list_entry_add(codegendata *cd, int32_t linenumber)
#define JITDATA_HAS_FLAG_VERBOSECALL(jd)
Definition: jit.hpp:227
methoddesc * md
Definition: builtin.hpp:71
int *** phi_moves
Definition: lsra.hpp:197
#define M_ILD(a, b, disp)
Definition: codegen.hpp:347
methodinfo * code_get_methodinfo_for_pv(void *pv)
Definition: code.cpp:150
s4 dseg_add_unique_address(codegendata *cd, void *value)
Definition: dseg.cpp:525
#define M_IST(a, b, disp)
Definition: codegen.hpp:351
Linenumber table of a Java method.
void dynamic_super_rewrite(codegendata *cd)
#define ICONST(d, c)
Definition: codegen.hpp:53
#define DMREALLOC(ptr, type, num1, num2)
Definition: dumpmemory.hpp:372
uint8_t savedfltcount
Definition: code.hpp:91
#define PATCHER_CALL_SIZE
Definition: codegen.hpp:68
void emit_monitor_enter(jitdata *jd, int32_t syncslot_offset)
Generates synchronization code to enter a monitor.
Definition: emit.cpp:507
s4 * invars
Definition: jit.hpp:323
void log_message_method(const char *msg, methodinfo *m)
Definition: logging.cpp:275
basicblock * next
Definition: jit.hpp:337
#define REPLACEMENT_POINT_INLINE_START(cd, iptr)
Definition: replace.hpp:54
int32_t stackframesize
Definition: code.hpp:88
jumpref * next
#define M_AADD_IMM(a, b, c)
Definition: codegen.hpp:277
#define M_CAST_L2D(a, Fb)
Definition: codegen.hpp:763
static void fixup_exc_handler_interface(jitdata *jd, basicblock *bptr)
Fix up register locations in the case where control is transferred to an exception handler block via ...
s4 dseg_add_address(codegendata *cd, void *value)
Definition: dseg.cpp:542
codeinfo * code
Definition: jit.hpp:128
int opt_TraceJavaCalls
Definition: options.cpp:213
#define REG_FRESULT
Definition: md-abi.hpp:59
s4 codegen_reg_of_var(u2 opcode, varinfo *v, s4 tempregnum)
#define ALIGN_EVEN(a)
Definition: global.hpp:69
void emit_bcc(codegendata *cd, basicblock *target, s4 condition, u4 options)
void localref_table_remove()
Definition: localref.cpp:168
int v_index
Definition: lsra.hpp:97
bool opt_intrp
Definition: options.cpp:55
#define M_CMPEQ(a, b, c)
Definition: codegen.hpp:280
s4 mpc
Definition: jit.hpp:345
codegendata * cd
Definition: jit.hpp:129
int32_t varindex
Definition: instruction.hpp:63
#define JITDATA_HAS_FLAG_INSTRUMENT(jd)
Definition: jit.hpp:206
#define PATCHER_resolve_class
#define BRANCH_LABEL_10
Definition: emit-common.hpp:56
#define M_ACMP(a, b)
Definition: codegen.hpp:360
static void emit_lmove(codegendata *cd, int s, int d)
Generates a long-move from register s to d.
#define M_CAST_D2L(Fa, b)
Definition: codegen.hpp:780
typedef void(JNICALL *jvmtiEventSingleStep)(jvmtiEnv *jvmti_env
#define REG_ITMP1_XPTR
Definition: md-abi.hpp:50
struct lifetime * lifetime
Definition: lsra.hpp:168
methoddesc * md
Definition: references.hpp:75
static void usage()
Definition: VMjdwp.cpp:377
#define REPLACEMENT_POINT_BLOCK_START(cd, bptr)
Definition: replace.hpp:53
int savintreguse
Definition: reg.hpp:88
#define M_FST(a, b, disp)
Definition: codegen.hpp:357
void localref_native_enter(methodinfo *m, uint64_t *argument_regs, uint64_t *argument_stack)
Definition: localref.cpp:439
patchref_t * patcher_add_patch_ref(jitdata *jd, functionptr patcher, void *ref, s4 disp)
#define BBFLAG_REPLACEMENT
Definition: jit.hpp:285
void patcher_resolve(codeinfo *code)
Resolve all patchers in the current JIT run.
void codegen_init(void)
#define M_FLD(a, b, disp)
Definition: codegen.hpp:354
uint8_t u1
Definition: types.hpp:40
void trace_java_call_exit(methodinfo *m, uint64_t *return_regs)
Definition: trace.cpp:240
void codegen_increase(codegendata *cd)
Type type
Definition: reg.hpp:44
#define M_TEST(a)
Definition: codegen.hpp:359
#define PATCHER_breakpoint
s4 mcodelength
Definition: code.hpp:85
java_object_t * codegen_finish_native_call(u1 *sp, u1 *pv)
branchref * branchrefs
Definition: jit.hpp:335
#define CODEGENDATA_HAS_FLAG_LONGBRANCHES(cd)
#define CODEGENDATA_FLAG_LONGBRANCHES
JNIEnv jthread jobject jclass jlong size
Definition: jvmti.h:387
#define VAR(i)
Definition: jit.hpp:252
Definition: reg.hpp:43
void emit_recompute_pv(codegendata *cd)
Emit code to recompute the procedure vector.
Definition: emit.cpp:495
#define PATCHER_get_putstatic
static int code_is_leafmethod(codeinfo *code)
Definition: code.hpp:151
void md_cacheflush(u1 *addr, s4 nbytes)
Definition: md.c:49
#define ALIGNCODENOP
Definition: codegen.hpp:47
s4 regoff
Definition: reg.hpp:47
void(* functionptr)(void)
Definition: global.hpp:39
methodinfo * m
Definition: code.hpp:75
void emit_verbosecall_enter(jitdata *jd)
Definition: emit.cpp:625
#define INT_SAV_CNT
Definition: md-abi.hpp:73
java_handle_t * codegen_start_native_call(u1 *sp, u1 *pv)
int * num_phi_moves
Definition: lsra.hpp:196
int opt_TraceBuiltinCalls
Definition: options.cpp:206
#define CODEGENDATA_HAS_FLAG_ERROR(cd)
#define STACKFRAME_RA_BETWEEN_FRAMES
Definition: arch.hpp:101
#define LLNI_classinfo_wrap(classinfo)
Definition: llni.hpp:110
#define M_MOV_IMM(d, i)
Definition: codegen.hpp:448
s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
u1 * mcode
Definition: code.hpp:83
void trace_java_call_enter(methodinfo *m, uint64_t *arg_regs, uint64_t *stack)
Definition: trace.cpp:149
void codegen_resolve_branchrefs(codegendata *cd, basicblock *bptr)
#define MAX_ALIGN
Definition: global.hpp:102
#define PROFILE_CYCLE_STOP
Definition: profile.hpp:58
#define LA_SIZE
Definition: md-abi.hpp:93
void emit_patcher_traps(jitdata *jd)
dst_operand_t dst
#define REG_LRESULT
#define PA_SIZE
Definition: md-abi.hpp:147
bool compileverbose
Definition: options.cpp:82
uint16_t u2
Definition: types.hpp:43
constant_FMIref * fieldref
Definition: resolve.hpp:88
void localref_frame_pop_all(void)
Definition: localref.cpp:254
classinfo * clazz
Definition: method.hpp:80
This file contains the statistics framework.
jumpref * jumpreferences
uint64_t u8
Definition: types.hpp:49
s4 emit_load_s2(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:82
bool checksync
Definition: options.cpp:90
static codeinfo * code_get_codeinfo_for_pv(void *pv)
Definition: code.hpp:201
#define STAT_REGISTER_GROUP_VAR(type, var, init, name, description, group)
Register an statistics variable and add it to a group.
Definition: statistics.hpp:967
imm_union * value
Definition: field.hpp:67
static void * reallocate(void *src, size_t len1, size_t len2)
Stupid realloc implementation for dump memory.
Definition: dumpmemory.cpp:57
Type
Types used internally by JITTED code.
Definition: global.hpp:117
void exceptions_throw_internalerror(const char *message,...)
Definition: exceptions.cpp:805
void linenumbertable_list_entry_add_inline_start(codegendata *cd, instruction *iptr)
s4 indepth
Definition: jit.hpp:325
void dseg_finish(jitdata *jd)
Definition: dseg.cpp:46
void patcher_list_reset(codeinfo *code)
#define M_ASUB_IMM(a, b, c)
Definition: codegen.hpp:278
typedesc * fd
Definition: references.hpp:74
#define exceptions_get_and_clear_exception
Definition: md-asm.hpp:98
#define REG_FTMP2
Definition: md-abi.hpp:66
#define STAT_REGISTER_GROUP(var, name, description)
Register a statistics group.
Definition: statistics.hpp:971
#define REPLACEMENT_POINT_FORGC_BUILTIN_RETURN(cd, iptr)
Definition: replace.hpp:60
MIIterator i
s4 emit_load(jitdata *jd, instruction *iptr, varinfo *src, s4 tempreg)
Definition: emit.cpp:66
typedesc returntype
Definition: descriptor.hpp:166
basicblock ** basicblocks
Definition: lsra.hpp:189
int32_t s4
Definition: types.hpp:45
void stacktrace_stackframeinfo_add(stackframeinfo_t *sfi, void *pv, void *sp, void *ra, void *xpc)
Definition: stacktrace.cpp:84
s4 dseg_add_unique_s4(codegendata *cd, s4 value)
Definition: dseg.cpp:229
void emit_store(jitdata *jd, instruction *iptr, varinfo *dst, s4 d)
Definition: emit.cpp:113
DumpList< branch_label_ref_t * > * brancheslabel
classinfo * clazz
Definition: field.hpp:55
registerdata * rd
Definition: jit.hpp:130
void codegen_branch_label_add(codegendata *cd, s4 label, s4 condition, s4 reg, u4 options)
#define M_AST(a, b, disp)
Definition: codegen.hpp:350
struct localref_table localref_table
Definition: localref.hpp:31
static void abort()
Definition: os.hpp:196
#define MCODEINITSIZE
int type
Definition: lsra.hpp:98
int savfltreguse
Definition: reg.hpp:91
#define REPLACEMENT_POINT_INLINE_BODY(cd, iptr)
Definition: replace.hpp:55
int32_t varindex
#define M_CAST_I2F(a, Fb)
Definition: codegen.hpp:755
#define LLNI_UNWRAP(hdl)
Definition: llni.hpp:52
#define REG_LTMP23
bool inmemory
Definition: descriptor.hpp:151
#define REPLACEMENT_POINT_RETURN(cd, iptr)
Definition: replace.hpp:56
#define REPLACEMENT_POINT_INVOKE(cd, iptr)
Definition: replace.hpp:57
#define REG_ITMP2
Definition: md-abi.hpp:47
void emit_icmp_imm(codegendata *cd, int reg, int32_t value)
Emits code comparing a single register.
Definition: emit.cpp:243
MIIterator e
void emit_store_dst(jitdata *jd, instruction *iptr, s4 d)
functionptr emit_fastpath
Definition: builtin.hpp:72
void emit_copy(jitdata *jd, instruction *iptr)
Definition: emit.cpp:153
s1_operand_t s1
uint32_t u4
Definition: types.hpp:46
static void generate()
Definition: stubs.cpp:262
#define EDX
Definition: arch.hpp:53
void codegen_setup(jitdata *jd)
#define FLT_SAV_CNT
Definition: md-abi.hpp:80
void exceptiontable_create(jitdata *jd)
#define M_ICMP(a, b)
Definition: codegen.hpp:361
constant_FMIref * methodref
Definition: resolve.hpp:97
methoddesc * parseddesc
Definition: method.hpp:78
#define sp
Definition: md-asm.hpp:81
void emit_icmpeq_imm(codegendata *cd, int reg, int32_t value, int d)
Emits code comparing one integer register to an immediate value.
Definition: emit.cpp:253
int memuse
Definition: reg.hpp:84
#define REG_FTMP1
Definition: md-abi.hpp:65
#define VAROP(v)
Definition: jit.hpp:251
#define ALIGN_ODD(a)
Definition: global.hpp:70
Definition: builtin.hpp:60
int32_t synchronizedoffset
Definition: code.hpp:89
methodinfo * m
Definition: jit.hpp:127
void stacktrace_stackframeinfo_remove(stackframeinfo_t *sfi)
Definition: stacktrace.cpp:204
static bool IS_INMEMORY(s4 flags)
Definition: stack.hpp:51
s4 type
Definition: field.hpp:60
void codegen_emit_epilog(jitdata *jd)
Generates machine code for the method epilog.
Definition: codegen.cpp:174
LinenumberTable * linenumbertable
Definition: code.hpp:94
s4 flags
Definition: reg.hpp:45
#define REPLACEMENT_POINT_INVOKE_RETURN(cd, iptr)
Definition: replace.hpp:58
#define STAT_DECLARE_GROUP(var)
Declare an external group (or subgroup).
Definition: statistics.hpp:970
void codegen_add_branch_ref(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
#define M_ALD_DSEG(a, disp)
Definition: codegen.hpp:346
static void * allocate(size_t size)
Definition: dumpmemory.hpp:251
#define MEMORY_ALIGN(pos, size)
Definition: memory.hpp:37
s4 nr
Definition: jit.hpp:312
int8_t s1
Definition: types.hpp:39
#define LCONST(d, c)
Definition: codegen.hpp:54
void methodtree_insert(void *startpc, void *endpc)
Definition: methodtree.cpp:148
static bool class_is_or_almost_initialized(classinfo *c)
Definition: class.hpp:433
static int code_is_synchronized(codeinfo *code)
Definition: code.hpp:173
int16_t s2
Definition: types.hpp:42
#define PATCHER_initialize_class
#define M_DST(a, b, disp)
Definition: codegen.hpp:356
#define INSTRUCTION_IS_UNRESOLVED(iptr)
#define M_CAST_F2I(Fa, b)
Definition: codegen.hpp:772
void codegen_emit_prolog(jitdata *jd)
Generates machine code for the method prolog.
Definition: codegen.cpp:73
#define M_LLD(a, b, disp)
Definition: codegen.hpp:344
const parseddesc_t parseddesc
Definition: references.hpp:105
static void emit_fmove(codegendata *cd, int s, int d)
Generates a float-move from register s to d.
BeginInst * target
#define MCODECHECK(icnt)
Definition: codegen.hpp:40
branchref * next
functionptr fp
Definition: builtin.hpp:63
bool codegen_generate(jitdata *jd)
#define REG_LTMP12
#define MCOPY(dest, src, type, num)
Definition: memory.hpp:103
void emit_label(codegendata *cd, s4 label)
void linenumbertable_list_entry_add_inline_end(codegendata *cd, instruction *iptr)
s4 flags
Definition: method.hpp:70
void emit_bccz(codegendata *cd, basicblock *target, s4 condition, s4 reg, u4 options)
uintptr_t ptrint
Definition: types.hpp:54
#define M_NOP
Definition: codegen.hpp:338
static void emit_dmove(codegendata *cd, int s, int d)
Generates an double-move from register s to d.
static void emit_imove(codegendata *cd, int s, int d)
Generates an integer-move from register s to d.
void localref_table_add(localref_table *lrt)
Definition: localref.cpp:142
#define M_DLD(a, b, disp)
Definition: codegen.hpp:353
#define M_CALL(a)
Definition: codegen.hpp:290
#define STAT_REGISTER_VAR(type, var, init, name, description)
Register an external statistics variable.
Definition: statistics.hpp:966
void emit_verbosecall_exit(jitdata *jd)
Definition: emit.cpp:766
uint32_t regoff
Definition: descriptor.hpp:153
void emit_br(codegendata *cd, basicblock *target)
dsegentry * dseg
void emit_trap_countdown(codegendata *cd, s4 *counter)
Definition: emit.cpp:565
const char const void jint length
Definition: jvmti.h:352
bool opt_AlwaysEmitLongBranches
Definition: options.cpp:163
#define PATCHER_NOPS
Definition: codegen.hpp:70
#define OFFSET(s, el)
Definition: memory.hpp:90
#define printf(...)
Definition: ssa2.cpp:40
void codegen_emit_patchable_barrier(instruction *iptr, codegendata *cd, patchref_t *pr, fieldinfo *fi)
Generates a memory barrier to be used after volatile writes.
Definition: codegen.cpp:197
uint8_t savedintcount
Definition: code.hpp:90
#define REG_RESULT
Definition: md-abi.hpp:33
#define LA_SIZE_IN_POINTERS
Definition: md-abi.hpp:95
bool codegen_emit(jitdata *jd)
Generates machine code.
void emit_nullpointer_check(codegendata *cd, instruction *iptr, s4 reg)
Definition: emit.cpp:431
static int32_t md_stacktrace_get_framesize(codeinfo *code)
Returns the size (in bytes) of the current stackframe, specified by the passed codeinfo structure...
Definition: md.hpp:56
#define REG_ITMP1
Definition: md-abi.hpp:46
u1 * entrypoint
Definition: code.hpp:84
#define PROFILE_CYCLE_START
Definition: profile.hpp:57
void codegen_finish(jitdata *jd)
#define NCODEINITSIZE
#define CNEW(type, num)
Definition: codememory.hpp:34