CACAO
codegen.cpp
Go to the documentation of this file.
1 /* src/vm/jit/aarch64/codegen.cpp - machine code generator for Aarch64
2 
3  Copyright (C) 1996-2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 
6  This file is part of CACAO.
7 
8  This program is free software; you can redistribute it and/or
9  modify it under the terms of the GNU General Public License as
10  published by the Free Software Foundation; either version 2, or (at
11  your option) any later version.
12 
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with this program; if not, write to the Free Software
20  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21  02110-1301, USA.
22 
23 */
24 
25 
26 #include "config.h"
27 
28 #include <cassert>
29 #include <cstdio>
30 #include <cmath>
31 
32 #include "vm/types.hpp"
33 
34 #include "md.hpp"
35 #include "md-abi.hpp"
36 
37 #include "vm/jit/aarch64/arch.hpp"
39 
40 #include "mm/memory.hpp"
41 
42 #include "native/localref.hpp"
43 #include "native/native.hpp"
44 
45 #include "threads/lock.hpp"
46 
47 #include "vm/descriptor.hpp"
48 #include "vm/exceptions.hpp"
49 #include "vm/field.hpp"
50 #include "vm/global.hpp"
51 #include "vm/loader.hpp"
52 #include "vm/options.hpp"
53 #include "vm/vm.hpp"
54 
55 #include "vm/jit/abi.hpp"
56 #include "vm/jit/asmpart.hpp"
57 #include "vm/jit/builtin.hpp"
59 #include "vm/jit/dseg.hpp"
60 #include "vm/jit/emit-common.hpp"
61 #include "vm/jit/jit.hpp"
63 #include "vm/jit/parse.hpp"
65 #include "vm/jit/reg.hpp"
66 #include "vm/jit/stacktrace.hpp"
67 #include "vm/jit/trap.hpp"
68 
69 
70 /**
71  * Generates machine code for the method prolog.
72  */
74 {
// Emits the method prolog: allocates a 16-byte-aligned stack frame,
// saves RA (unless leaf) and the used callee-saved integer/float
// registers, then moves each Java argument from its calling-convention
// location (register or caller stack area) to the location the register
// allocator assigned to the corresponding local variable.
75  varinfo* var;
76  methoddesc* md;
77  int32_t s1;
78  int32_t p, t, l;
79  int32_t varindex;
80  int i;
81 
82  // Get required compiler data.
83  methodinfo* m = jd->m;
84  codeinfo* code = jd->code;
85  codegendata* cd = jd->cd;
86  registerdata* rd = jd->rd;
87  AsmEmitter asme(cd);
88 
89  /* create stack frame (if necessary) */
90  /* NOTE: SP on aarch64 has to be quad word aligned */
// stackframesize is in 8-byte slots; offset % 16 is therefore 0 or 8,
// so the addition rounds the byte offset up to a multiple of 16.
91  if (cd->stackframesize) {
92  int offset = cd->stackframesize * 8;
93  offset += (offset % 16);
94  asme.lda(REG_SP, REG_SP, -offset);
95  }
96 
97  /* save return address and used callee saved registers */
98 
// p walks downward from the top slot of the frame; each save
// pre-decrements p and stores at SP + p*8.
99  p = cd->stackframesize;
100  if (!code_is_leafmethod(code)) {
101  p--; asme.lst(REG_RA, REG_SP, p * 8);
102  }
103  for (i = INT_SAV_CNT - 1; i >= rd->savintreguse; i--) {
104  p--; asme.lst(rd->savintregs[i], REG_SP, p * 8);
105  }
106  for (i = FLT_SAV_CNT - 1; i >= rd->savfltreguse; i--) {
107  p--; asme.dst(rd->savfltregs[i], REG_SP, p * 8);
108  }
109 
110  /* take arguments out of register or stack frame */
111 
112  md = m->parseddesc;
113 
// p indexes the parameters, l the Java local-variable slots
// (2-word types occupy two slots, hence the extra l++ below).
114  for (p = 0, l = 0; p < md->paramcount; p++) {
115  t = md->paramtypes[p].type;
116 
// local_map is indexed by (slot, type); 5 = number of type kinds.
117  varindex = jd->local_map[l * 5 + t];
118 
119  l++;
120  if (IS_2_WORD_TYPE(t)) /* increment local counter for 2 word types */
121  l++;
122 
123  if (varindex == jitdata::UNUSED)
124  continue;
125 
126  var = VAR(varindex);
127 
// regoff: argument register number, or byte offset in the
// caller's outgoing-argument area when passed in memory.
128  s1 = md->params[p].regoff;
129 
130  if (IS_INT_LNG_TYPE(t)) { /* integer args */
131  if (!md->params[p].inmemory) { /* register arguments */
// Register arg -> either copy to the allocated register or
// spill it to the local's stack slot.
132  if (!IS_INMEMORY(var->flags))
133  asme.mov(var->vv.regoff, s1);
134  else
135  asme.lst(s1, REG_SP, var->vv.regoff);
136  }
137  else { /* stack arguments */
// Stack arg lives above our frame: offset is framesize*8 + s1.
// If the local is also in memory, just record that address
// instead of copying (the argument slot is reused in place).
138  if (!IS_INMEMORY(var->flags))
139  asme.lld(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1);
140  else
141  var->vv.regoff = cd->stackframesize * 8 + s1;
142  }
143  }
144  else { /* floating args */
145  if (!md->params[p].inmemory) { /* register arguments */
// 2-word = double (dmov/dst), otherwise float (fmov/fst).
146  if (!IS_INMEMORY(var->flags))
147  if (IS_2_WORD_TYPE(t))
148  asme.dmov(var->vv.regoff, s1);
149  else
150  asme.fmov(var->vv.regoff, s1);
151  else
152  if (IS_2_WORD_TYPE(t))
153  asme.dst(s1, REG_SP, var->vv.regoff);
154  else
155  asme.fst(s1, REG_SP, var->vv.regoff);
156  }
157  else { /* stack arguments */
// NOTE(review): this branch tests (var->flags & INMEMORY)
// directly while the others use IS_INMEMORY() — presumably
// equivalent; confirm against the macro definition.
158  if (!(var->flags & INMEMORY))
159  if (IS_2_WORD_TYPE(t))
160  asme.dld(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1);
161  else
162  asme.fld(var->vv.regoff, REG_SP, cd->stackframesize * 8 + s1);
163  else
164  var->vv.regoff = cd->stackframesize * 8 + s1;
165  }
166  }
167  }
168 }
169 
170 
171 /**
172  * Generates machine code for the method epilog.
173  */
175 {
// Emits the method epilog: mirror image of the prolog — reloads RA
// (unless leaf) and the callee-saved registers from the same downward-
// walked slots, releases the 16-byte-aligned frame, and returns.
176  int32_t p;
177  int i;
178 
179  // Get required compiler data.
180  codeinfo* code = jd->code;
181  codegendata* cd = jd->cd;
182  registerdata* rd = jd->rd;
183  AsmEmitter asme(cd);
184 
// Same slot-walking scheme as the prolog: start at the top of the
// frame and pre-decrement before each load, so the restore order
// matches the save order exactly.
185  p = cd->stackframesize;
186 
187  /* restore return address */
188 
189  if (!code_is_leafmethod(code)) {
190  p--; asme.lld(REG_RA, REG_SP, p * 8);
191  }
192 
193  /* restore saved registers */
194 
195  for (i = INT_SAV_CNT - 1; i >= rd->savintreguse; i--) {
196  p--; asme.lld(rd->savintregs[i], REG_SP, p * 8);
197  }
198  for (i = FLT_SAV_CNT - 1; i >= rd->savfltreguse; i--) {
199  p--; asme.dld(rd->savfltregs[i], REG_SP, p * 8);
200  }
201 
202  /* deallocate stack */
203 
// Must recompute the identical rounded-up offset used by the prolog
// (offset % 16 is 0 or 8) so SP is restored exactly.
204  if (cd->stackframesize) {
205  int offset = cd->stackframesize * 8;
206  offset += (offset % 16);
207  asme.lda(REG_SP, REG_SP, offset);
208  }
209 
210  asme.ret();
211 }
212 
213 
214 /**
215  * Generates machine code for one ICMD.
216  */
218 {
219  varinfo* var;
220  builtintable_entry* bte;
221  methodinfo* lm; // Local methodinfo for ICMD_INVOKE*.
222  unresolved_method* um;
223  fieldinfo* fi;
224  unresolved_field* uf;
225  int32_t fieldtype;
226  int32_t s1, s2, s3, d = 0;
227  int32_t disp;
228 
229  // Get required compiler data.
230  codegendata* cd = jd->cd;
231 
232  AsmEmitter asme(cd);
233 
234  switch (iptr->opc) {
235 
236  /* constant operations ************************************************/
237 
238  case ICMD_ACONST: /* ... ==> ..., constant */
239  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
240 
241  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
242  constant_classref *cr = iptr->sx.val.c.ref;
243 
244  disp = dseg_add_unique_address(cd, cr);
245 
246  /* XXX Only add the patcher, if this position needs to
247  be patched. If there was a previous position which
248  resolved the same class, the returned displacement
249  of dseg_add_address is ok to use. */
250 
252  cr, disp);
253 
254  asme.ald(d, REG_PV, disp);
255  }
256  else {
257  if (iptr->sx.val.anyptr == NULL) {
258  asme.lconst(d, 0);
259  }
260  else {
261  disp = dseg_add_address(cd, iptr->sx.val.anyptr);
262  asme.ald(d, REG_PV, disp);
263  }
264  }
265  emit_store_dst(jd, iptr, d);
266  break;
267 
268  case ICMD_FCONST: /* ... ==> ..., constant */
269 
270  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
271  disp = dseg_add_float(cd, iptr->sx.val.f);
272  asme.fld(d, REG_PV, disp);
273  emit_store_dst(jd, iptr, d);
274  break;
275 
276  case ICMD_DCONST: /* ... ==> ..., constant */
277 
278  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
279  disp = dseg_add_double(cd, iptr->sx.val.d);
280  asme.dld(d, REG_PV, disp);
281  emit_store_dst(jd, iptr, d);
282  break;
283 
284  /* integer operations *************************************************/
285 
286  case ICMD_INEG: /* ..., value ==> ..., - value */
287 
288  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
289  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
290  asme.isub(d, REG_ZERO, s1);
291  emit_store_dst(jd, iptr, d);
292  break;
293 
294  case ICMD_LNEG: /* ..., value ==> ..., - value */
295 
296  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
297  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
298  asme.lsub(d, REG_ZERO, s1);
299  emit_store_dst(jd, iptr, d);
300  break;
301 
302  case ICMD_I2L: /* ..., value ==> ..., value */
303 
304  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
305  d = codegen_reg_of_dst(jd, iptr, REG_ITMP1);
306  asme.sxtw(d, s1);
307  emit_store_dst(jd, iptr, d);
308  break;
309 
310  case ICMD_L2I: /* ..., value ==> ..., value */
311 
312  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
313  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
314  asme.ubfx(d, s1);
315  emit_store_dst(jd, iptr, d);
316  break;
317 
318  case ICMD_INT2BYTE: /* ..., value ==> ..., value */
319 
320  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
321  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
322  asme.sxtb(d, s1);
323  emit_store_dst(jd, iptr, d);
324  break;
325 
326  case ICMD_INT2CHAR: /* ..., value ==> ..., value */
327 
328  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
329  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
330  asme.uxth(d, s1);
331  emit_store_dst(jd, iptr, d);
332  break;
333 
334  case ICMD_INT2SHORT: /* ..., value ==> ..., value */
335 
336  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
337  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
338  asme.sxth(d, s1);
339  emit_store_dst(jd, iptr, d);
340  break;
341 
342  case ICMD_IADD: /* ..., val1, val2 ==> ..., val1 + val2 */
343 
344  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
345  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
346  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
347  asme.iadd(d, s1, s2);
348  emit_store_dst(jd, iptr, d);
349  break;
350 
351  case ICMD_LADD: /* ..., val1, val2 ==> ..., val1 + val2 */
352 
353  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
354  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
355  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
356  asme.ladd(d, s1, s2);
357  emit_store_dst(jd, iptr, d);
358  break;
359 
360  case ICMD_IINC:
361  case ICMD_IADDCONST: /* ..., value ==> ..., value + constant */
362  /* sx.val.i = constant */
363 
364  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
365  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
366 
367  if ((iptr->sx.val.i >= 0) && (iptr->sx.val.i <= 0xffffff)) {
368  asme.iadd_imm(d, s1, iptr->sx.val.i);
369  } else if ((-iptr->sx.val.i >= 0) && (-iptr->sx.val.i <= 0xffffff)) {
370  asme.isub_imm(d, s1, -iptr->sx.val.i);
371  } else {
372  asme.iconst(REG_ITMP2, iptr->sx.val.i);
373  asme.iadd(d, s1, REG_ITMP2);
374  }
375 
376  emit_store_dst(jd, iptr, d);
377  break;
378 
379  case ICMD_LADDCONST: /* ..., value ==> ..., value + constant */
380  /* sx.val.l = constant */
381 
382  // assert(iptr->sx.val.l >= 0); // TODO: check why this was here
383 
384  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
385  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
386  if ((iptr->sx.val.l >= 0) && (iptr->sx.val.l <= 0xffffff)) {
387  asme.ladd_imm(d, s1, iptr->sx.val.l);
388  } else if ((-iptr->sx.val.l >= 0) && (-iptr->sx.val.l <= 0xffffff)) {
389  asme.lsub_imm(d, s1, -iptr->sx.val.l);
390  } else {
391  asme.lconst(REG_ITMP2, iptr->sx.val.l);
392  asme.ladd(d, s1, REG_ITMP2);
393  }
394  emit_store_dst(jd, iptr, d);
395  break;
396 
397  case ICMD_ISUB: /* ..., val1, val2 ==> ..., val1 - val2 */
398 
399  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
400  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
401  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
402  asme.isub(d, s1, s2);
403  emit_store_dst(jd, iptr, d);
404  break;
405 
406  case ICMD_LSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
407 
408  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
409  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
410  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
411  asme.lsub(d, s1, s2);
412  emit_store_dst(jd, iptr, d);
413  break;
414 
415  case ICMD_ISUBCONST: /* ..., value ==> ..., value - constant */
416  /* sx.val.i = constant */
417 
418  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
419  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
420 
421  if ((iptr->sx.val.i >= 0) && (iptr->sx.val.i <= 0xffffff)) {
422  asme.isub_imm(d, s1, iptr->sx.val.i);
423  } else if ((-iptr->sx.val.i >= 0) && (-iptr->sx.val.i <= 0xffffff)) {
424  asme.iadd_imm(d, s1, -iptr->sx.val.i);
425  } else {
426  asme.iconst(REG_ITMP2, iptr->sx.val.i);
427  asme.isub(d, s1, REG_ITMP2);
428  }
429 
430  emit_store_dst(jd, iptr, d);
431  break;
432 
433  case ICMD_LSUBCONST: /* ..., value ==> ..., value - constant */
434  /* sx.val.l = constant */
435 
436  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
437  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
438 
439  if ((iptr->sx.val.l >= 0) && (iptr->sx.val.l <= 0xffffff)) {
440  asme.lsub_imm(d, s1, iptr->sx.val.l);
441  } else if ((-iptr->sx.val.l >= 0) && (-iptr->sx.val.l <= 0xffffff)) {
442  asme.ladd_imm(d, s1, -iptr->sx.val.l);
443  } else {
444  asme.lconst(REG_ITMP2, iptr->sx.val.l);
445  asme.lsub(d, s1, REG_ITMP2);
446  }
447 
448  emit_store_dst(jd, iptr, d);
449  break;
450 
451  case ICMD_IMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
452 
453  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
454  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
455  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
456  asme.imul(d, s1, s2);
457  asme.ubfx(d, d); // cut back to int
458  emit_store_dst(jd, iptr, d);
459  break;
460 
461  case ICMD_LMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
462 
463  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
464  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
465  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
466  asme.lmul(d, s1, s2);
467  emit_store_dst(jd, iptr, d);
468  break;
469 
470  case ICMD_IMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
471  /* sx.val.i = constant */
472 
473  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
474  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
475  asme.ilsl_imm(d, s1, iptr->sx.val.i);
476  asme.ubfx(d, d); // cut back to int
477  emit_store_dst(jd, iptr, d);
478  break;
479 
480  case ICMD_LMULPOW2: /* ..., value ==> ..., value * (2 ^ constant) */
481  /* sx.val.i = constant */
482 
483  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
484  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
485  asme.llsl_imm(d, s1, iptr->sx.val.i);
486  emit_store_dst(jd, iptr, d);
487  break;
488 
489  case ICMD_IREM: /* ..., val1, val2 ==> ..., val1 % val2 */
490 
491  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
492  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
493  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
494  emit_arithmetic_check(cd, iptr, s2);
495 
496  asme.idiv(REG_ITMP3, s1, s2);
497  asme.imsub(d, REG_ITMP3, s2, s1);
498 
499  emit_store_dst(jd, iptr, d);
500  break;
501 
502  case ICMD_LREM: /* ..., val1, val2 ==> ..., val1 % val2 */
503 
504  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
505  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
506  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
507  emit_arithmetic_check(cd, iptr, s2);
508 
509  asme.ldiv(REG_ITMP3, s1, s2);
510  asme.lmsub(d, REG_ITMP3, s2, s1);
511 
512  emit_store_dst(jd, iptr, d);
513  break;
514 
515  case ICMD_IDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
516 
517  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
518  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
519  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
520  emit_arithmetic_check(cd, iptr, s2);
521 
522  asme.idiv(d, s1, s2);
523 
524  emit_store_dst(jd, iptr, d);
525  break;
526 
527  case ICMD_LDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
528 
529  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
530  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
531  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
532  emit_arithmetic_check(cd, iptr, s2);
533 
534  asme.ldiv(d, s1, s2);
535 
536  emit_store_dst(jd, iptr, d);
537  break;
538 
539  // TODO: implement this using shift operators
540  case ICMD_IDIVPOW2: /* ..., value ==> ..., value / (2 ^ constant) */
541  /* sx.val.i = constant */
542  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
543  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
544 
545  asme.iconst(REG_ITMP3, pow(2, iptr->sx.val.i));
546  asme.idiv(d, s1, REG_ITMP3);
547 
548  emit_store_dst(jd, iptr, d);
549  break;
550 
551  case ICMD_IREMPOW2: /* ..., value ==> ..., value % constant */
552  /* sx.val.i = constant [ (2 ^ x) - 1 ] */
553  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
554  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
555 
556  // Use absolute value
557  asme.icmp_imm(s1, 0);
558  asme.icsneg(d, s1, s1, COND_PL);
559 
560  asme.iconst(REG_ITMP3, iptr->sx.val.i);
561  asme.iand(d, d, REG_ITMP3);
562 
563  // Negate the result again if the value was negative
564  asme.icsneg(d, d, d, COND_PL);
565 
566  emit_store_dst(jd, iptr, d);
567  break;
568 
569  case ICMD_ISHL: /* ..., val1, val2 ==> ..., val1 << val2 */
570 
571  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
572  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
573  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
574 
575  asme.iconst(REG_ITMP3, 0x1f);
576  asme.iand(REG_ITMP3, s2, REG_ITMP3);
577  asme.ilsl(d, s1, REG_ITMP3);
578  asme.ubfx(d, d); // cut back to int
579 
580  emit_store_dst(jd, iptr, d);
581  break;
582 
583  case ICMD_LSHL: /* ..., val1, val2 ==> ..., val1 << val2 */
584 
585  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
586  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
587  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
588 
589  asme.lconst(REG_ITMP3, 0x3f);
590  asme.land(REG_ITMP3, s2, REG_ITMP3);
591  asme.llsl(d, s1, REG_ITMP3);
592 
593  emit_store_dst(jd, iptr, d);
594  break;
595 
596  case ICMD_ISHLCONST: /* ..., value ==> ..., value << constant */
597  /* sx.val.i = constant */
598 
599  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
600  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
601 
602  asme.ilsl_imm(d, s1, iptr->sx.val.i & 0x1f); // shift amout is between 0 and 31 incl
603  asme.ubfx(d, d); // cut back to int
604 
605  emit_store_dst(jd, iptr, d);
606  break;
607 
608  case ICMD_LSHLCONST: /* ..., value ==> ..., value << constant */
609  /* sx.val.i = constant */
610 
611  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
612  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
613 
614  asme.llsl_imm(d, s1, iptr->sx.val.i & 0x3f);
615 
616  emit_store_dst(jd, iptr, d);
617  break;
618 
619  case ICMD_ISHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
620 
621  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
622  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
623  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
624 
625  asme.iconst(REG_ITMP3, 0x1f);
626  asme.iand(REG_ITMP3, s2, REG_ITMP3);
627  asme.iasr(d, s1, REG_ITMP3);
628 
629  emit_store_dst(jd, iptr, d);
630  break;
631 
632  case ICMD_LSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
633 
634  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
635  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
636  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
637 
638  asme.lconst(REG_ITMP3, 0x3f);
639  asme.land(REG_ITMP3, s2, REG_ITMP3);
640  asme.lasr(d, s1, REG_ITMP3);
641 
642  emit_store_dst(jd, iptr, d);
643  break;
644 
645  case ICMD_ISHRCONST: /* ..., value ==> ..., value >> constant */
646  /* sx.val.i = constant */
647 
648  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
649  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
650 
651  asme.iasr_imm(d, s1, iptr->sx.val.i & 0x1f);
652 
653  emit_store_dst(jd, iptr, d);
654  break;
655 
656  case ICMD_LSHRCONST: /* ..., value ==> ..., value >> constant */
657  /* sx.val.i = constant */
658 
659  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
660  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
661 
662  asme.lasr_imm(d, s1, iptr->sx.val.i & 0x3f); // TODO: is the constant really in sx.val.i?
663 
664  emit_store_dst(jd, iptr, d);
665  break;
666 
667  case ICMD_IUSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
668 
669  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
670  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
671  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
672 
673  asme.iconst(REG_ITMP3, 0x1f);
674  asme.iand(REG_ITMP3, s2, REG_ITMP3);
675  asme.ilsr(d, s1, REG_ITMP3);
676 
677  emit_store_dst(jd, iptr, d);
678  break;
679 
680  case ICMD_LUSHR: /* ..., val1, val2 ==> ..., val1 >> val2 */
681 
682  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
683  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
684  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
685 
686  asme.lconst(REG_ITMP3, 0x3f);
687  asme.land(REG_ITMP3, s2, REG_ITMP3);
688  asme.llsr(d, s1, REG_ITMP3);
689 
690  emit_store_dst(jd, iptr, d);
691  break;
692 
693  case ICMD_IUSHRCONST: /* ..., value ==> ..., value >> constant */
694  /* sx.val.i = constant */
695 
696  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
697  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
698 
699  asme.ilsr_imm(d, s1, iptr->sx.val.i & 0x1f);
700 
701  emit_store_dst(jd, iptr, d);
702  break;
703 
704  case ICMD_LUSHRCONST: /* ..., value ==> ..., value >> constant */
705  /* sx.val.i = constant */
706 
707  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
708  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
709 
710  asme.llsr_imm(d, s1, iptr->sx.val.i & 0x3f);
711 
712  emit_store_dst(jd, iptr, d);
713  break;
714 
715  case ICMD_IAND: /* ..., val1, val2 ==> ..., val1 & val2 */
716 
717  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
718  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
719  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
720 
721  asme.iand(d, s1, s2);
722 
723  emit_store_dst(jd, iptr, d);
724  break;
725 
726  case ICMD_LAND: /* ..., val1, val2 ==> ..., val1 & val2 */
727 
728  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
729  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
730  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
731 
732  asme.land(d, s1, s2);
733 
734  emit_store_dst(jd, iptr, d);
735  break;
736 
737  // TODO: implement this using the immediate variant
738  case ICMD_IANDCONST: /* ..., value ==> ..., value & constant */
739  /* sx.val.i = constant */
740 
741  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
742  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
743 
744  asme.iconst(REG_ITMP3, iptr->sx.val.i);
745  asme.iand(d, s1, REG_ITMP3);
746 
747  emit_store_dst(jd, iptr, d);
748  break;
749 
750  // TODO: implement this using the immediate variant
751  case ICMD_LANDCONST: /* ..., value ==> ..., value & constant */
752  /* sx.val.l = constant */
753 
754  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
755  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
756 
757  asme.lconst(REG_ITMP3, iptr->sx.val.l);
758  asme.land(d, s1, REG_ITMP3);
759 
760  emit_store_dst(jd, iptr, d);
761  break;
762 
763  case ICMD_IOR: /* ..., val1, val2 ==> ..., val1 | val2 */
764 
765  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
766  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
767  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
768 
769  asme.ior(d, s1, s2);
770 
771  emit_store_dst(jd, iptr, d);
772  break;
773 
774  case ICMD_LOR: /* ..., val1, val2 ==> ..., val1 | val2 */
775 
776  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
777  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
778  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
779 
780  asme.lor(d, s1, s2);
781 
782  emit_store_dst(jd, iptr, d);
783  break;
784 
785  // TODO: implement this using the immediate variant
786  case ICMD_IORCONST: /* ..., value ==> ..., value | constant */
787  /* sx.val.i = constant */
788 
789  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
790  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
791 
792  asme.iconst(REG_ITMP2, iptr->sx.val.i);
793  asme.ior(d, s1, REG_ITMP2);
794 
795  emit_store_dst(jd, iptr, d);
796  break;
797 
798  // TODO: implement this using the immediate variant
799  case ICMD_LORCONST: /* ..., value ==> ..., value | constant */
800  /* sx.val.l = constant */
801 
802  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
803  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
804 
805  asme.lconst(REG_ITMP2, iptr->sx.val.l);
806  asme.lor(d, s1, REG_ITMP2);
807 
808  emit_store_dst(jd, iptr, d);
809  break;
810 
811  case ICMD_IXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
812 
813  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
814  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
815  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
816 
817  asme.ixor(d, s1, s2);
818 
819  emit_store_dst(jd, iptr, d);
820  break;
821 
822  case ICMD_LXOR: /* ..., val1, val2 ==> ..., val1 ^ val2 */
823 
824  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
825  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
826  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
827 
828  asme.lxor(d, s1, s2);
829 
830  emit_store_dst(jd, iptr, d);
831  break;
832 
833  // TODO: implement this using the immediate variant
834  case ICMD_IXORCONST: /* ..., value ==> ..., value ^ constant */
835  /* sx.val.i = constant */
836 
837  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
838  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
839 
840  asme.iconst(REG_ITMP2, iptr->sx.val.i);
841  asme.ixor(d, s1, REG_ITMP2);
842 
843  emit_store_dst(jd, iptr, d);
844  break;
845 
846  // TODO: implement this using the immediate variant
847  case ICMD_LXORCONST: /* ..., value ==> ..., value ^ constant */
848  /* sx.val.l = constant */
849 
850  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
851  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
852 
853  asme.lconst(REG_ITMP2, iptr->sx.val.l);
854  asme.lxor(d, s1, REG_ITMP2);
855 
856  emit_store_dst(jd, iptr, d);
857  break;
858 
859  /* floating operations ************************************************/
860 
861  case ICMD_FNEG: /* ..., value ==> ..., - value */
862 
863  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
864  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
865  asme.fneg(d, s1);
866  emit_store_dst(jd, iptr, d);
867  break;
868 
869  case ICMD_DNEG: /* ..., value ==> ..., - value */
870 
871  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
872  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
873  asme.dneg(d, s1);
874  emit_store_dst(jd, iptr, d);
875  break;
876 
877  case ICMD_FADD: /* ..., val1, val2 ==> ..., val1 + val2 */
878 
879  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
880  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
881  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
882  asme.fadd(d, s1, s2);
883  emit_store_dst(jd, iptr, d);
884  break;
885 
886  case ICMD_DADD: /* ..., val1, val2 ==> ..., val1 + val2 */
887 
888  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
889  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
890  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
891  asme.dadd(d, s1, s2);
892  emit_store_dst(jd, iptr, d);
893  break;
894 
895  case ICMD_FSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
896 
897  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
898  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
899  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
900  asme.fsub(d, s1, s2);
901  emit_store_dst(jd, iptr, d);
902  break;
903 
904  case ICMD_DSUB: /* ..., val1, val2 ==> ..., val1 - val2 */
905 
906  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
907  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
908  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
909  asme.dsub(d, s1, s2);
910  emit_store_dst(jd, iptr, d);
911  break;
912 
913  case ICMD_FMUL: /* ..., val1, val2 ==> ..., val1 * val2 */
914 
915  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
916  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
917  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
918  if (d == s1 || d == s2) {
919  asme.fmul(REG_FTMP3, s1, s2);
920  asme.fmov(d, REG_FTMP3);
921  } else {
922  asme.fmul(d, s1, s2);
923  }
924  emit_store_dst(jd, iptr, d);
925  break;
926 
927  case ICMD_DMUL: /* ..., val1, val2 ==> ..., val1 *** val2 */
928 
929  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
930  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
931  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
932  if (d == s1 || d == s2) {
933  asme.dmul(REG_FTMP3, s1, s2);
934  asme.dmov(d, REG_FTMP3);
935  } else {
936  asme.dmul(d, s1, s2);
937  }
938  emit_store_dst(jd, iptr, d);
939  break;
940 
941  case ICMD_FDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
942 
943  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
944  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
945  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
946  asme.fdiv(d, s1, s2);
947  emit_store_dst(jd, iptr, d);
948  break;
949 
950  case ICMD_DDIV: /* ..., val1, val2 ==> ..., val1 / val2 */
951 
952  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
953  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
954  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
955  asme.ddiv(d, s1, s2);
956  emit_store_dst(jd, iptr, d);
957  break;
958 
959  case ICMD_I2F: /* ..., value ==> ..., (float) value */
960  case ICMD_L2F:
961  case ICMD_I2D: /* ..., value ==> ..., (double) value */
962  case ICMD_L2D:
963  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
964  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
965 
966  switch (iptr->opc) {
967  case ICMD_I2F: asme.i2f(d, s1); break;
968  case ICMD_L2F: asme.l2f(d, s1); break;
969  case ICMD_I2D: asme.i2d(d, s1); break;
970  case ICMD_L2D: asme.l2d(d, s1); break;
971  }
972 
973  emit_store_dst(jd, iptr, d);
974  break;
975 
976  case ICMD_F2I: /* ..., value ==> ..., (int) value */
977  case ICMD_D2I:
978  case ICMD_F2L: /* ..., value ==> ..., (long) value */
979  case ICMD_D2L:
980  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
981  d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
982 
983  // If the fp value is NaN (unordered) set the result to 0
984  asme.iconst(d, 0);
985 
986  // Use the correct comparison instruction
987  if (iptr->opc == ICMD_F2I || iptr->opc == ICMD_F2L)
988  asme.fcmp(s1);
989  else
990  asme.dcmp(s1, s1);
991 
992  // Jump over the conversion if unordered
993  asme.b_vs(2);
994 
995  // Rounding towards zero (see Java spec)
996  switch (iptr->opc) {
997  case ICMD_F2I: asme.f2i(d, s1); break;
998  case ICMD_D2I: asme.d2i(d, s1); break;
999  case ICMD_F2L: asme.f2l(d, s1); break;
1000  case ICMD_D2L: asme.d2l(d, s1); break;
1001  }
1002 
1003  emit_store_dst(jd, iptr, d);
1004  break;
1005 
1006  case ICMD_F2D: /* ..., value ==> ..., (double) value */
1007 
1008  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1009  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
1010  asme.f2d(d, s1);
1011  emit_store_dst(jd, iptr, d);
1012  break;
1013 
1014  case ICMD_D2F: /* ..., value ==> ..., (float) value */
1015 
1016  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1017  d = codegen_reg_of_dst(jd, iptr, REG_FTMP3);
1018  asme.d2f(d, s1);
1019  emit_store_dst(jd, iptr, d);
1020  break;
1021 
1022  case ICMD_FCMPL: /* ..., val1, val2 ==> ..., val1 fcmpl val2 */
1023  case ICMD_DCMPL:
1024  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1025  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
1026  d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
1027 
1028  if (iptr->opc == ICMD_FCMPL)
1029  asme.fcmp(s1, s2);
1030  else
1031  asme.dcmp(s1, s2);
1032 
1033  asme.iconst(d, 0);
1034  asme.iconst(REG_ITMP1, 1);
1035  asme.iconst(REG_ITMP2, -1);
1036 
1037  /* set to -1 if less than or unordered (NaN) */
1038  /* set to 1 if greater than */
1040 
1041  /* set to 0 if equal or result of previous csel */
1042  asme.icsel(d, d, REG_ITMP1, COND_EQ);
1043 
1044  emit_store_dst(jd, iptr, d);
1045  break;
1046 
1047  case ICMD_FCMPG: /* ..., val1, val2 ==> ..., val1 fcmpg val2 */
1048  case ICMD_DCMPG:
1049  s1 = emit_load_s1(jd, iptr, REG_FTMP1);
1050  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
1051  d = codegen_reg_of_dst(jd, iptr, REG_ITMP3);
1052 
1053  if (iptr->opc == ICMD_FCMPG)
1054  asme.fcmp(s1, s2);
1055  else
1056  asme.dcmp(s1, s2);
1057 
1058  asme.iconst(d, 0);
1059  asme.iconst(REG_ITMP1, 1);
1060  asme.iconst(REG_ITMP2, -1);
1061 
1062  /* set to 1 if greater than or unordered (NaN) */
1063  /* set to -1 if less than */
1065 
1066  /* set to 0 if equal or result of previous csel */
1067  asme.icsel(d, d, REG_ITMP1, COND_EQ);
1068 
1069  emit_store_dst(jd, iptr, d);
1070  break;
1071 
1072  /* memory operations **************************************************/
1073 
1074  case ICMD_BALOAD: /* ..., arrayref, index ==> ..., value */
1075 
1076  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1077  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1078  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1079  /* implicit null-pointer check */
1080  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1081 
1082  asme.ladd(REG_ITMP1, s1, s2);
1083  asme.ldrsb32(d, REG_ITMP1, OFFSET (java_bytearray_t, data[0]));
1084 
1085  emit_store_dst(jd, iptr, d);
1086  break;
1087 
1088  case ICMD_CALOAD: /* ..., arrayref, index ==> ..., value */
1089 
1090  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1091  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1092  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1093  /* implicit null-pointer check */
1094  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1095 
1096  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); /* REG_ITMP1 = s1 + (2 * s2) */
1097  asme.ldrh(d, REG_ITMP1, OFFSET(java_chararray_t, data[0]));
1098 
1099  emit_store_dst(jd, iptr, d);
1100  break;
1101 
1102  case ICMD_SALOAD: /* ..., arrayref, index ==> ..., value */
1103 
1104  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1105  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1106  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1107  /* implicit null-pointer check */
1108  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1109 
1110  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); /* REG_ITMP1 = s1 + (2 * s2) */
1111  asme.ldrsh32(d, REG_ITMP1, OFFSET(java_shortarray_t, data[0]));
1112 
1113  emit_store_dst(jd, iptr, d);
1114  break;
1115 
1116  case ICMD_IALOAD: /* ..., arrayref, index ==> ..., value */
1117 
1118  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1119  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1120  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1121  /* implicit null-pointer check */
1122  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1123 
1124  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1125  asme.ild(d, REG_ITMP1, OFFSET(java_intarray_t, data[0]));
1126 
1127  emit_store_dst(jd, iptr, d);
1128  break;
1129 
1130  case ICMD_LALOAD: /* ..., arrayref, index ==> ..., value */
1131 
1132  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1133  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1134  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1135  /* implicit null-pointer check */
1136  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1137 
1138  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1139  asme.lld(d, REG_ITMP1, OFFSET(java_longarray_t, data[0]));
1140 
1141  emit_store_dst(jd, iptr, d);
1142  break;
1143 
1144  case ICMD_FALOAD: /* ..., arrayref, index ==> ..., value */
1145 
1146  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1147  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1148  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
1149  /* implicit null-pointer check */
1150  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1151 
1152  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1153  asme.fld(d, REG_ITMP1, OFFSET(java_floatarray_t, data[0]));
1154 
1155  emit_store_dst(jd, iptr, d);
1156  break;
1157 
1158  case ICMD_DALOAD: /* ..., arrayref, index ==> ..., value */
1159 
1160  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1161  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1162  d = codegen_reg_of_dst(jd, iptr, REG_FTMP2);
1163  /* implicit null-pointer check */
1164  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1165 
1166  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1167  asme.dld(d, REG_ITMP1, OFFSET(java_doublearray_t, data[0]));
1168 
1169  emit_store_dst(jd, iptr, d);
1170  break;
1171 
1172  case ICMD_AALOAD: /* ..., arrayref, index ==> ..., value */
1173 
1174  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1175  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1176  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1177  /* implicit null-pointer check */
1178  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1179 
1180  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1181  asme.ald(d, REG_ITMP1, OFFSET(java_objectarray_t, data[0]));
1182 
1183  emit_store_dst(jd, iptr, d);
1184  break;
1185 
1186  case ICMD_BASTORE: /* ..., arrayref, index, value ==> ... */
1187 
1188  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1189  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1190  /* implicit null-pointer check */
1191  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1192  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1193 
1194  asme.ladd(REG_ITMP1, s1, s2);
1195  asme.strb(s3, REG_ITMP1, OFFSET(java_bytearray_t, data[0]));
1196 
1197  break;
1198 
1199  case ICMD_CASTORE: /* ..., arrayref, index, value ==> ... */
1200 
1201  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1202  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1203  /* implicit null-pointer check */
1204  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1205  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1206 
1207  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); // REG_ITMP1 = s1 + (2 * s2)
1208  asme.strh(s3, REG_ITMP1, OFFSET(java_chararray_t, data[0]));
1209 
1210  break;
1211 
1212  case ICMD_SASTORE: /* ..., arrayref, index, value ==> ... */
1213 
1214  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1215  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1216  /* implicit null-pointer check */
1217  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1218  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1219 
1220  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 1); // REG_ITMP1 = s1 + (2 * s2)
1221  asme.strh(s3, REG_ITMP1, OFFSET(java_shortarray_t, data[0]));
1222 
1223  break;
1224 
1225  case ICMD_IASTORE: /* ..., arrayref, index, value ==> ... */
1226 
1227  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1228  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1229  /* implicit null-pointer check */
1230  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1231  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1232 
1233  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1234  asme.ist(s3, REG_ITMP1, OFFSET(java_intarray_t, data[0]));
1235 
1236  break;
1237 
1238  case ICMD_LASTORE: /* ..., arrayref, index, value ==> ... */
1239 
1240  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1241  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1242  /* implicit null-pointer check */
1243  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1244  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1245 
1246  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1247  asme.lst(s3, REG_ITMP1, OFFSET(java_longarray_t, data[0]));
1248 
1249  break;
1250 
1251  case ICMD_FASTORE: /* ..., arrayref, index, value ==> ... */
1252 
1253  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1254  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1255  /* implicit null-pointer check */
1256  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1257  s3 = emit_load_s3(jd, iptr, REG_FTMP3);
1258 
1259  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 2);
1260  asme.fst(s3, REG_ITMP1, OFFSET(java_floatarray_t, data[0]));
1261 
1262  break;
1263 
1264  case ICMD_DASTORE: /* ..., arrayref, index, value ==> ... */
1265 
1266  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1267  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1268  /* implicit null-pointer check */
1269  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1270  s3 = emit_load_s3(jd, iptr, REG_FTMP3);
1271 
1272  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1273  asme.dst(s3, REG_ITMP1, OFFSET(java_doublearray_t, data[0]));
1274 
1275  break;
1276 
1277  case ICMD_AASTORE: /* ..., arrayref, index, value ==> ... */
1278 
1279  s1 = emit_load_s1(jd, iptr, REG_A0);
1280  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1281  /* implicit null-pointer check */
1282  emit_arrayindexoutofbounds_check(cd, iptr, s1, s2);
1283  s3 = emit_load_s3(jd, iptr, REG_A1);
1284 
1285  asme.mov(REG_A0, s1);
1286  asme.mov(REG_A1, s3);
1287 
1289  asme.ald(REG_PV, REG_PV, disp);
1290  asme.blr(REG_PV);
1291 
1292  disp = (s4) (cd->mcodeptr - cd->mcodebase);
1293  asme.lda(REG_PV, REG_RA, -disp);
1294  emit_arraystore_check(cd, iptr);
1295 
1296  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1297  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1298  s3 = emit_load_s3(jd, iptr, REG_ITMP3);
1299  asme.ladd_shift(REG_ITMP1, s1, s2, CODE_LSL, 3); // REG_ITMP1 = s1 + lsl(s2, 3)
1300  asme.ast(s3, REG_ITMP1, OFFSET(java_objectarray_t, data[0]));
1301  break;
1302 
1303  case ICMD_GETFIELD: /* ... ==> ..., value */
1304 
1305  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1306 
1307  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1308  uf = iptr->sx.s23.s3.uf;
1309  fieldtype = uf->fieldref->parseddesc.fd->type;
1310  disp = 0;
1311 
1313  }
1314  else {
1315  fi = iptr->sx.s23.s3.fmiref->p.field;
1316  fieldtype = fi->type;
1317  disp = fi->offset;
1318  }
1319 
1320  if (IS_INT_LNG_TYPE(fieldtype))
1321  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1322  else
1323  d = codegen_reg_of_dst(jd, iptr, REG_FTMP1);
1324 
1325  /* implicit null-pointer check */
1326  switch (fieldtype) {
1327  case TYPE_INT:
1328  asme.ild(d, s1, disp);
1329  break;
1330  case TYPE_LNG:
1331  asme.lld(d, s1, disp);
1332  break;
1333  case TYPE_ADR:
1334  asme.ald(d, s1, disp);
1335  break;
1336  case TYPE_FLT:
1337  asme.fld(d, s1, disp);
1338  break;
1339  case TYPE_DBL:
1340  asme.dld(d, s1, disp);
1341  break;
1342  }
1343  asme.nop();
1344  emit_store_dst(jd, iptr, d);
1345  break;
1346 
1347  case ICMD_PUTFIELD: /* ..., objectref, value ==> ... */
1348 
1349  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1350 
1351  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1352  uf = iptr->sx.s23.s3.uf;
1353  fieldtype = uf->fieldref->parseddesc.fd->type;
1354  disp = 0;
1355  }
1356  else {
1357  uf = NULL;
1358  fi = iptr->sx.s23.s3.fmiref->p.field;
1359  fieldtype = fi->type;
1360  disp = fi->offset;
1361  }
1362 
1363  if (IS_INT_LNG_TYPE(fieldtype))
1364  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1365  else
1366  s2 = emit_load_s2(jd, iptr, REG_FTMP2);
1367 
1368  if (INSTRUCTION_IS_UNRESOLVED(iptr))
1370 
1371  /* implicit null-pointer check */
1372  switch (fieldtype) {
1373  case TYPE_INT:
1374  asme.ist(s2, s1, disp);
1375  break;
1376  case TYPE_LNG:
1377  asme.lst(s2, s1, disp);
1378  break;
1379  case TYPE_ADR:
1380  asme.ast(s2, s1, disp);
1381  break;
1382  case TYPE_FLT:
1383  asme.fst(s2, s1, disp);
1384  break;
1385  case TYPE_DBL:
1386  asme.dst(s2, s1, disp);
1387  break;
1388  }
1389  asme.nop();
1390  break;
1391 
1392  /* branch operations **************************************************/
1393 
1394  case ICMD_IF_LEQ: /* ..., value ==> ... */
1395  case ICMD_IF_LNE:
1396  case ICMD_IF_LLT:
1397  case ICMD_IF_LGE:
1398  case ICMD_IF_LGT:
1399  case ICMD_IF_LLE:
1400 
1401  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1402 
1403  if (iptr->sx.val.l >= 0 && iptr->sx.val.l <= 0xfff) {
1404  asme.lcmp_imm(s1, iptr->sx.val.l);
1405  } else if ((-iptr->sx.val.l) >= 0 && (-iptr->sx.val.l) <= 0xfff) {
1406  asme.lcmn_imm(s1, -iptr->sx.val.l);
1407  } else {
1408  asme.lconst(REG_ITMP2, iptr->sx.val.l);
1409  asme.lcmp(s1, REG_ITMP2);
1410  }
1411 
1412  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_LEQ, BRANCH_OPT_NONE);
1413 
1414  break;
1415 
1416  case ICMD_IF_LCMPEQ: /* ..., value, value ==> ... */
1417  case ICMD_IF_LCMPNE: /* op1 = target JavaVM pc */
1418  case ICMD_IF_LCMPLT:
1419  case ICMD_IF_LCMPGT:
1420  case ICMD_IF_LCMPLE:
1421  case ICMD_IF_LCMPGE:
1422 
1423  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1424  s2 = emit_load_s2(jd, iptr, REG_ITMP2);
1425 
1426  asme.lcmp(s1, s2);
1427  emit_bcc(cd, iptr->dst.block, iptr->opc - ICMD_IF_LCMPEQ, BRANCH_OPT_NONE);
1428 
1429  break;
1430 
1431  case ICMD_ATHROW: /* ..., objectref ==> ... (, objectref) */
1432 
1434  break;
1435 
1436  case ICMD_BUILTIN: /* ..., arg1, arg2, arg3 ==> ... */
1437  bte = iptr->sx.s23.s3.bte;
1438  if (bte->stub == NULL)
1439  disp = dseg_add_functionptr(cd, bte->fp);
1440  else
1441  disp = dseg_add_functionptr(cd, bte->stub);
1442 
1443  asme.ald(REG_PV, REG_PV, disp); /* Pointer to built-in-function */
1444 
1445  /* generate the actual call */
1446  asme.blr(REG_PV);
1447  break;
1448 
1449  case ICMD_INVOKESPECIAL:
1450  emit_nullpointer_check(cd, iptr, REG_A0);
1451  /* fall-through */
1452 
1453  case ICMD_INVOKESTATIC: /* ..., [arg1, [arg2 ...]] ==> ... */
1454  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1455  um = iptr->sx.s23.s3.um;
1456  disp = dseg_add_unique_address(cd, um);
1457 
1459  um, disp);
1460  }
1461  else {
1462  lm = iptr->sx.s23.s3.fmiref->p.method;
1463  disp = dseg_add_address(cd, lm->stubroutine);
1464  }
1465 
1466  asme.ald(REG_PV, REG_PV, disp); /* method pointer in r27 */
1467 
1468  /* generate the actual call */
1469  asme.blr(REG_PV);
1470  break;
1471 
1472  case ICMD_INVOKEVIRTUAL:/* op1 = arg count, val.a = method pointer */
1473  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1474  um = iptr->sx.s23.s3.um;
1476 
1477  s1 = 0;
1478  }
1479  else {
1480  lm = iptr->sx.s23.s3.fmiref->p.method;
1481  s1 = OFFSET(vftbl_t, table[0]) + sizeof(methodptr) * lm->vftblindex;
1482  }
1483 
1484  /* implicit null-pointer check */
1485  asme.ald(REG_METHODPTR, REG_A0, OFFSET(java_object_t, vftbl));
1486  asme.ald(REG_PV, REG_METHODPTR, s1);
1487 
1488  /* generate the actual call */
1489  asme.blr(REG_PV);
1490 
1491  break;
1492 
1493  case ICMD_INVOKEINTERFACE:
1494  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1495  um = iptr->sx.s23.s3.um;
1497 
1498  s1 = 0;
1499  s2 = 0;
1500  }
1501  else {
1502  lm = iptr->sx.s23.s3.fmiref->p.method;
1503  s1 = OFFSET(vftbl_t, interfacetable[0]) -
1504  sizeof(methodptr*) * lm->clazz->index;
1505 
1506  s2 = sizeof(methodptr) * (lm - lm->clazz->methods);
1507  }
1508 
1509  /* implicit null-pointer check */
1510  asme.ald(REG_METHODPTR, REG_A0, OFFSET(java_object_t, vftbl));
1511 
1512  /* on aarch64 we only have negative offsets in the range of -255 to 255 so we need a mov */
1513  assert(abs(s1) <= 0xffff);
1514  assert(abs(s2) <= 0xffff);
1515  asme.lconst(REG_ITMP1, s1);
1516  asme.lconst(REG_ITMP3, s2);
1517 
1518  emit_ldr_reg(cd, REG_METHODPTR, REG_METHODPTR, REG_ITMP1); // TODO: move to emitter
1519  emit_ldr_reg(cd, REG_PV, REG_METHODPTR, REG_ITMP3); // TODO: move to emitter
1520 
1521  /* generate the actual call */
1522  asme.blr(REG_PV);
1523  break;
1524 
1525  case ICMD_TABLESWITCH: /* ..., index ==> ... */
1526 
1527  s4 i, l;
1528  branch_target_t *table;
1529 
1530  table = iptr->dst.table;
1531 
1532  l = iptr->sx.s23.s2.tablelow;
1533  i = iptr->sx.s23.s3.tablehigh;
1534 
1535  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1536  if (l == 0) {
1537  asme.imov(REG_ITMP1, s1); // TODO: check that this works
1538  } else if (abs(l) <= 32768) {
1539  if (l < 0) {
1540  asme.iadd_imm(REG_ITMP1, s1, -l);
1541  } else {
1542  asme.isub_imm(REG_ITMP1, s1, l);
1543  }
1544  } else {
1545  asme.iconst(REG_ITMP2, l);
1546  asme.isub(REG_ITMP1, s1, REG_ITMP2);
1547  }
1548 
1549  /* number of targets */
1550  i = i - l + 1;
1551 
1552  /* range check */
1553  emit_icmp_imm(cd, REG_ITMP1, i-1);
1554  emit_bcc(cd, table[0].block, BRANCH_UGT, BRANCH_OPT_NONE);
1555 
1556  /* build jump table top down and use address of lowest entry */
1557 
1558  table += i;
1559 
1560  while (--i >= 0) {
1561  dseg_add_target(cd, table->block);
1562  --table;
1563  }
1564 
1565  /* length of dataseg after last dseg_add_target is used by load */
1566  asme.sxtw(REG_ITMP1, REG_ITMP1);
1568  asme.ald(REG_ITMP2, REG_ITMP2, -(cd->dseglen));
1569  asme.br(REG_ITMP2);
1570  ALIGNCODENOP;
1571  break;
1572 
1573  case ICMD_CHECKCAST: /* ..., objectref ==> ..., objectref */
1574 
1575  if (!(iptr->flags.bits & INS_FLAG_ARRAY)) {
1576  // object type cast-check
1577 
1578  classinfo *super;
1579  s4 superindex;
1580 
1581  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1582  super = NULL;
1583  superindex = 0;
1584  }
1585  else {
1586  super = iptr->sx.s23.s3.c.cls;
1587  superindex = super->index;
1588  }
1589 
1590  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1591 
1592  // if class is not resolved, check which code to call
1593 
1594  if (super == NULL) {
1595  asme.lcmp_imm(s1, 0);
1596  emit_label_beq(cd, BRANCH_LABEL_1);
1597 
1598  disp = dseg_add_unique_s4(cd, 0); /* super->flags */
1599 
1601  iptr->sx.s23.s3.c.ref, disp);
1602 
1603  asme.ild(REG_ITMP2, REG_PV, disp);
1604 
1605  disp = dseg_add_s4(cd, ACC_INTERFACE);
1606  asme.ild(REG_ITMP3, REG_PV, disp);
1607 
1608  asme.ltst(REG_ITMP2, REG_ITMP3);
1609  emit_label_beq(cd, BRANCH_LABEL_2);
1610  }
1611 
1612  // interface checkcast code
1613 
1614  if ((super == NULL) || (super->flags & ACC_INTERFACE)) {
1615  if (super != NULL) {
1616  asme.lcmp_imm(s1, 0);
1617  emit_label_beq(cd, BRANCH_LABEL_3);
1618  }
1619 
1620  asme.ald(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
1621 
1622  if (super == NULL) {
1624  iptr->sx.s23.s3.c.ref, 0);
1625  }
1626 
1627  asme.ild(REG_ITMP3, REG_ITMP2,
1628  OFFSET(vftbl_t, interfacetablelength));
1629 
1630  assert(abs(superindex) <= 0xfff);
1631  asme.icmp_imm(REG_ITMP3, superindex);
1632  emit_classcast_check(cd, iptr, BRANCH_LE, REG_ITMP3, s1);
1633 
1634  s4 offset = (s4) (OFFSET(vftbl_t, interfacetable[0]) -
1635  superindex * sizeof(methodptr*));
1636 
1637  assert(abs(offset) <= 0xffff);
1638  asme.lconst(REG_ITMP3, offset);
1639  emit_ldr_reg(cd, REG_ITMP3, REG_ITMP2, REG_ITMP3); // TODO: mov this to emitter
1640  asme.lcmp_imm(REG_ITMP3, 0);
1641  emit_classcast_check(cd, iptr, BRANCH_EQ, REG_ITMP3, s1);
1642 
1643  if (super == NULL)
1645  else
1647  }
1648 
1649  // class checkcast code
1650 
1651  if ((super == NULL) || !(super->flags & ACC_INTERFACE)) {
1652  if (super == NULL) {
1654 
1655  disp = dseg_add_unique_address(cd, NULL);
1656 
1658  iptr->sx.s23.s3.c.ref, disp);
1659  }
1660  else {
1661  disp = dseg_add_address(cd, super->vftbl);
1662 
1663  asme.lcmp_imm(s1, 0);
1664  emit_label_beq(cd, BRANCH_LABEL_5);
1665  }
1666 
1667  asme.ald(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
1668  asme.ald(REG_ITMP3, REG_PV, disp);
1669 
1670  if (super == NULL || super->vftbl->subtype_depth >= DISPLAY_SIZE) {
1671  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1673  asme.ald(REG_ITMP1, REG_ITMP1, 0);
1674 
1675  asme.lcmp(REG_ITMP1, REG_ITMP3);
1676  emit_label_beq(cd, BRANCH_LABEL_6); // good
1677 
1678  if (super == NULL) {
1679  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1680  asme.icmp_imm(REG_ITMP1, OFFSET(vftbl_t, subtype_display[DISPLAY_SIZE]));
1681  emit_label_bne(cd, BRANCH_LABEL_10); // throw
1682  }
1683 
1684  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_depth));
1685  asme.ild(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, subtype_depth));
1686  asme.icmp(REG_ITMP1, REG_ITMP3);
1687  emit_label_bgt(cd, BRANCH_LABEL_9); // throw
1688 
1689  // reload
1690  asme.ald(REG_ITMP3, REG_PV, disp);
1691  asme.ald(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, subtype_overflow));
1693  asme.ald(REG_ITMP1, REG_ITMP2, -DISPLAY_SIZE*8);
1694 
1695  asme.lcmp(REG_ITMP1, REG_ITMP3);
1696  emit_label_beq(cd, BRANCH_LABEL_7); // good
1697 
1699  if (super == NULL)
1701 
1702  // reload s1, might have been destroyed
1703  emit_load_s1(jd, iptr, REG_ITMP1);
1705 
1708  // reload s1, might have been destroyed
1709  emit_load_s1(jd, iptr, REG_ITMP1);
1710  }
1711  else {
1712  asme.ald(REG_ITMP2, REG_ITMP2, super->vftbl->subtype_offset);
1713 
1714  asme.lcmp(REG_ITMP2, REG_ITMP3);
1715  emit_classcast_check(cd, iptr, BRANCH_NE, REG_ITMP3, s1);
1716  }
1717 
1718  if (super != NULL)
1720  }
1721 
1722  if (super == NULL) {
1725  }
1726 
1727  d = codegen_reg_of_dst(jd, iptr, s1);
1728  }
1729  else {
1730  /* array type cast-check */
1731 
1732  s1 = emit_load_s1(jd, iptr, REG_A0);
1733  asme.imov(REG_A0, s1);
1734 
1735  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1736  disp = dseg_add_unique_address(cd, NULL);
1737 
1740  iptr->sx.s23.s3.c.ref,
1741  disp);
1742  }
1743  else
1744  disp = dseg_add_address(cd, iptr->sx.s23.s3.c.cls);
1745 
1746  asme.ald(REG_A1, REG_PV, disp);
1748  asme.ald(REG_PV, REG_PV, disp);
1749  asme.blr(REG_PV);
1750  disp = (s4) (cd->mcodeptr - cd->mcodebase);
1751  asme.lda(REG_PV, REG_RA, -disp);
1752 
1753  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1754  asme.ltst(REG_RESULT, REG_RESULT);
1755  emit_classcast_check(cd, iptr, BRANCH_EQ, REG_RESULT, s1);
1756 
1757  d = codegen_reg_of_dst(jd, iptr, s1);
1758  }
1759 
1760  asme.mov(d, s1);
1761  emit_store_dst(jd, iptr, d);
1762  break;
1763 
1764  case ICMD_INSTANCEOF: /* ..., objectref ==> ..., intresult */
1765 
1766  {
1767  classinfo *super;
1768  vftbl_t *supervftbl;
1769  s4 superindex;
1770 
1771  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1772  super = NULL;
1773  superindex = 0;
1774  supervftbl = NULL;
1775 
1776  } else {
1777  super = iptr->sx.s23.s3.c.cls;
1778  superindex = super->index;
1779  supervftbl = super->vftbl;
1780  }
1781 
1782  s1 = emit_load_s1(jd, iptr, REG_ITMP1);
1783  d = codegen_reg_of_dst(jd, iptr, REG_ITMP2);
1784 
1785  if (s1 == d) {
1786  asme.mov(REG_ITMP1, s1);
1787  s1 = REG_ITMP1;
1788  }
1789 
1790  /* if class is not resolved, check which code to call */
1791 
1792  if (super == NULL) {
1793  asme.clr(d);
1794  asme.lcmp_imm(s1, 0);
1795  emit_label_beq(cd, BRANCH_LABEL_1);
1796 
1797  disp = dseg_add_unique_s4(cd, 0); /* super->flags */
1798 
1800  iptr->sx.s23.s3.c.ref, disp);
1801 
1802  asme.ild(REG_ITMP3, REG_PV, disp);
1804 
1805  asme.itst(REG_ITMP3, REG_ITMP2);
1806  emit_label_beq(cd, BRANCH_LABEL_2);
1807  }
1808 
1809  /* interface instanceof code */
1810 
1811  if ((super == NULL) || (super->flags & ACC_INTERFACE)) {
1812  if (super == NULL) {
1813  /* If d == REG_ITMP2, then it's destroyed in check
1814  code above. */
1815  if (d == REG_ITMP2)
1816  asme.clr(d);
1817 
1820  iptr->sx.s23.s3.c.ref, 0);
1821  } else {
1822  asme.clr(d);
1823  asme.lcmp_imm(s1, 0);
1824  emit_label_beq(cd, BRANCH_LABEL_3);
1825  }
1826 
1827  asme.ald(REG_ITMP1, s1, OFFSET(java_object_t, vftbl));
1828  asme.ild(REG_ITMP3, REG_ITMP1, OFFSET(vftbl_t, interfacetablelength));
1829  assert(abs(superindex) <= 0xfff);
1830  asme.icmp_imm(REG_ITMP3, superindex);
1831  asme.b_le(5);
1832 
1833  s4 offset = (s4) (OFFSET(vftbl_t, interfacetable[0]) -
1834  superindex * sizeof(methodptr*));
1835  assert(abs(offset) <= 0xffff);
1836  asme.lconst(REG_ITMP3, offset);
1838 
1839  asme.lcmp_imm(REG_ITMP1, 0);
1840  asme.cset(d, COND_NE); /* if REG_ITMP != 0 then d = 1 */
1841 
1842  if (super == NULL)
1844  else
1846  }
1847 
1848  /* class instanceof code */
1849 
1850  if ((super == NULL) || !(super->flags & ACC_INTERFACE)) {
1851  if (super == NULL) {
1853 
1854  disp = dseg_add_unique_address(cd, NULL);
1855 
1857  iptr->sx.s23.s3.c.ref,
1858  disp);
1859  }
1860  else {
1861  disp = dseg_add_address(cd, supervftbl);
1862 
1863  asme.clr(d);
1864  asme.lcmp_imm(s1, 0);
1865  emit_label_beq(cd, BRANCH_LABEL_5);
1866  }
1867 
1868  asme.ald(REG_ITMP2, s1, OFFSET(java_object_t, vftbl));
1869  asme.ald(REG_ITMP3, REG_PV, disp);
1870 
1871  if (super == NULL || super->vftbl->subtype_depth >= DISPLAY_SIZE) {
1872  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1874  asme.ald(REG_ITMP1, REG_ITMP1, 0);
1875  asme.lcmp(REG_ITMP1, REG_ITMP3);
1876  emit_label_bne(cd, BRANCH_LABEL_8);
1877 
1878  asme.iconst(d, 1);
1879  emit_label_br(cd, BRANCH_LABEL_6); /* true */
1881 
1882  if (super == NULL) {
1883  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_offset));
1884  asme.icmp_imm(REG_ITMP1, OFFSET(vftbl_t, subtype_display[DISPLAY_SIZE]));
1885  emit_label_bne(cd, BRANCH_LABEL_10); /* false */
1886  }
1887 
1888  asme.ild(REG_ITMP1, REG_ITMP3, OFFSET(vftbl_t, subtype_depth));
1889 
1890  asme.ild(REG_ITMP3, REG_ITMP2, OFFSET(vftbl_t, subtype_depth));
1891  asme.icmp(REG_ITMP1, REG_ITMP3);
1892  emit_label_bgt(cd, BRANCH_LABEL_9); /* false */
1893 
1894  /* reload */
1895  asme.ald(REG_ITMP3, REG_PV, disp);
1896  asme.ald(REG_ITMP2, REG_ITMP2, OFFSET(vftbl_t, subtype_overflow));
1898  asme.ald(REG_ITMP1, REG_ITMP2, -DISPLAY_SIZE*8);
1899 
1900  asme.lcmp(REG_ITMP1, REG_ITMP3);
1901  asme.cset(d, COND_EQ);
1902 
1903  if (d == REG_ITMP2)
1906 
1907  if (super == NULL)
1909 
1910  if (d == REG_ITMP2) {
1911  asme.clr(d);
1913  }
1915 
1916  }
1917  else {
1918  asme.ald(REG_ITMP2, REG_ITMP2, super->vftbl->subtype_offset);
1919  asme.lcmp(REG_ITMP2, REG_ITMP3);
1920  asme.cset(d, COND_EQ);
1921  }
1922 
1923  if (super != NULL)
1925  }
1926 
1927  if (super == NULL) {
1930  }
1931 
1932  emit_store_dst(jd, iptr, d);
1933  }
1934  break;
1935 
1936  case ICMD_MULTIANEWARRAY:/* ..., cnt1, [cnt2, ...] ==> ..., arrayref */
1937 
1938  /* check for negative sizes and copy sizes to stack if necessary */
1939 
1940  MCODECHECK((iptr->s1.argcount << 1) + 64);
1941 
1942  for (s1 = iptr->s1.argcount; --s1 >= 0; ) {
1943 
1944  var = VAR(iptr->sx.s23.s2.args[s1]);
1945 
1946  /* copy SAVEDVAR sizes to stack */
1947 
1948  /* Already Preallocated? */
1949 
1950  if (!(var->flags & PREALLOC)) {
1951  s2 = emit_load(jd, iptr, var, REG_ITMP1);
1952  asme.lst(s2, REG_SP, s1 * 8);
1953  }
1954  }
1955 
1956  /* a0 = dimension count */
1957 
1958  asme.iconst(REG_A0, iptr->s1.argcount);
1959 
1960  /* is patcher function set? */
1961 
1962  if (INSTRUCTION_IS_UNRESOLVED(iptr)) {
1963  disp = dseg_add_unique_address(cd, 0);
1964 
1966  iptr->sx.s23.s3.c.ref,
1967  disp);
1968  }
1969  else
1970  disp = dseg_add_address(cd, iptr->sx.s23.s3.c.cls);
1971 
1972  /* a1 = arraydescriptor */
1973 
1974  asme.ald(REG_A1, REG_PV, disp);
1975 
1976  /* a2 = pointer to dimensions = stack pointer */
1977 
1978  asme.mov(REG_A2, REG_SP);
1979 
1981  asme.ald(REG_PV, REG_PV, disp);
1982  asme.blr(REG_PV);
1983  disp = (s4) (cd->mcodeptr - cd->mcodebase);
1984  asme.lda(REG_PV, REG_RA, -disp);
1985 
1986  /* check for exception before result assignment */
1987 
1988  emit_exception_check(cd, iptr);
1989 
1990  d = codegen_reg_of_dst(jd, iptr, REG_RESULT);
1991  asme.imov(d, REG_RESULT);
1992  emit_store_dst(jd, iptr, d);
1993  break;
1994 
1995  default:
1996  os::abort("ICMD (%s, %d) not implemented yet on Aarch64!",
1997  icmd_table[iptr->opc].name, iptr->opc);
1998  }
1999 
2000  return;
2001 }
2002 
2003 
2004 /* codegen_emit_stub_native ****************************************************
2005 
2006  Emits a stub routine which calls a native method.
2007 
2008 *******************************************************************************/
2009 
/* Parameters:
     jd         - JIT data of the Java method being wrapped
     nmd        - parsed descriptor of the native callee's parameter layout
     f          - entry point of the native function to invoke
     skipparams - offset between Java parameter index i and native parameter
                  index j (j = i + skipparams, see the copy/spill loop below);
                  presumably accounts for the implicit leading native
                  arguments (JNIEnv, and class for static methods) -- confirm */
2010 void codegen_emit_stub_native(jitdata *jd, methoddesc *nmd, functionptr f, int skipparams)
2011 {
2012  methodinfo *m;
2013  codeinfo *code;
2014  codegendata *cd;
2015  methoddesc *md;
2016  int i, j;
2017  int t;
2018  int s1, s2;
2019  int disp;
2020 
2021  /* get required compiler data */
2022 
2023  m = jd->m;
2024  code = jd->code;
2025  cd = jd->cd;
2026  AsmEmitter asme(cd);
2027 
2028  /* initialize variables */
2029 
2030  md = m->parseddesc;
2031 
2032  /* calculate stack frame size */
 /* Frame size is counted in pointer-sized slots (SIZEOF_VOID_P units). */
2033 
2034  cd->stackframesize =
2035  1 + /* return address */
2036  sizeof(stackframeinfo_t) / SIZEOF_VOID_P +
2037  sizeof(localref_table) / SIZEOF_VOID_P +
2038  1 + /* methodinfo for call trace */
2039  md->paramcount +
2040  nmd->memuse;
2041 
2042  /* create method header */
 /* Convert slot count to bytes and round up to a 16-byte multiple (AArch64
    SP alignment). stackframesize*8 is always a multiple of 8, so the
    remainder added here is either 0 or 8. */
2043  u4 stackoffset = (cd->stackframesize * 8);
2044  stackoffset += stackoffset % 16;
2045 
2046  (void) dseg_add_unique_address(cd, code); /* CodeinfoPointer */
2047  (void) dseg_add_unique_s4(cd, stackoffset); /* FrameSize */
2048  (void) dseg_add_unique_s4(cd, 0); /* IsLeaf */
2049  (void) dseg_add_unique_s4(cd, 0); /* IntSave */
2050  (void) dseg_add_unique_s4(cd, 0); /* FltSave */
2051 
2052  /* generate stub code */
 /* Allocate the frame and save the return address in the topmost slot. */
2053 
2054  asme.lda(REG_SP, REG_SP, -stackoffset);
2055  asme.lst(REG_RA, REG_SP, stackoffset - SIZEOF_VOID_P);
2056 
2057 #if defined(ENABLE_GC_CACAO)
2058  /* Save callee saved integer registers in stackframeinfo (GC may
2059  need to recover them during a collection). */
2060 
2061  disp = cd->stackframesize * 8 - SIZEOF_VOID_P - sizeof(stackframeinfo_t) +
2062  OFFSET(stackframeinfo_t, intregs);
2063 
2064  for (i = 0; i < INT_SAV_CNT; i++)
2065  asme.lst(abi_registers_integer_saved[i], REG_SP, disp + i * 8);
2066 #endif
2067 
2068  /* save integer and float argument registers */
 /* Register arguments are spilled to the bottom of the frame (slot i for
    parameter i) so they survive the helper call made below. */
2069 
2070  for (i = 0; i < md->paramcount; i++) {
2071  if (!md->params[i].inmemory) {
2072  s1 = md->params[i].regoff;
2073 
2074  switch (md->paramtypes[i].type) {
2075  case TYPE_INT:
2076  case TYPE_LNG:
2077  case TYPE_ADR:
2078  asme.lst(s1, REG_SP, i * 8);
2079  break;
2080  case TYPE_FLT:
2081  asme.fst(s1, REG_SP, i * 8);
2082  break;
2083  case TYPE_DBL:
2084  asme.dst(s1, REG_SP, i * 8);
2085  break;
2086  default:
2087  assert(false);
2088  break;
2089  }
2090  }
2091  }
2092 
2093  /* prepare data structures for native function call */
2094 
2095  asme.mov(REG_A0, REG_SP);
2096  asme.mov(REG_A1, REG_PV);
 /* NOTE(review): original line 2097 was lost in extraction; it presumably
    sets 'disp' (dseg_add_functionptr for the native-call setup helper)
    before the load below, otherwise 'disp' is used uninitialized here --
    confirm against the upstream codegen.cpp. */
2098  asme.lld(REG_PV, REG_PV, disp);
2099  asme.blr(REG_PV);
 /* Recompute PV from RA using the current code offset, as done after every
    indirect call in this file. */
2100  disp = (s4) (cd->mcodeptr - cd->mcodebase);
2101  asme.lda(REG_PV, REG_RA, -disp);
2102 
2103  /* remember class argument */
 /* NOTE(review): the helper called above apparently returns the class
    pointer in REG_RESULT for static methods; it is parked in REG_ITMP3
    until it is moved into REG_A1 below -- confirm. */
2104 
2105  if (m->flags & ACC_STATIC)
2106  asme.mov(REG_ITMP3, REG_RESULT);
2107 
2108  /* restore integer and float argument registers */
2109 
2110  for (i = 0; i < md->paramcount; i++) {
2111  if (!md->params[i].inmemory) {
2112  s1 = md->params[i].regoff;
2113 
2114  switch (md->paramtypes[i].type) {
2115  case TYPE_INT:
2116  case TYPE_LNG:
2117  case TYPE_ADR:
2118  asme.lld(s1, REG_SP, i * 8);
2119  break;
2120  case TYPE_FLT:
2121  asme.fld(s1, REG_SP, i * 8);
2122  break;
2123  case TYPE_DBL:
2124  asme.dld(s1, REG_SP, i * 8);
2125  break;
2126  default:
2127  assert(false);
2128  break;
2129  }
2130  }
2131  }
2132 
2133  /* copy or spill arguments to new locations */
 /* Java parameter i maps to native parameter j = i + skipparams. Iterating
    from the last parameter downwards avoids clobbering a source register
    that is also a destination for a lower-indexed parameter. In-memory
    source offsets are relative to the caller frame, hence '+ stackoffset'. */
2134 
2135  for (i = md->paramcount - 1, j = i + skipparams; i >= 0; i--, j--) {
2136  t = md->paramtypes[i].type;
2137 
2138  if (IS_INT_LNG_TYPE(t)) {
2139  if (!md->params[i].inmemory) {
2140  s1 = md->params[i].regoff;
2141  s2 = nmd->params[j].regoff;
2142 
2143  if (!nmd->params[j].inmemory)
2144  asme.mov(s2, s1);
2145  else
2146  asme.lst(s1, REG_SP, s2);
2147  }
2148  else {
2149  s1 = md->params[i].regoff + stackoffset;
2150  s2 = nmd->params[j].regoff;
2151  asme.lld(REG_ITMP1, REG_SP, s1);
2152  asme.lst(REG_ITMP1, REG_SP, s2);
2153  }
2154  }
2155  else {
2156  if (!md->params[i].inmemory) {
2157  s1 = md->params[i].regoff;
2158  s2 = nmd->params[j].regoff;
2159 
2160  if (!nmd->params[j].inmemory)
2161  if (IS_2_WORD_TYPE(t))
2162  asme.dmov(s2, s1);
2163  else
2164  asme.fmov(s2, s1);
2165  else {
2166  if (IS_2_WORD_TYPE(t))
2167  asme.dst(s1, REG_SP, s2);
2168  else
2169  asme.fst(s1, REG_SP, s2);
2170  }
2171  }
2172  else {
2173  s1 = md->params[i].regoff + stackoffset;
2174  s2 = nmd->params[j].regoff;
2175  if (IS_2_WORD_TYPE(t)) {
2176  asme.dld(REG_FTMP1, REG_SP, s1);
2177  asme.dst(REG_FTMP1, REG_SP, s2);
2178  }
2179  else {
2180  asme.fld(REG_FTMP1, REG_SP, s1);
2181  asme.fst(REG_FTMP1, REG_SP, s2);
2182  }
2183  }
2184  }
2185  }
2186 
2187  /* Handle native Java methods. */
2188 
2189  if (m->flags & ACC_NATIVE) {
2190  /* put class into second argument register */
2191 
2192  if (m->flags & ACC_STATIC)
2193  asme.mov(REG_A1, REG_ITMP3);
2194 
2195  /* put env into first argument register */
2196 
2197  disp = dseg_add_address(cd, VM::get_current()->get_jnienv());
2198  asme.lld(REG_A0, REG_PV, disp);
2199  }
2200 
2201  /* Call the native function. */
2202 
2203  disp = dseg_add_functionptr(cd, f);
2204  asme.lld(REG_PV, REG_PV, disp);
2205  asme.blr(REG_PV); /* call native method */
2206  disp = (s4) (cd->mcodeptr - cd->mcodebase);
2207  asme.lda(REG_PV, REG_RA, -disp); /* recompute pv from ra */
2208 
2209  /* save return value */
 /* Spill the result to slot 0 so it survives the teardown helper call. */
2210 
2211  switch (md->returntype.type) {
2212  case TYPE_INT:
2213  case TYPE_LNG:
2214  case TYPE_ADR:
2215  asme.lst(REG_RESULT, REG_SP, 0 * 8);
2216  break;
2217  case TYPE_FLT:
2218  asme.fst(REG_FRESULT, REG_SP, 0 * 8);
2219  break;
2220  case TYPE_DBL:
2221  asme.dst(REG_FRESULT, REG_SP, 0 * 8);
2222  break;
2223  case TYPE_VOID:
2224  break;
2225  default:
2226  assert(false);
2227  break;
2228  }
2229 
2230  /* remove native stackframe info */
2231 
2232  asme.mov(REG_A0, REG_SP);
2233  asme.mov(REG_A1, REG_PV);
 /* NOTE(review): original line 2234 was lost in extraction; it presumably
    sets 'disp' (dseg_add_functionptr for the native-call teardown helper)
    before the load below -- confirm against the upstream codegen.cpp. */
2235  asme.lld(REG_PV, REG_PV, disp);
2236  asme.blr(REG_PV);
2237  disp = (s4) (cd->mcodeptr - cd->mcodebase);
2238  asme.lda(REG_PV, REG_RA, -disp);
 /* The teardown helper's result is treated as a pending exception pointer
    (tested with cbnz below). */
2239  asme.mov(REG_ITMP1_XPTR, REG_RESULT);
2240 
2241  /* restore return value */
2242 
2243  switch (md->returntype.type) {
2244  case TYPE_INT:
2245  case TYPE_LNG:
2246  case TYPE_ADR:
2247  asme.lld(REG_RESULT, REG_SP, 0 * 8);
2248  break;
2249  case TYPE_FLT:
2250  asme.fld(REG_FRESULT, REG_SP, 0 * 8);
2251  break;
2252  case TYPE_DBL:
2253  asme.dld(REG_FRESULT, REG_SP, 0 * 8);
2254  break;
2255  case TYPE_VOID:
2256  break;
2257  default:
2258  assert(false);
2259  break;
2260  }
2261 
2262 #if defined(ENABLE_GC_CACAO)
2263  /* Restore callee saved integer registers from stackframeinfo (GC
2264  might have modified them during a collection). */
2265  os::abort("NOT IMPLEMENTED YET!");
2266 
2267  disp = cd->stackframesize * 8 - SIZEOF_VOID_P - sizeof(stackframeinfo_t) +
2268  OFFSET(stackframeinfo_t, intregs);
2269 
2270  for (i = 0; i < INT_SAV_CNT; i++)
2271  asme.lld(abi_registers_integer_saved[i], REG_SP, disp + i * 8);
2272 #endif
2273 
2274  asme.lld(REG_RA, REG_SP, stackoffset - 8); /* get RA */
2275  asme.lda(REG_SP, REG_SP, stackoffset);
2276 
2277  /* check for exception */
2278 
2279 
2280  asme.cbnz(REG_ITMP1_XPTR, 2); /* if no exception then return */
2281  asme.ret(); /* return to caller */
2282 
2283  /* handle exception */
2284 
2285  asme.lsub_imm(REG_ITMP2_XPC, REG_RA, 4); /* get exception address */
2286 
 /* NOTE(review): original line 2287 was lost in extraction; it presumably
    branches to the native exception handler (asm_handle_nativeexception or
    equivalent) after computing REG_ITMP2_XPC -- confirm against upstream. */
2288 }
2289 
2290 
2291 /*
2292  * These are local overrides for various environment variables in Emacs.
2293  * Please do not remove this and leave it at the end of the file, where
2294  * Emacs will automagically detect them.
2295  * ---------------------------------------------------------------------
2296  * Local variables:
2297  * mode: c++
2298  * indent-tabs-mode: t
2299  * c-basic-offset: 4
2300  * tab-width: 4
2301  * End:
2302  * vim:noexpandtab:sw=4:ts=4:
2303  */
void ladd(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:204
void ald(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:109
void codegen_emit_instruction(jitdata *jd, instruction *iptr)
Generates machine code for one ICMD.
Definition: codegen.cpp:217
s4 dseg_add_double(codegendata *cd, double value)
Definition: dseg.cpp:465
#define REG_SP
Definition: md-abi.hpp:53
val_operand_t val
void f2d(u1 dd, u1 sn)
Definition: codegen.hpp:316
#define BUILTIN_FAST_canstore
Definition: builtin.hpp:153
void fdiv(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:297
#define PATCHER_resolve_classref_to_flags
s4 emit_load_s3(jitdata *jd, instruction *iptr, s4 tempreg)
void lmsub(u1 xd, u1 xn, u1 xm, u1 xa)
Definition: codegen.hpp:226
basicblock * block
union varinfo::@19 vv
#define REG_PV
Definition: md-abi.hpp:42
s4 emit_load_s1(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:63
void lcmp_imm(u1 xd, u2 imm)
Definition: codegen.hpp:96
void icmp(u1 wn, u1 wm)
Definition: codegen.hpp:100
void llsl_imm(u1 xd, u1 xn, u1 shift)
Definition: codegen.hpp:230
#define BRANCH_OPT_NONE
#define PATCHER_invokeinterface
#define REG_A1
Definition: md-abi.hpp:36
void i2d(u1 dt, u1 wn)
Definition: codegen.hpp:308
Definition: jit.hpp:126
#define REG_A0
Definition: md-abi.hpp:35
paramdesc * params
Definition: descriptor.hpp:164
#define BRANCH_LE
#define CODE_LSL
Definition: emit-asm.hpp:42
#define emit_ldr_reg(cd, Xt, Xn, Xm)
Definition: emit-asm.hpp:170
#define BRANCH_LABEL_7
Definition: emit-common.hpp:53
void fmov(u1 sd, u1 sn)
Definition: codegen.hpp:284
s4 dseg_add_unique_address(codegendata *cd, void *value)
Definition: dseg.cpp:525
int * savintregs
Definition: reg.hpp:71
void uxth(u1 wd, u1 wn)
Definition: codegen.hpp:248
methodinfo * methods
Definition: class.hpp:113
#define PATCHER_resolve_classref_to_vftbl
#define BUILTIN_multianewarray
Definition: builtin.hpp:201
#define IS_INT_LNG_TYPE(a)
Definition: global.hpp:130
#define BRANCH_NE
void fneg(u1 sd, u1 sn)
Definition: codegen.hpp:287
void land(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:262
void ior(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:264
void llsl(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:245
#define PATCHER_get_putfield
void ilsl(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:244
#define BRANCH_EQ
s4 dseg_add_address(codegendata *cd, void *value)
Definition: dseg.cpp:542
codeinfo * code
Definition: jit.hpp:128
#define BRANCH_LABEL_5
Definition: emit-common.hpp:51
void dst(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:125
#define REG_FRESULT
Definition: md-abi.hpp:59
void lxor(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:268
int32_t argcount
Definition: instruction.hpp:64
void emit_bcc(codegendata *cd, basicblock *target, s4 condition, u4 options)
void lcmn_imm(u1 xd, u2 imm)
Definition: codegen.hpp:98
void lconst(u1 xt, s8 value)
Definition: codegen.hpp:146
#define dseg_add_functionptr(cd, value)
Definition: dseg.hpp:39
codegendata * cd
Definition: jit.hpp:129
void codegen_emit_stub_native(jitdata *jd, methoddesc *nmd, functionptr f, int skipparams)
Definition: codegen.cpp:2010
#define BRANCH_LABEL_10
Definition: emit-common.hpp:56
const char * name
Definition: icmd.hpp:393
void ldrsh32(u1 wt, u1 xn, s2 imm)
Definition: codegen.hpp:118
typedef void(JNICALL *jvmtiEventSingleStep)(jvmtiEnv *jvmti_env
void ladd_shift(u1 xd, u1 xn, u1 xm, u1 shift, u1 amount)
Xd = Xn + shift(Xm, amount);.
Definition: codegen.hpp:207
void lmul(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:222
#define REG_ITMP1_XPTR
Definition: md-abi.hpp:50
#define REG_A2
Definition: md-abi.hpp:37
void icmp_imm(u1 wd, u2 imm)
Definition: codegen.hpp:95
void ixor(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:267
void idiv(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:218
void itst(u1 wn, u1 wm)
Definition: codegen.hpp:255
void emit_arraystore_check(codegendata *cd, instruction *iptr)
Definition: emit.cpp:380
int savintreguse
Definition: reg.hpp:88
patchref_t * patcher_add_patch_ref(jitdata *jd, functionptr patcher, void *ref, s4 disp)
constant_classref * ref
Definition: references.hpp:62
void sxtw(u1 xd, u1 wn)
Definition: codegen.hpp:251
void dseg_add_target(codegendata *cd, basicblock *target)
Definition: dseg.cpp:565
void cbnz(u1 xn, s4 imm)
Definition: codegen.hpp:197
u1 * methodptr
Definition: global.hpp:40
void i2f(u1 st, u1 wn)
Definition: codegen.hpp:306
void fst(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:124
#define BRANCH_LABEL_6
Definition: emit-common.hpp:52
void llsr_imm(u1 xd, u1 xn, u1 shift)
Definition: codegen.hpp:233
java_object_t * codegen_finish_native_call(u1 *sp, u1 *pv)
void blr(u1 xn)
Definition: codegen.hpp:193
void dmov(u1 dd, u1 dn)
Definition: codegen.hpp:285
u1 * stub
Definition: builtin.hpp:64
void dmul(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:295
void dcmp(u1 xn, u1 xm)
Definition: codegen.hpp:292
void dadd(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:301
const s4 abi_registers_integer_saved[]
Definition: md-abi.cpp:75
void d2l(u1 xd, u1 dn)
Definition: codegen.hpp:314
#define VAR(i)
Definition: jit.hpp:252
Definition: reg.hpp:43
void ist(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:120
static int code_is_leafmethod(codeinfo *code)
Definition: code.hpp:151
#define BUILTIN_arraycheckcast
Definition: builtin.hpp:148
#define REG_ITMP2_XPC
Definition: md-abi.hpp:51
void iadd_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:200
s4 dseg_add_s4(codegendata *cd, s4 value)
Definition: dseg.cpp:246
#define ALIGNCODENOP
Definition: codegen.hpp:47
s4 regoff
Definition: reg.hpp:47
void(* functionptr)(void)
Definition: global.hpp:39
typedesc paramtypes[1]
Definition: descriptor.hpp:167
void ubfx(u1 wd, u1 xn)
Definition: codegen.hpp:252
#define INT_SAV_CNT
Definition: md-abi.hpp:73
java_handle_t * codegen_start_native_call(u1 *sp, u1 *pv)
#define PATCHER_instanceof_interface
#define IS_2_WORD_TYPE(a)
Definition: global.hpp:132
void ild(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:107
void b_vs(s4 imm)
Definition: codegen.hpp:184
void emit_exception_check(codegendata *cd, instruction *iptr)
Definition: emit.cpp:447
void l2d(u1 dt, u1 xn)
Definition: codegen.hpp:309
void ast(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:122
void sxtb(u1 wd, u1 wn)
Definition: codegen.hpp:249
s4 codegen_reg_of_dst(jitdata *jd, instruction *iptr, s4 tempregnum)
void ilsl_imm(u1 wd, u1 wn, u1 shift)
Definition: codegen.hpp:229
BeginInst *& block
#define COND_GT
Definition: emit-asm.hpp:62
classref_or_classinfo c
void ladd_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:201
void fcmp(u1 sn, u1 sm)
Definition: codegen.hpp:290
void cset(u1 xt, u1 cond)
Definition: codegen.hpp:276
u1 * stubroutine
Definition: method.hpp:102
s4 vftblindex
Definition: method.hpp:81
void ilsr(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:241
#define COND_EQ
Definition: emit-asm.hpp:50
void llsr(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:242
void icsneg(u1 wd, u1 wn, u1 wm, u1 cond)
Definition: codegen.hpp:280
void dneg(u1 dd, u1 dn)
Definition: codegen.hpp:288
dst_operand_t dst
flags_operand_t flags
void f2i(u1 wd, u1 sn)
Definition: codegen.hpp:311
void imul(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:221
constant_FMIref * fieldref
Definition: resolve.hpp:88
int32_t offset
Definition: field.hpp:66
classinfo * clazz
Definition: method.hpp:80
void emit_label_br(codegendata *cd, s4 label)
#define BRANCH_LABEL_3
Definition: emit-common.hpp:49
s4 emit_load_s2(jitdata *jd, instruction *iptr, s4 tempreg)
Definition: emit-common.cpp:82
void b_le(s4 imm)
Definition: codegen.hpp:191
void ilsr_imm(u1 wd, u1 wn, u1 shift)
Definition: codegen.hpp:232
void l2f(u1 st, u1 xn)
Definition: codegen.hpp:307
void lst(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:121
void lsub_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:212
void imsub(u1 wd, u1 wn, u1 wm, u1 wa)
Definition: codegen.hpp:225
s4 flags
Definition: class.hpp:90
typedesc * fd
Definition: references.hpp:74
s4 * local_map
Definition: jit.hpp:153
void emit_trap(codegendata *cd, u1 Xd, int type)
Definition: emit-asm.hpp:563
#define REG_FTMP2
Definition: md-abi.hpp:66
MIIterator i
s4 emit_load(jitdata *jd, instruction *iptr, varinfo *src, s4 tempreg)
Definition: emit.cpp:66
#define BRANCH_UGT
typedesc returntype
Definition: descriptor.hpp:166
#define BRANCH_LABEL_4
Definition: emit-common.hpp:50
int32_t s4
Definition: types.hpp:45
#define BRANCH_LABEL_9
Definition: emit-common.hpp:55
s4 dseg_add_unique_s4(codegendata *cd, s4 value)
Definition: dseg.cpp:229
int * savfltregs
Definition: reg.hpp:73
registerdata * rd
Definition: jit.hpp:130
void isub_imm(u1 xd, u1 xn, u4 imm)
Definition: codegen.hpp:211
s4 index
Definition: class.hpp:116
void fsub(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:303
void dld(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:112
union instruction::@12 sx
void fmul(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:294
#define REG_RA
Definition: md-abi.hpp:41
void emit_arithmetic_check(codegendata *cd, instruction *iptr, s4 reg)
Definition: emit.cpp:346
static void abort()
Definition: os.hpp:196
void ldrsb32(u1 wt, u1 xn, s2 imm)
Definition: codegen.hpp:117
#define PATCHER_checkcast_interface
void ldiv(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:219
int savfltreguse
Definition: reg.hpp:91
bool inmemory
Definition: descriptor.hpp:151
void iadd(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:203
icmdtable_entry_t icmd_table[256]
Definition: icmd.cpp:60
#define REG_ITMP2
Definition: md-abi.hpp:47
void emit_icmp_imm(codegendata *cd, int reg, int32_t value)
Emits code comparing a single register.
Definition: emit.cpp:243
void emit_store_dst(jitdata *jd, instruction *iptr, s4 d)
void lor(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:265
s1_operand_t s1
void lld(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:108
uint32_t u4
Definition: types.hpp:46
basicblock * block
Definition: instruction.hpp:50
#define PATCHER_invokestatic_special
#define FLT_SAV_CNT
Definition: md-abi.hpp:80
#define BRANCH_LABEL_1
Definition: emit-common.hpp:47
vftbl_t * vftbl
Definition: class.hpp:121
void lda(u1 xd, u1 xn, s4 imm)
Definition: codegen.hpp:172
methoddesc * parseddesc
Definition: method.hpp:78
#define COND_NE
Definition: emit-asm.hpp:51
#define REG_FTMP1
Definition: md-abi.hpp:65
#define PATCHER_invokevirtual
Definition: builtin.hpp:60
void f2l(u1 xd, u1 sn)
Definition: codegen.hpp:312
methodinfo * m
Definition: jit.hpp:127
void imov(u1 wd, u1 wn)
Definition: codegen.hpp:92
void fadd(u1 st, u1 sn, u1 sm)
Definition: codegen.hpp:300
void dsub(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:304
static bool IS_INMEMORY(s4 flags)
Definition: stack.hpp:51
s4 type
Definition: field.hpp:60
void codegen_emit_epilog(jitdata *jd)
Generates machine code for the method epilog.
Definition: codegen.cpp:174
void iasr_imm(u1 wd, u1 wn, u1 shift)
Definition: codegen.hpp:235
void nop()
Definition: codegen.hpp:104
#define s3
Definition: md-asm.hpp:71
#define BRANCH_LABEL_8
Definition: emit-common.hpp:54
s4 flags
Definition: reg.hpp:45
void mov(u1 xt, u1 xn)
Definition: codegen.hpp:93
void iconst(u1 xt, s4 value)
Definition: codegen.hpp:130
void emit_classcast_check(codegendata *cd, instruction *iptr, s4 condition, s4 reg, s4 s1)
Definition: emit.cpp:396
#define REG_METHODPTR
Definition: md-abi.hpp:43
#define COND_HI
Definition: emit-asm.hpp:58
int8_t s1
Definition: types.hpp:39
void lasr_imm(u1 xd, u1 xn, u1 shift)
Definition: codegen.hpp:236
int16_t s2
Definition: types.hpp:42
void strb(u1 wt, u1 xn, s2 imm)
Definition: codegen.hpp:128
#define INSTRUCTION_IS_UNRESOLVED(iptr)
void isub(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:214
void iasr(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:238
struct instruction::@12::@13 s23
#define REG_ZERO
Definition: md-abi.hpp:54
void codegen_emit_prolog(jitdata *jd)
Generates machine code for the method prolog.
Definition: codegen.cpp:73
#define REG_FTMP3
Definition: md-abi.hpp:67
void ltst(u1 xn, u1 xm)
Definition: codegen.hpp:254
void clr(u1 xd)
Definition: codegen.hpp:270
const parseddesc_t parseddesc
Definition: references.hpp:105
void ret()
Definition: codegen.hpp:195
#define REG_ITMP3
Definition: md-abi.hpp:48
#define PATCHER_resolve_classref_to_classinfo
#define MCODECHECK(icnt)
Definition: codegen.hpp:40
void iand(u1 wd, u1 wn, u1 wm)
Definition: codegen.hpp:261
void lsub(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:215
#define BRANCH_LABEL_2
Definition: emit-common.hpp:48
void strh(u1 wt, u1 xn, s2 imm)
Definition: codegen.hpp:127
functionptr fp
Definition: builtin.hpp:63
void emit_label(codegendata *cd, s4 label)
void lcmp(u1 xn, u1 xm)
Definition: codegen.hpp:101
s4 flags
Definition: method.hpp:70
void br(u1 xn)
Definition: codegen.hpp:194
void fld(u1 xt, u1 xn, s2 imm)
Definition: codegen.hpp:111
void lasr(u1 xd, u1 xn, u1 xm)
Definition: codegen.hpp:239
void emit_arrayindexoutofbounds_check(codegendata *cd, instruction *iptr, s4 s1, s4 s2)
Definition: emit.cpp:362
void icsel(u1 wt, u1 wn, u1 wm, u1 cond)
Definition: codegen.hpp:274
#define abs(x)
Definition: codegen.hpp:378
void ldrh(u1 wt, u1 xn, s2 imm)
Definition: codegen.hpp:114
#define COND_PL
Definition: emit-asm.hpp:55
void d2i(u1 wd, u1 dn)
Definition: codegen.hpp:313
uint32_t regoff
Definition: descriptor.hpp:153
s4 dseg_add_float(codegendata *cd, float value)
Definition: dseg.cpp:392
#define OFFSET(s, el)
Definition: memory.hpp:90
branch_target_t * table
#define REG_RESULT
Definition: md-abi.hpp:33
void sxth(u1 wd, u1 wn)
Definition: codegen.hpp:250
void ddiv(u1 dt, u1 dn, u1 dm)
Definition: codegen.hpp:298
void d2f(u1 sd, u1 dn)
Definition: codegen.hpp:317
void emit_nullpointer_check(codegendata *cd, instruction *iptr, s4 reg)
Definition: emit.cpp:431
#define REG_ITMP1
Definition: md-abi.hpp:46
static VM * get_current()
Definition: vm.hpp:99